// SPDX-License-Identifier: GPL-2.0-only
/*
* Pistachio SoC Reset Controller driver
*
* Copyright (C) 2015 Imagination Technologies Ltd.
*
* Author: Damien Horsley <[email protected]>
*/
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
#include <linux/mfd/syscon.h>
#include <dt-bindings/reset/pistachio-resets.h>
#define PISTACHIO_SOFT_RESET 0
struct pistachio_reset_data {
struct reset_controller_dev rcdev;
struct regmap *periph_regs;
};
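/*
 * Map a dt-bindings reset id onto its bit position in the SOFT_RESET
 * register. The mapping is not 1:1 for every line: the two later id
 * groups sit 6 and 7 bits above their ids.
 */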
static inline int pistachio_reset_shift(unsigned long id)
{
switch (id) {
case PISTACHIO_RESET_I2C0:
case PISTACHIO_RESET_I2C1:
case PISTACHIO_RESET_I2C2:
case PISTACHIO_RESET_I2C3:
case PISTACHIO_RESET_I2S_IN:
case PISTACHIO_RESET_PRL_OUT:
case PISTACHIO_RESET_SPDIF_OUT:
case PISTACHIO_RESET_SPI:
case PISTACHIO_RESET_PWM_PDM:
case PISTACHIO_RESET_UART0:
case PISTACHIO_RESET_UART1:
case PISTACHIO_RESET_QSPI:
case PISTACHIO_RESET_MDC:
case PISTACHIO_RESET_SDHOST:
case PISTACHIO_RESET_ETHERNET:
case PISTACHIO_RESET_IR:
case PISTACHIO_RESET_HASH:
case PISTACHIO_RESET_TIMER:
return id;
case PISTACHIO_RESET_I2S_OUT:
case PISTACHIO_RESET_SPDIF_IN:
case PISTACHIO_RESET_EVT:
return id + 6;
case PISTACHIO_RESET_USB_H:
case PISTACHIO_RESET_USB_PR:
case PISTACHIO_RESET_USB_PHY_PR:
case PISTACHIO_RESET_USB_PHY_PON:
return id + 7;
default:
return -EINVAL;
}
}
static int pistachio_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct pistachio_reset_data *rd;
u32 mask;
int shift;
rd = container_of(rcdev, struct pistachio_reset_data, rcdev);
shift = pistachio_reset_shift(id);
if (shift < 0)
return shift;
mask = BIT(shift);
return regmap_update_bits(rd->periph_regs, PISTACHIO_SOFT_RESET,
mask, mask);
}
static int pistachio_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct pistachio_reset_data *rd;
u32 mask;
int shift;
rd = container_of(rcdev, struct pistachio_reset_data, rcdev);
shift = pistachio_reset_shift(id);
if (shift < 0)
return shift;
mask = BIT(shift);
return regmap_update_bits(rd->periph_regs, PISTACHIO_SOFT_RESET,
mask, 0);
}
static const struct reset_control_ops pistachio_reset_ops = {
.assert = pistachio_reset_assert,
.deassert = pistachio_reset_deassert,
};
static int pistachio_reset_probe(struct platform_device *pdev)
{
struct pistachio_reset_data *rd;
struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
rd = devm_kzalloc(dev, sizeof(*rd), GFP_KERNEL);
if (!rd)
return -ENOMEM;
rd->periph_regs = syscon_node_to_regmap(np->parent);
if (IS_ERR(rd->periph_regs))
return PTR_ERR(rd->periph_regs);
rd->rcdev.owner = THIS_MODULE;
rd->rcdev.nr_resets = PISTACHIO_RESET_MAX + 1;
rd->rcdev.ops = &pistachio_reset_ops;
rd->rcdev.of_node = np;
return devm_reset_controller_register(dev, &rd->rcdev);
}
static const struct of_device_id pistachio_reset_dt_ids[] = {
{ .compatible = "img,pistachio-reset", },
{ /* sentinel */ },
};
static struct platform_driver pistachio_reset_driver = {
.probe = pistachio_reset_probe,
.driver = {
.name = "pistachio-reset",
.of_match_table = pistachio_reset_dt_ids,
},
};
builtin_platform_driver(pistachio_reset_driver);
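/*
 * Minimal consumer sketch (a hypothetical peripheral driver; the device
 * and con_id are assumptions, not part of this file). The controller
 * above is addressed through the generic reset API:
 *
 *	struct reset_control *rstc;
 *	int ret;
 *
 *	rstc = devm_reset_control_get_exclusive(dev, NULL);
 *	if (IS_ERR(rstc))
 *		return PTR_ERR(rstc);
 *
 *	ret = reset_control_assert(rstc);	// sets the SOFT_RESET bit
 *	...
 *	ret = reset_control_deassert(rstc);	// clears it again
 */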
/* linux-master: drivers/reset/reset-pistachio.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2020 Western Digital Corporation or its affiliates.
*/
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/delay.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <soc/canaan/k210-sysctl.h>
#include <dt-bindings/reset/k210-rst.h>
#define K210_RST_MASK 0x27FFFFFF
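/* 0x27FFFFFF = bits 0-26 plus bit 29: the only valid reset lines */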
struct k210_rst {
struct regmap *map;
struct reset_controller_dev rcdev;
};
static inline struct k210_rst *
to_k210_rst(struct reset_controller_dev *rcdev)
{
return container_of(rcdev, struct k210_rst, rcdev);
}
static inline int k210_rst_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct k210_rst *ksr = to_k210_rst(rcdev);
return regmap_update_bits(ksr->map, K210_SYSCTL_PERI_RESET, BIT(id), BIT(id));
}
static inline int k210_rst_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct k210_rst *ksr = to_k210_rst(rcdev);
return regmap_update_bits(ksr->map, K210_SYSCTL_PERI_RESET, BIT(id), 0);
}
static int k210_rst_reset(struct reset_controller_dev *rcdev,
unsigned long id)
{
int ret;
ret = k210_rst_assert(rcdev, id);
if (ret == 0) {
udelay(10);
ret = k210_rst_deassert(rcdev, id);
}
return ret;
}
static int k210_rst_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct k210_rst *ksr = to_k210_rst(rcdev);
u32 reg, bit = BIT(id);
int ret;
ret = regmap_read(ksr->map, K210_SYSCTL_PERI_RESET, &reg);
if (ret)
return ret;
return reg & bit;
}
static int k210_rst_xlate(struct reset_controller_dev *rcdev,
const struct of_phandle_args *reset_spec)
{
unsigned long id = reset_spec->args[0];
if (!(BIT(id) & K210_RST_MASK))
return -EINVAL;
return id;
}
static const struct reset_control_ops k210_rst_ops = {
.assert = k210_rst_assert,
.deassert = k210_rst_deassert,
.reset = k210_rst_reset,
.status = k210_rst_status,
};
static int k210_rst_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *parent_np = of_get_parent(dev->of_node);
struct k210_rst *ksr;
dev_info(dev, "K210 reset controller\n");
ksr = devm_kzalloc(dev, sizeof(*ksr), GFP_KERNEL);
if (!ksr)
return -ENOMEM;
ksr->map = syscon_node_to_regmap(parent_np);
of_node_put(parent_np);
if (IS_ERR(ksr->map))
return PTR_ERR(ksr->map);
ksr->rcdev.owner = THIS_MODULE;
ksr->rcdev.dev = dev;
ksr->rcdev.of_node = dev->of_node;
ksr->rcdev.ops = &k210_rst_ops;
ksr->rcdev.nr_resets = fls(K210_RST_MASK);
ksr->rcdev.of_reset_n_cells = 1;
ksr->rcdev.of_xlate = k210_rst_xlate;
return devm_reset_controller_register(dev, &ksr->rcdev);
}
static const struct of_device_id k210_rst_dt_ids[] = {
{ .compatible = "canaan,k210-rst" },
{ /* sentinel */ },
};
static struct platform_driver k210_rst_driver = {
.probe = k210_rst_probe,
.driver = {
.name = "k210-rst",
.of_match_table = k210_rst_dt_ids,
},
};
builtin_platform_driver(k210_rst_driver);
/* linux-master: drivers/reset/reset-k210.c */
// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch Reset driver
*
* Copyright (c) 2020 Microchip Technology Inc. and its subsidiaries.
*
* The Sparx5 Chip Register Model can be browsed at this location:
* https://github.com/microchip-ung/sparx-5_reginfo
*/
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>
struct reset_props {
u32 protect_reg;
u32 protect_bit;
u32 reset_reg;
u32 reset_bit;
};
struct mchp_reset_context {
struct regmap *cpu_ctrl;
struct regmap *gcb_ctrl;
struct reset_controller_dev rcdev;
const struct reset_props *props;
};
static struct regmap_config sparx5_reset_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
};
static int sparx5_switch_reset(struct mchp_reset_context *ctx)
{
u32 val;
/* Make sure the core is PROTECTED from reset */
regmap_update_bits(ctx->cpu_ctrl, ctx->props->protect_reg,
ctx->props->protect_bit, ctx->props->protect_bit);
/* Start soft reset */
regmap_write(ctx->gcb_ctrl, ctx->props->reset_reg,
ctx->props->reset_bit);
/* Wait for soft reset done */
return regmap_read_poll_timeout(ctx->gcb_ctrl, ctx->props->reset_reg, val,
(val & ctx->props->reset_bit) == 0,
1, 100);
}
static int sparx5_reset_noop(struct reset_controller_dev *rcdev,
unsigned long id)
{
return 0;
}
static const struct reset_control_ops sparx5_reset_ops = {
.reset = sparx5_reset_noop,
};
static int mchp_sparx5_map_syscon(struct platform_device *pdev, char *name,
struct regmap **target)
{
struct device_node *syscon_np;
struct regmap *regmap;
int err;
syscon_np = of_parse_phandle(pdev->dev.of_node, name, 0);
if (!syscon_np)
return -ENODEV;
regmap = syscon_node_to_regmap(syscon_np);
of_node_put(syscon_np);
if (IS_ERR(regmap)) {
err = PTR_ERR(regmap);
dev_err(&pdev->dev, "No '%s' map: %d\n", name, err);
return err;
}
*target = regmap;
return 0;
}
static int mchp_sparx5_map_io(struct platform_device *pdev, int index,
struct regmap **target)
{
struct resource *res;
struct regmap *map;
void __iomem *mem;
mem = devm_platform_get_and_ioremap_resource(pdev, index, &res);
if (IS_ERR(mem)) {
dev_err(&pdev->dev, "Could not map resource %d\n", index);
return PTR_ERR(mem);
}
sparx5_reset_regmap_config.name = res->name;
map = devm_regmap_init_mmio(&pdev->dev, mem, &sparx5_reset_regmap_config);
if (IS_ERR(map))
return PTR_ERR(map);
*target = map;
return 0;
}
static int mchp_sparx5_reset_probe(struct platform_device *pdev)
{
struct device_node *dn = pdev->dev.of_node;
struct mchp_reset_context *ctx;
int err;
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
err = mchp_sparx5_map_syscon(pdev, "cpu-syscon", &ctx->cpu_ctrl);
if (err)
return err;
err = mchp_sparx5_map_io(pdev, 0, &ctx->gcb_ctrl);
if (err)
return err;
ctx->rcdev.owner = THIS_MODULE;
ctx->rcdev.nr_resets = 1;
ctx->rcdev.ops = &sparx5_reset_ops;
ctx->rcdev.of_node = dn;
ctx->props = device_get_match_data(&pdev->dev);
/* Issue the reset very early, our actual reset callback is a noop. */
err = sparx5_switch_reset(ctx);
if (err)
return err;
return devm_reset_controller_register(&pdev->dev, &ctx->rcdev);
}
static const struct reset_props reset_props_sparx5 = {
.protect_reg = 0x84,
.protect_bit = BIT(10),
.reset_reg = 0x0,
.reset_bit = BIT(1),
};
static const struct reset_props reset_props_lan966x = {
.protect_reg = 0x88,
.protect_bit = BIT(5),
.reset_reg = 0x0,
.reset_bit = BIT(1),
};
static const struct of_device_id mchp_sparx5_reset_of_match[] = {
{
.compatible = "microchip,sparx5-switch-reset",
.data = &reset_props_sparx5,
}, {
.compatible = "microchip,lan966x-switch-reset",
.data = &reset_props_lan966x,
},
{ }
};
static struct platform_driver mchp_sparx5_reset_driver = {
.probe = mchp_sparx5_reset_probe,
.driver = {
.name = "sparx5-switch-reset",
.of_match_table = mchp_sparx5_reset_of_match,
},
};
static int __init mchp_sparx5_reset_init(void)
{
return platform_driver_register(&mchp_sparx5_reset_driver);
}
/*
* Because this is a global reset, keep this postcore_initcall() to issue the
* reset as early as possible during the kernel startup.
*/
postcore_initcall(mchp_sparx5_reset_init);
MODULE_DESCRIPTION("Microchip Sparx5 switch reset driver");
MODULE_AUTHOR("Steen Hegelund <[email protected]>");
/* linux-master: drivers/reset/reset-microchip-sparx5.c */
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 The Linux Foundation. All rights reserved.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
#include <dt-bindings/reset/qcom,sdm845-aoss.h>
struct qcom_aoss_reset_map {
unsigned int reg;
};
struct qcom_aoss_desc {
const struct qcom_aoss_reset_map *resets;
size_t num_resets;
};
struct qcom_aoss_reset_data {
struct reset_controller_dev rcdev;
void __iomem *base;
const struct qcom_aoss_desc *desc;
};
static const struct qcom_aoss_reset_map sdm845_aoss_resets[] = {
[AOSS_CC_MSS_RESTART] = {0x10000},
[AOSS_CC_CAMSS_RESTART] = {0x11000},
[AOSS_CC_VENUS_RESTART] = {0x12000},
[AOSS_CC_GPU_RESTART] = {0x13000},
[AOSS_CC_DISPSS_RESTART] = {0x14000},
[AOSS_CC_WCSS_RESTART] = {0x20000},
[AOSS_CC_LPASS_RESTART] = {0x30000},
};
static const struct qcom_aoss_desc sdm845_aoss_desc = {
.resets = sdm845_aoss_resets,
.num_resets = ARRAY_SIZE(sdm845_aoss_resets),
};
static inline struct qcom_aoss_reset_data *to_qcom_aoss_reset_data(
struct reset_controller_dev *rcdev)
{
return container_of(rcdev, struct qcom_aoss_reset_data, rcdev);
}
static int qcom_aoss_control_assert(struct reset_controller_dev *rcdev,
unsigned long idx)
{
struct qcom_aoss_reset_data *data = to_qcom_aoss_reset_data(rcdev);
const struct qcom_aoss_reset_map *map = &data->desc->resets[idx];
writel(1, data->base + map->reg);
/* Wait 6 32kHz sleep cycles for reset */
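/* (6 cycles / 32768 Hz is roughly 183 us, hence the 200-300 us window) */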
usleep_range(200, 300);
return 0;
}
static int qcom_aoss_control_deassert(struct reset_controller_dev *rcdev,
unsigned long idx)
{
struct qcom_aoss_reset_data *data = to_qcom_aoss_reset_data(rcdev);
const struct qcom_aoss_reset_map *map = &data->desc->resets[idx];
writel(0, data->base + map->reg);
/* Wait 6 32kHz sleep cycles for reset */
usleep_range(200, 300);
return 0;
}
static int qcom_aoss_control_reset(struct reset_controller_dev *rcdev,
unsigned long idx)
{
qcom_aoss_control_assert(rcdev, idx);
return qcom_aoss_control_deassert(rcdev, idx);
}
static const struct reset_control_ops qcom_aoss_reset_ops = {
.reset = qcom_aoss_control_reset,
.assert = qcom_aoss_control_assert,
.deassert = qcom_aoss_control_deassert,
};
static int qcom_aoss_reset_probe(struct platform_device *pdev)
{
struct qcom_aoss_reset_data *data;
struct device *dev = &pdev->dev;
const struct qcom_aoss_desc *desc;
struct resource *res;
desc = of_device_get_match_data(dev);
if (!desc)
return -EINVAL;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->desc = desc;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
data->base = devm_ioremap_resource(dev, res);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
data->rcdev.owner = THIS_MODULE;
data->rcdev.ops = &qcom_aoss_reset_ops;
data->rcdev.nr_resets = desc->num_resets;
data->rcdev.of_node = dev->of_node;
return devm_reset_controller_register(dev, &data->rcdev);
}
static const struct of_device_id qcom_aoss_reset_of_match[] = {
{ .compatible = "qcom,sdm845-aoss-cc", .data = &sdm845_aoss_desc },
{}
};
MODULE_DEVICE_TABLE(of, qcom_aoss_reset_of_match);
static struct platform_driver qcom_aoss_reset_driver = {
.probe = qcom_aoss_reset_probe,
.driver = {
.name = "qcom_aoss_reset",
.of_match_table = qcom_aoss_reset_of_match,
},
};
module_platform_driver(qcom_aoss_reset_driver);
MODULE_DESCRIPTION("Qualcomm AOSS Reset Driver");
MODULE_LICENSE("GPL v2");
/* linux-master: drivers/reset/reset-qcom-aoss.c */
// SPDX-License-Identifier: GPL-2.0
/*
* ARM System Control and Management Interface (ARM SCMI) reset driver
*
* Copyright (C) 2019-2021 ARM Ltd.
*/
#include <linux/module.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/reset-controller.h>
#include <linux/scmi_protocol.h>
static const struct scmi_reset_proto_ops *reset_ops;
/**
* struct scmi_reset_data - reset controller information structure
* @rcdev: reset controller entity
* @ph: ARM SCMI protocol handle used for communication with system controller
*/
struct scmi_reset_data {
struct reset_controller_dev rcdev;
const struct scmi_protocol_handle *ph;
};
#define to_scmi_reset_data(p) container_of((p), struct scmi_reset_data, rcdev)
#define to_scmi_handle(p) (to_scmi_reset_data(p)->ph)
/**
* scmi_reset_assert() - assert device reset
* @rcdev: reset controller entity
* @id: ID of the reset to be asserted
*
* This function implements the reset driver op to assert a device's reset
* using the ARM SCMI protocol.
*
* Return: 0 for successful request, else a corresponding error value
*/
static int
scmi_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
const struct scmi_protocol_handle *ph = to_scmi_handle(rcdev);
return reset_ops->assert(ph, id);
}
/**
* scmi_reset_deassert() - deassert device reset
* @rcdev: reset controller entity
* @id: ID of the reset to be deasserted
*
* This function implements the reset driver op to deassert a device's reset
* using the ARM SCMI protocol.
*
* Return: 0 for successful request, else a corresponding error value
*/
static int
scmi_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
{
const struct scmi_protocol_handle *ph = to_scmi_handle(rcdev);
return reset_ops->deassert(ph, id);
}
/**
* scmi_reset_reset() - reset the device
* @rcdev: reset controller entity
* @id: ID of the reset signal to be reset(assert + deassert)
*
* This function implements the reset driver op to trigger a device's
* reset signal using the ARM SCMI protocol.
*
* Return: 0 for successful request, else a corresponding error value
*/
static int
scmi_reset_reset(struct reset_controller_dev *rcdev, unsigned long id)
{
const struct scmi_protocol_handle *ph = to_scmi_handle(rcdev);
return reset_ops->reset(ph, id);
}
static const struct reset_control_ops scmi_reset_ops = {
.assert = scmi_reset_assert,
.deassert = scmi_reset_deassert,
.reset = scmi_reset_reset,
};
static int scmi_reset_probe(struct scmi_device *sdev)
{
struct scmi_reset_data *data;
struct device *dev = &sdev->dev;
struct device_node *np = dev->of_node;
const struct scmi_handle *handle = sdev->handle;
struct scmi_protocol_handle *ph;
if (!handle)
return -ENODEV;
reset_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_RESET, &ph);
if (IS_ERR(reset_ops))
return PTR_ERR(reset_ops);
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->rcdev.ops = &scmi_reset_ops;
data->rcdev.owner = THIS_MODULE;
data->rcdev.of_node = np;
data->rcdev.nr_resets = reset_ops->num_domains_get(ph);
data->ph = ph;
return devm_reset_controller_register(dev, &data->rcdev);
}
static const struct scmi_device_id scmi_id_table[] = {
{ SCMI_PROTOCOL_RESET, "reset" },
{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);
static struct scmi_driver scmi_reset_driver = {
.name = "scmi-reset",
.probe = scmi_reset_probe,
.id_table = scmi_id_table,
};
module_scmi_driver(scmi_reset_driver);
MODULE_AUTHOR("Sudeep Holla <[email protected]>");
MODULE_DESCRIPTION("ARM SCMI reset controller driver");
MODULE_LICENSE("GPL v2");
/* linux-master: drivers/reset/reset-scmi.c */
// SPDX-License-Identifier: GPL-2.0
//
// reset-uniphier-glue.c - Glue layer reset driver for UniPhier
// Copyright 2018 Socionext Inc.
// Author: Kunihiko Hayashi <[email protected]>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/reset/reset-simple.h>
#define MAX_CLKS 2
#define MAX_RSTS 2
struct uniphier_glue_reset_soc_data {
int nclks;
const char * const *clock_names;
int nrsts;
const char * const *reset_names;
};
struct uniphier_glue_reset_priv {
struct clk_bulk_data clk[MAX_CLKS];
struct reset_control_bulk_data rst[MAX_RSTS];
struct reset_simple_data rdata;
const struct uniphier_glue_reset_soc_data *data;
};
static void uniphier_clk_disable(void *_priv)
{
struct uniphier_glue_reset_priv *priv = _priv;
clk_bulk_disable_unprepare(priv->data->nclks, priv->clk);
}
static void uniphier_rst_assert(void *_priv)
{
struct uniphier_glue_reset_priv *priv = _priv;
reset_control_bulk_assert(priv->data->nrsts, priv->rst);
}
static int uniphier_glue_reset_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct uniphier_glue_reset_priv *priv;
struct resource *res;
int i, ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->data = of_device_get_match_data(dev);
if (WARN_ON(!priv->data || priv->data->nclks > MAX_CLKS ||
priv->data->nrsts > MAX_RSTS))
return -EINVAL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->rdata.membase = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->rdata.membase))
return PTR_ERR(priv->rdata.membase);
for (i = 0; i < priv->data->nclks; i++)
priv->clk[i].id = priv->data->clock_names[i];
ret = devm_clk_bulk_get(dev, priv->data->nclks, priv->clk);
if (ret)
return ret;
for (i = 0; i < priv->data->nrsts; i++)
priv->rst[i].id = priv->data->reset_names[i];
ret = devm_reset_control_bulk_get_shared(dev, priv->data->nrsts,
priv->rst);
if (ret)
return ret;
ret = clk_bulk_prepare_enable(priv->data->nclks, priv->clk);
if (ret)
return ret;
ret = devm_add_action_or_reset(dev, uniphier_clk_disable, priv);
if (ret)
return ret;
ret = reset_control_bulk_deassert(priv->data->nrsts, priv->rst);
if (ret)
return ret;
ret = devm_add_action_or_reset(dev, uniphier_rst_assert, priv);
if (ret)
return ret;
spin_lock_init(&priv->rdata.lock);
priv->rdata.rcdev.owner = THIS_MODULE;
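/* every bit of the mapped register region is exposed as one reset line */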
priv->rdata.rcdev.nr_resets = resource_size(res) * BITS_PER_BYTE;
priv->rdata.rcdev.ops = &reset_simple_ops;
priv->rdata.rcdev.of_node = dev->of_node;
priv->rdata.active_low = true;
return devm_reset_controller_register(dev, &priv->rdata.rcdev);
}
static const char * const uniphier_pro4_clock_reset_names[] = {
"gio", "link",
};
static const struct uniphier_glue_reset_soc_data uniphier_pro4_data = {
.nclks = ARRAY_SIZE(uniphier_pro4_clock_reset_names),
.clock_names = uniphier_pro4_clock_reset_names,
.nrsts = ARRAY_SIZE(uniphier_pro4_clock_reset_names),
.reset_names = uniphier_pro4_clock_reset_names,
};
static const char * const uniphier_pxs2_clock_reset_names[] = {
"link",
};
static const struct uniphier_glue_reset_soc_data uniphier_pxs2_data = {
.nclks = ARRAY_SIZE(uniphier_pxs2_clock_reset_names),
.clock_names = uniphier_pxs2_clock_reset_names,
.nrsts = ARRAY_SIZE(uniphier_pxs2_clock_reset_names),
.reset_names = uniphier_pxs2_clock_reset_names,
};
static const struct of_device_id uniphier_glue_reset_match[] = {
{
.compatible = "socionext,uniphier-pro4-usb3-reset",
.data = &uniphier_pro4_data,
},
{
.compatible = "socionext,uniphier-pro5-usb3-reset",
.data = &uniphier_pro4_data,
},
{
.compatible = "socionext,uniphier-pxs2-usb3-reset",
.data = &uniphier_pxs2_data,
},
{
.compatible = "socionext,uniphier-ld20-usb3-reset",
.data = &uniphier_pxs2_data,
},
{
.compatible = "socionext,uniphier-pxs3-usb3-reset",
.data = &uniphier_pxs2_data,
},
{
.compatible = "socionext,uniphier-nx1-usb3-reset",
.data = &uniphier_pxs2_data,
},
{
.compatible = "socionext,uniphier-pro4-ahci-reset",
.data = &uniphier_pro4_data,
},
{
.compatible = "socionext,uniphier-pxs2-ahci-reset",
.data = &uniphier_pxs2_data,
},
{
.compatible = "socionext,uniphier-pxs3-ahci-reset",
.data = &uniphier_pxs2_data,
},
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, uniphier_glue_reset_match);
static struct platform_driver uniphier_glue_reset_driver = {
.probe = uniphier_glue_reset_probe,
.driver = {
.name = "uniphier-glue-reset",
.of_match_table = uniphier_glue_reset_match,
},
};
module_platform_driver(uniphier_glue_reset_driver);
MODULE_AUTHOR("Kunihiko Hayashi <[email protected]>");
MODULE_DESCRIPTION("UniPhier Glue layer reset driver");
MODULE_LICENSE("GPL");
/* linux-master: drivers/reset/reset-uniphier-glue.c */
/*
* Copyright (C) 2014 Marvell Technology Group Ltd.
*
* Marvell Berlin reset driver
*
* Antoine Tenart <[email protected]>
* Sebastian Hesselbarth <[email protected]>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
#include <linux/types.h>
#define BERLIN_MAX_RESETS 32
#define to_berlin_reset_priv(p) \
container_of((p), struct berlin_reset_priv, rcdev)
struct berlin_reset_priv {
struct regmap *regmap;
struct reset_controller_dev rcdev;
};
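/*
 * Reset lines are identified by a packed value produced by
 * berlin_reset_xlate() below: the register offset lives in bits [31:8]
 * and the bit index within that register in bits [7:0].
 */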
static int berlin_reset_reset(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct berlin_reset_priv *priv = to_berlin_reset_priv(rcdev);
int offset = id >> 8;
int mask = BIT(id & 0x1f);
regmap_write(priv->regmap, offset, mask);
/* let the reset be effective */
udelay(10);
return 0;
}
static const struct reset_control_ops berlin_reset_ops = {
.reset = berlin_reset_reset,
};
static int berlin_reset_xlate(struct reset_controller_dev *rcdev,
const struct of_phandle_args *reset_spec)
{
unsigned int offset, bit;
offset = reset_spec->args[0];
bit = reset_spec->args[1];
if (bit >= BERLIN_MAX_RESETS)
return -EINVAL;
return (offset << 8) | bit;
}
static int berlin2_reset_probe(struct platform_device *pdev)
{
struct device_node *parent_np = of_get_parent(pdev->dev.of_node);
struct berlin_reset_priv *priv;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->regmap = syscon_node_to_regmap(parent_np);
of_node_put(parent_np);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
priv->rcdev.owner = THIS_MODULE;
priv->rcdev.ops = &berlin_reset_ops;
priv->rcdev.of_node = pdev->dev.of_node;
priv->rcdev.of_reset_n_cells = 2;
priv->rcdev.of_xlate = berlin_reset_xlate;
return reset_controller_register(&priv->rcdev);
}
static const struct of_device_id berlin_reset_dt_match[] = {
{ .compatible = "marvell,berlin2-reset" },
{ },
};
MODULE_DEVICE_TABLE(of, berlin_reset_dt_match);
static struct platform_driver berlin_reset_driver = {
.probe = berlin2_reset_probe,
.driver = {
.name = "berlin2-reset",
.of_match_table = berlin_reset_dt_match,
},
};
module_platform_driver(berlin_reset_driver);
MODULE_AUTHOR("Antoine Tenart <[email protected]>");
MODULE_AUTHOR("Sebastian Hesselbarth <[email protected]>");
MODULE_DESCRIPTION("Synaptics Berlin reset controller");
MODULE_LICENSE("GPL");
/* linux-master: drivers/reset/reset-berlin.c */
/*
* Copyright (C) 2017 Synopsys.
*
* Synopsys AXS10x reset driver.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#define to_axs10x_rst(p) container_of((p), struct axs10x_rst, rcdev)
#define AXS10X_MAX_RESETS 32
struct axs10x_rst {
void __iomem *regs_rst;
spinlock_t lock;
struct reset_controller_dev rcdev;
};
static int axs10x_reset_reset(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct axs10x_rst *rst = to_axs10x_rst(rcdev);
unsigned long flags;
spin_lock_irqsave(&rst->lock, flags);
writel(BIT(id), rst->regs_rst);
spin_unlock_irqrestore(&rst->lock, flags);
return 0;
}
static const struct reset_control_ops axs10x_reset_ops = {
.reset = axs10x_reset_reset,
};
static int axs10x_reset_probe(struct platform_device *pdev)
{
struct axs10x_rst *rst;
rst = devm_kzalloc(&pdev->dev, sizeof(*rst), GFP_KERNEL);
if (!rst)
return -ENOMEM;
rst->regs_rst = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rst->regs_rst))
return PTR_ERR(rst->regs_rst);
spin_lock_init(&rst->lock);
rst->rcdev.owner = THIS_MODULE;
rst->rcdev.ops = &axs10x_reset_ops;
rst->rcdev.of_node = pdev->dev.of_node;
rst->rcdev.nr_resets = AXS10X_MAX_RESETS;
return devm_reset_controller_register(&pdev->dev, &rst->rcdev);
}
static const struct of_device_id axs10x_reset_dt_match[] = {
{ .compatible = "snps,axs10x-reset" },
{ },
};
static struct platform_driver axs10x_reset_driver = {
.probe = axs10x_reset_probe,
.driver = {
.name = "axs10x-reset",
.of_match_table = axs10x_reset_dt_match,
},
};
builtin_platform_driver(axs10x_reset_driver);
MODULE_AUTHOR("Eugeniy Paltsev <[email protected]>");
MODULE_DESCRIPTION("Synopsys AXS10x reset driver");
MODULE_LICENSE("GPL v2");
/* linux-master: drivers/reset/reset-axs10x.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 Socionext Inc.
* Author: Masahiro Yamada <[email protected]>
*/
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>
struct uniphier_reset_data {
unsigned int id;
unsigned int reg;
unsigned int bit;
unsigned int flags;
#define UNIPHIER_RESET_ACTIVE_LOW BIT(0)
};
#define UNIPHIER_RESET_ID_END ((unsigned int)(-1))
#define UNIPHIER_RESET_END \
{ .id = UNIPHIER_RESET_ID_END }
#define UNIPHIER_RESET(_id, _reg, _bit) \
{ \
.id = (_id), \
.reg = (_reg), \
.bit = (_bit), \
}
#define UNIPHIER_RESETX(_id, _reg, _bit) \
{ \
.id = (_id), \
.reg = (_reg), \
.bit = (_bit), \
.flags = UNIPHIER_RESET_ACTIVE_LOW, \
}
/* System reset data */
static const struct uniphier_reset_data uniphier_ld4_sys_reset_data[] = {
UNIPHIER_RESETX(2, 0x2000, 2), /* NAND */
UNIPHIER_RESETX(8, 0x2000, 10), /* STDMAC (Ether, HSC, MIO) */
UNIPHIER_RESET_END,
};
static const struct uniphier_reset_data uniphier_pro4_sys_reset_data[] = {
UNIPHIER_RESETX(2, 0x2000, 2), /* NAND */
UNIPHIER_RESETX(6, 0x2000, 12), /* Ether */
UNIPHIER_RESETX(8, 0x2000, 10), /* STDMAC (HSC, MIO, RLE) */
UNIPHIER_RESETX(12, 0x2000, 6), /* GIO (Ether, SATA, USB3) */
UNIPHIER_RESETX(14, 0x2000, 17), /* USB30 */
UNIPHIER_RESETX(15, 0x2004, 17), /* USB31 */
UNIPHIER_RESETX(28, 0x2000, 18), /* SATA0 */
UNIPHIER_RESETX(29, 0x2004, 18), /* SATA1 */
UNIPHIER_RESETX(30, 0x2000, 19), /* SATA-PHY */
UNIPHIER_RESETX(40, 0x2000, 13), /* AIO */
UNIPHIER_RESET_END,
};
static const struct uniphier_reset_data uniphier_pro5_sys_reset_data[] = {
UNIPHIER_RESETX(2, 0x2000, 2), /* NAND */
UNIPHIER_RESETX(8, 0x2000, 10), /* STDMAC (HSC) */
UNIPHIER_RESETX(12, 0x2000, 6), /* GIO (PCIe, USB3) */
UNIPHIER_RESETX(14, 0x2000, 17), /* USB30 */
UNIPHIER_RESETX(15, 0x2004, 17), /* USB31 */
UNIPHIER_RESETX(24, 0x2008, 2), /* PCIe */
UNIPHIER_RESETX(40, 0x2000, 13), /* AIO */
UNIPHIER_RESET_END,
};
static const struct uniphier_reset_data uniphier_pxs2_sys_reset_data[] = {
UNIPHIER_RESETX(2, 0x2000, 2), /* NAND */
UNIPHIER_RESETX(6, 0x2000, 12), /* Ether */
UNIPHIER_RESETX(8, 0x2000, 10), /* STDMAC (HSC, RLE) */
UNIPHIER_RESETX(14, 0x2000, 17), /* USB30 */
UNIPHIER_RESETX(15, 0x2004, 17), /* USB31 */
UNIPHIER_RESETX(16, 0x2014, 4), /* USB30-PHY0 */
UNIPHIER_RESETX(17, 0x2014, 0), /* USB30-PHY1 */
UNIPHIER_RESETX(18, 0x2014, 2), /* USB30-PHY2 */
UNIPHIER_RESETX(20, 0x2014, 5), /* USB31-PHY0 */
UNIPHIER_RESETX(21, 0x2014, 1), /* USB31-PHY1 */
UNIPHIER_RESETX(28, 0x2014, 12), /* SATA */
UNIPHIER_RESET(30, 0x2014, 8), /* SATA-PHY (active high) */
UNIPHIER_RESETX(40, 0x2000, 13), /* AIO */
UNIPHIER_RESET_END,
};
static const struct uniphier_reset_data uniphier_ld11_sys_reset_data[] = {
UNIPHIER_RESETX(2, 0x200c, 0), /* NAND */
UNIPHIER_RESETX(4, 0x200c, 2), /* eMMC */
UNIPHIER_RESETX(6, 0x200c, 6), /* Ether */
UNIPHIER_RESETX(8, 0x200c, 8), /* STDMAC (HSC, MIO) */
UNIPHIER_RESETX(9, 0x200c, 9), /* HSC */
UNIPHIER_RESETX(40, 0x2008, 0), /* AIO */
UNIPHIER_RESETX(41, 0x2008, 1), /* EVEA */
UNIPHIER_RESETX(42, 0x2010, 2), /* EXIV */
UNIPHIER_RESET_END,
};
static const struct uniphier_reset_data uniphier_ld20_sys_reset_data[] = {
UNIPHIER_RESETX(2, 0x200c, 0), /* NAND */
UNIPHIER_RESETX(4, 0x200c, 2), /* eMMC */
UNIPHIER_RESETX(6, 0x200c, 6), /* Ether */
UNIPHIER_RESETX(8, 0x200c, 8), /* STDMAC (HSC) */
UNIPHIER_RESETX(9, 0x200c, 9), /* HSC */
UNIPHIER_RESETX(14, 0x200c, 5), /* USB30 */
UNIPHIER_RESETX(16, 0x200c, 12), /* USB30-PHY0 */
UNIPHIER_RESETX(17, 0x200c, 13), /* USB30-PHY1 */
UNIPHIER_RESETX(18, 0x200c, 14), /* USB30-PHY2 */
UNIPHIER_RESETX(19, 0x200c, 15), /* USB30-PHY3 */
UNIPHIER_RESETX(24, 0x200c, 4), /* PCIe */
UNIPHIER_RESETX(40, 0x2008, 0), /* AIO */
UNIPHIER_RESETX(41, 0x2008, 1), /* EVEA */
UNIPHIER_RESETX(42, 0x2010, 2), /* EXIV */
UNIPHIER_RESET_END,
};
static const struct uniphier_reset_data uniphier_pxs3_sys_reset_data[] = {
UNIPHIER_RESETX(2, 0x200c, 0), /* NAND */
UNIPHIER_RESETX(4, 0x200c, 2), /* eMMC */
UNIPHIER_RESETX(6, 0x200c, 9), /* Ether0 */
UNIPHIER_RESETX(7, 0x200c, 10), /* Ether1 */
UNIPHIER_RESETX(8, 0x200c, 12), /* STDMAC */
UNIPHIER_RESETX(12, 0x200c, 4), /* USB30 link */
UNIPHIER_RESETX(13, 0x200c, 5), /* USB31 link */
UNIPHIER_RESETX(16, 0x200c, 16), /* USB30-PHY0 */
UNIPHIER_RESETX(17, 0x200c, 18), /* USB30-PHY1 */
UNIPHIER_RESETX(18, 0x200c, 20), /* USB30-PHY2 */
UNIPHIER_RESETX(20, 0x200c, 17), /* USB31-PHY0 */
UNIPHIER_RESETX(21, 0x200c, 19), /* USB31-PHY1 */
UNIPHIER_RESETX(24, 0x200c, 3), /* PCIe */
UNIPHIER_RESETX(28, 0x200c, 7), /* SATA0 */
UNIPHIER_RESETX(29, 0x200c, 8), /* SATA1 */
UNIPHIER_RESETX(30, 0x200c, 21), /* SATA-PHY */
UNIPHIER_RESETX(40, 0x2008, 0), /* AIO */
UNIPHIER_RESETX(42, 0x2010, 2), /* EXIV */
UNIPHIER_RESET_END,
};
static const struct uniphier_reset_data uniphier_nx1_sys_reset_data[] = {
UNIPHIER_RESETX(4, 0x2008, 8), /* eMMC */
UNIPHIER_RESETX(6, 0x200c, 0), /* Ether */
UNIPHIER_RESETX(12, 0x200c, 16), /* USB30 link */
UNIPHIER_RESETX(16, 0x200c, 24), /* USB30-PHY0 */
UNIPHIER_RESETX(17, 0x200c, 25), /* USB30-PHY1 */
UNIPHIER_RESETX(18, 0x200c, 26), /* USB30-PHY2 */
UNIPHIER_RESETX(24, 0x200c, 8), /* PCIe */
UNIPHIER_RESETX(52, 0x2010, 0), /* VOC */
UNIPHIER_RESETX(58, 0x2010, 8), /* HDMI-Tx */
UNIPHIER_RESET_END,
};
/* Media I/O reset data */
#define UNIPHIER_MIO_RESET_SD(id, ch) \
UNIPHIER_RESETX((id), 0x110 + 0x200 * (ch), 0)
#define UNIPHIER_MIO_RESET_SD_BRIDGE(id, ch) \
UNIPHIER_RESETX((id), 0x110 + 0x200 * (ch), 26)
#define UNIPHIER_MIO_RESET_EMMC_HW_RESET(id, ch) \
UNIPHIER_RESETX((id), 0x80 + 0x200 * (ch), 0)
#define UNIPHIER_MIO_RESET_USB2(id, ch) \
UNIPHIER_RESETX((id), 0x114 + 0x200 * (ch), 0)
#define UNIPHIER_MIO_RESET_USB2_BRIDGE(id, ch) \
UNIPHIER_RESETX((id), 0x110 + 0x200 * (ch), 24)
#define UNIPHIER_MIO_RESET_DMAC(id) \
UNIPHIER_RESETX((id), 0x110, 17)
static const struct uniphier_reset_data uniphier_ld4_mio_reset_data[] = {
UNIPHIER_MIO_RESET_SD(0, 0),
UNIPHIER_MIO_RESET_SD(1, 1),
UNIPHIER_MIO_RESET_SD(2, 2),
UNIPHIER_MIO_RESET_SD_BRIDGE(3, 0),
UNIPHIER_MIO_RESET_SD_BRIDGE(4, 1),
UNIPHIER_MIO_RESET_SD_BRIDGE(5, 2),
UNIPHIER_MIO_RESET_EMMC_HW_RESET(6, 1),
UNIPHIER_MIO_RESET_DMAC(7),
UNIPHIER_MIO_RESET_USB2(8, 0),
UNIPHIER_MIO_RESET_USB2(9, 1),
UNIPHIER_MIO_RESET_USB2(10, 2),
UNIPHIER_MIO_RESET_USB2_BRIDGE(12, 0),
UNIPHIER_MIO_RESET_USB2_BRIDGE(13, 1),
UNIPHIER_MIO_RESET_USB2_BRIDGE(14, 2),
UNIPHIER_RESET_END,
};
static const struct uniphier_reset_data uniphier_pro5_sd_reset_data[] = {
UNIPHIER_MIO_RESET_SD(0, 0),
UNIPHIER_MIO_RESET_SD(1, 1),
UNIPHIER_MIO_RESET_EMMC_HW_RESET(6, 1),
UNIPHIER_RESET_END,
};
/* Peripheral reset data */
#define UNIPHIER_PERI_RESET_UART(id, ch) \
UNIPHIER_RESETX((id), 0x114, 19 + (ch))
#define UNIPHIER_PERI_RESET_I2C(id, ch) \
UNIPHIER_RESETX((id), 0x114, 5 + (ch))
#define UNIPHIER_PERI_RESET_FI2C(id, ch) \
UNIPHIER_RESETX((id), 0x114, 24 + (ch))
#define UNIPHIER_PERI_RESET_SCSSI(id, ch) \
UNIPHIER_RESETX((id), 0x110, 17 + (ch))
#define UNIPHIER_PERI_RESET_MCSSI(id) \
UNIPHIER_RESETX((id), 0x114, 14)
static const struct uniphier_reset_data uniphier_ld4_peri_reset_data[] = {
UNIPHIER_PERI_RESET_UART(0, 0),
UNIPHIER_PERI_RESET_UART(1, 1),
UNIPHIER_PERI_RESET_UART(2, 2),
UNIPHIER_PERI_RESET_UART(3, 3),
UNIPHIER_PERI_RESET_I2C(4, 0),
UNIPHIER_PERI_RESET_I2C(5, 1),
UNIPHIER_PERI_RESET_I2C(6, 2),
UNIPHIER_PERI_RESET_I2C(7, 3),
UNIPHIER_PERI_RESET_I2C(8, 4),
UNIPHIER_PERI_RESET_SCSSI(11, 0),
UNIPHIER_RESET_END,
};
static const struct uniphier_reset_data uniphier_pro4_peri_reset_data[] = {
UNIPHIER_PERI_RESET_UART(0, 0),
UNIPHIER_PERI_RESET_UART(1, 1),
UNIPHIER_PERI_RESET_UART(2, 2),
UNIPHIER_PERI_RESET_UART(3, 3),
UNIPHIER_PERI_RESET_FI2C(4, 0),
UNIPHIER_PERI_RESET_FI2C(5, 1),
UNIPHIER_PERI_RESET_FI2C(6, 2),
UNIPHIER_PERI_RESET_FI2C(7, 3),
UNIPHIER_PERI_RESET_FI2C(8, 4),
UNIPHIER_PERI_RESET_FI2C(9, 5),
UNIPHIER_PERI_RESET_FI2C(10, 6),
UNIPHIER_PERI_RESET_SCSSI(11, 0),
UNIPHIER_PERI_RESET_SCSSI(12, 1),
UNIPHIER_PERI_RESET_SCSSI(13, 2),
UNIPHIER_PERI_RESET_SCSSI(14, 3),
UNIPHIER_PERI_RESET_MCSSI(15),
UNIPHIER_RESET_END,
};
/* Analog signal amplifiers reset data */
static const struct uniphier_reset_data uniphier_ld11_adamv_reset_data[] = {
UNIPHIER_RESETX(0, 0x10, 6), /* EVEA */
UNIPHIER_RESET_END,
};
/* core implementation */
struct uniphier_reset_priv {
struct reset_controller_dev rcdev;
struct device *dev;
struct regmap *regmap;
const struct uniphier_reset_data *data;
};
#define to_uniphier_reset_priv(_rcdev) \
container_of(_rcdev, struct uniphier_reset_priv, rcdev)
static int uniphier_reset_update(struct reset_controller_dev *rcdev,
unsigned long id, int assert)
{
struct uniphier_reset_priv *priv = to_uniphier_reset_priv(rcdev);
const struct uniphier_reset_data *p;
for (p = priv->data; p->id != UNIPHIER_RESET_ID_END; p++) {
unsigned int mask, val;
if (p->id != id)
continue;
mask = BIT(p->bit);
if (assert)
val = mask;
else
val = ~mask;
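/*
 * val now encodes the active-high case; active-low lines simply
 * invert it. regmap_write_bits() below only touches the bits in
 * mask, so the other lines in the register are left alone.
 */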
if (p->flags & UNIPHIER_RESET_ACTIVE_LOW)
val = ~val;
return regmap_write_bits(priv->regmap, p->reg, mask, val);
}
dev_err(priv->dev, "reset_id=%lu was not handled\n", id);
return -EINVAL;
}
static int uniphier_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return uniphier_reset_update(rcdev, id, 1);
}
static int uniphier_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return uniphier_reset_update(rcdev, id, 0);
}
static int uniphier_reset_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct uniphier_reset_priv *priv = to_uniphier_reset_priv(rcdev);
const struct uniphier_reset_data *p;
for (p = priv->data; p->id != UNIPHIER_RESET_ID_END; p++) {
unsigned int val;
int ret, asserted;
if (p->id != id)
continue;
ret = regmap_read(priv->regmap, p->reg, &val);
if (ret)
return ret;
asserted = !!(val & BIT(p->bit));
if (p->flags & UNIPHIER_RESET_ACTIVE_LOW)
asserted = !asserted;
return asserted;
}
dev_err(priv->dev, "reset_id=%lu was not found\n", id);
return -EINVAL;
}
static const struct reset_control_ops uniphier_reset_ops = {
.assert = uniphier_reset_assert,
.deassert = uniphier_reset_deassert,
.status = uniphier_reset_status,
};
static int uniphier_reset_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct uniphier_reset_priv *priv;
const struct uniphier_reset_data *p, *data;
struct regmap *regmap;
struct device_node *parent;
unsigned int nr_resets = 0;
data = of_device_get_match_data(dev);
if (WARN_ON(!data))
return -EINVAL;
parent = of_get_parent(dev->of_node); /* parent should be syscon node */
regmap = syscon_node_to_regmap(parent);
of_node_put(parent);
if (IS_ERR(regmap)) {
dev_err(dev, "failed to get regmap (error %ld)\n",
PTR_ERR(regmap));
return PTR_ERR(regmap);
}
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
for (p = data; p->id != UNIPHIER_RESET_ID_END; p++)
nr_resets = max(nr_resets, p->id + 1);
priv->rcdev.ops = &uniphier_reset_ops;
priv->rcdev.owner = dev->driver->owner;
priv->rcdev.of_node = dev->of_node;
priv->rcdev.nr_resets = nr_resets;
priv->dev = dev;
priv->regmap = regmap;
priv->data = data;
return devm_reset_controller_register(&pdev->dev, &priv->rcdev);
}
static const struct of_device_id uniphier_reset_match[] = {
/* System reset */
{
.compatible = "socionext,uniphier-ld4-reset",
.data = uniphier_ld4_sys_reset_data,
},
{
.compatible = "socionext,uniphier-pro4-reset",
.data = uniphier_pro4_sys_reset_data,
},
{
.compatible = "socionext,uniphier-sld8-reset",
.data = uniphier_ld4_sys_reset_data,
},
{
.compatible = "socionext,uniphier-pro5-reset",
.data = uniphier_pro5_sys_reset_data,
},
{
.compatible = "socionext,uniphier-pxs2-reset",
.data = uniphier_pxs2_sys_reset_data,
},
{
.compatible = "socionext,uniphier-ld11-reset",
.data = uniphier_ld11_sys_reset_data,
},
{
.compatible = "socionext,uniphier-ld20-reset",
.data = uniphier_ld20_sys_reset_data,
},
{
.compatible = "socionext,uniphier-pxs3-reset",
.data = uniphier_pxs3_sys_reset_data,
},
{
.compatible = "socionext,uniphier-nx1-reset",
.data = uniphier_nx1_sys_reset_data,
},
/* Media I/O reset, SD reset */
{
.compatible = "socionext,uniphier-ld4-mio-reset",
.data = uniphier_ld4_mio_reset_data,
},
{
.compatible = "socionext,uniphier-pro4-mio-reset",
.data = uniphier_ld4_mio_reset_data,
},
{
.compatible = "socionext,uniphier-sld8-mio-reset",
.data = uniphier_ld4_mio_reset_data,
},
{
.compatible = "socionext,uniphier-pro5-sd-reset",
.data = uniphier_pro5_sd_reset_data,
},
{
.compatible = "socionext,uniphier-pxs2-sd-reset",
.data = uniphier_pro5_sd_reset_data,
},
{
.compatible = "socionext,uniphier-ld11-mio-reset",
.data = uniphier_ld4_mio_reset_data,
},
{
.compatible = "socionext,uniphier-ld11-sd-reset",
.data = uniphier_pro5_sd_reset_data,
},
{
.compatible = "socionext,uniphier-ld20-sd-reset",
.data = uniphier_pro5_sd_reset_data,
},
{
.compatible = "socionext,uniphier-pxs3-sd-reset",
.data = uniphier_pro5_sd_reset_data,
},
{
.compatible = "socionext,uniphier-nx1-sd-reset",
.data = uniphier_pro5_sd_reset_data,
},
/* Peripheral reset */
{
.compatible = "socionext,uniphier-ld4-peri-reset",
.data = uniphier_ld4_peri_reset_data,
},
{
.compatible = "socionext,uniphier-pro4-peri-reset",
.data = uniphier_pro4_peri_reset_data,
},
{
.compatible = "socionext,uniphier-sld8-peri-reset",
.data = uniphier_ld4_peri_reset_data,
},
{
.compatible = "socionext,uniphier-pro5-peri-reset",
.data = uniphier_pro4_peri_reset_data,
},
{
.compatible = "socionext,uniphier-pxs2-peri-reset",
.data = uniphier_pro4_peri_reset_data,
},
{
.compatible = "socionext,uniphier-ld11-peri-reset",
.data = uniphier_pro4_peri_reset_data,
},
{
.compatible = "socionext,uniphier-ld20-peri-reset",
.data = uniphier_pro4_peri_reset_data,
},
{
.compatible = "socionext,uniphier-pxs3-peri-reset",
.data = uniphier_pro4_peri_reset_data,
},
{
.compatible = "socionext,uniphier-nx1-peri-reset",
.data = uniphier_pro4_peri_reset_data,
},
/* Analog signal amplifiers reset */
{
.compatible = "socionext,uniphier-ld11-adamv-reset",
.data = uniphier_ld11_adamv_reset_data,
},
{
.compatible = "socionext,uniphier-ld20-adamv-reset",
.data = uniphier_ld11_adamv_reset_data,
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, uniphier_reset_match);
static struct platform_driver uniphier_reset_driver = {
.probe = uniphier_reset_probe,
.driver = {
.name = "uniphier-reset",
.of_match_table = uniphier_reset_match,
},
};
module_platform_driver(uniphier_reset_driver);
MODULE_AUTHOR("Masahiro Yamada <[email protected]>");
MODULE_DESCRIPTION("UniPhier Reset Controller Driver");
MODULE_LICENSE("GPL");
/* linux-master: drivers/reset/reset-uniphier.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* BCM6345 Reset Controller Driver
*
* Copyright (C) 2020 Álvaro Fernández Rojas <[email protected]>
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#define BCM6345_RESET_NUM 32
#define BCM6345_RESET_SLEEP_MIN_US 10000
#define BCM6345_RESET_SLEEP_MAX_US 20000
struct bcm6345_reset {
struct reset_controller_dev rcdev;
void __iomem *base;
spinlock_t lock;
};
static inline struct bcm6345_reset *
to_bcm6345_reset(struct reset_controller_dev *rcdev)
{
return container_of(rcdev, struct bcm6345_reset, rcdev);
}
static int bcm6345_reset_update(struct reset_controller_dev *rcdev,
unsigned long id, bool assert)
{
struct bcm6345_reset *bcm6345_reset = to_bcm6345_reset(rcdev);
unsigned long flags;
uint32_t val;
spin_lock_irqsave(&bcm6345_reset->lock, flags);
val = __raw_readl(bcm6345_reset->base);
if (assert)
val &= ~BIT(id);
else
val |= BIT(id);
__raw_writel(val, bcm6345_reset->base);
spin_unlock_irqrestore(&bcm6345_reset->lock, flags);
return 0;
}
static int bcm6345_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return bcm6345_reset_update(rcdev, id, true);
}
static int bcm6345_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return bcm6345_reset_update(rcdev, id, false);
}
static int bcm6345_reset_reset(struct reset_controller_dev *rcdev,
unsigned long id)
{
bcm6345_reset_update(rcdev, id, true);
usleep_range(BCM6345_RESET_SLEEP_MIN_US,
BCM6345_RESET_SLEEP_MAX_US);
bcm6345_reset_update(rcdev, id, false);
/*
* Ensure the component is taken out of reset by also sleeping after
* deasserting it. Otherwise, the component may not be ready for
* operation.
*/
usleep_range(BCM6345_RESET_SLEEP_MIN_US,
BCM6345_RESET_SLEEP_MAX_US);
return 0;
}
static int bcm6345_reset_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct bcm6345_reset *bcm6345_reset = to_bcm6345_reset(rcdev);
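/* a set bit means the block is out of reset, so invert for "asserted" */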
return !(__raw_readl(bcm6345_reset->base) & BIT(id));
}
static const struct reset_control_ops bcm6345_reset_ops = {
.assert = bcm6345_reset_assert,
.deassert = bcm6345_reset_deassert,
.reset = bcm6345_reset_reset,
.status = bcm6345_reset_status,
};
static int bcm6345_reset_probe(struct platform_device *pdev)
{
struct bcm6345_reset *bcm6345_reset;
bcm6345_reset = devm_kzalloc(&pdev->dev,
sizeof(*bcm6345_reset), GFP_KERNEL);
if (!bcm6345_reset)
return -ENOMEM;
bcm6345_reset->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bcm6345_reset->base))
return PTR_ERR(bcm6345_reset->base);
spin_lock_init(&bcm6345_reset->lock);
bcm6345_reset->rcdev.ops = &bcm6345_reset_ops;
bcm6345_reset->rcdev.owner = THIS_MODULE;
bcm6345_reset->rcdev.of_node = pdev->dev.of_node;
bcm6345_reset->rcdev.of_reset_n_cells = 1;
bcm6345_reset->rcdev.nr_resets = BCM6345_RESET_NUM;
return devm_reset_controller_register(&pdev->dev,
&bcm6345_reset->rcdev);
}
static const struct of_device_id bcm6345_reset_of_match[] = {
{ .compatible = "brcm,bcm6345-reset" },
{ /* sentinel */ },
};
static struct platform_driver bcm6345_reset_driver = {
.probe = bcm6345_reset_probe,
.driver = {
.name = "bcm6345-reset",
.of_match_table = bcm6345_reset_of_match,
.suppress_bind_attrs = true,
},
};
builtin_platform_driver(bcm6345_reset_driver);
/* linux-master: drivers/reset/reset-bcm6345.c */
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2020 Broadcom */
#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#define BRCM_RESCAL_START 0x0
#define BRCM_RESCAL_START_BIT BIT(0)
#define BRCM_RESCAL_CTRL 0x4
#define BRCM_RESCAL_STATUS 0x8
#define BRCM_RESCAL_STATUS_BIT BIT(0)
struct brcm_rescal_reset {
void __iomem *base;
struct device *dev;
struct reset_controller_dev rcdev;
};
static int brcm_rescal_reset_set(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct brcm_rescal_reset *data =
container_of(rcdev, struct brcm_rescal_reset, rcdev);
void __iomem *base = data->base;
u32 reg;
int ret;
reg = readl(base + BRCM_RESCAL_START);
writel(reg | BRCM_RESCAL_START_BIT, base + BRCM_RESCAL_START);
reg = readl(base + BRCM_RESCAL_START);
if (!(reg & BRCM_RESCAL_START_BIT)) {
dev_err(data->dev, "failed to start SATA/PCIe rescal\n");
return -EIO;
}
ret = readl_poll_timeout(base + BRCM_RESCAL_STATUS, reg,
(reg & BRCM_RESCAL_STATUS_BIT), 100, 1000);
if (ret) {
dev_err(data->dev, "time out on SATA/PCIe rescal\n");
return ret;
}
reg = readl(base + BRCM_RESCAL_START);
writel(reg & ~BRCM_RESCAL_START_BIT, base + BRCM_RESCAL_START);
dev_dbg(data->dev, "SATA/PCIe rescal success\n");
return 0;
}
static int brcm_rescal_reset_xlate(struct reset_controller_dev *rcdev,
const struct of_phandle_args *reset_spec)
{
/* This is needed if #reset-cells == 0. */
return 0;
}
static const struct reset_control_ops brcm_rescal_reset_ops = {
.reset = brcm_rescal_reset_set,
};
static int brcm_rescal_reset_probe(struct platform_device *pdev)
{
struct brcm_rescal_reset *data;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
data->rcdev.owner = THIS_MODULE;
data->rcdev.nr_resets = 1;
data->rcdev.ops = &brcm_rescal_reset_ops;
data->rcdev.of_node = pdev->dev.of_node;
data->rcdev.of_xlate = brcm_rescal_reset_xlate;
data->dev = &pdev->dev;
return devm_reset_controller_register(&pdev->dev, &data->rcdev);
}
static const struct of_device_id brcm_rescal_reset_of_match[] = {
{ .compatible = "brcm,bcm7216-pcie-sata-rescal" },
{ },
};
MODULE_DEVICE_TABLE(of, brcm_rescal_reset_of_match);
static struct platform_driver brcm_rescal_reset_driver = {
.probe = brcm_rescal_reset_probe,
.driver = {
.name = "brcm-rescal-reset",
.of_match_table = brcm_rescal_reset_of_match,
}
};
module_platform_driver(brcm_rescal_reset_driver);
MODULE_AUTHOR("Broadcom");
MODULE_DESCRIPTION("Broadcom SATA/PCIe rescal reset controller");
MODULE_LICENSE("GPL v2");
/* linux-master: drivers/reset/reset-brcmstb-rescal.c */
/*
* Copyright (C) 2017 Synopsys.
*
* Synopsys HSDK Development platform reset driver.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
#include <linux/types.h>
#define to_hsdk_rst(p) container_of((p), struct hsdk_rst, rcdev)
struct hsdk_rst {
void __iomem *regs_ctl;
void __iomem *regs_rst;
spinlock_t lock;
struct reset_controller_dev rcdev;
};
static const u32 rst_map[] = {
BIT(16), /* APB_RST */
BIT(17), /* AXI_RST */
BIT(18), /* ETH_RST */
BIT(19), /* USB_RST */
BIT(20), /* SDIO_RST */
BIT(21), /* HDMI_RST */
BIT(22), /* GFX_RST */
BIT(25), /* DMAC_RST */
BIT(31), /* EBI_RST */
};
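/*
 * The DT reset specifier is a plain index into rst_map (via the default
 * of_xlate), so e.g. a specifier of <2> selects ETH_RST above.
 */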
#define HSDK_MAX_RESETS ARRAY_SIZE(rst_map)
#define CGU_SYS_RST_CTRL 0x0
#define CGU_IP_SW_RESET 0x0
#define CGU_IP_SW_RESET_DELAY_SHIFT 16
#define CGU_IP_SW_RESET_DELAY_MASK GENMASK(31, CGU_IP_SW_RESET_DELAY_SHIFT)
#define CGU_IP_SW_RESET_DELAY 0
#define CGU_IP_SW_RESET_RESET BIT(0)
#define SW_RESET_TIMEOUT 10000
static void hsdk_reset_config(struct hsdk_rst *rst, unsigned long id)
{
writel(rst_map[id], rst->regs_ctl + CGU_SYS_RST_CTRL);
}
static int hsdk_reset_do(struct hsdk_rst *rst)
{
u32 reg;
reg = readl(rst->regs_rst + CGU_IP_SW_RESET);
reg &= ~CGU_IP_SW_RESET_DELAY_MASK;
reg |= CGU_IP_SW_RESET_DELAY << CGU_IP_SW_RESET_DELAY_SHIFT;
reg |= CGU_IP_SW_RESET_RESET;
writel(reg, rst->regs_rst + CGU_IP_SW_RESET);
/* wait till reset bit is back to 0 */
return readl_poll_timeout_atomic(rst->regs_rst + CGU_IP_SW_RESET, reg,
!(reg & CGU_IP_SW_RESET_RESET), 5, SW_RESET_TIMEOUT);
}
static int hsdk_reset_reset(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct hsdk_rst *rst = to_hsdk_rst(rcdev);
unsigned long flags;
int ret;
spin_lock_irqsave(&rst->lock, flags);
hsdk_reset_config(rst, id);
ret = hsdk_reset_do(rst);
spin_unlock_irqrestore(&rst->lock, flags);
return ret;
}
static const struct reset_control_ops hsdk_reset_ops = {
.reset = hsdk_reset_reset,
.deassert = hsdk_reset_reset,
};
static int hsdk_reset_probe(struct platform_device *pdev)
{
struct hsdk_rst *rst;
rst = devm_kzalloc(&pdev->dev, sizeof(*rst), GFP_KERNEL);
if (!rst)
return -ENOMEM;
rst->regs_ctl = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rst->regs_ctl))
return PTR_ERR(rst->regs_ctl);
rst->regs_rst = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(rst->regs_rst))
return PTR_ERR(rst->regs_rst);
spin_lock_init(&rst->lock);
rst->rcdev.owner = THIS_MODULE;
rst->rcdev.ops = &hsdk_reset_ops;
rst->rcdev.of_node = pdev->dev.of_node;
rst->rcdev.nr_resets = HSDK_MAX_RESETS;
rst->rcdev.of_reset_n_cells = 1;
return reset_controller_register(&rst->rcdev);
}
static const struct of_device_id hsdk_reset_dt_match[] = {
{ .compatible = "snps,hsdk-reset" },
{ },
};
static struct platform_driver hsdk_reset_driver = {
.probe = hsdk_reset_probe,
.driver = {
.name = "hsdk-reset",
.of_match_table = hsdk_reset_dt_match,
},
};
builtin_platform_driver(hsdk_reset_driver);
MODULE_AUTHOR("Eugeniy Paltsev <[email protected]>");
MODULE_DESCRIPTION("Synopsys HSDK SDP reset driver");
MODULE_LICENSE("GPL v2");
/* linux-master: drivers/reset/reset-hsdk.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Reset Controller framework
*
* Copyright 2013 Philipp Zabel, Pengutronix
*/
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/acpi.h>
#include <linux/reset.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
static DEFINE_MUTEX(reset_list_mutex);
static LIST_HEAD(reset_controller_list);
static DEFINE_MUTEX(reset_lookup_mutex);
static LIST_HEAD(reset_lookup_list);
/**
* struct reset_control - a reset control
* @rcdev: a pointer to the reset controller device
* this reset control belongs to
* @list: list entry for the rcdev's reset controller list
* @id: ID of the reset controller in the reset
* controller device
* @refcnt: Number of gets of this reset_control
* @acquired: Only one reset_control may be acquired for a given rcdev and id.
* @shared: Is this a shared (1), or an exclusive (0) reset_control?
* @array: Is this an array of reset controls (1)?
* @deassert_count: Number of times this reset line has been deasserted
* @triggered_count: Number of times this reset line has been reset. Currently
* only used for shared resets, which means that the value
* will be either 0 or 1.
*/
struct reset_control {
struct reset_controller_dev *rcdev;
struct list_head list;
unsigned int id;
struct kref refcnt;
bool acquired;
bool shared;
bool array;
atomic_t deassert_count;
atomic_t triggered_count;
};
/**
* struct reset_control_array - an array of reset controls
* @base: reset control for compatibility with reset control API functions
* @num_rstcs: number of reset controls
* @rstc: array of reset controls
*/
struct reset_control_array {
struct reset_control base;
unsigned int num_rstcs;
struct reset_control *rstc[];
};
static const char *rcdev_name(struct reset_controller_dev *rcdev)
{
if (rcdev->dev)
return dev_name(rcdev->dev);
if (rcdev->of_node)
return rcdev->of_node->full_name;
return NULL;
}
/**
* of_reset_simple_xlate - translate reset_spec to the reset line number
* @rcdev: a pointer to the reset controller device
* @reset_spec: reset line specifier as found in the device tree
*
* This static translation function is used by default if of_xlate in
* :c:type:`reset_controller_dev` is not set. It is useful for all reset
* controllers with 1:1 mapping, where reset lines can be indexed by number
* without gaps.
*/
static int of_reset_simple_xlate(struct reset_controller_dev *rcdev,
const struct of_phandle_args *reset_spec)
{
if (reset_spec->args[0] >= rcdev->nr_resets)
return -EINVAL;
return reset_spec->args[0];
}
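/*
 * Example: with this default translation, a consumer node using
 * "resets = <&rc 5>;" simply refers to line 5 of the controller,
 * provided 5 < rcdev->nr_resets.
 */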
/**
* reset_controller_register - register a reset controller device
* @rcdev: a pointer to the initialized reset controller device
*/
int reset_controller_register(struct reset_controller_dev *rcdev)
{
if (!rcdev->of_xlate) {
rcdev->of_reset_n_cells = 1;
rcdev->of_xlate = of_reset_simple_xlate;
}
INIT_LIST_HEAD(&rcdev->reset_control_head);
mutex_lock(&reset_list_mutex);
list_add(&rcdev->list, &reset_controller_list);
mutex_unlock(&reset_list_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(reset_controller_register);
/**
* reset_controller_unregister - unregister a reset controller device
* @rcdev: a pointer to the reset controller device
*/
void reset_controller_unregister(struct reset_controller_dev *rcdev)
{
mutex_lock(&reset_list_mutex);
list_del(&rcdev->list);
mutex_unlock(&reset_list_mutex);
}
EXPORT_SYMBOL_GPL(reset_controller_unregister);
static void devm_reset_controller_release(struct device *dev, void *res)
{
reset_controller_unregister(*(struct reset_controller_dev **)res);
}
/**
* devm_reset_controller_register - resource managed reset_controller_register()
* @dev: device that is registering this reset controller
* @rcdev: a pointer to the initialized reset controller device
*
* Managed reset_controller_register(). For reset controllers registered by
* this function, reset_controller_unregister() is automatically called on
* driver detach. See reset_controller_register() for more information.
*/
int devm_reset_controller_register(struct device *dev,
struct reset_controller_dev *rcdev)
{
struct reset_controller_dev **rcdevp;
int ret;
rcdevp = devres_alloc(devm_reset_controller_release, sizeof(*rcdevp),
GFP_KERNEL);
if (!rcdevp)
return -ENOMEM;
ret = reset_controller_register(rcdev);
if (ret) {
devres_free(rcdevp);
return ret;
}
*rcdevp = rcdev;
devres_add(dev, rcdevp);
return ret;
}
EXPORT_SYMBOL_GPL(devm_reset_controller_register);
/**
* reset_controller_add_lookup - register a set of lookup entries
* @lookup: array of reset lookup entries
* @num_entries: number of entries in the lookup array
*/
void reset_controller_add_lookup(struct reset_control_lookup *lookup,
unsigned int num_entries)
{
struct reset_control_lookup *entry;
unsigned int i;
mutex_lock(&reset_lookup_mutex);
for (i = 0; i < num_entries; i++) {
entry = &lookup[i];
if (!entry->dev_id || !entry->provider) {
pr_warn("%s(): reset lookup entry badly specified, skipping\n",
__func__);
continue;
}
list_add_tail(&entry->list, &reset_lookup_list);
}
mutex_unlock(&reset_lookup_mutex);
}
EXPORT_SYMBOL_GPL(reset_controller_add_lookup);
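/*
 * Sketch of a board-file lookup table (the provider, device, and
 * connection names are illustrative; the RESET_LOOKUP() initializer
 * comes from <linux/reset-controller.h>):
 *
 *	static struct reset_control_lookup board_reset_lookup[] = {
 *		RESET_LOOKUP("reset-ctrl.0", 3, "1c000000.ethernet", "mac"),
 *	};
 *
 *	reset_controller_add_lookup(board_reset_lookup,
 *				    ARRAY_SIZE(board_reset_lookup));
 */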
static inline struct reset_control_array *
rstc_to_array(struct reset_control *rstc) {
return container_of(rstc, struct reset_control_array, base);
}
static int reset_control_array_reset(struct reset_control_array *resets)
{
int ret, i;
for (i = 0; i < resets->num_rstcs; i++) {
ret = reset_control_reset(resets->rstc[i]);
if (ret)
return ret;
}
return 0;
}
static int reset_control_array_rearm(struct reset_control_array *resets)
{
struct reset_control *rstc;
int i;
for (i = 0; i < resets->num_rstcs; i++) {
rstc = resets->rstc[i];
if (!rstc)
continue;
if (WARN_ON(IS_ERR(rstc)))
return -EINVAL;
if (rstc->shared) {
if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
return -EINVAL;
} else {
if (!rstc->acquired)
return -EPERM;
}
}
for (i = 0; i < resets->num_rstcs; i++) {
rstc = resets->rstc[i];
if (rstc && rstc->shared)
WARN_ON(atomic_dec_return(&rstc->triggered_count) < 0);
}
return 0;
}
static int reset_control_array_assert(struct reset_control_array *resets)
{
int ret, i;
for (i = 0; i < resets->num_rstcs; i++) {
ret = reset_control_assert(resets->rstc[i]);
if (ret)
goto err;
}
return 0;
err:
while (i--)
reset_control_deassert(resets->rstc[i]);
return ret;
}
static int reset_control_array_deassert(struct reset_control_array *resets)
{
int ret, i;
for (i = 0; i < resets->num_rstcs; i++) {
ret = reset_control_deassert(resets->rstc[i]);
if (ret)
goto err;
}
return 0;
err:
while (i--)
reset_control_assert(resets->rstc[i]);
return ret;
}
static int reset_control_array_acquire(struct reset_control_array *resets)
{
unsigned int i;
int err;
for (i = 0; i < resets->num_rstcs; i++) {
err = reset_control_acquire(resets->rstc[i]);
if (err < 0)
goto release;
}
return 0;
release:
while (i--)
reset_control_release(resets->rstc[i]);
return err;
}
static void reset_control_array_release(struct reset_control_array *resets)
{
unsigned int i;
for (i = 0; i < resets->num_rstcs; i++)
reset_control_release(resets->rstc[i]);
}
static inline bool reset_control_is_array(struct reset_control *rstc)
{
return rstc->array;
}
/**
* reset_control_reset - reset the controlled device
* @rstc: reset controller
*
* On a shared reset line the actual reset pulse is only triggered once for the
* lifetime of the reset_control instance: for all but the first caller this is
* a no-op.
* Consumers must not use reset_control_(de)assert on shared reset lines when
* reset_control_reset has been used.
*
* If rstc is NULL it is an optional reset and the function will just
* return 0.
*/
int reset_control_reset(struct reset_control *rstc)
{
int ret;
if (!rstc)
return 0;
if (WARN_ON(IS_ERR(rstc)))
return -EINVAL;
if (reset_control_is_array(rstc))
return reset_control_array_reset(rstc_to_array(rstc));
if (!rstc->rcdev->ops->reset)
return -ENOTSUPP;
if (rstc->shared) {
if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
return -EINVAL;
if (atomic_inc_return(&rstc->triggered_count) != 1)
return 0;
} else {
if (!rstc->acquired)
return -EPERM;
}
ret = rstc->rcdev->ops->reset(rstc->rcdev, rstc->id);
if (rstc->shared && ret)
atomic_dec(&rstc->triggered_count);
return ret;
}
EXPORT_SYMBOL_GPL(reset_control_reset);
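/*
 * Consumer sketch for the shared semantics documented above: every co-user
 * requests the same line as shared, but only the first reset_control_reset()
 * call actually pulses it. foo_probe() is hypothetical.
 */
static int foo_probe(struct device *dev)
{
	struct reset_control *rstc;

	rstc = devm_reset_control_get_shared(dev, NULL);
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	/* a no-op for every caller after the first one on a shared line */
	return reset_control_reset(rstc);
}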
/**
* reset_control_bulk_reset - reset the controlled devices in order
* @num_rstcs: number of entries in rstcs array
* @rstcs: array of struct reset_control_bulk_data with reset controls set
*
* Issue a reset on all provided reset controls, in order.
*
* See also: reset_control_reset()
*/
int reset_control_bulk_reset(int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
int ret, i;
for (i = 0; i < num_rstcs; i++) {
ret = reset_control_reset(rstcs[i].rstc);
if (ret)
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(reset_control_bulk_reset);
/**
 * reset_control_rearm - allow shared reset line to be re-triggered
* @rstc: reset controller
*
* On a shared reset line the actual reset pulse is only triggered once for the
* lifetime of the reset_control instance, except if this call is used.
*
 * Calls to this function must be balanced with calls to reset_control_reset;
 * a warning is raised if triggered_count ever dips below 0.
*
* Consumers must not use reset_control_(de)assert on shared reset lines when
* reset_control_reset or reset_control_rearm have been used.
*
* If rstc is NULL the function will just return 0.
*/
int reset_control_rearm(struct reset_control *rstc)
{
if (!rstc)
return 0;
if (WARN_ON(IS_ERR(rstc)))
return -EINVAL;
if (reset_control_is_array(rstc))
return reset_control_array_rearm(rstc_to_array(rstc));
if (rstc->shared) {
if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
return -EINVAL;
WARN_ON(atomic_dec_return(&rstc->triggered_count) < 0);
} else {
if (!rstc->acquired)
return -EPERM;
}
return 0;
}
EXPORT_SYMBOL_GPL(reset_control_rearm);
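/*
 * Hypothetical pairing of reset_control_reset() with reset_control_rearm(),
 * e.g. around a power cycle after which the consumer wants a fresh pulse.
 */
static int foo_reset_cycle(struct reset_control *rstc)
{
	int ret;

	ret = reset_control_reset(rstc);
	if (ret)
		return ret;

	/* ... use the block, then power it down ... */

	/* balance the trigger so a later reset_control_reset() fires again */
	return reset_control_rearm(rstc);
}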
/**
* reset_control_assert - asserts the reset line
* @rstc: reset controller
*
* Calling this on an exclusive reset controller guarantees that the reset
* will be asserted. When called on a shared reset controller the line may
* still be deasserted, as long as other users keep it so.
*
* For shared reset controls a driver cannot expect the hw's registers and
* internal state to be reset, but must be prepared for this to happen.
* Consumers must not use reset_control_reset on shared reset lines when
* reset_control_(de)assert has been used.
*
* If rstc is NULL it is an optional reset and the function will just
* return 0.
*/
int reset_control_assert(struct reset_control *rstc)
{
if (!rstc)
return 0;
if (WARN_ON(IS_ERR(rstc)))
return -EINVAL;
if (reset_control_is_array(rstc))
return reset_control_array_assert(rstc_to_array(rstc));
if (rstc->shared) {
if (WARN_ON(atomic_read(&rstc->triggered_count) != 0))
return -EINVAL;
if (WARN_ON(atomic_read(&rstc->deassert_count) == 0))
return -EINVAL;
if (atomic_dec_return(&rstc->deassert_count) != 0)
return 0;
/*
* Shared reset controls allow the reset line to be in any state
* after this call, so doing nothing is a valid option.
*/
if (!rstc->rcdev->ops->assert)
return 0;
} else {
/*
* If the reset controller does not implement .assert(), there
* is no way to guarantee that the reset line is asserted after
* this call.
*/
if (!rstc->rcdev->ops->assert)
return -ENOTSUPP;
if (!rstc->acquired) {
WARN(1, "reset %s (ID: %u) is not acquired\n",
rcdev_name(rstc->rcdev), rstc->id);
return -EPERM;
}
}
return rstc->rcdev->ops->assert(rstc->rcdev, rstc->id);
}
EXPORT_SYMBOL_GPL(reset_control_assert);
/**
* reset_control_bulk_assert - asserts the reset lines in order
* @num_rstcs: number of entries in rstcs array
* @rstcs: array of struct reset_control_bulk_data with reset controls set
*
* Assert the reset lines for all provided reset controls, in order.
* If an assertion fails, already asserted resets are deasserted again.
*
* See also: reset_control_assert()
*/
int reset_control_bulk_assert(int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
int ret, i;
for (i = 0; i < num_rstcs; i++) {
ret = reset_control_assert(rstcs[i].rstc);
if (ret)
goto err;
}
return 0;
err:
while (i--)
reset_control_deassert(rstcs[i].rstc);
return ret;
}
EXPORT_SYMBOL_GPL(reset_control_bulk_assert);
/**
* reset_control_deassert - deasserts the reset line
* @rstc: reset controller
*
* After calling this function, the reset is guaranteed to be deasserted.
* Consumers must not use reset_control_reset on shared reset lines when
* reset_control_(de)assert has been used.
*
* If rstc is NULL it is an optional reset and the function will just
* return 0.
*/
int reset_control_deassert(struct reset_control *rstc)
{
if (!rstc)
return 0;
if (WARN_ON(IS_ERR(rstc)))
return -EINVAL;
if (reset_control_is_array(rstc))
return reset_control_array_deassert(rstc_to_array(rstc));
if (rstc->shared) {
if (WARN_ON(atomic_read(&rstc->triggered_count) != 0))
return -EINVAL;
if (atomic_inc_return(&rstc->deassert_count) != 1)
return 0;
} else {
if (!rstc->acquired) {
WARN(1, "reset %s (ID: %u) is not acquired\n",
rcdev_name(rstc->rcdev), rstc->id);
return -EPERM;
}
}
/*
* If the reset controller does not implement .deassert(), we assume
* that it handles self-deasserting reset lines via .reset(). In that
* case, the reset lines are deasserted by default. If that is not the
* case, the reset controller driver should implement .deassert() and
* return -ENOTSUPP.
*/
if (!rstc->rcdev->ops->deassert)
return 0;
return rstc->rcdev->ops->deassert(rstc->rcdev, rstc->id);
}
EXPORT_SYMBOL_GPL(reset_control_deassert);
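/*
 * Typical exclusive assert/deassert bracket around block setup, as a
 * sketch; foo_hw_init() is hypothetical.
 */
static int foo_hw_init(struct reset_control *rstc)
{
	int ret;

	ret = reset_control_assert(rstc);
	if (ret)
		return ret;

	/* ... reprogram the block while it is held in reset ... */

	return reset_control_deassert(rstc);
}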
/**
* reset_control_bulk_deassert - deasserts the reset lines in reverse order
* @num_rstcs: number of entries in rstcs array
* @rstcs: array of struct reset_control_bulk_data with reset controls set
*
* Deassert the reset lines for all provided reset controls, in reverse order.
* If a deassertion fails, already deasserted resets are asserted again.
*
* See also: reset_control_deassert()
*/
int reset_control_bulk_deassert(int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
int ret, i;
for (i = num_rstcs - 1; i >= 0; i--) {
ret = reset_control_deassert(rstcs[i].rstc);
if (ret)
goto err;
}
return 0;
err:
while (i < num_rstcs)
reset_control_assert(rstcs[i++].rstc);
return ret;
}
EXPORT_SYMBOL_GPL(reset_control_bulk_deassert);
/**
* reset_control_status - returns a negative errno if not supported, a
* positive value if the reset line is asserted, or zero if the reset
 * line is not asserted or if rstc is NULL (optional reset).
* @rstc: reset controller
*/
int reset_control_status(struct reset_control *rstc)
{
if (!rstc)
return 0;
if (WARN_ON(IS_ERR(rstc)) || reset_control_is_array(rstc))
return -EINVAL;
if (rstc->rcdev->ops->status)
return rstc->rcdev->ops->status(rstc->rcdev, rstc->id);
return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(reset_control_status);
/**
* reset_control_acquire() - acquires a reset control for exclusive use
* @rstc: reset control
*
* This is used to explicitly acquire a reset control for exclusive use. Note
* that exclusive resets are requested as acquired by default. In order for a
* second consumer to be able to control the reset, the first consumer has to
* release it first. Typically the easiest way to achieve this is to call the
* reset_control_get_exclusive_released() to obtain an instance of the reset
* control. Such reset controls are not acquired by default.
*
* Consumers implementing shared access to an exclusive reset need to follow
* a specific protocol in order to work together. Before consumers can change
* a reset they must acquire exclusive access using reset_control_acquire().
* After they are done operating the reset, they must release exclusive access
 * with a call to reset_control_release(). A consumer is not granted exclusive
 * access to the reset as long as another consumer still holds it.
*
* See also: reset_control_release()
*/
int reset_control_acquire(struct reset_control *rstc)
{
struct reset_control *rc;
if (!rstc)
return 0;
if (WARN_ON(IS_ERR(rstc)))
return -EINVAL;
if (reset_control_is_array(rstc))
return reset_control_array_acquire(rstc_to_array(rstc));
mutex_lock(&reset_list_mutex);
if (rstc->acquired) {
mutex_unlock(&reset_list_mutex);
return 0;
}
list_for_each_entry(rc, &rstc->rcdev->reset_control_head, list) {
if (rstc != rc && rstc->id == rc->id) {
if (rc->acquired) {
mutex_unlock(&reset_list_mutex);
return -EBUSY;
}
}
}
rstc->acquired = true;
mutex_unlock(&reset_list_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(reset_control_acquire);
/**
* reset_control_bulk_acquire - acquires reset controls for exclusive use
* @num_rstcs: number of entries in rstcs array
* @rstcs: array of struct reset_control_bulk_data with reset controls set
*
* This is used to explicitly acquire reset controls requested with
 * reset_control_bulk_get_exclusive_released() for temporary exclusive use.
*
* See also: reset_control_acquire(), reset_control_bulk_release()
*/
int reset_control_bulk_acquire(int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
int ret, i;
for (i = 0; i < num_rstcs; i++) {
ret = reset_control_acquire(rstcs[i].rstc);
if (ret)
goto err;
}
return 0;
err:
while (i--)
reset_control_release(rstcs[i].rstc);
return ret;
}
EXPORT_SYMBOL_GPL(reset_control_bulk_acquire);
/**
* reset_control_release() - releases exclusive access to a reset control
* @rstc: reset control
*
* Releases exclusive access right to a reset control previously obtained by a
* call to reset_control_acquire(). Until a consumer calls this function, no
* other consumers will be granted exclusive access.
*
* See also: reset_control_acquire()
*/
void reset_control_release(struct reset_control *rstc)
{
if (!rstc || WARN_ON(IS_ERR(rstc)))
return;
if (reset_control_is_array(rstc))
reset_control_array_release(rstc_to_array(rstc));
else
rstc->acquired = false;
}
EXPORT_SYMBOL_GPL(reset_control_release);
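/*
 * Sketch of the acquire/release protocol documented above, for drivers
 * that take turns controlling one exclusive line. Each obtains a released
 * control and only touches the line between acquire and release; names
 * are hypothetical.
 */
static int foo_take_turn(struct device *dev)
{
	struct reset_control *rstc;
	int ret;

	rstc = devm_reset_control_get_exclusive_released(dev, NULL);
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	ret = reset_control_acquire(rstc);
	if (ret)
		return ret;	/* -EBUSY while the peer holds the line */

	ret = reset_control_assert(rstc);
	if (!ret)
		ret = reset_control_deassert(rstc);
	reset_control_release(rstc);
	return ret;
}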
/**
* reset_control_bulk_release() - releases exclusive access to reset controls
* @num_rstcs: number of entries in rstcs array
* @rstcs: array of struct reset_control_bulk_data with reset controls set
*
* Releases exclusive access right to reset controls previously obtained by a
* call to reset_control_bulk_acquire().
*
* See also: reset_control_release(), reset_control_bulk_acquire()
*/
void reset_control_bulk_release(int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
int i;
for (i = 0; i < num_rstcs; i++)
reset_control_release(rstcs[i].rstc);
}
EXPORT_SYMBOL_GPL(reset_control_bulk_release);
static struct reset_control *
__reset_control_get_internal(struct reset_controller_dev *rcdev,
unsigned int index, bool shared, bool acquired)
{
struct reset_control *rstc;
lockdep_assert_held(&reset_list_mutex);
list_for_each_entry(rstc, &rcdev->reset_control_head, list) {
if (rstc->id == index) {
/*
* Allow creating a secondary exclusive reset_control
* that is initially not acquired for an already
* controlled reset line.
*/
if (!rstc->shared && !shared && !acquired)
break;
if (WARN_ON(!rstc->shared || !shared))
return ERR_PTR(-EBUSY);
kref_get(&rstc->refcnt);
return rstc;
}
}
rstc = kzalloc(sizeof(*rstc), GFP_KERNEL);
if (!rstc)
return ERR_PTR(-ENOMEM);
if (!try_module_get(rcdev->owner)) {
kfree(rstc);
return ERR_PTR(-ENODEV);
}
rstc->rcdev = rcdev;
list_add(&rstc->list, &rcdev->reset_control_head);
rstc->id = index;
kref_init(&rstc->refcnt);
rstc->acquired = acquired;
rstc->shared = shared;
return rstc;
}
static void __reset_control_release(struct kref *kref)
{
struct reset_control *rstc = container_of(kref, struct reset_control,
refcnt);
lockdep_assert_held(&reset_list_mutex);
module_put(rstc->rcdev->owner);
list_del(&rstc->list);
kfree(rstc);
}
static void __reset_control_put_internal(struct reset_control *rstc)
{
lockdep_assert_held(&reset_list_mutex);
kref_put(&rstc->refcnt, __reset_control_release);
}
struct reset_control *
__of_reset_control_get(struct device_node *node, const char *id, int index,
bool shared, bool optional, bool acquired)
{
struct reset_control *rstc;
struct reset_controller_dev *r, *rcdev;
struct of_phandle_args args;
int rstc_id;
int ret;
if (!node)
return ERR_PTR(-EINVAL);
if (id) {
index = of_property_match_string(node,
"reset-names", id);
if (index == -EILSEQ)
return ERR_PTR(index);
if (index < 0)
return optional ? NULL : ERR_PTR(-ENOENT);
}
ret = of_parse_phandle_with_args(node, "resets", "#reset-cells",
index, &args);
if (ret == -EINVAL)
return ERR_PTR(ret);
if (ret)
return optional ? NULL : ERR_PTR(ret);
mutex_lock(&reset_list_mutex);
rcdev = NULL;
list_for_each_entry(r, &reset_controller_list, list) {
if (args.np == r->of_node) {
rcdev = r;
break;
}
}
if (!rcdev) {
rstc = ERR_PTR(-EPROBE_DEFER);
goto out;
}
if (WARN_ON(args.args_count != rcdev->of_reset_n_cells)) {
rstc = ERR_PTR(-EINVAL);
goto out;
}
rstc_id = rcdev->of_xlate(rcdev, &args);
if (rstc_id < 0) {
rstc = ERR_PTR(rstc_id);
goto out;
}
/* reset_list_mutex also protects the rcdev's reset_control list */
rstc = __reset_control_get_internal(rcdev, rstc_id, shared, acquired);
out:
mutex_unlock(&reset_list_mutex);
of_node_put(args.np);
return rstc;
}
EXPORT_SYMBOL_GPL(__of_reset_control_get);
static struct reset_controller_dev *
__reset_controller_by_name(const char *name)
{
struct reset_controller_dev *rcdev;
lockdep_assert_held(&reset_list_mutex);
list_for_each_entry(rcdev, &reset_controller_list, list) {
if (!rcdev->dev)
continue;
if (!strcmp(name, dev_name(rcdev->dev)))
return rcdev;
}
return NULL;
}
static struct reset_control *
__reset_control_get_from_lookup(struct device *dev, const char *con_id,
bool shared, bool optional, bool acquired)
{
const struct reset_control_lookup *lookup;
struct reset_controller_dev *rcdev;
const char *dev_id = dev_name(dev);
struct reset_control *rstc = NULL;
mutex_lock(&reset_lookup_mutex);
list_for_each_entry(lookup, &reset_lookup_list, list) {
if (strcmp(lookup->dev_id, dev_id))
continue;
if ((!con_id && !lookup->con_id) ||
((con_id && lookup->con_id) &&
!strcmp(con_id, lookup->con_id))) {
mutex_lock(&reset_list_mutex);
rcdev = __reset_controller_by_name(lookup->provider);
if (!rcdev) {
mutex_unlock(&reset_list_mutex);
mutex_unlock(&reset_lookup_mutex);
/* Reset provider may not be ready yet. */
return ERR_PTR(-EPROBE_DEFER);
}
rstc = __reset_control_get_internal(rcdev,
lookup->index,
shared, acquired);
mutex_unlock(&reset_list_mutex);
break;
}
}
mutex_unlock(&reset_lookup_mutex);
if (!rstc)
return optional ? NULL : ERR_PTR(-ENOENT);
return rstc;
}
struct reset_control *__reset_control_get(struct device *dev, const char *id,
int index, bool shared, bool optional,
bool acquired)
{
if (WARN_ON(shared && acquired))
return ERR_PTR(-EINVAL);
if (dev->of_node)
return __of_reset_control_get(dev->of_node, id, index, shared,
optional, acquired);
return __reset_control_get_from_lookup(dev, id, shared, optional,
acquired);
}
EXPORT_SYMBOL_GPL(__reset_control_get);
int __reset_control_bulk_get(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs,
bool shared, bool optional, bool acquired)
{
int ret, i;
for (i = 0; i < num_rstcs; i++) {
rstcs[i].rstc = __reset_control_get(dev, rstcs[i].id, 0,
shared, optional, acquired);
if (IS_ERR(rstcs[i].rstc)) {
ret = PTR_ERR(rstcs[i].rstc);
goto err;
}
}
return 0;
err:
mutex_lock(&reset_list_mutex);
while (i--)
__reset_control_put_internal(rstcs[i].rstc);
mutex_unlock(&reset_list_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(__reset_control_bulk_get);
static void reset_control_array_put(struct reset_control_array *resets)
{
int i;
mutex_lock(&reset_list_mutex);
for (i = 0; i < resets->num_rstcs; i++)
__reset_control_put_internal(resets->rstc[i]);
mutex_unlock(&reset_list_mutex);
kfree(resets);
}
/**
* reset_control_put - free the reset controller
* @rstc: reset controller
*/
void reset_control_put(struct reset_control *rstc)
{
if (IS_ERR_OR_NULL(rstc))
return;
if (reset_control_is_array(rstc)) {
reset_control_array_put(rstc_to_array(rstc));
return;
}
mutex_lock(&reset_list_mutex);
__reset_control_put_internal(rstc);
mutex_unlock(&reset_list_mutex);
}
EXPORT_SYMBOL_GPL(reset_control_put);
/**
* reset_control_bulk_put - free the reset controllers
* @num_rstcs: number of entries in rstcs array
* @rstcs: array of struct reset_control_bulk_data with reset controls set
*/
void reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs)
{
mutex_lock(&reset_list_mutex);
while (num_rstcs--) {
if (IS_ERR_OR_NULL(rstcs[num_rstcs].rstc))
continue;
__reset_control_put_internal(rstcs[num_rstcs].rstc);
}
mutex_unlock(&reset_list_mutex);
}
EXPORT_SYMBOL_GPL(reset_control_bulk_put);
static void devm_reset_control_release(struct device *dev, void *res)
{
reset_control_put(*(struct reset_control **)res);
}
struct reset_control *
__devm_reset_control_get(struct device *dev, const char *id, int index,
bool shared, bool optional, bool acquired)
{
struct reset_control **ptr, *rstc;
ptr = devres_alloc(devm_reset_control_release, sizeof(*ptr),
GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
rstc = __reset_control_get(dev, id, index, shared, optional, acquired);
if (IS_ERR_OR_NULL(rstc)) {
devres_free(ptr);
return rstc;
}
*ptr = rstc;
devres_add(dev, ptr);
return rstc;
}
EXPORT_SYMBOL_GPL(__devm_reset_control_get);
struct reset_control_bulk_devres {
int num_rstcs;
struct reset_control_bulk_data *rstcs;
};
static void devm_reset_control_bulk_release(struct device *dev, void *res)
{
struct reset_control_bulk_devres *devres = res;
reset_control_bulk_put(devres->num_rstcs, devres->rstcs);
}
int __devm_reset_control_bulk_get(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs,
bool shared, bool optional, bool acquired)
{
struct reset_control_bulk_devres *ptr;
int ret;
ptr = devres_alloc(devm_reset_control_bulk_release, sizeof(*ptr),
GFP_KERNEL);
if (!ptr)
return -ENOMEM;
ret = __reset_control_bulk_get(dev, num_rstcs, rstcs, shared, optional, acquired);
if (ret < 0) {
devres_free(ptr);
return ret;
}
ptr->num_rstcs = num_rstcs;
ptr->rstcs = rstcs;
devres_add(dev, ptr);
return 0;
}
EXPORT_SYMBOL_GPL(__devm_reset_control_bulk_get);
/**
* __device_reset - find reset controller associated with the device
* and perform reset
* @dev: device to be reset by the controller
* @optional: whether it is optional to reset the device
*
* Convenience wrapper for __reset_control_get() and reset_control_reset().
* This is useful for the common case of devices with single, dedicated reset
 * lines. For devices with an ACPI handle, the _RST firmware method is invoked.
*/
int __device_reset(struct device *dev, bool optional)
{
struct reset_control *rstc;
int ret;
#ifdef CONFIG_ACPI
acpi_handle handle = ACPI_HANDLE(dev);
if (handle) {
if (!acpi_has_method(handle, "_RST"))
return optional ? 0 : -ENOENT;
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_RST", NULL,
NULL)))
return -EIO;
}
#endif
rstc = __reset_control_get(dev, NULL, 0, 0, optional, true);
if (IS_ERR(rstc))
return PTR_ERR(rstc);
ret = reset_control_reset(rstc);
reset_control_put(rstc);
return ret;
}
EXPORT_SYMBOL_GPL(__device_reset);
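/*
 * Consumers normally reach __device_reset() through the device_reset() and
 * device_reset_optional() wrappers from <linux/reset.h>, e.g.:
 *
 *	err = device_reset(&pdev->dev);
 *	if (err)
 *		return err;
 */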
/*
* APIs to manage an array of reset controls.
*/
/**
* of_reset_control_get_count - Count number of resets available with a device
*
* @node: device node that contains 'resets'.
*
 * Returns the positive reset count on success, or a negative error number
 * on failure or if the count is zero.
*/
static int of_reset_control_get_count(struct device_node *node)
{
int count;
if (!node)
return -EINVAL;
count = of_count_phandle_with_args(node, "resets", "#reset-cells");
if (count == 0)
count = -ENOENT;
return count;
}
/**
* of_reset_control_array_get - Get a list of reset controls using
* device node.
*
* @np: device node for the device that requests the reset controls array
* @shared: whether reset controls are shared or not
* @optional: whether it is optional to get the reset controls
* @acquired: only one reset control may be acquired for a given controller
* and ID
*
* Returns pointer to allocated reset_control on success or error on failure
*/
struct reset_control *
of_reset_control_array_get(struct device_node *np, bool shared, bool optional,
bool acquired)
{
struct reset_control_array *resets;
struct reset_control *rstc;
int num, i;
num = of_reset_control_get_count(np);
if (num < 0)
return optional ? NULL : ERR_PTR(num);
resets = kzalloc(struct_size(resets, rstc, num), GFP_KERNEL);
if (!resets)
return ERR_PTR(-ENOMEM);
for (i = 0; i < num; i++) {
rstc = __of_reset_control_get(np, NULL, i, shared, optional,
acquired);
if (IS_ERR(rstc))
goto err_rst;
resets->rstc[i] = rstc;
}
resets->num_rstcs = num;
resets->base.array = true;
return &resets->base;
err_rst:
mutex_lock(&reset_list_mutex);
while (--i >= 0)
__reset_control_put_internal(resets->rstc[i]);
mutex_unlock(&reset_list_mutex);
kfree(resets);
return rstc;
}
EXPORT_SYMBOL_GPL(of_reset_control_array_get);
/**
* devm_reset_control_array_get - Resource managed reset control array get
*
* @dev: device that requests the list of reset controls
* @shared: whether reset controls are shared or not
* @optional: whether it is optional to get the reset controls
*
* The reset control array APIs are intended for a list of resets
* that just have to be asserted or deasserted, without any
* requirements on the order.
*
* Returns pointer to allocated reset_control on success or error on failure
*/
struct reset_control *
devm_reset_control_array_get(struct device *dev, bool shared, bool optional)
{
struct reset_control **ptr, *rstc;
ptr = devres_alloc(devm_reset_control_release, sizeof(*ptr),
GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
rstc = of_reset_control_array_get(dev->of_node, shared, optional, true);
if (IS_ERR_OR_NULL(rstc)) {
devres_free(ptr);
return rstc;
}
*ptr = rstc;
devres_add(dev, ptr);
return rstc;
}
EXPORT_SYMBOL_GPL(devm_reset_control_array_get);
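/*
 * Sketch of a consumer with several order-insensitive reset lines using
 * the array wrappers from <linux/reset.h>; baz_probe() is hypothetical.
 */
static int baz_probe(struct device *dev)
{
	struct reset_control *rstcs;

	rstcs = devm_reset_control_array_get_exclusive(dev);
	if (IS_ERR(rstcs))
		return PTR_ERR(rstcs);

	/* deasserts every line listed in the "resets" property */
	return reset_control_deassert(rstcs);
}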
static int reset_control_get_count_from_lookup(struct device *dev)
{
const struct reset_control_lookup *lookup;
const char *dev_id;
int count = 0;
if (!dev)
return -EINVAL;
dev_id = dev_name(dev);
mutex_lock(&reset_lookup_mutex);
list_for_each_entry(lookup, &reset_lookup_list, list) {
if (!strcmp(lookup->dev_id, dev_id))
count++;
}
mutex_unlock(&reset_lookup_mutex);
if (count == 0)
count = -ENOENT;
return count;
}
/**
* reset_control_get_count - Count number of resets available with a device
*
* @dev: device for which to return the number of resets
*
 * Returns the positive reset count on success, or a negative error number
 * on failure or if the count is zero.
*/
int reset_control_get_count(struct device *dev)
{
if (dev->of_node)
return of_reset_control_get_count(dev->of_node);
return reset_control_get_count_from_lookup(dev);
}
EXPORT_SYMBOL_GPL(reset_control_get_count);
| linux-master | drivers/reset/core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Delta TN48M CPLD reset driver
*
* Copyright (C) 2021 Sartura Ltd.
*
* Author: Robert Marko <[email protected]>
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>
#include <dt-bindings/reset/delta,tn48m-reset.h>
#define TN48M_RESET_REG 0x10
#define TN48M_RESET_TIMEOUT_US 125000
#define TN48M_RESET_SLEEP_US 10
struct tn48_reset_map {
u8 bit;
};
struct tn48_reset_data {
struct reset_controller_dev rcdev;
struct regmap *regmap;
};
static const struct tn48_reset_map tn48m_resets[] = {
[CPU_88F7040_RESET] = {0},
[CPU_88F6820_RESET] = {1},
[MAC_98DX3265_RESET] = {2},
[PHY_88E1680_RESET] = {4},
[PHY_88E1512_RESET] = {6},
[POE_RESET] = {7},
};
static inline struct tn48_reset_data *to_tn48_reset_data(
struct reset_controller_dev *rcdev)
{
return container_of(rcdev, struct tn48_reset_data, rcdev);
}
static int tn48m_control_reset(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct tn48_reset_data *data = to_tn48_reset_data(rcdev);
unsigned int val;
regmap_update_bits(data->regmap, TN48M_RESET_REG,
BIT(tn48m_resets[id].bit), 0);
return regmap_read_poll_timeout(data->regmap,
TN48M_RESET_REG,
val,
val & BIT(tn48m_resets[id].bit),
TN48M_RESET_SLEEP_US,
TN48M_RESET_TIMEOUT_US);
}
static int tn48m_control_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct tn48_reset_data *data = to_tn48_reset_data(rcdev);
unsigned int regval;
int ret;
	ret = regmap_read(data->regmap, TN48M_RESET_REG, &regval);
if (ret < 0)
return ret;
if (BIT(tn48m_resets[id].bit) & regval)
return 0;
else
return 1;
}
static const struct reset_control_ops tn48_reset_ops = {
.reset = tn48m_control_reset,
.status = tn48m_control_status,
};
static int tn48m_reset_probe(struct platform_device *pdev)
{
struct tn48_reset_data *data;
struct regmap *regmap;
regmap = dev_get_regmap(pdev->dev.parent, NULL);
if (!regmap)
return -ENODEV;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->regmap = regmap;
data->rcdev.owner = THIS_MODULE;
data->rcdev.ops = &tn48_reset_ops;
data->rcdev.nr_resets = ARRAY_SIZE(tn48m_resets);
data->rcdev.of_node = pdev->dev.of_node;
return devm_reset_controller_register(&pdev->dev, &data->rcdev);
}
static const struct of_device_id tn48m_reset_of_match[] = {
{ .compatible = "delta,tn48m-reset" },
{ }
};
MODULE_DEVICE_TABLE(of, tn48m_reset_of_match);
static struct platform_driver tn48m_reset_driver = {
.driver = {
.name = "delta-tn48m-reset",
.of_match_table = tn48m_reset_of_match,
},
.probe = tn48m_reset_probe,
};
module_platform_driver(tn48m_reset_driver);
MODULE_AUTHOR("Robert Marko <[email protected]>");
MODULE_DESCRIPTION("Delta TN48M CPLD reset driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/reset/reset-tn48m.c |
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Amlogic Meson Reset Controller driver
*
* Copyright (c) 2016 BayLibre, SAS.
* Author: Neil Armstrong <[email protected]>
*/
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
#include <linux/types.h>
#define BITS_PER_REG 32
struct meson_reset_param {
int reg_count;
int level_offset;
};
struct meson_reset {
void __iomem *reg_base;
const struct meson_reset_param *param;
struct reset_controller_dev rcdev;
spinlock_t lock;
};
static int meson_reset_reset(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct meson_reset *data =
container_of(rcdev, struct meson_reset, rcdev);
unsigned int bank = id / BITS_PER_REG;
unsigned int offset = id % BITS_PER_REG;
void __iomem *reg_addr = data->reg_base + (bank << 2);
writel(BIT(offset), reg_addr);
return 0;
}
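/*
 * Worked example of the bank math above: id 40 yields bank 1 and offset 8,
 * so BIT(8) is written to reg_base + 0x4 to pulse that line.
 */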
static int meson_reset_level(struct reset_controller_dev *rcdev,
unsigned long id, bool assert)
{
struct meson_reset *data =
container_of(rcdev, struct meson_reset, rcdev);
unsigned int bank = id / BITS_PER_REG;
unsigned int offset = id % BITS_PER_REG;
void __iomem *reg_addr;
unsigned long flags;
u32 reg;
reg_addr = data->reg_base + data->param->level_offset + (bank << 2);
spin_lock_irqsave(&data->lock, flags);
reg = readl(reg_addr);
if (assert)
writel(reg & ~BIT(offset), reg_addr);
else
writel(reg | BIT(offset), reg_addr);
spin_unlock_irqrestore(&data->lock, flags);
return 0;
}
static int meson_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return meson_reset_level(rcdev, id, true);
}
static int meson_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return meson_reset_level(rcdev, id, false);
}
static const struct reset_control_ops meson_reset_ops = {
.reset = meson_reset_reset,
.assert = meson_reset_assert,
.deassert = meson_reset_deassert,
};
static const struct meson_reset_param meson8b_param = {
.reg_count = 8,
.level_offset = 0x7c,
};
static const struct meson_reset_param meson_a1_param = {
.reg_count = 3,
.level_offset = 0x40,
};
static const struct meson_reset_param meson_s4_param = {
.reg_count = 6,
.level_offset = 0x40,
};
static const struct of_device_id meson_reset_dt_ids[] = {
{ .compatible = "amlogic,meson8b-reset", .data = &meson8b_param},
{ .compatible = "amlogic,meson-gxbb-reset", .data = &meson8b_param},
{ .compatible = "amlogic,meson-axg-reset", .data = &meson8b_param},
{ .compatible = "amlogic,meson-a1-reset", .data = &meson_a1_param},
{ .compatible = "amlogic,meson-s4-reset", .data = &meson_s4_param},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, meson_reset_dt_ids);
static int meson_reset_probe(struct platform_device *pdev)
{
struct meson_reset *data;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->reg_base))
return PTR_ERR(data->reg_base);
data->param = of_device_get_match_data(&pdev->dev);
if (!data->param)
return -ENODEV;
spin_lock_init(&data->lock);
data->rcdev.owner = THIS_MODULE;
data->rcdev.nr_resets = data->param->reg_count * BITS_PER_REG;
data->rcdev.ops = &meson_reset_ops;
data->rcdev.of_node = pdev->dev.of_node;
return devm_reset_controller_register(&pdev->dev, &data->rcdev);
}
static struct platform_driver meson_reset_driver = {
.probe = meson_reset_probe,
.driver = {
.name = "meson_reset",
.of_match_table = meson_reset_dt_ids,
},
};
module_platform_driver(meson_reset_driver);
MODULE_DESCRIPTION("Amlogic Meson Reset Controller driver");
MODULE_AUTHOR("Neil Armstrong <[email protected]>");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | drivers/reset/reset-meson.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* TI SYSCON regmap reset driver
*
* Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <[email protected]>
* Suman Anna <[email protected]>
*/
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>
#include <dt-bindings/reset/ti-syscon.h>
/**
* struct ti_syscon_reset_control - reset control structure
* @assert_offset: reset assert control register offset from syscon base
* @assert_bit: reset assert bit in the reset assert control register
* @deassert_offset: reset deassert control register offset from syscon base
* @deassert_bit: reset deassert bit in the reset deassert control register
* @status_offset: reset status register offset from syscon base
* @status_bit: reset status bit in the reset status register
* @flags: reset flag indicating how the (de)assert and status are handled
*/
struct ti_syscon_reset_control {
unsigned int assert_offset;
unsigned int assert_bit;
unsigned int deassert_offset;
unsigned int deassert_bit;
unsigned int status_offset;
unsigned int status_bit;
u32 flags;
};
/**
* struct ti_syscon_reset_data - reset controller information structure
* @rcdev: reset controller entity
* @regmap: regmap handle containing the memory-mapped reset registers
* @controls: array of reset controls
* @nr_controls: number of controls in control array
*/
struct ti_syscon_reset_data {
struct reset_controller_dev rcdev;
struct regmap *regmap;
struct ti_syscon_reset_control *controls;
unsigned int nr_controls;
};
#define to_ti_syscon_reset_data(_rcdev) \
container_of(_rcdev, struct ti_syscon_reset_data, rcdev)
/**
* ti_syscon_reset_assert() - assert device reset
* @rcdev: reset controller entity
* @id: ID of the reset to be asserted
*
* This function implements the reset driver op to assert a device's reset.
* This asserts the reset in a manner prescribed by the reset flags.
*
* Return: 0 for successful request, else a corresponding error value
*/
static int ti_syscon_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct ti_syscon_reset_data *data = to_ti_syscon_reset_data(rcdev);
struct ti_syscon_reset_control *control;
unsigned int mask, value;
if (id >= data->nr_controls)
return -EINVAL;
control = &data->controls[id];
if (control->flags & ASSERT_NONE)
return -ENOTSUPP; /* assert not supported for this reset */
mask = BIT(control->assert_bit);
value = (control->flags & ASSERT_SET) ? mask : 0x0;
return regmap_write_bits(data->regmap, control->assert_offset, mask, value);
}
/**
* ti_syscon_reset_deassert() - deassert device reset
* @rcdev: reset controller entity
* @id: ID of reset to be deasserted
*
* This function implements the reset driver op to deassert a device's reset.
* This deasserts the reset in a manner prescribed by the reset flags.
*
* Return: 0 for successful request, else a corresponding error value
*/
static int ti_syscon_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct ti_syscon_reset_data *data = to_ti_syscon_reset_data(rcdev);
struct ti_syscon_reset_control *control;
unsigned int mask, value;
if (id >= data->nr_controls)
return -EINVAL;
control = &data->controls[id];
if (control->flags & DEASSERT_NONE)
return -ENOTSUPP; /* deassert not supported for this reset */
mask = BIT(control->deassert_bit);
value = (control->flags & DEASSERT_SET) ? mask : 0x0;
return regmap_write_bits(data->regmap, control->deassert_offset, mask, value);
}
/**
* ti_syscon_reset_status() - check device reset status
* @rcdev: reset controller entity
* @id: ID of the reset for which the status is being requested
*
* This function implements the reset driver op to return the status of a
* device's reset.
*
 * Return: 0 if the reset is deasserted, a positive value if it is asserted,
 * else a corresponding error value
*/
static int ti_syscon_reset_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct ti_syscon_reset_data *data = to_ti_syscon_reset_data(rcdev);
struct ti_syscon_reset_control *control;
unsigned int reset_state;
int ret;
if (id >= data->nr_controls)
return -EINVAL;
control = &data->controls[id];
if (control->flags & STATUS_NONE)
return -ENOTSUPP; /* status not supported for this reset */
ret = regmap_read(data->regmap, control->status_offset, &reset_state);
if (ret)
return ret;
return !(reset_state & BIT(control->status_bit)) ==
!(control->flags & STATUS_SET);
}
static const struct reset_control_ops ti_syscon_reset_ops = {
.assert = ti_syscon_reset_assert,
.deassert = ti_syscon_reset_deassert,
.status = ti_syscon_reset_status,
};
static int ti_syscon_reset_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct ti_syscon_reset_data *data;
struct regmap *regmap;
const __be32 *list;
struct ti_syscon_reset_control *controls;
int size, nr_controls, i;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
regmap = syscon_node_to_regmap(np->parent);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
list = of_get_property(np, "ti,reset-bits", &size);
if (!list || (size / sizeof(*list)) % 7 != 0) {
dev_err(dev, "invalid DT reset description\n");
return -EINVAL;
}
nr_controls = (size / sizeof(*list)) / 7;
controls = devm_kcalloc(dev, nr_controls, sizeof(*controls),
GFP_KERNEL);
if (!controls)
return -ENOMEM;
for (i = 0; i < nr_controls; i++) {
controls[i].assert_offset = be32_to_cpup(list++);
controls[i].assert_bit = be32_to_cpup(list++);
controls[i].deassert_offset = be32_to_cpup(list++);
controls[i].deassert_bit = be32_to_cpup(list++);
controls[i].status_offset = be32_to_cpup(list++);
controls[i].status_bit = be32_to_cpup(list++);
controls[i].flags = be32_to_cpup(list++);
}
data->rcdev.ops = &ti_syscon_reset_ops;
data->rcdev.owner = THIS_MODULE;
data->rcdev.of_node = np;
data->rcdev.nr_resets = nr_controls;
data->regmap = regmap;
data->controls = controls;
data->nr_controls = nr_controls;
return devm_reset_controller_register(dev, &data->rcdev);
}
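/*
 * Each control consumes seven cells of the "ti,reset-bits" property, in
 * exactly the order parsed above:
 *
 *	<assert_offset assert_bit deassert_offset deassert_bit
 *	 status_offset status_bit flags>
 *
 * so nr_controls is simply the total cell count divided by seven.
 */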
static const struct of_device_id ti_syscon_reset_of_match[] = {
{ .compatible = "ti,syscon-reset", },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, ti_syscon_reset_of_match);
static struct platform_driver ti_syscon_reset_driver = {
.probe = ti_syscon_reset_probe,
.driver = {
.name = "ti-syscon-reset",
.of_match_table = ti_syscon_reset_of_match,
},
};
module_platform_driver(ti_syscon_reset_driver);
MODULE_AUTHOR("Andrew F. Davis <[email protected]>");
MODULE_AUTHOR("Suman Anna <[email protected]>");
MODULE_DESCRIPTION("TI SYSCON Regmap Reset Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/reset/reset-ti-syscon.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Broadcom STB generic reset controller for SW_INIT style reset controller
*
* Author: Florian Fainelli <[email protected]>
* Copyright (C) 2018 Broadcom
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/types.h>
struct brcmstb_reset {
void __iomem *base;
struct reset_controller_dev rcdev;
};
#define SW_INIT_SET 0x00
#define SW_INIT_CLEAR 0x04
#define SW_INIT_STATUS 0x08
#define SW_INIT_BIT(id) BIT((id) & 0x1f)
#define SW_INIT_BANK(id) ((id) >> 5)
/* A full bank contains extra registers that we are not utilizing but still
* qualify as a single bank.
*/
#define SW_INIT_BANK_SIZE 0x18
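/*
 * Worked example: id 33 gives SW_INIT_BANK(33) == 1 and SW_INIT_BIT(33) ==
 * BIT(1), so its assert write lands at base + 0x18 + SW_INIT_SET.
 */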
static inline
struct brcmstb_reset *to_brcmstb(struct reset_controller_dev *rcdev)
{
return container_of(rcdev, struct brcmstb_reset, rcdev);
}
static int brcmstb_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
unsigned int off = SW_INIT_BANK(id) * SW_INIT_BANK_SIZE;
struct brcmstb_reset *priv = to_brcmstb(rcdev);
writel_relaxed(SW_INIT_BIT(id), priv->base + off + SW_INIT_SET);
return 0;
}
static int brcmstb_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
unsigned int off = SW_INIT_BANK(id) * SW_INIT_BANK_SIZE;
struct brcmstb_reset *priv = to_brcmstb(rcdev);
writel_relaxed(SW_INIT_BIT(id), priv->base + off + SW_INIT_CLEAR);
/* Maximum reset delay after de-asserting a line and seeing block
* operation is typically 14us for the worst case, build some slack
* here.
*/
usleep_range(100, 200);
return 0;
}
static int brcmstb_reset_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
unsigned int off = SW_INIT_BANK(id) * SW_INIT_BANK_SIZE;
struct brcmstb_reset *priv = to_brcmstb(rcdev);
return readl_relaxed(priv->base + off + SW_INIT_STATUS) &
SW_INIT_BIT(id);
}
static const struct reset_control_ops brcmstb_reset_ops = {
.assert = brcmstb_reset_assert,
.deassert = brcmstb_reset_deassert,
.status = brcmstb_reset_status,
};
static int brcmstb_reset_probe(struct platform_device *pdev)
{
struct device *kdev = &pdev->dev;
struct brcmstb_reset *priv;
struct resource *res;
priv = devm_kzalloc(kdev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(kdev, res);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
dev_set_drvdata(kdev, priv);
priv->rcdev.owner = THIS_MODULE;
priv->rcdev.nr_resets = DIV_ROUND_DOWN_ULL(resource_size(res),
SW_INIT_BANK_SIZE) * 32;
priv->rcdev.ops = &brcmstb_reset_ops;
priv->rcdev.of_node = kdev->of_node;
/* Use defaults: 1 cell and simple xlate function */
return devm_reset_controller_register(kdev, &priv->rcdev);
}
static const struct of_device_id brcmstb_reset_of_match[] = {
{ .compatible = "brcm,brcmstb-reset" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, brcmstb_reset_of_match);
static struct platform_driver brcmstb_reset_driver = {
.probe = brcmstb_reset_probe,
.driver = {
.name = "brcmstb-reset",
.of_match_table = brcmstb_reset_of_match,
},
};
module_platform_driver(brcmstb_reset_driver);
MODULE_AUTHOR("Broadcom");
MODULE_DESCRIPTION("Broadcom STB reset controller");
MODULE_LICENSE("GPL");
| linux-master | drivers/reset/reset-brcmstb.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Nuvoton Technology corporation.
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/reset-controller.h>
#include <linux/spinlock.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/of_address.h>
/* NPCM7xx GCR registers */
#define NPCM_MDLR_OFFSET 0x7C
#define NPCM7XX_MDLR_USBD0 BIT(9)
#define NPCM7XX_MDLR_USBD1 BIT(8)
#define NPCM7XX_MDLR_USBD2_4 BIT(21)
#define NPCM7XX_MDLR_USBD5_9 BIT(22)
/* NPCM8xx MDLR bits */
#define NPCM8XX_MDLR_USBD0_3 BIT(9)
#define NPCM8XX_MDLR_USBD4_7 BIT(22)
#define NPCM8XX_MDLR_USBD8 BIT(24)
#define NPCM8XX_MDLR_USBD9 BIT(21)
#define NPCM_USB1PHYCTL_OFFSET 0x140
#define NPCM_USB2PHYCTL_OFFSET 0x144
#define NPCM_USB3PHYCTL_OFFSET 0x148
#define NPCM_USBXPHYCTL_RS BIT(28)
/* NPCM7xx Reset registers */
#define NPCM_SWRSTR 0x14
#define NPCM_SWRST BIT(2)
#define NPCM_IPSRST1 0x20
#define NPCM_IPSRST1_USBD1 BIT(5)
#define NPCM_IPSRST1_USBD2 BIT(8)
#define NPCM_IPSRST1_USBD3 BIT(25)
#define NPCM_IPSRST1_USBD4 BIT(22)
#define NPCM_IPSRST1_USBD5 BIT(23)
#define NPCM_IPSRST1_USBD6 BIT(24)
#define NPCM_IPSRST2 0x24
#define NPCM_IPSRST2_USB_HOST BIT(26)
#define NPCM_IPSRST3 0x34
#define NPCM_IPSRST3_USBD0 BIT(4)
#define NPCM_IPSRST3_USBD7 BIT(5)
#define NPCM_IPSRST3_USBD8 BIT(6)
#define NPCM_IPSRST3_USBD9 BIT(7)
#define NPCM_IPSRST3_USBPHY1 BIT(24)
#define NPCM_IPSRST3_USBPHY2 BIT(25)
#define NPCM_IPSRST4 0x74
#define NPCM_IPSRST4_USBPHY3 BIT(25)
#define NPCM_IPSRST4_USB_HOST2 BIT(31)
#define NPCM_RC_RESETS_PER_REG 32
#define NPCM_MASK_RESETS GENMASK(4, 0)
enum {
BMC_NPCM7XX = 0,
BMC_NPCM8XX,
};
static const u32 npxm7xx_ipsrst[] = {NPCM_IPSRST1, NPCM_IPSRST2, NPCM_IPSRST3};
static const u32 npxm8xx_ipsrst[] = {NPCM_IPSRST1, NPCM_IPSRST2, NPCM_IPSRST3,
NPCM_IPSRST4};
struct npcm_reset_info {
u32 bmc_id;
u32 num_ipsrst;
const u32 *ipsrst;
};
static const struct npcm_reset_info npxm7xx_reset_info[] = {
{.bmc_id = BMC_NPCM7XX, .num_ipsrst = 3, .ipsrst = npxm7xx_ipsrst}};
static const struct npcm_reset_info npxm8xx_reset_info[] = {
{.bmc_id = BMC_NPCM8XX, .num_ipsrst = 4, .ipsrst = npxm8xx_ipsrst}};
struct npcm_rc_data {
struct reset_controller_dev rcdev;
struct notifier_block restart_nb;
const struct npcm_reset_info *info;
struct regmap *gcr_regmap;
u32 sw_reset_number;
void __iomem *base;
spinlock_t lock;
};
#define to_rc_data(p) container_of(p, struct npcm_rc_data, rcdev)
static int npcm_rc_restart(struct notifier_block *nb, unsigned long mode,
void *cmd)
{
struct npcm_rc_data *rc = container_of(nb, struct npcm_rc_data,
restart_nb);
writel(NPCM_SWRST << rc->sw_reset_number, rc->base + NPCM_SWRSTR);
mdelay(1000);
pr_emerg("%s: unable to restart system\n", __func__);
return NOTIFY_DONE;
}
static int npcm_rc_setclear_reset(struct reset_controller_dev *rcdev,
unsigned long id, bool set)
{
struct npcm_rc_data *rc = to_rc_data(rcdev);
unsigned int rst_bit = BIT(id & NPCM_MASK_RESETS);
unsigned int ctrl_offset = id >> 8;
unsigned long flags;
u32 stat;
spin_lock_irqsave(&rc->lock, flags);
stat = readl(rc->base + ctrl_offset);
if (set)
writel(stat | rst_bit, rc->base + ctrl_offset);
else
writel(stat & ~rst_bit, rc->base + ctrl_offset);
spin_unlock_irqrestore(&rc->lock, flags);
return 0;
}
static int npcm_rc_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
return npcm_rc_setclear_reset(rcdev, id, true);
}
static int npcm_rc_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return npcm_rc_setclear_reset(rcdev, id, false);
}
static int npcm_rc_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct npcm_rc_data *rc = to_rc_data(rcdev);
unsigned int rst_bit = BIT(id & NPCM_MASK_RESETS);
unsigned int ctrl_offset = id >> 8;
return (readl(rc->base + ctrl_offset) & rst_bit);
}
static int npcm_reset_xlate(struct reset_controller_dev *rcdev,
const struct of_phandle_args *reset_spec)
{
struct npcm_rc_data *rc = to_rc_data(rcdev);
unsigned int offset, bit;
bool offset_found = false;
int off_num;
offset = reset_spec->args[0];
for (off_num = 0 ; off_num < rc->info->num_ipsrst ; off_num++) {
if (offset == rc->info->ipsrst[off_num]) {
offset_found = true;
break;
}
}
if (!offset_found) {
dev_err(rcdev->dev, "Error reset register (0x%x)\n", offset);
return -EINVAL;
}
bit = reset_spec->args[1];
if (bit >= NPCM_RC_RESETS_PER_REG) {
dev_err(rcdev->dev, "Error reset number (%d)\n", bit);
return -EINVAL;
}
return (offset << 8) | bit;
}
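/*
 * Worked example of the encoding above: a specifier of <0x24 5> (NPCM_IPSRST2,
 * bit 5) translates to id (0x24 << 8) | 5 == 0x2405; the assert/deassert and
 * status ops then recover the register offset as id >> 8 and the bit as
 * id & NPCM_MASK_RESETS.
 */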
static const struct of_device_id npcm_rc_match[] = {
{ .compatible = "nuvoton,npcm750-reset", .data = &npxm7xx_reset_info},
{ .compatible = "nuvoton,npcm845-reset", .data = &npxm8xx_reset_info},
{ }
};
static void npcm_usb_reset_npcm7xx(struct npcm_rc_data *rc)
{
u32 mdlr, iprst1, iprst2, iprst3;
u32 ipsrst1_bits = 0;
u32 ipsrst2_bits = NPCM_IPSRST2_USB_HOST;
u32 ipsrst3_bits = 0;
/* checking which USB device is enabled */
regmap_read(rc->gcr_regmap, NPCM_MDLR_OFFSET, &mdlr);
if (!(mdlr & NPCM7XX_MDLR_USBD0))
ipsrst3_bits |= NPCM_IPSRST3_USBD0;
if (!(mdlr & NPCM7XX_MDLR_USBD1))
ipsrst1_bits |= NPCM_IPSRST1_USBD1;
if (!(mdlr & NPCM7XX_MDLR_USBD2_4))
ipsrst1_bits |= (NPCM_IPSRST1_USBD2 |
NPCM_IPSRST1_USBD3 |
NPCM_IPSRST1_USBD4);
	if (!(mdlr & NPCM7XX_MDLR_USBD5_9)) {
ipsrst1_bits |= (NPCM_IPSRST1_USBD5 |
NPCM_IPSRST1_USBD6);
ipsrst3_bits |= (NPCM_IPSRST3_USBD7 |
NPCM_IPSRST3_USBD8 |
NPCM_IPSRST3_USBD9);
}
/* assert reset USB PHY and USB devices */
iprst1 = readl(rc->base + NPCM_IPSRST1);
iprst2 = readl(rc->base + NPCM_IPSRST2);
iprst3 = readl(rc->base + NPCM_IPSRST3);
iprst1 |= ipsrst1_bits;
iprst2 |= ipsrst2_bits;
iprst3 |= (ipsrst3_bits | NPCM_IPSRST3_USBPHY1 |
NPCM_IPSRST3_USBPHY2);
writel(iprst1, rc->base + NPCM_IPSRST1);
writel(iprst2, rc->base + NPCM_IPSRST2);
writel(iprst3, rc->base + NPCM_IPSRST3);
/* clear USB PHY RS bit */
regmap_update_bits(rc->gcr_regmap, NPCM_USB1PHYCTL_OFFSET,
NPCM_USBXPHYCTL_RS, 0);
regmap_update_bits(rc->gcr_regmap, NPCM_USB2PHYCTL_OFFSET,
NPCM_USBXPHYCTL_RS, 0);
/* deassert reset USB PHY */
iprst3 &= ~(NPCM_IPSRST3_USBPHY1 | NPCM_IPSRST3_USBPHY2);
writel(iprst3, rc->base + NPCM_IPSRST3);
udelay(50);
/* set USB PHY RS bit */
regmap_update_bits(rc->gcr_regmap, NPCM_USB1PHYCTL_OFFSET,
NPCM_USBXPHYCTL_RS, NPCM_USBXPHYCTL_RS);
regmap_update_bits(rc->gcr_regmap, NPCM_USB2PHYCTL_OFFSET,
NPCM_USBXPHYCTL_RS, NPCM_USBXPHYCTL_RS);
	/* deassert reset USB devices */
iprst1 &= ~ipsrst1_bits;
iprst2 &= ~ipsrst2_bits;
iprst3 &= ~ipsrst3_bits;
writel(iprst1, rc->base + NPCM_IPSRST1);
writel(iprst2, rc->base + NPCM_IPSRST2);
writel(iprst3, rc->base + NPCM_IPSRST3);
}
static void npcm_usb_reset_npcm8xx(struct npcm_rc_data *rc)
{
u32 mdlr, iprst1, iprst2, iprst3, iprst4;
u32 ipsrst1_bits = 0;
u32 ipsrst2_bits = NPCM_IPSRST2_USB_HOST;
u32 ipsrst3_bits = 0;
u32 ipsrst4_bits = NPCM_IPSRST4_USB_HOST2 | NPCM_IPSRST4_USBPHY3;
/* checking which USB device is enabled */
regmap_read(rc->gcr_regmap, NPCM_MDLR_OFFSET, &mdlr);
if (!(mdlr & NPCM8XX_MDLR_USBD0_3)) {
ipsrst3_bits |= NPCM_IPSRST3_USBD0;
ipsrst1_bits |= (NPCM_IPSRST1_USBD1 |
NPCM_IPSRST1_USBD2 |
NPCM_IPSRST1_USBD3);
}
if (!(mdlr & NPCM8XX_MDLR_USBD4_7)) {
ipsrst1_bits |= (NPCM_IPSRST1_USBD4 |
NPCM_IPSRST1_USBD5 |
NPCM_IPSRST1_USBD6);
ipsrst3_bits |= NPCM_IPSRST3_USBD7;
}
if (!(mdlr & NPCM8XX_MDLR_USBD8))
ipsrst3_bits |= NPCM_IPSRST3_USBD8;
if (!(mdlr & NPCM8XX_MDLR_USBD9))
ipsrst3_bits |= NPCM_IPSRST3_USBD9;
/* assert reset USB PHY and USB devices */
iprst1 = readl(rc->base + NPCM_IPSRST1);
iprst2 = readl(rc->base + NPCM_IPSRST2);
iprst3 = readl(rc->base + NPCM_IPSRST3);
iprst4 = readl(rc->base + NPCM_IPSRST4);
iprst1 |= ipsrst1_bits;
iprst2 |= ipsrst2_bits;
iprst3 |= (ipsrst3_bits | NPCM_IPSRST3_USBPHY1 |
NPCM_IPSRST3_USBPHY2);
iprst4 |= ipsrst4_bits;
writel(iprst1, rc->base + NPCM_IPSRST1);
writel(iprst2, rc->base + NPCM_IPSRST2);
writel(iprst3, rc->base + NPCM_IPSRST3);
writel(iprst4, rc->base + NPCM_IPSRST4);
/* clear USB PHY RS bit */
regmap_update_bits(rc->gcr_regmap, NPCM_USB1PHYCTL_OFFSET,
NPCM_USBXPHYCTL_RS, 0);
regmap_update_bits(rc->gcr_regmap, NPCM_USB2PHYCTL_OFFSET,
NPCM_USBXPHYCTL_RS, 0);
regmap_update_bits(rc->gcr_regmap, NPCM_USB3PHYCTL_OFFSET,
NPCM_USBXPHYCTL_RS, 0);
/* deassert reset USB PHY */
iprst3 &= ~(NPCM_IPSRST3_USBPHY1 | NPCM_IPSRST3_USBPHY2);
writel(iprst3, rc->base + NPCM_IPSRST3);
iprst4 &= ~NPCM_IPSRST4_USBPHY3;
writel(iprst4, rc->base + NPCM_IPSRST4);
/* set USB PHY RS bit */
regmap_update_bits(rc->gcr_regmap, NPCM_USB1PHYCTL_OFFSET,
NPCM_USBXPHYCTL_RS, NPCM_USBXPHYCTL_RS);
regmap_update_bits(rc->gcr_regmap, NPCM_USB2PHYCTL_OFFSET,
NPCM_USBXPHYCTL_RS, NPCM_USBXPHYCTL_RS);
regmap_update_bits(rc->gcr_regmap, NPCM_USB3PHYCTL_OFFSET,
NPCM_USBXPHYCTL_RS, NPCM_USBXPHYCTL_RS);
	/* deassert reset USB devices */
iprst1 &= ~ipsrst1_bits;
iprst2 &= ~ipsrst2_bits;
iprst3 &= ~ipsrst3_bits;
iprst4 &= ~ipsrst4_bits;
writel(iprst1, rc->base + NPCM_IPSRST1);
writel(iprst2, rc->base + NPCM_IPSRST2);
writel(iprst3, rc->base + NPCM_IPSRST3);
writel(iprst4, rc->base + NPCM_IPSRST4);
}
/*
* The following procedure should be observed in USB PHY, USB device and
* USB host initialization at BMC boot
*/
static int npcm_usb_reset(struct platform_device *pdev, struct npcm_rc_data *rc)
{
struct device *dev = &pdev->dev;
rc->gcr_regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "nuvoton,sysgcr");
if (IS_ERR(rc->gcr_regmap)) {
dev_warn(&pdev->dev, "Failed to find nuvoton,sysgcr property, please update the device tree\n");
dev_info(&pdev->dev, "Using nuvoton,npcm750-gcr for Poleg backward compatibility\n");
rc->gcr_regmap = syscon_regmap_lookup_by_compatible("nuvoton,npcm750-gcr");
if (IS_ERR(rc->gcr_regmap)) {
dev_err(&pdev->dev, "Failed to find nuvoton,npcm750-gcr");
return PTR_ERR(rc->gcr_regmap);
}
}
rc->info = (const struct npcm_reset_info *)
of_match_device(dev->driver->of_match_table, dev)->data;
switch (rc->info->bmc_id) {
case BMC_NPCM7XX:
npcm_usb_reset_npcm7xx(rc);
break;
case BMC_NPCM8XX:
npcm_usb_reset_npcm8xx(rc);
break;
default:
return -ENODEV;
}
return 0;
}
static const struct reset_control_ops npcm_rc_ops = {
.assert = npcm_rc_assert,
.deassert = npcm_rc_deassert,
.status = npcm_rc_status,
};
static int npcm_rc_probe(struct platform_device *pdev)
{
struct npcm_rc_data *rc;
int ret;
rc = devm_kzalloc(&pdev->dev, sizeof(*rc), GFP_KERNEL);
if (!rc)
return -ENOMEM;
rc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rc->base))
return PTR_ERR(rc->base);
spin_lock_init(&rc->lock);
rc->rcdev.owner = THIS_MODULE;
rc->rcdev.ops = &npcm_rc_ops;
rc->rcdev.of_node = pdev->dev.of_node;
rc->rcdev.of_reset_n_cells = 2;
rc->rcdev.of_xlate = npcm_reset_xlate;
ret = devm_reset_controller_register(&pdev->dev, &rc->rcdev);
if (ret) {
dev_err(&pdev->dev, "unable to register device\n");
return ret;
}
if (npcm_usb_reset(pdev, rc))
dev_warn(&pdev->dev, "NPCM USB reset failed, can cause issues with UDC and USB host\n");
if (!of_property_read_u32(pdev->dev.of_node, "nuvoton,sw-reset-number",
&rc->sw_reset_number)) {
if (rc->sw_reset_number && rc->sw_reset_number < 5) {
			rc->restart_nb.priority = 192;
			rc->restart_nb.notifier_call = npcm_rc_restart;
			ret = register_restart_handler(&rc->restart_nb);
if (ret)
dev_warn(&pdev->dev, "failed to register restart handler\n");
}
}
return ret;
}
static struct platform_driver npcm_rc_driver = {
.probe = npcm_rc_probe,
.driver = {
.name = "npcm-reset",
.of_match_table = npcm_rc_match,
.suppress_bind_attrs = true,
},
};
builtin_platform_driver(npcm_rc_driver);
| linux-master | drivers/reset/reset-npcm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2023 Nuvoton Technology Corp.
* Author: Chi-Fang Li <[email protected]>
*/
#include <linux/bits.h>
#include <linux/container_of.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/reset-controller.h>
#include <linux/spinlock.h>
#include <dt-bindings/reset/nuvoton,ma35d1-reset.h>
struct ma35d1_reset_data {
struct reset_controller_dev rcdev;
struct notifier_block restart_handler;
void __iomem *base;
/* protect registers against concurrent read-modify-write */
spinlock_t lock;
};
static const struct {
u32 reg_ofs;
u32 bit;
} ma35d1_reset_map[] = {
[MA35D1_RESET_CHIP] = {0x20, 0},
[MA35D1_RESET_CA35CR0] = {0x20, 1},
[MA35D1_RESET_CA35CR1] = {0x20, 2},
[MA35D1_RESET_CM4] = {0x20, 3},
[MA35D1_RESET_PDMA0] = {0x20, 4},
[MA35D1_RESET_PDMA1] = {0x20, 5},
[MA35D1_RESET_PDMA2] = {0x20, 6},
[MA35D1_RESET_PDMA3] = {0x20, 7},
[MA35D1_RESET_DISP] = {0x20, 9},
[MA35D1_RESET_VCAP0] = {0x20, 10},
[MA35D1_RESET_VCAP1] = {0x20, 11},
[MA35D1_RESET_GFX] = {0x20, 12},
[MA35D1_RESET_VDEC] = {0x20, 13},
[MA35D1_RESET_WHC0] = {0x20, 14},
[MA35D1_RESET_WHC1] = {0x20, 15},
[MA35D1_RESET_GMAC0] = {0x20, 16},
[MA35D1_RESET_GMAC1] = {0x20, 17},
[MA35D1_RESET_HWSEM] = {0x20, 18},
[MA35D1_RESET_EBI] = {0x20, 19},
[MA35D1_RESET_HSUSBH0] = {0x20, 20},
[MA35D1_RESET_HSUSBH1] = {0x20, 21},
[MA35D1_RESET_HSUSBD] = {0x20, 22},
[MA35D1_RESET_USBHL] = {0x20, 23},
[MA35D1_RESET_SDH0] = {0x20, 24},
[MA35D1_RESET_SDH1] = {0x20, 25},
[MA35D1_RESET_NAND] = {0x20, 26},
[MA35D1_RESET_GPIO] = {0x20, 27},
[MA35D1_RESET_MCTLP] = {0x20, 28},
[MA35D1_RESET_MCTLC] = {0x20, 29},
[MA35D1_RESET_DDRPUB] = {0x20, 30},
[MA35D1_RESET_TMR0] = {0x24, 2},
[MA35D1_RESET_TMR1] = {0x24, 3},
[MA35D1_RESET_TMR2] = {0x24, 4},
[MA35D1_RESET_TMR3] = {0x24, 5},
[MA35D1_RESET_I2C0] = {0x24, 8},
[MA35D1_RESET_I2C1] = {0x24, 9},
[MA35D1_RESET_I2C2] = {0x24, 10},
[MA35D1_RESET_I2C3] = {0x24, 11},
[MA35D1_RESET_QSPI0] = {0x24, 12},
[MA35D1_RESET_SPI0] = {0x24, 13},
[MA35D1_RESET_SPI1] = {0x24, 14},
[MA35D1_RESET_SPI2] = {0x24, 15},
[MA35D1_RESET_UART0] = {0x24, 16},
[MA35D1_RESET_UART1] = {0x24, 17},
[MA35D1_RESET_UART2] = {0x24, 18},
[MA35D1_RESET_UART3] = {0x24, 19},
[MA35D1_RESET_UART4] = {0x24, 20},
[MA35D1_RESET_UART5] = {0x24, 21},
[MA35D1_RESET_UART6] = {0x24, 22},
[MA35D1_RESET_UART7] = {0x24, 23},
[MA35D1_RESET_CANFD0] = {0x24, 24},
[MA35D1_RESET_CANFD1] = {0x24, 25},
[MA35D1_RESET_EADC0] = {0x24, 28},
[MA35D1_RESET_I2S0] = {0x24, 29},
[MA35D1_RESET_SC0] = {0x28, 0},
[MA35D1_RESET_SC1] = {0x28, 1},
[MA35D1_RESET_QSPI1] = {0x28, 4},
[MA35D1_RESET_SPI3] = {0x28, 6},
[MA35D1_RESET_EPWM0] = {0x28, 16},
[MA35D1_RESET_EPWM1] = {0x28, 17},
[MA35D1_RESET_QEI0] = {0x28, 22},
[MA35D1_RESET_QEI1] = {0x28, 23},
[MA35D1_RESET_ECAP0] = {0x28, 26},
[MA35D1_RESET_ECAP1] = {0x28, 27},
[MA35D1_RESET_CANFD2] = {0x28, 28},
[MA35D1_RESET_ADC0] = {0x28, 31},
[MA35D1_RESET_TMR4] = {0x2C, 0},
[MA35D1_RESET_TMR5] = {0x2C, 1},
[MA35D1_RESET_TMR6] = {0x2C, 2},
[MA35D1_RESET_TMR7] = {0x2C, 3},
[MA35D1_RESET_TMR8] = {0x2C, 4},
[MA35D1_RESET_TMR9] = {0x2C, 5},
[MA35D1_RESET_TMR10] = {0x2C, 6},
[MA35D1_RESET_TMR11] = {0x2C, 7},
[MA35D1_RESET_UART8] = {0x2C, 8},
[MA35D1_RESET_UART9] = {0x2C, 9},
[MA35D1_RESET_UART10] = {0x2C, 10},
[MA35D1_RESET_UART11] = {0x2C, 11},
[MA35D1_RESET_UART12] = {0x2C, 12},
[MA35D1_RESET_UART13] = {0x2C, 13},
[MA35D1_RESET_UART14] = {0x2C, 14},
[MA35D1_RESET_UART15] = {0x2C, 15},
[MA35D1_RESET_UART16] = {0x2C, 16},
[MA35D1_RESET_I2S1] = {0x2C, 17},
[MA35D1_RESET_I2C4] = {0x2C, 18},
[MA35D1_RESET_I2C5] = {0x2C, 19},
[MA35D1_RESET_EPWM2] = {0x2C, 20},
[MA35D1_RESET_ECAP2] = {0x2C, 21},
[MA35D1_RESET_QEI2] = {0x2C, 22},
[MA35D1_RESET_CANFD3] = {0x2C, 23},
[MA35D1_RESET_KPI] = {0x2C, 24},
[MA35D1_RESET_GIC] = {0x2C, 28},
[MA35D1_RESET_SSMCC] = {0x2C, 30},
[MA35D1_RESET_SSPCC] = {0x2C, 31}
};
static int ma35d1_restart_handler(struct notifier_block *this, unsigned long mode, void *cmd)
{
struct ma35d1_reset_data *data =
container_of(this, struct ma35d1_reset_data, restart_handler);
u32 id = MA35D1_RESET_CHIP;
writel_relaxed(BIT(ma35d1_reset_map[id].bit),
data->base + ma35d1_reset_map[id].reg_ofs);
return 0;
}
static int ma35d1_reset_update(struct reset_controller_dev *rcdev, unsigned long id, bool assert)
{
struct ma35d1_reset_data *data = container_of(rcdev, struct ma35d1_reset_data, rcdev);
unsigned long flags;
u32 reg;
if (WARN_ON_ONCE(id >= ARRAY_SIZE(ma35d1_reset_map)))
return -EINVAL;
spin_lock_irqsave(&data->lock, flags);
reg = readl_relaxed(data->base + ma35d1_reset_map[id].reg_ofs);
if (assert)
reg |= BIT(ma35d1_reset_map[id].bit);
else
reg &= ~(BIT(ma35d1_reset_map[id].bit));
writel_relaxed(reg, data->base + ma35d1_reset_map[id].reg_ofs);
spin_unlock_irqrestore(&data->lock, flags);
return 0;
}
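/*
 * Worked example (illustrative): asserting MA35D1_RESET_UART0, which maps
 * to {0x24, 16} in ma35d1_reset_map, reads the word at base + 0x24, ORs in
 * BIT(16) and writes it back; deasserting clears the same bit again. Only
 * the offset from the mapping table is used, no register name is assumed.
 */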
static int ma35d1_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
return ma35d1_reset_update(rcdev, id, true);
}
static int ma35d1_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
{
return ma35d1_reset_update(rcdev, id, false);
}
static int ma35d1_reset_status(struct reset_controller_dev *rcdev, unsigned long id)
{
struct ma35d1_reset_data *data = container_of(rcdev, struct ma35d1_reset_data, rcdev);
u32 reg;
if (WARN_ON_ONCE(id >= ARRAY_SIZE(ma35d1_reset_map)))
return -EINVAL;
reg = readl_relaxed(data->base + ma35d1_reset_map[id].reg_ofs);
return !!(reg & BIT(ma35d1_reset_map[id].bit));
}
static const struct reset_control_ops ma35d1_reset_ops = {
.assert = ma35d1_reset_assert,
.deassert = ma35d1_reset_deassert,
.status = ma35d1_reset_status,
};
static const struct of_device_id ma35d1_reset_dt_ids[] = {
{ .compatible = "nuvoton,ma35d1-reset" },
{ },
};
static int ma35d1_reset_probe(struct platform_device *pdev)
{
struct ma35d1_reset_data *reset_data;
struct device *dev = &pdev->dev;
int err;
if (!pdev->dev.of_node) {
dev_err(&pdev->dev, "Device tree node not found\n");
return -EINVAL;
}
reset_data = devm_kzalloc(dev, sizeof(*reset_data), GFP_KERNEL);
if (!reset_data)
return -ENOMEM;
reset_data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reset_data->base))
return PTR_ERR(reset_data->base);
reset_data->rcdev.owner = THIS_MODULE;
reset_data->rcdev.nr_resets = MA35D1_RESET_COUNT;
reset_data->rcdev.ops = &ma35d1_reset_ops;
reset_data->rcdev.of_node = dev->of_node;
reset_data->restart_handler.notifier_call = ma35d1_restart_handler;
reset_data->restart_handler.priority = 192;
spin_lock_init(&reset_data->lock);
err = register_restart_handler(&reset_data->restart_handler);
if (err)
dev_warn(&pdev->dev, "failed to register restart handler\n");
return devm_reset_controller_register(dev, &reset_data->rcdev);
}
static struct platform_driver ma35d1_reset_driver = {
.probe = ma35d1_reset_probe,
.driver = {
.name = "ma35d1-reset",
.of_match_table = ma35d1_reset_dt_ids,
},
};
builtin_platform_driver(ma35d1_reset_driver);
| linux-master | drivers/reset/reset-ma35d1.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Reset driver for NXP LPC18xx/43xx Reset Generation Unit (RGU).
*
* Copyright (C) 2015 Joachim Eastwood <[email protected]>
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/reset-controller.h>
#include <linux/spinlock.h>
/* LPC18xx RGU registers */
#define LPC18XX_RGU_CTRL0 0x100
#define LPC18XX_RGU_CTRL1 0x104
#define LPC18XX_RGU_ACTIVE_STATUS0 0x150
#define LPC18XX_RGU_ACTIVE_STATUS1 0x154
#define LPC18XX_RGU_RESETS_PER_REG 32
/* Internal reset outputs */
#define LPC18XX_RGU_CORE_RST 0
#define LPC43XX_RGU_M0SUB_RST 12
#define LPC43XX_RGU_M0APP_RST 56
struct lpc18xx_rgu_data {
struct reset_controller_dev rcdev;
struct notifier_block restart_nb;
struct clk *clk_delay;
struct clk *clk_reg;
void __iomem *base;
spinlock_t lock;
u32 delay_us;
};
#define to_rgu_data(p) container_of(p, struct lpc18xx_rgu_data, rcdev)
static int lpc18xx_rgu_restart(struct notifier_block *nb, unsigned long mode,
void *cmd)
{
struct lpc18xx_rgu_data *rc = container_of(nb, struct lpc18xx_rgu_data,
restart_nb);
writel(BIT(LPC18XX_RGU_CORE_RST), rc->base + LPC18XX_RGU_CTRL0);
mdelay(2000);
pr_emerg("%s: unable to restart system\n", __func__);
return NOTIFY_DONE;
}
/*
* The LPC18xx RGU has mostly self-deasserting resets except for the
* two reset lines going to the internal Cortex-M0 cores.
*
 * To prevent the M0 core resets from accidentally getting deasserted,
 * the status register must be checked and the corresponding bits in the
 * control register set to preserve the state.
*/
static int lpc18xx_rgu_setclear_reset(struct reset_controller_dev *rcdev,
unsigned long id, bool set)
{
struct lpc18xx_rgu_data *rc = to_rgu_data(rcdev);
u32 stat_offset = LPC18XX_RGU_ACTIVE_STATUS0;
u32 ctrl_offset = LPC18XX_RGU_CTRL0;
unsigned long flags;
u32 stat, rst_bit;
stat_offset += (id / LPC18XX_RGU_RESETS_PER_REG) * sizeof(u32);
ctrl_offset += (id / LPC18XX_RGU_RESETS_PER_REG) * sizeof(u32);
rst_bit = 1 << (id % LPC18XX_RGU_RESETS_PER_REG);
spin_lock_irqsave(&rc->lock, flags);
stat = ~readl(rc->base + stat_offset);
if (set)
writel(stat | rst_bit, rc->base + ctrl_offset);
else
writel(stat & ~rst_bit, rc->base + ctrl_offset);
spin_unlock_irqrestore(&rc->lock, flags);
return 0;
}
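/*
 * Worked example (illustrative): for id = LPC43XX_RGU_M0APP_RST (56),
 * id / 32 = 1 and id % 32 = 24, so the function operates on
 * LPC18XX_RGU_ACTIVE_STATUS1 (0x154) and LPC18XX_RGU_CTRL1 (0x104) with
 * rst_bit = BIT(24).
 */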
static int lpc18xx_rgu_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return lpc18xx_rgu_setclear_reset(rcdev, id, true);
}
static int lpc18xx_rgu_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return lpc18xx_rgu_setclear_reset(rcdev, id, false);
}
/* Only M0 cores require explicit reset deassert */
static int lpc18xx_rgu_reset(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct lpc18xx_rgu_data *rc = to_rgu_data(rcdev);
lpc18xx_rgu_assert(rcdev, id);
udelay(rc->delay_us);
switch (id) {
case LPC43XX_RGU_M0SUB_RST:
case LPC43XX_RGU_M0APP_RST:
lpc18xx_rgu_setclear_reset(rcdev, id, false);
}
return 0;
}
static int lpc18xx_rgu_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct lpc18xx_rgu_data *rc = to_rgu_data(rcdev);
u32 bit, offset = LPC18XX_RGU_ACTIVE_STATUS0;
offset += (id / LPC18XX_RGU_RESETS_PER_REG) * sizeof(u32);
bit = 1 << (id % LPC18XX_RGU_RESETS_PER_REG);
return !(readl(rc->base + offset) & bit);
}
static const struct reset_control_ops lpc18xx_rgu_ops = {
.reset = lpc18xx_rgu_reset,
.assert = lpc18xx_rgu_assert,
.deassert = lpc18xx_rgu_deassert,
.status = lpc18xx_rgu_status,
};
static int lpc18xx_rgu_probe(struct platform_device *pdev)
{
struct lpc18xx_rgu_data *rc;
u32 fcclk, firc;
int ret;
rc = devm_kzalloc(&pdev->dev, sizeof(*rc), GFP_KERNEL);
if (!rc)
return -ENOMEM;
rc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rc->base))
return PTR_ERR(rc->base);
rc->clk_reg = devm_clk_get(&pdev->dev, "reg");
if (IS_ERR(rc->clk_reg)) {
dev_err(&pdev->dev, "reg clock not found\n");
return PTR_ERR(rc->clk_reg);
}
rc->clk_delay = devm_clk_get(&pdev->dev, "delay");
if (IS_ERR(rc->clk_delay)) {
dev_err(&pdev->dev, "delay clock not found\n");
return PTR_ERR(rc->clk_delay);
}
ret = clk_prepare_enable(rc->clk_reg);
if (ret) {
dev_err(&pdev->dev, "unable to enable reg clock\n");
return ret;
}
ret = clk_prepare_enable(rc->clk_delay);
if (ret) {
dev_err(&pdev->dev, "unable to enable delay clock\n");
goto dis_clk_reg;
}
fcclk = clk_get_rate(rc->clk_reg) / USEC_PER_SEC;
firc = clk_get_rate(rc->clk_delay) / USEC_PER_SEC;
if (fcclk == 0 || firc == 0)
rc->delay_us = 2;
else
rc->delay_us = DIV_ROUND_UP(fcclk, firc * firc);
spin_lock_init(&rc->lock);
rc->rcdev.owner = THIS_MODULE;
rc->rcdev.nr_resets = 64;
rc->rcdev.ops = &lpc18xx_rgu_ops;
rc->rcdev.of_node = pdev->dev.of_node;
ret = reset_controller_register(&rc->rcdev);
if (ret) {
dev_err(&pdev->dev, "unable to register device\n");
goto dis_clks;
}
	rc->restart_nb.priority = 192;
	rc->restart_nb.notifier_call = lpc18xx_rgu_restart;
ret = register_restart_handler(&rc->restart_nb);
if (ret)
dev_warn(&pdev->dev, "failed to register restart handler\n");
return 0;
dis_clks:
clk_disable_unprepare(rc->clk_delay);
dis_clk_reg:
clk_disable_unprepare(rc->clk_reg);
return ret;
}
static const struct of_device_id lpc18xx_rgu_match[] = {
{ .compatible = "nxp,lpc1850-rgu" },
{ }
};
static struct platform_driver lpc18xx_rgu_driver = {
.probe = lpc18xx_rgu_probe,
.driver = {
.name = "lpc18xx-reset",
.of_match_table = lpc18xx_rgu_match,
.suppress_bind_attrs = true,
},
};
builtin_platform_driver(lpc18xx_rgu_driver);
| linux-master | drivers/reset/reset-lpc18xx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright Intel Corporation (C) 2017. All Rights Reserved
*
* Reset driver for Altera Arria10 MAX5 System Resource Chip
*
* Adapted from reset-socfpga.c
*/
#include <linux/err.h>
#include <linux/mfd/altera-a10sr.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <dt-bindings/reset/altr,rst-mgr-a10sr.h>
struct a10sr_reset {
struct reset_controller_dev rcdev;
struct regmap *regmap;
};
static inline struct a10sr_reset *to_a10sr_rst(struct reset_controller_dev *rc)
{
return container_of(rc, struct a10sr_reset, rcdev);
}
static inline int a10sr_reset_shift(unsigned long id)
{
switch (id) {
case A10SR_RESET_ENET_HPS:
return 1;
case A10SR_RESET_PCIE:
case A10SR_RESET_FILE:
case A10SR_RESET_BQSPI:
case A10SR_RESET_USB:
return id + 11;
default:
return -EINVAL;
}
}
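/*
 * Illustrative note, assuming the usual byte-wide register helpers from
 * mfd/altera-a10sr.h: ALTR_A10SR_REG_OFFSET() selects which byte register
 * holds a given shift and ALTR_A10SR_REG_BIT_MASK() the bit within it, so
 * A10SR_RESET_ENET_HPS (shift 1) lands on bit 1 of ALTR_A10SR_HPS_RST_REG
 * itself.
 */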
static int a10sr_reset_update(struct reset_controller_dev *rcdev,
unsigned long id, bool assert)
{
struct a10sr_reset *a10r = to_a10sr_rst(rcdev);
int offset = a10sr_reset_shift(id);
u8 mask = ALTR_A10SR_REG_BIT_MASK(offset);
int index = ALTR_A10SR_HPS_RST_REG + ALTR_A10SR_REG_OFFSET(offset);
return regmap_update_bits(a10r->regmap, index, mask, assert ? 0 : mask);
}
static int a10sr_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return a10sr_reset_update(rcdev, id, true);
}
static int a10sr_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return a10sr_reset_update(rcdev, id, false);
}
static int a10sr_reset_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
int ret;
struct a10sr_reset *a10r = to_a10sr_rst(rcdev);
int offset = a10sr_reset_shift(id);
u8 mask = ALTR_A10SR_REG_BIT_MASK(offset);
int index = ALTR_A10SR_HPS_RST_REG + ALTR_A10SR_REG_OFFSET(offset);
unsigned int value;
ret = regmap_read(a10r->regmap, index, &value);
if (ret < 0)
return ret;
return !!(value & mask);
}
static const struct reset_control_ops a10sr_reset_ops = {
.assert = a10sr_reset_assert,
.deassert = a10sr_reset_deassert,
.status = a10sr_reset_status,
};
static int a10sr_reset_probe(struct platform_device *pdev)
{
struct altr_a10sr *a10sr = dev_get_drvdata(pdev->dev.parent);
struct a10sr_reset *a10r;
a10r = devm_kzalloc(&pdev->dev, sizeof(struct a10sr_reset),
GFP_KERNEL);
if (!a10r)
return -ENOMEM;
a10r->rcdev.owner = THIS_MODULE;
a10r->rcdev.nr_resets = A10SR_RESET_NUM;
a10r->rcdev.ops = &a10sr_reset_ops;
a10r->rcdev.of_node = pdev->dev.of_node;
a10r->regmap = a10sr->regmap;
platform_set_drvdata(pdev, a10r);
return devm_reset_controller_register(&pdev->dev, &a10r->rcdev);
}
static const struct of_device_id a10sr_reset_of_match[] = {
{ .compatible = "altr,a10sr-reset" },
{ },
};
MODULE_DEVICE_TABLE(of, a10sr_reset_of_match);
static struct platform_driver a10sr_reset_driver = {
.probe = a10sr_reset_probe,
.driver = {
.name = "altr_a10sr_reset",
.of_match_table = a10sr_reset_of_match,
},
};
module_platform_driver(a10sr_reset_driver);
MODULE_AUTHOR("Thor Thayer <[email protected]>");
MODULE_DESCRIPTION("Altera Arria10 System Resource Reset Controller Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/reset/reset-a10sr.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Allwinner SoCs Reset Controller driver
*
* Copyright 2013 Maxime Ripard
*
* Maxime Ripard <[email protected]>
*/
#include <linux/err.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/reset/reset-simple.h>
#include <linux/reset/sunxi.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
static int sunxi_reset_init(struct device_node *np)
{
struct reset_simple_data *data;
struct resource res;
resource_size_t size;
int ret;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
ret = of_address_to_resource(np, 0, &res);
if (ret)
goto err_alloc;
size = resource_size(&res);
if (!request_mem_region(res.start, size, np->name)) {
ret = -EBUSY;
goto err_alloc;
}
data->membase = ioremap(res.start, size);
if (!data->membase) {
ret = -ENOMEM;
		goto err_release;
}
spin_lock_init(&data->lock);
data->rcdev.owner = THIS_MODULE;
data->rcdev.nr_resets = size * 8;
data->rcdev.ops = &reset_simple_ops;
data->rcdev.of_node = np;
data->active_low = true;
return reset_controller_register(&data->rcdev);
err_release:
	release_mem_region(res.start, size);
err_alloc:
kfree(data);
return ret;
};
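/*
 * Sizing example (illustrative): nr_resets is derived from the register
 * window, one reset line per bit, so a 12-byte (0x0c) reg region exposes
 * 12 * 8 = 96 lines. The per-bank arithmetic itself is handled by
 * reset_simple_ops.
 */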
/*
 * These are the reset controllers we need to initialize early on in
* our system, before we can even think of using a regular device
* driver for it.
* The controllers that we can register through the regular device
* model are handled by the simple reset driver directly.
*/
static const struct of_device_id sunxi_early_reset_dt_ids[] __initconst = {
{ .compatible = "allwinner,sun6i-a31-ahb1-reset", },
{ /* sentinel */ },
};
void __init sun6i_reset_init(void)
{
struct device_node *np;
for_each_matching_node(np, sunxi_early_reset_dt_ids)
sunxi_reset_init(np);
}
| linux-master | drivers/reset/reset-sunxi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 The Linux Foundation. All rights reserved.
*/
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>
#include <dt-bindings/reset/qcom,sdm845-pdc.h>
#define RPMH_SDM845_PDC_SYNC_RESET 0x100
#define RPMH_SC7280_PDC_SYNC_RESET 0x1000
struct qcom_pdc_reset_map {
u8 bit;
};
struct qcom_pdc_reset_desc {
const struct qcom_pdc_reset_map *resets;
size_t num_resets;
unsigned int offset;
};
struct qcom_pdc_reset_data {
struct reset_controller_dev rcdev;
struct regmap *regmap;
const struct qcom_pdc_reset_desc *desc;
};
static const struct regmap_config pdc_regmap_config = {
.name = "pdc-reset",
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x20000,
.fast_io = true,
};
static const struct qcom_pdc_reset_map sdm845_pdc_resets[] = {
[PDC_APPS_SYNC_RESET] = {0},
[PDC_SP_SYNC_RESET] = {1},
[PDC_AUDIO_SYNC_RESET] = {2},
[PDC_SENSORS_SYNC_RESET] = {3},
[PDC_AOP_SYNC_RESET] = {4},
[PDC_DEBUG_SYNC_RESET] = {5},
[PDC_GPU_SYNC_RESET] = {6},
[PDC_DISPLAY_SYNC_RESET] = {7},
[PDC_COMPUTE_SYNC_RESET] = {8},
[PDC_MODEM_SYNC_RESET] = {9},
};
static const struct qcom_pdc_reset_desc sdm845_pdc_reset_desc = {
.resets = sdm845_pdc_resets,
.num_resets = ARRAY_SIZE(sdm845_pdc_resets),
.offset = RPMH_SDM845_PDC_SYNC_RESET,
};
static const struct qcom_pdc_reset_map sc7280_pdc_resets[] = {
[PDC_APPS_SYNC_RESET] = {0},
[PDC_SP_SYNC_RESET] = {1},
[PDC_AUDIO_SYNC_RESET] = {2},
[PDC_SENSORS_SYNC_RESET] = {3},
[PDC_AOP_SYNC_RESET] = {4},
[PDC_DEBUG_SYNC_RESET] = {5},
[PDC_GPU_SYNC_RESET] = {6},
[PDC_DISPLAY_SYNC_RESET] = {7},
[PDC_COMPUTE_SYNC_RESET] = {8},
[PDC_MODEM_SYNC_RESET] = {9},
[PDC_WLAN_RF_SYNC_RESET] = {10},
[PDC_WPSS_SYNC_RESET] = {11},
};
static const struct qcom_pdc_reset_desc sc7280_pdc_reset_desc = {
.resets = sc7280_pdc_resets,
.num_resets = ARRAY_SIZE(sc7280_pdc_resets),
.offset = RPMH_SC7280_PDC_SYNC_RESET,
};
static inline struct qcom_pdc_reset_data *to_qcom_pdc_reset_data(
struct reset_controller_dev *rcdev)
{
return container_of(rcdev, struct qcom_pdc_reset_data, rcdev);
}
static int qcom_pdc_control_assert(struct reset_controller_dev *rcdev,
unsigned long idx)
{
struct qcom_pdc_reset_data *data = to_qcom_pdc_reset_data(rcdev);
u32 mask = BIT(data->desc->resets[idx].bit);
return regmap_update_bits(data->regmap, data->desc->offset, mask, mask);
}
static int qcom_pdc_control_deassert(struct reset_controller_dev *rcdev,
unsigned long idx)
{
struct qcom_pdc_reset_data *data = to_qcom_pdc_reset_data(rcdev);
u32 mask = BIT(data->desc->resets[idx].bit);
return regmap_update_bits(data->regmap, data->desc->offset, mask, 0);
}
static const struct reset_control_ops qcom_pdc_reset_ops = {
.assert = qcom_pdc_control_assert,
.deassert = qcom_pdc_control_deassert,
};
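/*
 * Illustrative consumer usage (hypothetical node label, not part of this
 * driver): a DT consumer references a line by its binding index, e.g.
 *
 *	resets = <&pdc_reset PDC_MODEM_SYNC_RESET>;
 *
 * and toggles it through the reset framework with reset_control_assert()
 * and reset_control_deassert().
 */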
static int qcom_pdc_reset_probe(struct platform_device *pdev)
{
const struct qcom_pdc_reset_desc *desc;
struct qcom_pdc_reset_data *data;
struct device *dev = &pdev->dev;
void __iomem *base;
struct resource *res;
desc = device_get_match_data(&pdev->dev);
if (!desc)
return -EINVAL;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->desc = desc;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
data->regmap = devm_regmap_init_mmio(dev, base, &pdc_regmap_config);
if (IS_ERR(data->regmap)) {
dev_err(dev, "Unable to initialize regmap\n");
return PTR_ERR(data->regmap);
}
data->rcdev.owner = THIS_MODULE;
data->rcdev.ops = &qcom_pdc_reset_ops;
data->rcdev.nr_resets = desc->num_resets;
data->rcdev.of_node = dev->of_node;
return devm_reset_controller_register(dev, &data->rcdev);
}
static const struct of_device_id qcom_pdc_reset_of_match[] = {
{ .compatible = "qcom,sc7280-pdc-global", .data = &sc7280_pdc_reset_desc },
{ .compatible = "qcom,sdm845-pdc-global", .data = &sdm845_pdc_reset_desc },
{}
};
MODULE_DEVICE_TABLE(of, qcom_pdc_reset_of_match);
static struct platform_driver qcom_pdc_reset_driver = {
.probe = qcom_pdc_reset_probe,
.driver = {
.name = "qcom_pdc_reset",
.of_match_table = qcom_pdc_reset_of_match,
},
};
module_platform_driver(qcom_pdc_reset_driver);
MODULE_DESCRIPTION("Qualcomm PDC Reset Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/reset/reset-qcom-pdc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* PolarFire SoC (MPFS) Peripheral Clock Reset Controller
*
* Author: Conor Dooley <[email protected]>
* Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
*
*/
#include <linux/auxiliary_bus.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <dt-bindings/clock/microchip,mpfs-clock.h>
#include <soc/microchip/mpfs.h>
/*
 * The ENVM reset is the lowest bit in the register. The CLK_FOO defines from
 * the dt binding are reused as reset indices to make things easier to
 * configure, so this accounts for the offset of 3 between the two numberings.
*/
#define MPFS_PERIPH_OFFSET CLK_ENVM
#define MPFS_NUM_RESETS 30u
#define MPFS_SLEEP_MIN_US 100
#define MPFS_SLEEP_MAX_US 200
/* block concurrent access to the soft reset register */
static DEFINE_SPINLOCK(mpfs_reset_lock);
/*
* Peripheral clock resets
*/
static int mpfs_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
unsigned long flags;
u32 reg;
spin_lock_irqsave(&mpfs_reset_lock, flags);
reg = mpfs_reset_read(rcdev->dev);
reg |= BIT(id);
mpfs_reset_write(rcdev->dev, reg);
spin_unlock_irqrestore(&mpfs_reset_lock, flags);
return 0;
}
static int mpfs_deassert(struct reset_controller_dev *rcdev, unsigned long id)
{
unsigned long flags;
u32 reg;
spin_lock_irqsave(&mpfs_reset_lock, flags);
reg = mpfs_reset_read(rcdev->dev);
reg &= ~BIT(id);
mpfs_reset_write(rcdev->dev, reg);
spin_unlock_irqrestore(&mpfs_reset_lock, flags);
return 0;
}
static int mpfs_status(struct reset_controller_dev *rcdev, unsigned long id)
{
u32 reg = mpfs_reset_read(rcdev->dev);
/*
* It is safe to return here as MPFS_NUM_RESETS makes sure the sign bit
* is never hit.
*/
return (reg & BIT(id));
}
static int mpfs_reset(struct reset_controller_dev *rcdev, unsigned long id)
{
mpfs_assert(rcdev, id);
usleep_range(MPFS_SLEEP_MIN_US, MPFS_SLEEP_MAX_US);
mpfs_deassert(rcdev, id);
return 0;
}
static const struct reset_control_ops mpfs_reset_ops = {
.reset = mpfs_reset,
.assert = mpfs_assert,
.deassert = mpfs_deassert,
.status = mpfs_status,
};
static int mpfs_reset_xlate(struct reset_controller_dev *rcdev,
const struct of_phandle_args *reset_spec)
{
unsigned int index = reset_spec->args[0];
/*
* CLK_RESERVED does not map to a clock, but it does map to a reset,
 * so it has to be accounted for here. It is the reset for the FPGA
 * fabric, so refuse any request for it rather than resetting the fabric.
*/
if (index == CLK_RESERVED) {
dev_err(rcdev->dev, "Resetting the fabric is not supported\n");
return -EINVAL;
}
if (index < MPFS_PERIPH_OFFSET || index >= (MPFS_PERIPH_OFFSET + rcdev->nr_resets)) {
dev_err(rcdev->dev, "Invalid reset index %u\n", index);
return -EINVAL;
}
return index - MPFS_PERIPH_OFFSET;
}
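/*
 * Xlate example (illustrative): a specifier of CLK_ENVM translates to line
 * 0 (CLK_ENVM - MPFS_PERIPH_OFFSET), while CLK_RESERVED is rejected above
 * so the fabric can never be reset through this controller.
 */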
static int mpfs_reset_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
struct device *dev = &adev->dev;
struct reset_controller_dev *rcdev;
rcdev = devm_kzalloc(dev, sizeof(*rcdev), GFP_KERNEL);
if (!rcdev)
return -ENOMEM;
rcdev->dev = dev;
rcdev->dev->parent = dev->parent;
rcdev->ops = &mpfs_reset_ops;
rcdev->of_node = dev->parent->of_node;
rcdev->of_reset_n_cells = 1;
rcdev->of_xlate = mpfs_reset_xlate;
rcdev->nr_resets = MPFS_NUM_RESETS;
return devm_reset_controller_register(dev, rcdev);
}
static const struct auxiliary_device_id mpfs_reset_ids[] = {
{
.name = "clk_mpfs.reset-mpfs",
},
{ }
};
MODULE_DEVICE_TABLE(auxiliary, mpfs_reset_ids);
static struct auxiliary_driver mpfs_reset_driver = {
.probe = mpfs_reset_probe,
.id_table = mpfs_reset_ids,
};
module_auxiliary_driver(mpfs_reset_driver);
MODULE_DESCRIPTION("Microchip PolarFire SoC Reset Driver");
MODULE_AUTHOR("Conor Dooley <[email protected]>");
MODULE_IMPORT_NS(MCHP_CLK_MPFS);
| linux-master | drivers/reset/reset-mpfs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017, Impinj, Inc.
*
* i.MX7 System Reset Controller (SRC) driver
*
* Author: Andrey Smirnov <[email protected]>
*/
#include <linux/delay.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/regmap.h>
#include <dt-bindings/reset/imx7-reset.h>
#include <dt-bindings/reset/imx8mq-reset.h>
#include <dt-bindings/reset/imx8mp-reset.h>
struct imx7_src_signal {
unsigned int offset, bit;
};
struct imx7_src_variant {
const struct imx7_src_signal *signals;
unsigned int signals_num;
struct reset_control_ops ops;
};
struct imx7_src {
struct reset_controller_dev rcdev;
struct regmap *regmap;
const struct imx7_src_signal *signals;
};
enum imx7_src_registers {
SRC_A7RCR0 = 0x0004,
SRC_M4RCR = 0x000c,
SRC_ERCR = 0x0014,
SRC_HSICPHY_RCR = 0x001c,
SRC_USBOPHY1_RCR = 0x0020,
SRC_USBOPHY2_RCR = 0x0024,
SRC_MIPIPHY_RCR = 0x0028,
SRC_PCIEPHY_RCR = 0x002c,
SRC_DDRC_RCR = 0x1000,
};
static int imx7_reset_update(struct imx7_src *imx7src,
unsigned long id, unsigned int value)
{
const struct imx7_src_signal *signal = &imx7src->signals[id];
return regmap_update_bits(imx7src->regmap,
signal->offset, signal->bit, value);
}
static const struct imx7_src_signal imx7_src_signals[IMX7_RESET_NUM] = {
[IMX7_RESET_A7_CORE_POR_RESET0] = { SRC_A7RCR0, BIT(0) },
[IMX7_RESET_A7_CORE_POR_RESET1] = { SRC_A7RCR0, BIT(1) },
[IMX7_RESET_A7_CORE_RESET0] = { SRC_A7RCR0, BIT(4) },
[IMX7_RESET_A7_CORE_RESET1] = { SRC_A7RCR0, BIT(5) },
[IMX7_RESET_A7_DBG_RESET0] = { SRC_A7RCR0, BIT(8) },
[IMX7_RESET_A7_DBG_RESET1] = { SRC_A7RCR0, BIT(9) },
[IMX7_RESET_A7_ETM_RESET0] = { SRC_A7RCR0, BIT(12) },
[IMX7_RESET_A7_ETM_RESET1] = { SRC_A7RCR0, BIT(13) },
[IMX7_RESET_A7_SOC_DBG_RESET] = { SRC_A7RCR0, BIT(20) },
[IMX7_RESET_A7_L2RESET] = { SRC_A7RCR0, BIT(21) },
[IMX7_RESET_SW_M4C_RST] = { SRC_M4RCR, BIT(1) },
[IMX7_RESET_SW_M4P_RST] = { SRC_M4RCR, BIT(2) },
[IMX7_RESET_EIM_RST] = { SRC_ERCR, BIT(0) },
[IMX7_RESET_HSICPHY_PORT_RST] = { SRC_HSICPHY_RCR, BIT(1) },
[IMX7_RESET_USBPHY1_POR] = { SRC_USBOPHY1_RCR, BIT(0) },
[IMX7_RESET_USBPHY1_PORT_RST] = { SRC_USBOPHY1_RCR, BIT(1) },
[IMX7_RESET_USBPHY2_POR] = { SRC_USBOPHY2_RCR, BIT(0) },
[IMX7_RESET_USBPHY2_PORT_RST] = { SRC_USBOPHY2_RCR, BIT(1) },
[IMX7_RESET_MIPI_PHY_MRST] = { SRC_MIPIPHY_RCR, BIT(1) },
[IMX7_RESET_MIPI_PHY_SRST] = { SRC_MIPIPHY_RCR, BIT(2) },
[IMX7_RESET_PCIEPHY] = { SRC_PCIEPHY_RCR, BIT(2) | BIT(1) },
[IMX7_RESET_PCIEPHY_PERST] = { SRC_PCIEPHY_RCR, BIT(3) },
[IMX7_RESET_PCIE_CTRL_APPS_EN] = { SRC_PCIEPHY_RCR, BIT(6) },
[IMX7_RESET_PCIE_CTRL_APPS_TURNOFF] = { SRC_PCIEPHY_RCR, BIT(11) },
[IMX7_RESET_DDRC_PRST] = { SRC_DDRC_RCR, BIT(0) },
[IMX7_RESET_DDRC_CORE_RST] = { SRC_DDRC_RCR, BIT(1) },
};
static struct imx7_src *to_imx7_src(struct reset_controller_dev *rcdev)
{
return container_of(rcdev, struct imx7_src, rcdev);
}
static int imx7_reset_set(struct reset_controller_dev *rcdev,
unsigned long id, bool assert)
{
struct imx7_src *imx7src = to_imx7_src(rcdev);
const unsigned int bit = imx7src->signals[id].bit;
unsigned int value = assert ? bit : 0;
switch (id) {
case IMX7_RESET_PCIEPHY:
/*
* wait for more than 10us to release phy g_rst and
* btnrst
*/
if (!assert)
udelay(10);
break;
case IMX7_RESET_PCIE_CTRL_APPS_EN:
value = assert ? 0 : bit;
break;
}
return imx7_reset_update(imx7src, id, value);
}
static int imx7_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return imx7_reset_set(rcdev, id, true);
}
static int imx7_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return imx7_reset_set(rcdev, id, false);
}
static const struct imx7_src_variant variant_imx7 = {
.signals = imx7_src_signals,
.signals_num = ARRAY_SIZE(imx7_src_signals),
.ops = {
.assert = imx7_reset_assert,
.deassert = imx7_reset_deassert,
},
};
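/*
 * Illustrative consumer usage (hypothetical consumer node, not part of this
 * driver): a PCIe PHY would reference its lines by binding index, e.g.
 *
 *	resets = <&src IMX7_RESET_PCIEPHY>, <&src IMX7_RESET_PCIEPHY_PERST>;
 *
 * Deasserting IMX7_RESET_PCIEPHY then incurs the 10us settling delay
 * handled in imx7_reset_set() above.
 */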
enum imx8mq_src_registers {
SRC_A53RCR0 = 0x0004,
SRC_HDMI_RCR = 0x0030,
SRC_DISP_RCR = 0x0034,
SRC_GPU_RCR = 0x0040,
SRC_VPU_RCR = 0x0044,
SRC_PCIE2_RCR = 0x0048,
SRC_MIPIPHY1_RCR = 0x004c,
SRC_MIPIPHY2_RCR = 0x0050,
SRC_DDRC2_RCR = 0x1004,
};
enum imx8mp_src_registers {
SRC_SUPERMIX_RCR = 0x0018,
SRC_AUDIOMIX_RCR = 0x001c,
SRC_MLMIX_RCR = 0x0028,
SRC_GPU2D_RCR = 0x0038,
SRC_GPU3D_RCR = 0x003c,
SRC_VPU_G1_RCR = 0x0048,
SRC_VPU_G2_RCR = 0x004c,
SRC_VPUVC8KE_RCR = 0x0050,
SRC_NOC_RCR = 0x0054,
};
static const struct imx7_src_signal imx8mq_src_signals[IMX8MQ_RESET_NUM] = {
[IMX8MQ_RESET_A53_CORE_POR_RESET0] = { SRC_A53RCR0, BIT(0) },
[IMX8MQ_RESET_A53_CORE_POR_RESET1] = { SRC_A53RCR0, BIT(1) },
[IMX8MQ_RESET_A53_CORE_POR_RESET2] = { SRC_A53RCR0, BIT(2) },
[IMX8MQ_RESET_A53_CORE_POR_RESET3] = { SRC_A53RCR0, BIT(3) },
[IMX8MQ_RESET_A53_CORE_RESET0] = { SRC_A53RCR0, BIT(4) },
[IMX8MQ_RESET_A53_CORE_RESET1] = { SRC_A53RCR0, BIT(5) },
[IMX8MQ_RESET_A53_CORE_RESET2] = { SRC_A53RCR0, BIT(6) },
[IMX8MQ_RESET_A53_CORE_RESET3] = { SRC_A53RCR0, BIT(7) },
[IMX8MQ_RESET_A53_DBG_RESET0] = { SRC_A53RCR0, BIT(8) },
[IMX8MQ_RESET_A53_DBG_RESET1] = { SRC_A53RCR0, BIT(9) },
[IMX8MQ_RESET_A53_DBG_RESET2] = { SRC_A53RCR0, BIT(10) },
[IMX8MQ_RESET_A53_DBG_RESET3] = { SRC_A53RCR0, BIT(11) },
[IMX8MQ_RESET_A53_ETM_RESET0] = { SRC_A53RCR0, BIT(12) },
[IMX8MQ_RESET_A53_ETM_RESET1] = { SRC_A53RCR0, BIT(13) },
[IMX8MQ_RESET_A53_ETM_RESET2] = { SRC_A53RCR0, BIT(14) },
[IMX8MQ_RESET_A53_ETM_RESET3] = { SRC_A53RCR0, BIT(15) },
[IMX8MQ_RESET_A53_SOC_DBG_RESET] = { SRC_A53RCR0, BIT(20) },
[IMX8MQ_RESET_A53_L2RESET] = { SRC_A53RCR0, BIT(21) },
[IMX8MQ_RESET_SW_NON_SCLR_M4C_RST] = { SRC_M4RCR, BIT(0) },
[IMX8MQ_RESET_SW_M4C_RST] = { SRC_M4RCR, BIT(1) },
[IMX8MQ_RESET_SW_M4P_RST] = { SRC_M4RCR, BIT(2) },
[IMX8MQ_RESET_M4_ENABLE] = { SRC_M4RCR, BIT(3) },
[IMX8MQ_RESET_OTG1_PHY_RESET] = { SRC_USBOPHY1_RCR, BIT(0) },
[IMX8MQ_RESET_OTG2_PHY_RESET] = { SRC_USBOPHY2_RCR, BIT(0) },
[IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N] = { SRC_MIPIPHY_RCR, BIT(1) },
[IMX8MQ_RESET_MIPI_DSI_RESET_N] = { SRC_MIPIPHY_RCR, BIT(2) },
[IMX8MQ_RESET_MIPI_DSI_DPI_RESET_N] = { SRC_MIPIPHY_RCR, BIT(3) },
[IMX8MQ_RESET_MIPI_DSI_ESC_RESET_N] = { SRC_MIPIPHY_RCR, BIT(4) },
[IMX8MQ_RESET_MIPI_DSI_PCLK_RESET_N] = { SRC_MIPIPHY_RCR, BIT(5) },
[IMX8MQ_RESET_PCIEPHY] = { SRC_PCIEPHY_RCR,
BIT(2) | BIT(1) },
[IMX8MQ_RESET_PCIEPHY_PERST] = { SRC_PCIEPHY_RCR, BIT(3) },
[IMX8MQ_RESET_PCIE_CTRL_APPS_EN] = { SRC_PCIEPHY_RCR, BIT(6) },
[IMX8MQ_RESET_PCIE_CTRL_APPS_TURNOFF] = { SRC_PCIEPHY_RCR, BIT(11) },
[IMX8MQ_RESET_HDMI_PHY_APB_RESET] = { SRC_HDMI_RCR, BIT(0) },
[IMX8MQ_RESET_DISP_RESET] = { SRC_DISP_RCR, BIT(0) },
[IMX8MQ_RESET_GPU_RESET] = { SRC_GPU_RCR, BIT(0) },
[IMX8MQ_RESET_VPU_RESET] = { SRC_VPU_RCR, BIT(0) },
[IMX8MQ_RESET_PCIEPHY2] = { SRC_PCIE2_RCR,
BIT(2) | BIT(1) },
[IMX8MQ_RESET_PCIEPHY2_PERST] = { SRC_PCIE2_RCR, BIT(3) },
[IMX8MQ_RESET_PCIE2_CTRL_APPS_EN] = { SRC_PCIE2_RCR, BIT(6) },
[IMX8MQ_RESET_PCIE2_CTRL_APPS_TURNOFF] = { SRC_PCIE2_RCR, BIT(11) },
[IMX8MQ_RESET_MIPI_CSI1_CORE_RESET] = { SRC_MIPIPHY1_RCR, BIT(0) },
[IMX8MQ_RESET_MIPI_CSI1_PHY_REF_RESET] = { SRC_MIPIPHY1_RCR, BIT(1) },
[IMX8MQ_RESET_MIPI_CSI1_ESC_RESET] = { SRC_MIPIPHY1_RCR, BIT(2) },
[IMX8MQ_RESET_MIPI_CSI2_CORE_RESET] = { SRC_MIPIPHY2_RCR, BIT(0) },
[IMX8MQ_RESET_MIPI_CSI2_PHY_REF_RESET] = { SRC_MIPIPHY2_RCR, BIT(1) },
[IMX8MQ_RESET_MIPI_CSI2_ESC_RESET] = { SRC_MIPIPHY2_RCR, BIT(2) },
[IMX8MQ_RESET_DDRC1_PRST] = { SRC_DDRC_RCR, BIT(0) },
[IMX8MQ_RESET_DDRC1_CORE_RESET] = { SRC_DDRC_RCR, BIT(1) },
[IMX8MQ_RESET_DDRC1_PHY_RESET] = { SRC_DDRC_RCR, BIT(2) },
[IMX8MQ_RESET_DDRC2_PHY_RESET] = { SRC_DDRC2_RCR, BIT(0) },
[IMX8MQ_RESET_DDRC2_CORE_RESET] = { SRC_DDRC2_RCR, BIT(1) },
[IMX8MQ_RESET_DDRC2_PRST] = { SRC_DDRC2_RCR, BIT(2) },
};
static int imx8mq_reset_set(struct reset_controller_dev *rcdev,
unsigned long id, bool assert)
{
struct imx7_src *imx7src = to_imx7_src(rcdev);
const unsigned int bit = imx7src->signals[id].bit;
unsigned int value = assert ? bit : 0;
switch (id) {
case IMX8MQ_RESET_PCIEPHY:
case IMX8MQ_RESET_PCIEPHY2:
/*
* wait for more than 10us to release phy g_rst and
* btnrst
*/
if (!assert)
udelay(10);
break;
case IMX8MQ_RESET_PCIE_CTRL_APPS_EN:
case IMX8MQ_RESET_PCIE2_CTRL_APPS_EN:
case IMX8MQ_RESET_MIPI_DSI_PCLK_RESET_N:
case IMX8MQ_RESET_MIPI_DSI_ESC_RESET_N:
case IMX8MQ_RESET_MIPI_DSI_DPI_RESET_N:
case IMX8MQ_RESET_MIPI_DSI_RESET_N:
case IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N:
case IMX8MQ_RESET_M4_ENABLE:
value = assert ? 0 : bit;
break;
}
return imx7_reset_update(imx7src, id, value);
}
static int imx8mq_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return imx8mq_reset_set(rcdev, id, true);
}
static int imx8mq_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return imx8mq_reset_set(rcdev, id, false);
}
static const struct imx7_src_variant variant_imx8mq = {
.signals = imx8mq_src_signals,
.signals_num = ARRAY_SIZE(imx8mq_src_signals),
.ops = {
.assert = imx8mq_reset_assert,
.deassert = imx8mq_reset_deassert,
},
};
static const struct imx7_src_signal imx8mp_src_signals[IMX8MP_RESET_NUM] = {
[IMX8MP_RESET_A53_CORE_POR_RESET0] = { SRC_A53RCR0, BIT(0) },
[IMX8MP_RESET_A53_CORE_POR_RESET1] = { SRC_A53RCR0, BIT(1) },
[IMX8MP_RESET_A53_CORE_POR_RESET2] = { SRC_A53RCR0, BIT(2) },
[IMX8MP_RESET_A53_CORE_POR_RESET3] = { SRC_A53RCR0, BIT(3) },
[IMX8MP_RESET_A53_CORE_RESET0] = { SRC_A53RCR0, BIT(4) },
[IMX8MP_RESET_A53_CORE_RESET1] = { SRC_A53RCR0, BIT(5) },
[IMX8MP_RESET_A53_CORE_RESET2] = { SRC_A53RCR0, BIT(6) },
[IMX8MP_RESET_A53_CORE_RESET3] = { SRC_A53RCR0, BIT(7) },
[IMX8MP_RESET_A53_DBG_RESET0] = { SRC_A53RCR0, BIT(8) },
[IMX8MP_RESET_A53_DBG_RESET1] = { SRC_A53RCR0, BIT(9) },
[IMX8MP_RESET_A53_DBG_RESET2] = { SRC_A53RCR0, BIT(10) },
[IMX8MP_RESET_A53_DBG_RESET3] = { SRC_A53RCR0, BIT(11) },
[IMX8MP_RESET_A53_ETM_RESET0] = { SRC_A53RCR0, BIT(12) },
[IMX8MP_RESET_A53_ETM_RESET1] = { SRC_A53RCR0, BIT(13) },
[IMX8MP_RESET_A53_ETM_RESET2] = { SRC_A53RCR0, BIT(14) },
[IMX8MP_RESET_A53_ETM_RESET3] = { SRC_A53RCR0, BIT(15) },
[IMX8MP_RESET_A53_SOC_DBG_RESET] = { SRC_A53RCR0, BIT(20) },
[IMX8MP_RESET_A53_L2RESET] = { SRC_A53RCR0, BIT(21) },
[IMX8MP_RESET_SW_NON_SCLR_M7C_RST] = { SRC_M4RCR, BIT(0) },
[IMX8MP_RESET_OTG1_PHY_RESET] = { SRC_USBOPHY1_RCR, BIT(0) },
[IMX8MP_RESET_OTG2_PHY_RESET] = { SRC_USBOPHY2_RCR, BIT(0) },
[IMX8MP_RESET_SUPERMIX_RESET] = { SRC_SUPERMIX_RCR, BIT(0) },
[IMX8MP_RESET_AUDIOMIX_RESET] = { SRC_AUDIOMIX_RCR, BIT(0) },
[IMX8MP_RESET_MLMIX_RESET] = { SRC_MLMIX_RCR, BIT(0) },
[IMX8MP_RESET_PCIEPHY] = { SRC_PCIEPHY_RCR, BIT(2) },
[IMX8MP_RESET_PCIEPHY_PERST] = { SRC_PCIEPHY_RCR, BIT(3) },
[IMX8MP_RESET_PCIE_CTRL_APPS_EN] = { SRC_PCIEPHY_RCR, BIT(6) },
[IMX8MP_RESET_PCIE_CTRL_APPS_TURNOFF] = { SRC_PCIEPHY_RCR, BIT(11) },
[IMX8MP_RESET_HDMI_PHY_APB_RESET] = { SRC_HDMI_RCR, BIT(0) },
[IMX8MP_RESET_MEDIA_RESET] = { SRC_DISP_RCR, BIT(0) },
[IMX8MP_RESET_GPU2D_RESET] = { SRC_GPU2D_RCR, BIT(0) },
[IMX8MP_RESET_GPU3D_RESET] = { SRC_GPU3D_RCR, BIT(0) },
[IMX8MP_RESET_GPU_RESET] = { SRC_GPU_RCR, BIT(0) },
[IMX8MP_RESET_VPU_RESET] = { SRC_VPU_RCR, BIT(0) },
[IMX8MP_RESET_VPU_G1_RESET] = { SRC_VPU_G1_RCR, BIT(0) },
[IMX8MP_RESET_VPU_G2_RESET] = { SRC_VPU_G2_RCR, BIT(0) },
[IMX8MP_RESET_VPUVC8KE_RESET] = { SRC_VPUVC8KE_RCR, BIT(0) },
[IMX8MP_RESET_NOC_RESET] = { SRC_NOC_RCR, BIT(0) },
};
static int imx8mp_reset_set(struct reset_controller_dev *rcdev,
unsigned long id, bool assert)
{
struct imx7_src *imx7src = to_imx7_src(rcdev);
const unsigned int bit = imx7src->signals[id].bit;
unsigned int value = assert ? bit : 0;
switch (id) {
case IMX8MP_RESET_PCIEPHY:
/*
* wait for more than 10us to release phy g_rst and
* btnrst
*/
if (!assert)
udelay(10);
break;
case IMX8MP_RESET_PCIE_CTRL_APPS_EN:
case IMX8MP_RESET_PCIEPHY_PERST:
value = assert ? 0 : bit;
break;
}
return imx7_reset_update(imx7src, id, value);
}
static int imx8mp_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return imx8mp_reset_set(rcdev, id, true);
}
static int imx8mp_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return imx8mp_reset_set(rcdev, id, false);
}
static const struct imx7_src_variant variant_imx8mp = {
.signals = imx8mp_src_signals,
.signals_num = ARRAY_SIZE(imx8mp_src_signals),
.ops = {
.assert = imx8mp_reset_assert,
.deassert = imx8mp_reset_deassert,
},
};
static int imx7_reset_probe(struct platform_device *pdev)
{
struct imx7_src *imx7src;
struct device *dev = &pdev->dev;
struct regmap_config config = { .name = "src" };
const struct imx7_src_variant *variant = of_device_get_match_data(dev);
imx7src = devm_kzalloc(dev, sizeof(*imx7src), GFP_KERNEL);
if (!imx7src)
return -ENOMEM;
imx7src->signals = variant->signals;
imx7src->regmap = syscon_node_to_regmap(dev->of_node);
if (IS_ERR(imx7src->regmap)) {
		dev_err(dev, "Unable to get imx7-src regmap\n");
return PTR_ERR(imx7src->regmap);
}
regmap_attach_dev(dev, imx7src->regmap, &config);
imx7src->rcdev.owner = THIS_MODULE;
imx7src->rcdev.nr_resets = variant->signals_num;
imx7src->rcdev.ops = &variant->ops;
imx7src->rcdev.of_node = dev->of_node;
return devm_reset_controller_register(dev, &imx7src->rcdev);
}
static const struct of_device_id imx7_reset_dt_ids[] = {
{ .compatible = "fsl,imx7d-src", .data = &variant_imx7 },
{ .compatible = "fsl,imx8mq-src", .data = &variant_imx8mq },
{ .compatible = "fsl,imx8mp-src", .data = &variant_imx8mp },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, imx7_reset_dt_ids);
static struct platform_driver imx7_reset_driver = {
.probe = imx7_reset_probe,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = imx7_reset_dt_ids,
},
};
module_platform_driver(imx7_reset_driver);
MODULE_AUTHOR("Andrey Smirnov <[email protected]>");
MODULE_DESCRIPTION("NXP i.MX7 reset driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/reset/reset-imx7.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2018 Xilinx, Inc.
*
*/
#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/firmware/xlnx-zynqmp.h>
#define ZYNQMP_NR_RESETS (ZYNQMP_PM_RESET_END - ZYNQMP_PM_RESET_START)
#define ZYNQMP_RESET_ID ZYNQMP_PM_RESET_START
#define VERSAL_NR_RESETS 95
#define VERSAL_NET_NR_RESETS 176
struct zynqmp_reset_soc_data {
u32 reset_id;
u32 num_resets;
};
struct zynqmp_reset_data {
struct reset_controller_dev rcdev;
const struct zynqmp_reset_soc_data *data;
};
static inline struct zynqmp_reset_data *
to_zynqmp_reset_data(struct reset_controller_dev *rcdev)
{
return container_of(rcdev, struct zynqmp_reset_data, rcdev);
}
static int zynqmp_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
return zynqmp_pm_reset_assert(priv->data->reset_id + id,
PM_RESET_ACTION_ASSERT);
}
static int zynqmp_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
return zynqmp_pm_reset_assert(priv->data->reset_id + id,
PM_RESET_ACTION_RELEASE);
}
static int zynqmp_reset_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
int err;
u32 val;
err = zynqmp_pm_reset_get_status(priv->data->reset_id + id, &val);
if (err)
return err;
return val;
}
static int zynqmp_reset_reset(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
return zynqmp_pm_reset_assert(priv->data->reset_id + id,
PM_RESET_ACTION_PULSE);
}
static int zynqmp_reset_of_xlate(struct reset_controller_dev *rcdev,
const struct of_phandle_args *reset_spec)
{
return reset_spec->args[0];
}
static const struct zynqmp_reset_soc_data zynqmp_reset_data = {
.reset_id = ZYNQMP_RESET_ID,
.num_resets = ZYNQMP_NR_RESETS,
};
static const struct zynqmp_reset_soc_data versal_reset_data = {
.reset_id = 0,
.num_resets = VERSAL_NR_RESETS,
};
static const struct zynqmp_reset_soc_data versal_net_reset_data = {
.reset_id = 0,
.num_resets = VERSAL_NET_NR_RESETS,
};
static const struct reset_control_ops zynqmp_reset_ops = {
.reset = zynqmp_reset_reset,
.assert = zynqmp_reset_assert,
.deassert = zynqmp_reset_deassert,
.status = zynqmp_reset_status,
};
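/*
 * Illustrative consumer usage (hypothetical index macro, not part of this
 * driver): with the single-cell xlate above, a consumer passes the firmware
 * reset index directly, e.g.
 *
 *	resets = <&zynqmp_reset ZYNQMP_RESET_SD0>;
 *
 * which this driver forwards to the platform firmware as reset_id + index.
 */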
static int zynqmp_reset_probe(struct platform_device *pdev)
{
struct zynqmp_reset_data *priv;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->data = of_device_get_match_data(&pdev->dev);
if (!priv->data)
return -EINVAL;
priv->rcdev.ops = &zynqmp_reset_ops;
priv->rcdev.owner = THIS_MODULE;
priv->rcdev.of_node = pdev->dev.of_node;
priv->rcdev.nr_resets = priv->data->num_resets;
priv->rcdev.of_reset_n_cells = 1;
priv->rcdev.of_xlate = zynqmp_reset_of_xlate;
return devm_reset_controller_register(&pdev->dev, &priv->rcdev);
}
static const struct of_device_id zynqmp_reset_dt_ids[] = {
{ .compatible = "xlnx,zynqmp-reset", .data = &zynqmp_reset_data, },
{ .compatible = "xlnx,versal-reset", .data = &versal_reset_data, },
{ .compatible = "xlnx,versal-net-reset", .data = &versal_net_reset_data, },
{ /* sentinel */ },
};
static struct platform_driver zynqmp_reset_driver = {
.probe = zynqmp_reset_probe,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = zynqmp_reset_dt_ids,
},
};
static int __init zynqmp_reset_init(void)
{
return platform_driver_register(&zynqmp_reset_driver);
}
arch_initcall(zynqmp_reset_init);
| linux-master | drivers/reset/reset-zynqmp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015, National Instruments Corp.
*
* Xilinx Zynq Reset controller driver
*
* Author: Moritz Fischer <[email protected]>
*/
#include <linux/err.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/regmap.h>
#include <linux/types.h>
struct zynq_reset_data {
struct regmap *slcr;
struct reset_controller_dev rcdev;
u32 offset;
};
#define to_zynq_reset_data(p) \
container_of((p), struct zynq_reset_data, rcdev)
static int zynq_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct zynq_reset_data *priv = to_zynq_reset_data(rcdev);
int bank = id / BITS_PER_LONG;
int offset = id % BITS_PER_LONG;
pr_debug("%s: %s reset bank %u offset %u\n", KBUILD_MODNAME, __func__,
bank, offset);
return regmap_update_bits(priv->slcr,
priv->offset + (bank * 4),
BIT(offset),
BIT(offset));
}
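/*
 * Worked example (illustrative, assuming BITS_PER_LONG == 32 on Zynq): for
 * id = 70, bank = 2 and offset = 6, so the write hits BIT(6) of the SLCR
 * register at priv->offset + 8.
 */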
static int zynq_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct zynq_reset_data *priv = to_zynq_reset_data(rcdev);
int bank = id / BITS_PER_LONG;
int offset = id % BITS_PER_LONG;
pr_debug("%s: %s reset bank %u offset %u\n", KBUILD_MODNAME, __func__,
bank, offset);
return regmap_update_bits(priv->slcr,
priv->offset + (bank * 4),
BIT(offset),
~BIT(offset));
}
static int zynq_reset_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct zynq_reset_data *priv = to_zynq_reset_data(rcdev);
int bank = id / BITS_PER_LONG;
int offset = id % BITS_PER_LONG;
int ret;
u32 reg;
pr_debug("%s: %s reset bank %u offset %u\n", KBUILD_MODNAME, __func__,
bank, offset);
	ret = regmap_read(priv->slcr, priv->offset + (bank * 4), &reg);
if (ret)
return ret;
return !!(reg & BIT(offset));
}
static const struct reset_control_ops zynq_reset_ops = {
.assert = zynq_reset_assert,
.deassert = zynq_reset_deassert,
.status = zynq_reset_status,
};
static int zynq_reset_probe(struct platform_device *pdev)
{
struct resource *res;
struct zynq_reset_data *priv;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->slcr = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"syscon");
if (IS_ERR(priv->slcr)) {
		dev_err(&pdev->dev, "unable to get zynq-slcr regmap\n");
return PTR_ERR(priv->slcr);
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "missing IO resource\n");
return -ENODEV;
}
priv->offset = res->start;
priv->rcdev.owner = THIS_MODULE;
priv->rcdev.nr_resets = resource_size(res) / 4 * BITS_PER_LONG;
priv->rcdev.ops = &zynq_reset_ops;
priv->rcdev.of_node = pdev->dev.of_node;
return devm_reset_controller_register(&pdev->dev, &priv->rcdev);
}
static const struct of_device_id zynq_reset_dt_ids[] = {
{ .compatible = "xlnx,zynq-reset", },
{ /* sentinel */ },
};
static struct platform_driver zynq_reset_driver = {
.probe = zynq_reset_probe,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = zynq_reset_dt_ids,
},
};
builtin_platform_driver(zynq_reset_driver);
| linux-master | drivers/reset/reset-zynq.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* TI TPS380x Supply Voltage Supervisor and Reset Controller Driver
*
* Copyright (C) 2022 Pengutronix, Marco Felsch <[email protected]>
*
* Based on Simple Reset Controller Driver
*
* Copyright (C) 2017 Pengutronix, Philipp Zabel <[email protected]>
*/
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/reset-controller.h>
struct tps380x_reset {
struct reset_controller_dev rcdev;
struct gpio_desc *reset_gpio;
unsigned int reset_ms;
};
struct tps380x_reset_devdata {
unsigned int min_reset_ms;
unsigned int typ_reset_ms;
unsigned int max_reset_ms;
};
static inline
struct tps380x_reset *to_tps380x_reset(struct reset_controller_dev *rcdev)
{
return container_of(rcdev, struct tps380x_reset, rcdev);
}
static int
tps380x_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
struct tps380x_reset *tps380x = to_tps380x_reset(rcdev);
gpiod_set_value_cansleep(tps380x->reset_gpio, 1);
return 0;
}
static int
tps380x_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
{
struct tps380x_reset *tps380x = to_tps380x_reset(rcdev);
gpiod_set_value_cansleep(tps380x->reset_gpio, 0);
msleep(tps380x->reset_ms);
return 0;
}
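/*
 * Timing note (illustrative): reset_ms is initialised to the part's
 * worst-case reset window (280 ms for the TPS3801, per the devdata below),
 * so by the time deassert returns the supervisor has released the
 * downstream reset line.
 */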
static const struct reset_control_ops reset_tps380x_ops = {
.assert = tps380x_reset_assert,
.deassert = tps380x_reset_deassert,
};
static int tps380x_reset_of_xlate(struct reset_controller_dev *rcdev,
const struct of_phandle_args *reset_spec)
{
/* No special handling needed, we have only one reset line per device */
return 0;
}
static int tps380x_reset_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct tps380x_reset_devdata *devdata;
struct tps380x_reset *tps380x;
devdata = device_get_match_data(dev);
if (!devdata)
return -EINVAL;
tps380x = devm_kzalloc(dev, sizeof(*tps380x), GFP_KERNEL);
if (!tps380x)
return -ENOMEM;
tps380x->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(tps380x->reset_gpio))
return dev_err_probe(dev, PTR_ERR(tps380x->reset_gpio),
"Failed to get GPIO\n");
tps380x->reset_ms = devdata->max_reset_ms;
tps380x->rcdev.ops = &reset_tps380x_ops;
tps380x->rcdev.owner = THIS_MODULE;
tps380x->rcdev.dev = dev;
tps380x->rcdev.of_node = dev->of_node;
tps380x->rcdev.of_reset_n_cells = 0;
tps380x->rcdev.of_xlate = tps380x_reset_of_xlate;
tps380x->rcdev.nr_resets = 1;
return devm_reset_controller_register(dev, &tps380x->rcdev);
}
static const struct tps380x_reset_devdata tps3801_reset_data = {
.min_reset_ms = 120,
.typ_reset_ms = 200,
.max_reset_ms = 280,
};
static const struct of_device_id tps380x_reset_dt_ids[] = {
{ .compatible = "ti,tps3801", .data = &tps3801_reset_data },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, tps380x_reset_dt_ids);
static struct platform_driver tps380x_reset_driver = {
.probe = tps380x_reset_probe,
.driver = {
.name = "tps380x-reset",
.of_match_table = tps380x_reset_dt_ids,
},
};
module_platform_driver(tps380x_reset_driver);
MODULE_AUTHOR("Marco Felsch <[email protected]>");
MODULE_DESCRIPTION("TI TPS380x Supply Voltage Supervisor and Reset Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/reset/reset-tps380x.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Texas Instrument's System Control Interface (TI-SCI) reset driver
*
* Copyright (C) 2015-2017 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <[email protected]>
*/
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/soc/ti/ti_sci_protocol.h>
/**
* struct ti_sci_reset_control - reset control structure
* @dev_id: SoC-specific device identifier
* @reset_mask: reset mask to use for toggling reset
* @lock: synchronize reset_mask read-modify-writes
*/
struct ti_sci_reset_control {
u32 dev_id;
u32 reset_mask;
struct mutex lock;
};
/**
* struct ti_sci_reset_data - reset controller information structure
* @rcdev: reset controller entity
* @dev: reset controller device pointer
* @sci: TI SCI handle used for communication with system controller
* @idr: idr structure for mapping ids to reset control structures
*/
struct ti_sci_reset_data {
struct reset_controller_dev rcdev;
struct device *dev;
const struct ti_sci_handle *sci;
struct idr idr;
};
#define to_ti_sci_reset_data(p) \
container_of((p), struct ti_sci_reset_data, rcdev)
/**
* ti_sci_reset_set() - program a device's reset
* @rcdev: reset controller entity
* @id: ID of the reset to toggle
* @assert: boolean flag to indicate assert or deassert
*
* This is a common internal function used to assert or deassert a device's
* reset using the TI SCI protocol. The device's reset is asserted if the
* @assert argument is true, or deasserted if @assert argument is false.
* The mechanism itself is a read-modify-write procedure, the current device
* reset register is read using a TI SCI device operation, the new value is
* set or un-set using the reset's mask, and the new reset value written by
* using another TI SCI device operation.
*
* Return: 0 for successful request, else a corresponding error value
*/
static int ti_sci_reset_set(struct reset_controller_dev *rcdev,
unsigned long id, bool assert)
{
struct ti_sci_reset_data *data = to_ti_sci_reset_data(rcdev);
const struct ti_sci_handle *sci = data->sci;
const struct ti_sci_dev_ops *dev_ops = &sci->ops.dev_ops;
struct ti_sci_reset_control *control;
u32 reset_state;
int ret;
control = idr_find(&data->idr, id);
if (!control)
return -EINVAL;
mutex_lock(&control->lock);
ret = dev_ops->get_device_resets(sci, control->dev_id, &reset_state);
if (ret)
goto out;
if (assert)
reset_state |= control->reset_mask;
else
reset_state &= ~control->reset_mask;
ret = dev_ops->set_device_resets(sci, control->dev_id, reset_state);
out:
mutex_unlock(&control->lock);
return ret;
}
/**
* ti_sci_reset_assert() - assert device reset
* @rcdev: reset controller entity
* @id: ID of the reset to be asserted
*
* This function implements the reset driver op to assert a device's reset
* using the TI SCI protocol. This invokes the function ti_sci_reset_set()
* with the corresponding parameters as passed in, but with the @assert
* argument set to true for asserting the reset.
*
* Return: 0 for successful request, else a corresponding error value
*/
static int ti_sci_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return ti_sci_reset_set(rcdev, id, true);
}
/**
* ti_sci_reset_deassert() - deassert device reset
* @rcdev: reset controller entity
* @id: ID of the reset to be deasserted
*
* This function implements the reset driver op to deassert a device's reset
* using the TI SCI protocol. This invokes the function ti_sci_reset_set()
* with the corresponding parameters as passed in, but with the @assert
* argument set to false for deasserting the reset.
*
* Return: 0 for successful request, else a corresponding error value
*/
static int ti_sci_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return ti_sci_reset_set(rcdev, id, false);
}
/**
* ti_sci_reset_status() - check device reset status
* @rcdev: reset controller entity
* @id: ID of reset to be checked
*
* This function implements the reset driver op to return the status of a
* device's reset using the TI SCI protocol. The reset register value is read
* by invoking the TI SCI device operation .get_device_resets(), and the
* status of the specific reset is extracted and returned using this reset's
* reset mask.
*
* Return: 0 if reset is deasserted, or a non-zero value if reset is asserted
*/
static int ti_sci_reset_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct ti_sci_reset_data *data = to_ti_sci_reset_data(rcdev);
const struct ti_sci_handle *sci = data->sci;
const struct ti_sci_dev_ops *dev_ops = &sci->ops.dev_ops;
struct ti_sci_reset_control *control;
u32 reset_state;
int ret;
control = idr_find(&data->idr, id);
if (!control)
return -EINVAL;
ret = dev_ops->get_device_resets(sci, control->dev_id, &reset_state);
if (ret)
return ret;
return reset_state & control->reset_mask;
}
static const struct reset_control_ops ti_sci_reset_ops = {
.assert = ti_sci_reset_assert,
.deassert = ti_sci_reset_deassert,
.status = ti_sci_reset_status,
};
/**
* ti_sci_reset_of_xlate() - translate a set of OF arguments to a reset ID
* @rcdev: reset controller entity
* @reset_spec: OF reset argument specifier
*
* This function performs the translation of the reset argument specifier
* values defined in a reset consumer device node. The function allocates a
 * reset control structure for that device reset, which the driver then
 * uses for performing any reset functions on that reset. An idr entry is
 * allocated to map the returned reset ID to the reset control structure,
 * and the driver uses the idr to do reset lookups.
*
* Return: 0 for successful request, else a corresponding error value
*/
static int ti_sci_reset_of_xlate(struct reset_controller_dev *rcdev,
const struct of_phandle_args *reset_spec)
{
struct ti_sci_reset_data *data = to_ti_sci_reset_data(rcdev);
struct ti_sci_reset_control *control;
if (WARN_ON(reset_spec->args_count != rcdev->of_reset_n_cells))
return -EINVAL;
control = devm_kzalloc(data->dev, sizeof(*control), GFP_KERNEL);
if (!control)
return -ENOMEM;
control->dev_id = reset_spec->args[0];
control->reset_mask = reset_spec->args[1];
mutex_init(&control->lock);
return idr_alloc(&data->idr, control, 0, 0, GFP_KERNEL);
}
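/*
 * Xlate example (illustrative, hypothetical values): a consumer specifier
 * such as
 *
 *	resets = <&k3_reset 61 0x1>;
 *
 * yields dev_id = 61 and reset_mask = 0x1, and the idr hands back a fresh
 * reset ID mapped to that control structure.
 */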
static const struct of_device_id ti_sci_reset_of_match[] = {
{ .compatible = "ti,sci-reset", },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, ti_sci_reset_of_match);
static int ti_sci_reset_probe(struct platform_device *pdev)
{
struct ti_sci_reset_data *data;
if (!pdev->dev.of_node)
return -ENODEV;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->sci = devm_ti_sci_get_handle(&pdev->dev);
if (IS_ERR(data->sci))
return PTR_ERR(data->sci);
data->rcdev.ops = &ti_sci_reset_ops;
data->rcdev.owner = THIS_MODULE;
data->rcdev.of_node = pdev->dev.of_node;
data->rcdev.of_reset_n_cells = 2;
data->rcdev.of_xlate = ti_sci_reset_of_xlate;
data->dev = &pdev->dev;
idr_init(&data->idr);
platform_set_drvdata(pdev, data);
return reset_controller_register(&data->rcdev);
}
static int ti_sci_reset_remove(struct platform_device *pdev)
{
struct ti_sci_reset_data *data = platform_get_drvdata(pdev);
reset_controller_unregister(&data->rcdev);
idr_destroy(&data->idr);
return 0;
}
static struct platform_driver ti_sci_reset_driver = {
.probe = ti_sci_reset_probe,
.remove = ti_sci_reset_remove,
.driver = {
.name = "ti-sci-reset",
.of_match_table = ti_sci_reset_of_match,
},
};
module_platform_driver(ti_sci_reset_driver);
MODULE_AUTHOR("Andrew F. Davis <[email protected]>");
MODULE_DESCRIPTION("TI System Control Interface (TI SCI) Reset driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/reset/reset-ti-sci.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018, Intel Corporation
* Copied from reset-sunxi.c
*/
#include <linux/err.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/reset/reset-simple.h>
#include <linux/reset/socfpga.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#define SOCFPGA_NR_BANKS 8
static int a10_reset_init(struct device_node *np)
{
struct reset_simple_data *data;
struct resource res;
resource_size_t size;
int ret;
u32 reg_offset = 0x10;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
ret = of_address_to_resource(np, 0, &res);
if (ret)
goto err_alloc;
size = resource_size(&res);
if (!request_mem_region(res.start, size, np->name)) {
ret = -EBUSY;
goto err_alloc;
}
data->membase = ioremap(res.start, size);
if (!data->membase) {
ret = -ENOMEM;
goto release_region;
}
	if (of_property_read_u32(np, "altr,modrst-offset", &reg_offset))
pr_warn("missing altr,modrst-offset property, assuming 0x10\n");
data->membase += reg_offset;
spin_lock_init(&data->lock);
data->rcdev.owner = THIS_MODULE;
data->rcdev.nr_resets = SOCFPGA_NR_BANKS * 32;
data->rcdev.ops = &reset_simple_ops;
data->rcdev.of_node = np;
data->status_active_low = true;
ret = reset_controller_register(&data->rcdev);
if (ret)
pr_err("unable to register device\n");
return ret;
release_region:
release_mem_region(res.start, size);
err_alloc:
kfree(data);
return ret;
};
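/*
 * Sizing note (illustrative): with SOCFPGA_NR_BANKS = 8 this registers
 * 8 * 32 = 256 reset lines; reset-simple resolves line i to bit (i % 32)
 * of the 32-bit bank at membase + 4 * (i / 32).
 */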
/*
 * These are the reset controllers we need to initialize early on in
* our system, before we can even think of using a regular device
* driver for it.
* The controllers that we can register through the regular device
* model are handled by the simple reset driver directly.
*/
static const struct of_device_id socfpga_early_reset_dt_ids[] __initconst = {
{ .compatible = "altr,rst-mgr", },
{ /* sentinel */ },
};
void __init socfpga_reset_init(void)
{
struct device_node *np;
for_each_matching_node(np, socfpga_early_reset_dt_ids)
a10_reset_init(np);
}
/*
* The early driver is problematic, because it doesn't register
* itself as a driver. This causes certain device links to prevent
* consumer devices from probing. The hacky solution is to register
* an empty driver, whose only job is to attach itself to the reset
* manager and call probe.
*/
static const struct of_device_id socfpga_reset_dt_ids[] = {
{ .compatible = "altr,rst-mgr", },
{ /* sentinel */ },
};
static int reset_simple_probe(struct platform_device *pdev)
{
return 0;
}
static struct platform_driver reset_socfpga_driver = {
.probe = reset_simple_probe,
.driver = {
.name = "socfpga-reset",
.of_match_table = socfpga_reset_dt_ids,
},
};
builtin_platform_driver(reset_socfpga_driver);
| linux-master | drivers/reset/reset-socfpga.c |
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
// Copyright (c) 2018 BayLibre, SAS.
// Author: Jerome Brunet <[email protected]>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/spinlock.h>
#include <dt-bindings/reset/amlogic,meson-axg-audio-arb.h>
struct meson_audio_arb_data {
struct reset_controller_dev rstc;
void __iomem *regs;
struct clk *clk;
const unsigned int *reset_bits;
spinlock_t lock;
};
struct meson_audio_arb_match_data {
const unsigned int *reset_bits;
unsigned int reset_num;
};
#define ARB_GENERAL_BIT 31
static const unsigned int axg_audio_arb_reset_bits[] = {
[AXG_ARB_TODDR_A] = 0,
[AXG_ARB_TODDR_B] = 1,
[AXG_ARB_TODDR_C] = 2,
[AXG_ARB_FRDDR_A] = 4,
[AXG_ARB_FRDDR_B] = 5,
[AXG_ARB_FRDDR_C] = 6,
};
static const struct meson_audio_arb_match_data axg_audio_arb_match = {
.reset_bits = axg_audio_arb_reset_bits,
.reset_num = ARRAY_SIZE(axg_audio_arb_reset_bits),
};
static const unsigned int sm1_audio_arb_reset_bits[] = {
[AXG_ARB_TODDR_A] = 0,
[AXG_ARB_TODDR_B] = 1,
[AXG_ARB_TODDR_C] = 2,
[AXG_ARB_FRDDR_A] = 4,
[AXG_ARB_FRDDR_B] = 5,
[AXG_ARB_FRDDR_C] = 6,
[AXG_ARB_TODDR_D] = 3,
[AXG_ARB_FRDDR_D] = 7,
};
static const struct meson_audio_arb_match_data sm1_audio_arb_match = {
.reset_bits = sm1_audio_arb_reset_bits,
.reset_num = ARRAY_SIZE(sm1_audio_arb_reset_bits),
};
static int meson_audio_arb_update(struct reset_controller_dev *rcdev,
unsigned long id, bool assert)
{
u32 val;
struct meson_audio_arb_data *arb =
container_of(rcdev, struct meson_audio_arb_data, rstc);
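/*
 * The arbiter register holds per-FIFO enable bits: clearing a device's
 * bit blocks its memory access, which is what "assert" means here, and
 * setting the bit grants access again (see the probe/remove paths below,
 * which write BIT(ARB_GENERAL_BIT) and 0 respectively).
 */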
spin_lock(&arb->lock);
val = readl(arb->regs);
if (assert)
val &= ~BIT(arb->reset_bits[id]);
else
val |= BIT(arb->reset_bits[id]);
writel(val, arb->regs);
spin_unlock(&arb->lock);
return 0;
}
static int meson_audio_arb_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
u32 val;
struct meson_audio_arb_data *arb =
container_of(rcdev, struct meson_audio_arb_data, rstc);
val = readl(arb->regs);
return !(val & BIT(arb->reset_bits[id]));
}
static int meson_audio_arb_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return meson_audio_arb_update(rcdev, id, true);
}
static int meson_audio_arb_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return meson_audio_arb_update(rcdev, id, false);
}
static const struct reset_control_ops meson_audio_arb_rstc_ops = {
.assert = meson_audio_arb_assert,
.deassert = meson_audio_arb_deassert,
.status = meson_audio_arb_status,
};
static const struct of_device_id meson_audio_arb_of_match[] = {
{
.compatible = "amlogic,meson-axg-audio-arb",
.data = &axg_audio_arb_match,
}, {
.compatible = "amlogic,meson-sm1-audio-arb",
.data = &sm1_audio_arb_match,
},
{}
};
MODULE_DEVICE_TABLE(of, meson_audio_arb_of_match);
static int meson_audio_arb_remove(struct platform_device *pdev)
{
struct meson_audio_arb_data *arb = platform_get_drvdata(pdev);
/* Disable all access */
spin_lock(&arb->lock);
writel(0, arb->regs);
spin_unlock(&arb->lock);
clk_disable_unprepare(arb->clk);
return 0;
}
static int meson_audio_arb_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct meson_audio_arb_match_data *data;
struct meson_audio_arb_data *arb;
struct resource *res;
int ret;
data = of_device_get_match_data(dev);
if (!data)
return -EINVAL;
arb = devm_kzalloc(dev, sizeof(*arb), GFP_KERNEL);
if (!arb)
return -ENOMEM;
platform_set_drvdata(pdev, arb);
arb->clk = devm_clk_get(dev, NULL);
if (IS_ERR(arb->clk))
return dev_err_probe(dev, PTR_ERR(arb->clk), "failed to get clock\n");
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
arb->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(arb->regs))
return PTR_ERR(arb->regs);
spin_lock_init(&arb->lock);
arb->reset_bits = data->reset_bits;
arb->rstc.nr_resets = data->reset_num;
arb->rstc.ops = &meson_audio_arb_rstc_ops;
arb->rstc.of_node = dev->of_node;
arb->rstc.owner = THIS_MODULE;
/*
* Enable general:
* In the initial state, all memory interfaces are disabled
* and the general bit is on
*/
ret = clk_prepare_enable(arb->clk);
if (ret) {
dev_err(dev, "failed to enable arb clock\n");
return ret;
}
writel(BIT(ARB_GENERAL_BIT), arb->regs);
/* Register reset controller */
ret = devm_reset_controller_register(dev, &arb->rstc);
if (ret) {
dev_err(dev, "failed to register arb reset controller\n");
meson_audio_arb_remove(pdev);
}
return ret;
}
static struct platform_driver meson_audio_arb_pdrv = {
.probe = meson_audio_arb_probe,
.remove = meson_audio_arb_remove,
.driver = {
.name = "meson-audio-arb-reset",
.of_match_table = meson_audio_arb_of_match,
},
};
module_platform_driver(meson_audio_arb_pdrv);
MODULE_DESCRIPTION("Amlogic A113 Audio Memory Arbiter");
MODULE_AUTHOR("Jerome Brunet <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/reset/reset-meson-audio-arb.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Reset driver for the StarFive JH71X0 SoCs
*
* Copyright (C) 2021 Emil Renner Berthing <[email protected]>
*/
#include <linux/bitmap.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/reset-controller.h>
#include <linux/spinlock.h>
#include "reset-starfive-jh71x0.h"
struct jh71x0_reset {
struct reset_controller_dev rcdev;
/* protect registers against concurrent read-modify-write */
spinlock_t lock;
void __iomem *assert;
void __iomem *status;
const u32 *asserted;
};
static inline struct jh71x0_reset *
jh71x0_reset_from(struct reset_controller_dev *rcdev)
{
return container_of(rcdev, struct jh71x0_reset, rcdev);
}
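/*
 * Reset ids are flat indices into the register banks: id / 32 selects a
 * 32-bit ASSERT/STATUS register pair and id % 32 the bit within it. A
 * worked example with an illustrative id (not taken from the bindings):
 * id 37 maps to the second register pair (byte offset 1 * sizeof(u32))
 * and mask BIT(5).
 */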
static int jh71x0_reset_update(struct reset_controller_dev *rcdev,
unsigned long id, bool assert)
{
struct jh71x0_reset *data = jh71x0_reset_from(rcdev);
unsigned long offset = id / 32;
u32 mask = BIT(id % 32);
void __iomem *reg_assert = data->assert + offset * sizeof(u32);
void __iomem *reg_status = data->status + offset * sizeof(u32);
u32 done = data->asserted ? data->asserted[offset] & mask : 0;
u32 value;
unsigned long flags;
int ret;
if (!assert)
done ^= mask;
spin_lock_irqsave(&data->lock, flags);
value = readl(reg_assert);
if (assert)
value |= mask;
else
value &= ~mask;
writel(value, reg_assert);
/* if the associated clock is gated, deasserting might otherwise hang forever */
ret = readl_poll_timeout_atomic(reg_status, value, (value & mask) == done, 0, 1000);
spin_unlock_irqrestore(&data->lock, flags);
return ret;
}
static int jh71x0_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return jh71x0_reset_update(rcdev, id, true);
}
static int jh71x0_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return jh71x0_reset_update(rcdev, id, false);
}
static int jh71x0_reset_reset(struct reset_controller_dev *rcdev,
unsigned long id)
{
int ret;
ret = jh71x0_reset_assert(rcdev, id);
if (ret)
return ret;
return jh71x0_reset_deassert(rcdev, id);
}
static int jh71x0_reset_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct jh71x0_reset *data = jh71x0_reset_from(rcdev);
unsigned long offset = id / 32;
u32 mask = BIT(id % 32);
void __iomem *reg_status = data->status + offset * sizeof(u32);
u32 value = readl(reg_status);
return !((value ^ data->asserted[offset]) & mask);
}
static const struct reset_control_ops jh71x0_reset_ops = {
.assert = jh71x0_reset_assert,
.deassert = jh71x0_reset_deassert,
.reset = jh71x0_reset_reset,
.status = jh71x0_reset_status,
};
int reset_starfive_jh71x0_register(struct device *dev, struct device_node *of_node,
void __iomem *assert, void __iomem *status,
const u32 *asserted, unsigned int nr_resets,
struct module *owner)
{
struct jh71x0_reset *data;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->rcdev.ops = &jh71x0_reset_ops;
data->rcdev.owner = owner;
data->rcdev.nr_resets = nr_resets;
data->rcdev.dev = dev;
data->rcdev.of_node = of_node;
spin_lock_init(&data->lock);
data->assert = assert;
data->status = status;
data->asserted = asserted;
return devm_reset_controller_register(dev, &data->rcdev);
}
EXPORT_SYMBOL_GPL(reset_starfive_jh71x0_register);
| linux-master | drivers/reset/starfive/reset-starfive-jh71x0.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Reset driver for the StarFive JH7100 SoC
*
* Copyright (C) 2021 Emil Renner Berthing <[email protected]>
*/
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include "reset-starfive-jh71x0.h"
#include <dt-bindings/reset/starfive-jh7100.h>
/* register offsets */
#define JH7100_RESET_ASSERT0 0x00
#define JH7100_RESET_ASSERT1 0x04
#define JH7100_RESET_ASSERT2 0x08
#define JH7100_RESET_ASSERT3 0x0c
#define JH7100_RESET_STATUS0 0x10
#define JH7100_RESET_STATUS1 0x14
#define JH7100_RESET_STATUS2 0x18
#define JH7100_RESET_STATUS3 0x1c
/*
* Writing a 1 to the n'th bit of the m'th ASSERT register asserts
* line 32m + n, and writing a 0 deasserts the same line.
* Most reset lines have their status inverted so a 0 bit in the STATUS
* register means the line is asserted and a 1 means it's deasserted. A few
* lines don't though, so store the expected value of the status registers when
* all lines are asserted.
*/
static const u32 jh7100_reset_asserted[4] = {
/* STATUS0 */
BIT(JH7100_RST_U74 % 32) |
BIT(JH7100_RST_VP6_DRESET % 32) |
BIT(JH7100_RST_VP6_BRESET % 32),
/* STATUS1 */
BIT(JH7100_RST_HIFI4_DRESET % 32) |
BIT(JH7100_RST_HIFI4_BRESET % 32),
/* STATUS2 */
BIT(JH7100_RST_E24 % 32),
/* STATUS3 */
0,
};
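/*
 * Illustrative readback example: for a line whose bit is clear in
 * jh7100_reset_asserted[] (the common, inverted case),
 * jh71x0_reset_status() computes !((status ^ asserted) & mask), so a 0
 * bit in the STATUS register reports "asserted" and a 1 bit
 * "deasserted"; for the few lines listed above the polarity is the
 * other way around.
 */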
static int __init jh7100_reset_probe(struct platform_device *pdev)
{
void __iomem *base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
return reset_starfive_jh71x0_register(&pdev->dev, pdev->dev.of_node,
base + JH7100_RESET_ASSERT0,
base + JH7100_RESET_STATUS0,
jh7100_reset_asserted,
JH7100_RSTN_END,
THIS_MODULE);
}
static const struct of_device_id jh7100_reset_dt_ids[] = {
{ .compatible = "starfive,jh7100-reset" },
{ /* sentinel */ }
};
static struct platform_driver jh7100_reset_driver = {
.driver = {
.name = "jh7100-reset",
.of_match_table = jh7100_reset_dt_ids,
.suppress_bind_attrs = true,
},
};
builtin_platform_driver_probe(jh7100_reset_driver, jh7100_reset_probe);
| linux-master | drivers/reset/starfive/reset-starfive-jh7100.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Reset driver for the StarFive JH7110 SoC
*
* Copyright (C) 2022 StarFive Technology Co., Ltd.
*/
#include <linux/auxiliary_bus.h>
#include <soc/starfive/reset-starfive-jh71x0.h>
#include "reset-starfive-jh71x0.h"
#include <dt-bindings/reset/starfive,jh7110-crg.h>
struct jh7110_reset_info {
unsigned int nr_resets;
unsigned int assert_offset;
unsigned int status_offset;
};
static const struct jh7110_reset_info jh7110_sys_info = {
.nr_resets = JH7110_SYSRST_END,
.assert_offset = 0x2F8,
.status_offset = 0x308,
};
static const struct jh7110_reset_info jh7110_aon_info = {
.nr_resets = JH7110_AONRST_END,
.assert_offset = 0x38,
.status_offset = 0x3C,
};
static const struct jh7110_reset_info jh7110_stg_info = {
.nr_resets = JH7110_STGRST_END,
.assert_offset = 0x74,
.status_offset = 0x78,
};
static const struct jh7110_reset_info jh7110_isp_info = {
.nr_resets = JH7110_ISPRST_END,
.assert_offset = 0x38,
.status_offset = 0x3C,
};
static const struct jh7110_reset_info jh7110_vout_info = {
.nr_resets = JH7110_VOUTRST_END,
.assert_offset = 0x48,
.status_offset = 0x4C,
};
static int jh7110_reset_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
struct jh7110_reset_info *info = (struct jh7110_reset_info *)(id->driver_data);
struct jh71x0_reset_adev *rdev = to_jh71x0_reset_adev(adev);
void __iomem *base = rdev->base;
if (!info || !base)
return -ENODEV;
return reset_starfive_jh71x0_register(&adev->dev, adev->dev.parent->of_node,
base + info->assert_offset,
base + info->status_offset,
NULL,
info->nr_resets,
THIS_MODULE);
}
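/*
 * Auxiliary bus ids match on "<module name>.<device name>", so the
 * entries below pair this driver with reset cells created under the
 * clk_starfive_jh7110_sys module (a note on the bus convention; the
 * clock driver itself is not part of this file).
 */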
static const struct auxiliary_device_id jh7110_reset_ids[] = {
{
.name = "clk_starfive_jh7110_sys.rst-sys",
.driver_data = (kernel_ulong_t)&jh7110_sys_info,
},
{
.name = "clk_starfive_jh7110_sys.rst-aon",
.driver_data = (kernel_ulong_t)&jh7110_aon_info,
},
{
.name = "clk_starfive_jh7110_sys.rst-stg",
.driver_data = (kernel_ulong_t)&jh7110_stg_info,
},
{
.name = "clk_starfive_jh7110_sys.rst-isp",
.driver_data = (kernel_ulong_t)&jh7110_isp_info,
},
{
.name = "clk_starfive_jh7110_sys.rst-vo",
.driver_data = (kernel_ulong_t)&jh7110_vout_info,
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(auxiliary, jh7110_reset_ids);
static struct auxiliary_driver jh7110_reset_driver = {
.probe = jh7110_reset_probe,
.id_table = jh7110_reset_ids,
};
module_auxiliary_driver(jh7110_reset_driver);
MODULE_AUTHOR("Hal Feng <[email protected]>");
MODULE_DESCRIPTION("StarFive JH7110 reset driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/reset/starfive/reset-starfive-jh7110.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 NVIDIA Corporation
*/
#include <linux/reset-controller.h>
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
static struct tegra_bpmp *to_tegra_bpmp(struct reset_controller_dev *rstc)
{
return container_of(rstc, struct tegra_bpmp, rstc);
}
static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
enum mrq_reset_commands command,
unsigned int id)
{
struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
struct mrq_reset_request request;
struct tegra_bpmp_message msg;
int err;
memset(&request, 0, sizeof(request));
request.cmd = command;
request.reset_id = id;
memset(&msg, 0, sizeof(msg));
msg.mrq = MRQ_RESET;
msg.tx.data = &request;
msg.tx.size = sizeof(request);
err = tegra_bpmp_transfer(bpmp, &msg);
if (err)
return err;
if (msg.rx.ret)
return -EINVAL;
return 0;
}
static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,
unsigned long id)
{
return tegra_bpmp_reset_common(rstc, CMD_RESET_MODULE, id);
}
static int tegra_bpmp_reset_assert(struct reset_controller_dev *rstc,
unsigned long id)
{
return tegra_bpmp_reset_common(rstc, CMD_RESET_ASSERT, id);
}
static int tegra_bpmp_reset_deassert(struct reset_controller_dev *rstc,
unsigned long id)
{
return tegra_bpmp_reset_common(rstc, CMD_RESET_DEASSERT, id);
}
static const struct reset_control_ops tegra_bpmp_reset_ops = {
.reset = tegra_bpmp_reset_module,
.assert = tegra_bpmp_reset_assert,
.deassert = tegra_bpmp_reset_deassert,
};
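/*
 * Consumer-side sketch (illustrative, not part of this file): a device
 * driver using one of these resets would typically do
 *
 *	struct reset_control *rst = devm_reset_control_get(dev, NULL);
 *
 *	if (IS_ERR(rst))
 *		return PTR_ERR(rst);
 *	reset_control_reset(rst);	// ends up in tegra_bpmp_reset_module()
 *
 * with the reset index coming from the consumer's "resets" DT property.
 */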
int tegra_bpmp_init_resets(struct tegra_bpmp *bpmp)
{
bpmp->rstc.ops = &tegra_bpmp_reset_ops;
bpmp->rstc.owner = THIS_MODULE;
bpmp->rstc.of_node = bpmp->dev->of_node;
bpmp->rstc.nr_resets = bpmp->soc->num_resets;
return devm_reset_controller_register(bpmp->dev, &bpmp->rstc);
}
| linux-master | drivers/reset/tegra/reset-bpmp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2016-2017 Linaro Ltd.
* Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd.
*/
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>
struct hi3660_reset_controller {
struct reset_controller_dev rst;
struct regmap *map;
};
#define to_hi3660_reset_controller(_rst) \
container_of(_rst, struct hi3660_reset_controller, rst)
static int hi3660_reset_program_hw(struct reset_controller_dev *rcdev,
unsigned long idx, bool assert)
{
struct hi3660_reset_controller *rc = to_hi3660_reset_controller(rcdev);
unsigned int offset = idx >> 8;
unsigned int mask = BIT(idx & 0x1f);
if (assert)
return regmap_write(rc->map, offset, mask);
else
return regmap_write(rc->map, offset + 4, mask);
}
static int hi3660_reset_assert(struct reset_controller_dev *rcdev,
unsigned long idx)
{
return hi3660_reset_program_hw(rcdev, idx, true);
}
static int hi3660_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long idx)
{
return hi3660_reset_program_hw(rcdev, idx, false);
}
static int hi3660_reset_dev(struct reset_controller_dev *rcdev,
unsigned long idx)
{
int err;
err = hi3660_reset_assert(rcdev, idx);
if (err)
return err;
return hi3660_reset_deassert(rcdev, idx);
}
static const struct reset_control_ops hi3660_reset_ops = {
.reset = hi3660_reset_dev,
.assert = hi3660_reset_assert,
.deassert = hi3660_reset_deassert,
};
static int hi3660_reset_xlate(struct reset_controller_dev *rcdev,
const struct of_phandle_args *reset_spec)
{
unsigned int offset, bit;
offset = reset_spec->args[0];
bit = reset_spec->args[1];
return (offset << 8) | bit;
}
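/*
 * Worked example with illustrative numbers (the cells come from the
 * consumer's DT): "resets = <&crg_rst 0x20 3>;" yields offset 0x20 and
 * bit 3, packed as (0x20 << 8) | 3; assert then writes BIT(3) to
 * register 0x20 and deassert writes it to 0x24.
 */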
static int hi3660_reset_probe(struct platform_device *pdev)
{
struct hi3660_reset_controller *rc;
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
rc = devm_kzalloc(dev, sizeof(*rc), GFP_KERNEL);
if (!rc)
return -ENOMEM;
rc->map = syscon_regmap_lookup_by_phandle(np, "hisilicon,rst-syscon");
if (rc->map == ERR_PTR(-ENODEV)) {
/* fall back to the deprecated compatible */
rc->map = syscon_regmap_lookup_by_phandle(np,
"hisi,rst-syscon");
}
if (IS_ERR(rc->map)) {
return dev_err_probe(dev, PTR_ERR(rc->map),
"failed to get hisilicon,rst-syscon\n");
}
rc->rst.ops = &hi3660_reset_ops;
rc->rst.of_node = np;
rc->rst.of_reset_n_cells = 2;
rc->rst.of_xlate = hi3660_reset_xlate;
return reset_controller_register(&rc->rst);
}
static const struct of_device_id hi3660_reset_match[] = {
{ .compatible = "hisilicon,hi3660-reset", },
{},
};
MODULE_DEVICE_TABLE(of, hi3660_reset_match);
static struct platform_driver hi3660_reset_driver = {
.probe = hi3660_reset_probe,
.driver = {
.name = "hi3660-reset",
.of_match_table = hi3660_reset_match,
},
};
static int __init hi3660_reset_init(void)
{
return platform_driver_register(&hi3660_reset_driver);
}
arch_initcall(hi3660_reset_init);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:hi3660-reset");
MODULE_DESCRIPTION("HiSilicon Hi3660 Reset Driver");
| linux-master | drivers/reset/hisilicon/reset-hi3660.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Hisilicon Hi6220 reset controller driver
*
* Copyright (c) 2016 Linaro Limited.
* Copyright (c) 2015-2016 HiSilicon Limited.
*
* Author: Feng Chen <[email protected]>
*/
#include <linux/io.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/reset-controller.h>
#include <linux/reset.h>
#include <linux/platform_device.h>
#define PERIPH_ASSERT_OFFSET 0x300
#define PERIPH_DEASSERT_OFFSET 0x304
#define PERIPH_MAX_INDEX 0x509
#define SC_MEDIA_RSTEN 0x052C
#define SC_MEDIA_RSTDIS 0x0530
#define MEDIA_MAX_INDEX 8
#define to_reset_data(x) container_of(x, struct hi6220_reset_data, rc_dev)
enum hi6220_reset_ctrl_type {
PERIPHERAL,
MEDIA,
AO,
};
struct hi6220_reset_data {
struct reset_controller_dev rc_dev;
struct regmap *regmap;
};
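/*
 * Peripheral reset indices pack a register bank in the high byte and a
 * bit number in the low byte. Worked example with an illustrative index:
 * idx 0x103 selects bank 1, bit 3, so assert writes BIT(3) to
 * 0x300 + 1 * 0x10 = 0x310 and deassert writes it to 0x314.
 */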
static int hi6220_peripheral_assert(struct reset_controller_dev *rc_dev,
unsigned long idx)
{
struct hi6220_reset_data *data = to_reset_data(rc_dev);
struct regmap *regmap = data->regmap;
u32 bank = idx >> 8;
u32 offset = idx & 0xff;
u32 reg = PERIPH_ASSERT_OFFSET + bank * 0x10;
return regmap_write(regmap, reg, BIT(offset));
}
static int hi6220_peripheral_deassert(struct reset_controller_dev *rc_dev,
unsigned long idx)
{
struct hi6220_reset_data *data = to_reset_data(rc_dev);
struct regmap *regmap = data->regmap;
u32 bank = idx >> 8;
u32 offset = idx & 0xff;
u32 reg = PERIPH_DEASSERT_OFFSET + bank * 0x10;
return regmap_write(regmap, reg, BIT(offset));
}
static const struct reset_control_ops hi6220_peripheral_reset_ops = {
.assert = hi6220_peripheral_assert,
.deassert = hi6220_peripheral_deassert,
};
static int hi6220_media_assert(struct reset_controller_dev *rc_dev,
unsigned long idx)
{
struct hi6220_reset_data *data = to_reset_data(rc_dev);
struct regmap *regmap = data->regmap;
return regmap_write(regmap, SC_MEDIA_RSTEN, BIT(idx));
}
static int hi6220_media_deassert(struct reset_controller_dev *rc_dev,
unsigned long idx)
{
struct hi6220_reset_data *data = to_reset_data(rc_dev);
struct regmap *regmap = data->regmap;
return regmap_write(regmap, SC_MEDIA_RSTDIS, BIT(idx));
}
static const struct reset_control_ops hi6220_media_reset_ops = {
.assert = hi6220_media_assert,
.deassert = hi6220_media_deassert,
};
#define AO_SCTRL_SC_PW_CLKEN0 0x800
#define AO_SCTRL_SC_PW_CLKDIS0 0x804
#define AO_SCTRL_SC_PW_RSTEN0 0x810
#define AO_SCTRL_SC_PW_RSTDIS0 0x814
#define AO_SCTRL_SC_PW_ISOEN0 0x820
#define AO_SCTRL_SC_PW_ISODIS0 0x824
#define AO_MAX_INDEX 12
static int hi6220_ao_assert(struct reset_controller_dev *rc_dev,
unsigned long idx)
{
struct hi6220_reset_data *data = to_reset_data(rc_dev);
struct regmap *regmap = data->regmap;
int ret;
ret = regmap_write(regmap, AO_SCTRL_SC_PW_RSTEN0, BIT(idx));
if (ret)
return ret;
ret = regmap_write(regmap, AO_SCTRL_SC_PW_ISOEN0, BIT(idx));
if (ret)
return ret;
ret = regmap_write(regmap, AO_SCTRL_SC_PW_CLKDIS0, BIT(idx));
return ret;
}
static int hi6220_ao_deassert(struct reset_controller_dev *rc_dev,
unsigned long idx)
{
struct hi6220_reset_data *data = to_reset_data(rc_dev);
struct regmap *regmap = data->regmap;
int ret;
/*
* It was suggested to disable isolation before enabling
* the clocks and deasserting reset, to avoid glitches.
* But this order is preserved to keep it matching the
* vendor code.
*/
ret = regmap_write(regmap, AO_SCTRL_SC_PW_RSTDIS0, BIT(idx));
if (ret)
return ret;
ret = regmap_write(regmap, AO_SCTRL_SC_PW_ISODIS0, BIT(idx));
if (ret)
return ret;
ret = regmap_write(regmap, AO_SCTRL_SC_PW_CLKEN0, BIT(idx));
return ret;
}
static const struct reset_control_ops hi6220_ao_reset_ops = {
.assert = hi6220_ao_assert,
.deassert = hi6220_ao_deassert,
};
static int hi6220_reset_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
enum hi6220_reset_ctrl_type type;
struct hi6220_reset_data *data;
struct regmap *regmap;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
type = (enum hi6220_reset_ctrl_type)(uintptr_t)of_device_get_match_data(dev);
regmap = syscon_node_to_regmap(np);
if (IS_ERR(regmap)) {
dev_err(dev, "failed to get reset controller regmap\n");
return PTR_ERR(regmap);
}
data->regmap = regmap;
data->rc_dev.of_node = np;
if (type == MEDIA) {
data->rc_dev.ops = &hi6220_media_reset_ops;
data->rc_dev.nr_resets = MEDIA_MAX_INDEX;
} else if (type == PERIPHERAL) {
data->rc_dev.ops = &hi6220_peripheral_reset_ops;
data->rc_dev.nr_resets = PERIPH_MAX_INDEX;
} else {
data->rc_dev.ops = &hi6220_ao_reset_ops;
data->rc_dev.nr_resets = AO_MAX_INDEX;
}
return reset_controller_register(&data->rc_dev);
}
static const struct of_device_id hi6220_reset_match[] = {
{
.compatible = "hisilicon,hi6220-sysctrl",
.data = (void *)PERIPHERAL,
},
{
.compatible = "hisilicon,hi6220-mediactrl",
.data = (void *)MEDIA,
},
{
.compatible = "hisilicon,hi6220-aoctrl",
.data = (void *)AO,
},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, hi6220_reset_match);
static struct platform_driver hi6220_reset_driver = {
.probe = hi6220_reset_probe,
.driver = {
.name = "reset-hi6220",
.of_match_table = hi6220_reset_match,
},
};
static int __init hi6220_reset_init(void)
{
return platform_driver_register(&hi6220_reset_driver);
}
postcore_initcall(hi6220_reset_init);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/reset/hisilicon/hi6220_reset.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2013 STMicroelectronics Limited
* Author: Stephen Gallimore <[email protected]>
*
* Inspired by mach-imx/src.c
*/
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/types.h>
#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include "reset-syscfg.h"
/**
* struct syscfg_reset_channel - Reset channel regmap configuration
*
* @reset: regmap field for the channel's reset bit.
* @ack: regmap field for the channel's ack bit (optional).
*/
struct syscfg_reset_channel {
struct regmap_field *reset;
struct regmap_field *ack;
};
/**
* struct syscfg_reset_controller - A reset controller which groups together
* a set of related reset bits, which may be located in different system
* configuration registers.
*
* @rst: base reset controller structure.
* @active_low: are the resets in this controller active low, i.e. clearing
* the reset bit puts the hardware into reset.
* @channels: An array of reset channels for this controller.
*/
struct syscfg_reset_controller {
struct reset_controller_dev rst;
bool active_low;
struct syscfg_reset_channel *channels;
};
#define to_syscfg_reset_controller(_rst) \
container_of(_rst, struct syscfg_reset_controller, rst)
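/*
 * Write the reset bit for a channel and, when the controller provides an
 * ack bit, poll it until the hardware confirms the new state. For
 * active-low controllers "assert" writes 0 and "deassert" writes 1;
 * active-high controllers are the inverse.
 */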
static int syscfg_reset_program_hw(struct reset_controller_dev *rcdev,
unsigned long idx, int assert)
{
struct syscfg_reset_controller *rst = to_syscfg_reset_controller(rcdev);
const struct syscfg_reset_channel *ch;
u32 ctrl_val = rst->active_low ? !assert : !!assert;
int err;
if (idx >= rcdev->nr_resets)
return -EINVAL;
ch = &rst->channels[idx];
err = regmap_field_write(ch->reset, ctrl_val);
if (err)
return err;
if (ch->ack) {
u32 ack_val;
err = regmap_field_read_poll_timeout(ch->ack, ack_val, (ack_val == ctrl_val),
100, USEC_PER_SEC);
if (err)
return err;
}
return 0;
}
static int syscfg_reset_assert(struct reset_controller_dev *rcdev,
unsigned long idx)
{
return syscfg_reset_program_hw(rcdev, idx, true);
}
static int syscfg_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long idx)
{
return syscfg_reset_program_hw(rcdev, idx, false);
}
static int syscfg_reset_dev(struct reset_controller_dev *rcdev,
unsigned long idx)
{
int err;
err = syscfg_reset_assert(rcdev, idx);
if (err)
return err;
return syscfg_reset_deassert(rcdev, idx);
}
static int syscfg_reset_status(struct reset_controller_dev *rcdev,
unsigned long idx)
{
struct syscfg_reset_controller *rst = to_syscfg_reset_controller(rcdev);
const struct syscfg_reset_channel *ch;
u32 ret_val = 0;
int err;
if (idx >= rcdev->nr_resets)
return -EINVAL;
ch = &rst->channels[idx];
if (ch->ack)
err = regmap_field_read(ch->ack, &ret_val);
else
err = regmap_field_read(ch->reset, &ret_val);
if (err)
return err;
return rst->active_low ? !ret_val : !!ret_val;
}
static const struct reset_control_ops syscfg_reset_ops = {
.reset = syscfg_reset_dev,
.assert = syscfg_reset_assert,
.deassert = syscfg_reset_deassert,
.status = syscfg_reset_status,
};
static int syscfg_reset_controller_register(struct device *dev,
const struct syscfg_reset_controller_data *data)
{
struct syscfg_reset_controller *rc;
int i, err;
rc = devm_kzalloc(dev, sizeof(*rc), GFP_KERNEL);
if (!rc)
return -ENOMEM;
rc->channels = devm_kcalloc(dev, data->nr_channels,
sizeof(*rc->channels), GFP_KERNEL);
if (!rc->channels)
return -ENOMEM;
rc->rst.ops = &syscfg_reset_ops;
rc->rst.of_node = dev->of_node;
rc->rst.nr_resets = data->nr_channels;
rc->active_low = data->active_low;
for (i = 0; i < data->nr_channels; i++) {
struct regmap *map;
struct regmap_field *f;
const char *compatible = data->channels[i].compatible;
map = syscon_regmap_lookup_by_compatible(compatible);
if (IS_ERR(map))
return PTR_ERR(map);
f = devm_regmap_field_alloc(dev, map, data->channels[i].reset);
if (IS_ERR(f))
return PTR_ERR(f);
rc->channels[i].reset = f;
if (!data->wait_for_ack)
continue;
f = devm_regmap_field_alloc(dev, map, data->channels[i].ack);
if (IS_ERR(f))
return PTR_ERR(f);
rc->channels[i].ack = f;
}
err = reset_controller_register(&rc->rst);
if (!err)
dev_info(dev, "registered\n");
return err;
}
int syscfg_reset_probe(struct platform_device *pdev)
{
struct device *dev = pdev ? &pdev->dev : NULL;
const struct of_device_id *match;
if (!dev || !dev->driver)
return -ENODEV;
match = of_match_device(dev->driver->of_match_table, dev);
if (!match || !match->data)
return -EINVAL;
return syscfg_reset_controller_register(dev, match->data);
}
| linux-master | drivers/reset/sti/reset-syscfg.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2014 STMicroelectronics (R&D) Limited
* Author: Giuseppe Cavallaro <[email protected]>
*/
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <dt-bindings/reset/stih407-resets.h>
#include "reset-syscfg.h"
/* STiH407 Peripheral powerdown definitions. */
static const char stih407_core[] = "st,stih407-core-syscfg";
static const char stih407_sbc_reg[] = "st,stih407-sbc-reg-syscfg";
static const char stih407_lpm[] = "st,stih407-lpm-syscfg";
#define STIH407_PDN_0(_bit) \
_SYSCFG_RST_CH(stih407_core, SYSCFG_5000, _bit, SYSSTAT_5500, _bit)
#define STIH407_PDN_1(_bit) \
_SYSCFG_RST_CH(stih407_core, SYSCFG_5001, _bit, SYSSTAT_5501, _bit)
#define STIH407_PDN_ETH(_bit, _stat) \
_SYSCFG_RST_CH(stih407_sbc_reg, SYSCFG_4032, _bit, SYSSTAT_4520, _stat)
/* Powerdown requests control 0 */
#define SYSCFG_5000 0x0
#define SYSSTAT_5500 0x7d0
/* Powerdown requests control 1 (High Speed Links) */
#define SYSCFG_5001 0x4
#define SYSSTAT_5501 0x7d4
/* Ethernet powerdown/status/reset */
#define SYSCFG_4032 0x80
#define SYSSTAT_4520 0x820
#define SYSCFG_4002 0x8
static const struct syscfg_reset_channel_data stih407_powerdowns[] = {
[STIH407_EMISS_POWERDOWN] = STIH407_PDN_0(1),
[STIH407_NAND_POWERDOWN] = STIH407_PDN_0(0),
[STIH407_USB3_POWERDOWN] = STIH407_PDN_1(6),
[STIH407_USB2_PORT1_POWERDOWN] = STIH407_PDN_1(5),
[STIH407_USB2_PORT0_POWERDOWN] = STIH407_PDN_1(4),
[STIH407_PCIE1_POWERDOWN] = STIH407_PDN_1(3),
[STIH407_PCIE0_POWERDOWN] = STIH407_PDN_1(2),
[STIH407_SATA1_POWERDOWN] = STIH407_PDN_1(1),
[STIH407_SATA0_POWERDOWN] = STIH407_PDN_1(0),
[STIH407_ETH1_POWERDOWN] = STIH407_PDN_ETH(0, 2),
};
/* Reset Generator control 0/1 */
#define SYSCFG_5128 0x200
#define SYSCFG_5131 0x20c
#define SYSCFG_5132 0x210
#define LPM_SYSCFG_1 0x4 /* Softreset IRB & SBC UART */
#define STIH407_SRST_CORE(_reg, _bit) \
_SYSCFG_RST_CH_NO_ACK(stih407_core, _reg, _bit)
#define STIH407_SRST_SBC(_reg, _bit) \
_SYSCFG_RST_CH_NO_ACK(stih407_sbc_reg, _reg, _bit)
#define STIH407_SRST_LPM(_reg, _bit) \
_SYSCFG_RST_CH_NO_ACK(stih407_lpm, _reg, _bit)
static const struct syscfg_reset_channel_data stih407_softresets[] = {
[STIH407_ETH1_SOFTRESET] = STIH407_SRST_SBC(SYSCFG_4002, 4),
[STIH407_MMC1_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5132, 3),
[STIH407_USB2_PORT0_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5132, 28),
[STIH407_USB2_PORT1_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5132, 29),
[STIH407_PICOPHY_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5132, 30),
[STIH407_IRB_SOFTRESET] = STIH407_SRST_LPM(LPM_SYSCFG_1, 6),
[STIH407_PCIE0_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5132, 6),
[STIH407_PCIE1_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5132, 15),
[STIH407_SATA0_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5132, 7),
[STIH407_SATA1_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5132, 16),
[STIH407_MIPHY0_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5132, 4),
[STIH407_MIPHY1_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5132, 13),
[STIH407_MIPHY2_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5132, 22),
[STIH407_SATA0_PWR_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5132, 5),
[STIH407_SATA1_PWR_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5132, 14),
[STIH407_DELTA_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5131, 3),
[STIH407_BLITTER_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5131, 10),
[STIH407_HDTVOUT_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5131, 11),
[STIH407_HDQVDP_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5131, 12),
[STIH407_VDP_AUX_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5131, 14),
[STIH407_COMPO_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5131, 15),
[STIH407_HDMI_TX_PHY_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5131, 21),
[STIH407_JPEG_DEC_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5131, 23),
[STIH407_VP8_DEC_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5131, 24),
[STIH407_GPU_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5131, 30),
[STIH407_HVA_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5132, 0),
[STIH407_ERAM_HVA_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5132, 1),
[STIH407_LPM_SOFTRESET] = STIH407_SRST_SBC(SYSCFG_4002, 2),
[STIH407_KEYSCAN_SOFTRESET] = STIH407_SRST_LPM(LPM_SYSCFG_1, 8),
[STIH407_ST231_AUD_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5131, 26),
[STIH407_ST231_DMU_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5131, 27),
[STIH407_ST231_GP0_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5131, 28),
[STIH407_ST231_GP1_SOFTRESET] = STIH407_SRST_CORE(SYSCFG_5128, 2),
};
/* PicoPHY reset/control */
#define SYSCFG_5061 0x0f4
static const struct syscfg_reset_channel_data stih407_picophyresets[] = {
[STIH407_PICOPHY0_RESET] = STIH407_SRST_CORE(SYSCFG_5061, 5),
[STIH407_PICOPHY1_RESET] = STIH407_SRST_CORE(SYSCFG_5061, 6),
[STIH407_PICOPHY2_RESET] = STIH407_SRST_CORE(SYSCFG_5061, 7),
};
static const struct syscfg_reset_controller_data stih407_powerdown_controller = {
.wait_for_ack = true,
.nr_channels = ARRAY_SIZE(stih407_powerdowns),
.channels = stih407_powerdowns,
};
static const struct syscfg_reset_controller_data stih407_softreset_controller = {
.wait_for_ack = false,
.active_low = true,
.nr_channels = ARRAY_SIZE(stih407_softresets),
.channels = stih407_softresets,
};
static const struct syscfg_reset_controller_data stih407_picophyreset_controller = {
.wait_for_ack = false,
.nr_channels = ARRAY_SIZE(stih407_picophyresets),
.channels = stih407_picophyresets,
};
static const struct of_device_id stih407_reset_match[] = {
{
.compatible = "st,stih407-powerdown",
.data = &stih407_powerdown_controller,
},
{
.compatible = "st,stih407-softreset",
.data = &stih407_softreset_controller,
},
{
.compatible = "st,stih407-picophyreset",
.data = &stih407_picophyreset_controller,
},
{ /* sentinel */ },
};
static struct platform_driver stih407_reset_driver = {
.probe = syscfg_reset_probe,
.driver = {
.name = "reset-stih407",
.of_match_table = stih407_reset_match,
},
};
static int __init stih407_reset_init(void)
{
return platform_driver_register(&stih407_reset_driver);
}
arch_initcall(stih407_reset_init);
| linux-master | drivers/reset/sti/reset-stih407.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for the OLPC XO-1.75 Embedded Controller.
*
* The EC protocol is documented at:
* http://wiki.laptop.org/go/XO_1.75_HOST_to_EC_Protocol
*
* Copyright (C) 2010 One Laptop per Child Foundation.
* Copyright (C) 2018 Lubomir Rintel <[email protected]>
*/
#include <linux/completion.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/input.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/olpc-ec.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/spi/spi.h>
struct ec_cmd_t {
u8 cmd;
u8 bytes_returned;
};
enum ec_chan_t {
CHAN_NONE = 0,
CHAN_SWITCH,
CHAN_CMD_RESP,
CHAN_KEYBOARD,
CHAN_TOUCHPAD,
CHAN_EVENT,
CHAN_DEBUG,
CHAN_CMD_ERROR,
};
/*
* EC events
*/
#define EVENT_AC_CHANGE 1 /* AC plugged/unplugged */
#define EVENT_BATTERY_STATUS 2 /* Battery low/full/error/gone */
#define EVENT_BATTERY_CRITICAL 3 /* Battery critical voltage */
#define EVENT_BATTERY_SOC_CHANGE 4 /* 1% SOC Change */
#define EVENT_BATTERY_ERROR 5 /* Abnormal error, query for cause */
#define EVENT_POWER_PRESSED 6 /* Power button was pressed */
#define EVENT_POWER_PRESS_WAKE 7 /* Woken up with a power button */
#define EVENT_TIMED_HOST_WAKE 8 /* Host wake timer */
#define EVENT_OLS_HIGH_LIMIT 9 /* OLS crossed dark threshold */
#define EVENT_OLS_LOW_LIMIT 10 /* OLS crossed light threshold */
/*
* EC commands
* (from http://dev.laptop.org/git/users/rsmith/ec-1.75/tree/ec_cmd.h)
*/
#define CMD_GET_API_VERSION 0x08 /* out: u8 */
#define CMD_READ_VOLTAGE 0x10 /* out: u16, *9.76/32, mV */
#define CMD_READ_CURRENT 0x11 /* out: s16, *15.625/120, mA */
#define CMD_READ_ACR 0x12 /* out: s16, *6250/15, uAh */
#define CMD_READ_BATT_TEMPERATURE 0x13 /* out: u16, *100/256, deg C */
#define CMD_READ_AMBIENT_TEMPERATURE 0x14 /* unimplemented, no hardware */
#define CMD_READ_BATTERY_STATUS 0x15 /* out: u8, bitmask */
#define CMD_READ_SOC 0x16 /* out: u8, percentage */
#define CMD_READ_GAUGE_ID 0x17 /* out: u8 * 8 */
#define CMD_READ_GAUGE_DATA 0x18 /* in: u8 addr, out: u8 data */
#define CMD_READ_BOARD_ID 0x19 /* out: u16 (platform id) */
#define CMD_READ_BATT_ERR_CODE 0x1f /* out: u8, error bitmask */
#define CMD_SET_DCON_POWER 0x26 /* in: u8 */
#define CMD_RESET_EC 0x28 /* none */
#define CMD_READ_BATTERY_TYPE 0x2c /* out: u8 */
#define CMD_SET_AUTOWAK 0x33 /* out: u8 */
#define CMD_SET_EC_WAKEUP_TIMER 0x36 /* in: u32, out: ? */
#define CMD_READ_EXT_SCI_MASK 0x37 /* ? */
#define CMD_WRITE_EXT_SCI_MASK 0x38 /* ? */
#define CMD_CLEAR_EC_WAKEUP_TIMER 0x39 /* none */
#define CMD_ENABLE_RUNIN_DISCHARGE 0x3B /* none */
#define CMD_DISABLE_RUNIN_DISCHARGE 0x3C /* none */
#define CMD_READ_MPPT_ACTIVE 0x3d /* out: u8 */
#define CMD_READ_MPPT_LIMIT 0x3e /* out: u8 */
#define CMD_SET_MPPT_LIMIT 0x3f /* in: u8 */
#define CMD_DISABLE_MPPT 0x40 /* none */
#define CMD_ENABLE_MPPT 0x41 /* none */
#define CMD_READ_VIN 0x42 /* out: u16 */
#define CMD_EXT_SCI_QUERY 0x43 /* ? */
#define RSP_KEYBOARD_DATA 0x48 /* ? */
#define RSP_TOUCHPAD_DATA 0x49 /* ? */
#define CMD_GET_FW_VERSION 0x4a /* out: u8 * 16 */
#define CMD_POWER_CYCLE 0x4b /* none */
#define CMD_POWER_OFF 0x4c /* none */
#define CMD_RESET_EC_SOFT 0x4d /* none */
#define CMD_READ_GAUGE_U16 0x4e /* ? */
#define CMD_ENABLE_MOUSE 0x4f /* ? */
#define CMD_ECHO 0x52 /* in: u8 * 5, out: u8 * 5 */
#define CMD_GET_FW_DATE 0x53 /* out: u8 * 16 */
#define CMD_GET_FW_USER 0x54 /* out: u8 * 16 */
#define CMD_TURN_OFF_POWER 0x55 /* none (same as 0x4c) */
#define CMD_READ_OLS 0x56 /* out: u16 */
#define CMD_OLS_SMT_LEDON 0x57 /* none */
#define CMD_OLS_SMT_LEDOFF 0x58 /* none */
#define CMD_START_OLS_ASSY 0x59 /* none */
#define CMD_STOP_OLS_ASSY 0x5a /* none */
#define CMD_OLS_SMTTEST_STOP 0x5b /* none */
#define CMD_READ_VIN_SCALED 0x5c /* out: u16 */
#define CMD_READ_BAT_MIN_W 0x5d /* out: u16 */
#define CMD_READ_BAR_MAX_W 0x5e /* out: u16 */
#define CMD_RESET_BAT_MINMAX_W 0x5f /* none */
#define CMD_READ_LOCATION 0x60 /* in: u16 addr, out: u8 data */
#define CMD_WRITE_LOCATION 0x61 /* in: u16 addr, u8 data */
#define CMD_KEYBOARD_CMD 0x62 /* in: u8, out: ? */
#define CMD_TOUCHPAD_CMD 0x63 /* in: u8, out: ? */
#define CMD_GET_FW_HASH 0x64 /* out: u8 * 16 */
#define CMD_SUSPEND_HINT 0x65 /* in: u8 */
#define CMD_ENABLE_WAKE_TIMER 0x66 /* in: u8 */
#define CMD_SET_WAKE_TIMER 0x67 /* in: 32 */
#define CMD_ENABLE_WAKE_AUTORESET 0x68 /* in: u8 */
#define CMD_OLS_SET_LIMITS 0x69 /* in: u16, u16 */
#define CMD_OLS_GET_LIMITS 0x6a /* out: u16, u16 */
#define CMD_OLS_SET_CEILING 0x6b /* in: u16 */
#define CMD_OLS_GET_CEILING 0x6c /* out: u16 */
/*
* Accepted EC commands, and how many bytes they return. There are plenty
* of EC commands that are no longer implemented, or are implemented only on
* certain older boards.
*/
static const struct ec_cmd_t olpc_xo175_ec_cmds[] = {
{ CMD_GET_API_VERSION, 1 },
{ CMD_READ_VOLTAGE, 2 },
{ CMD_READ_CURRENT, 2 },
{ CMD_READ_ACR, 2 },
{ CMD_READ_BATT_TEMPERATURE, 2 },
{ CMD_READ_BATTERY_STATUS, 1 },
{ CMD_READ_SOC, 1 },
{ CMD_READ_GAUGE_ID, 8 },
{ CMD_READ_GAUGE_DATA, 1 },
{ CMD_READ_BOARD_ID, 2 },
{ CMD_READ_BATT_ERR_CODE, 1 },
{ CMD_SET_DCON_POWER, 0 },
{ CMD_RESET_EC, 0 },
{ CMD_READ_BATTERY_TYPE, 1 },
{ CMD_ENABLE_RUNIN_DISCHARGE, 0 },
{ CMD_DISABLE_RUNIN_DISCHARGE, 0 },
{ CMD_READ_MPPT_ACTIVE, 1 },
{ CMD_READ_MPPT_LIMIT, 1 },
{ CMD_SET_MPPT_LIMIT, 0 },
{ CMD_DISABLE_MPPT, 0 },
{ CMD_ENABLE_MPPT, 0 },
{ CMD_READ_VIN, 2 },
{ CMD_GET_FW_VERSION, 16 },
{ CMD_POWER_CYCLE, 0 },
{ CMD_POWER_OFF, 0 },
{ CMD_RESET_EC_SOFT, 0 },
{ CMD_ECHO, 5 },
{ CMD_GET_FW_DATE, 16 },
{ CMD_GET_FW_USER, 16 },
{ CMD_TURN_OFF_POWER, 0 },
{ CMD_READ_OLS, 2 },
{ CMD_OLS_SMT_LEDON, 0 },
{ CMD_OLS_SMT_LEDOFF, 0 },
{ CMD_START_OLS_ASSY, 0 },
{ CMD_STOP_OLS_ASSY, 0 },
{ CMD_OLS_SMTTEST_STOP, 0 },
{ CMD_READ_VIN_SCALED, 2 },
{ CMD_READ_BAT_MIN_W, 2 },
{ CMD_READ_BAR_MAX_W, 2 },
{ CMD_RESET_BAT_MINMAX_W, 0 },
{ CMD_READ_LOCATION, 1 },
{ CMD_WRITE_LOCATION, 0 },
{ CMD_GET_FW_HASH, 16 },
{ CMD_SUSPEND_HINT, 0 },
{ CMD_ENABLE_WAKE_TIMER, 0 },
{ CMD_SET_WAKE_TIMER, 0 },
{ CMD_ENABLE_WAKE_AUTORESET, 0 },
{ CMD_OLS_SET_LIMITS, 0 },
{ CMD_OLS_GET_LIMITS, 4 },
{ CMD_OLS_SET_CEILING, 0 },
{ CMD_OLS_GET_CEILING, 2 },
{ CMD_READ_EXT_SCI_MASK, 2 },
{ CMD_WRITE_EXT_SCI_MASK, 0 },
{ }
};
#define EC_MAX_CMD_DATA_LEN 5
#define EC_MAX_RESP_LEN 16
#define LOG_BUF_SIZE 128
#define PM_WAKEUP_TIME 1000
#define EC_ALL_EVENTS GENMASK(15, 0)
enum ec_state_t {
CMD_STATE_IDLE = 0,
CMD_STATE_WAITING_FOR_SWITCH,
CMD_STATE_CMD_IN_TX_FIFO,
CMD_STATE_CMD_SENT,
CMD_STATE_RESP_RECEIVED,
CMD_STATE_ERROR_RECEIVED,
};
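/*
 * Command flow, summarized from olpc_xo175_ec_complete() below (an
 * inferred description; see the wiki link above for the authoritative
 * protocol): the host raises the CMD GPIO, the EC answers with a
 * CHAN_SWITCH packet, the host clears the GPIO and clocks the command
 * into the TX FIFO; a CHAN_NONE packet then marks the command as sent,
 * after which response bytes arrive one per CHAN_CMD_RESP packet (or a
 * single CHAN_CMD_ERROR byte on failure).
 */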
struct olpc_xo175_ec_cmd {
u8 command;
u8 nr_args;
u8 data_len;
u8 args[EC_MAX_CMD_DATA_LEN];
};
struct olpc_xo175_ec_resp {
u8 channel;
u8 byte;
};
struct olpc_xo175_ec {
bool suspended;
/* SPI related stuff. */
struct spi_device *spi;
struct spi_transfer xfer;
struct spi_message msg;
union {
struct olpc_xo175_ec_cmd cmd;
struct olpc_xo175_ec_resp resp;
} tx_buf, rx_buf;
/* GPIO for the CMD signals. */
struct gpio_desc *gpio_cmd;
/* Command handling related state. */
spinlock_t cmd_state_lock;
int cmd_state;
bool cmd_running;
struct completion cmd_done;
struct olpc_xo175_ec_cmd cmd;
u8 resp_data[EC_MAX_RESP_LEN];
int expected_resp_len;
int resp_len;
/* Power button. */
struct input_dev *pwrbtn;
/* Debug handling. */
char logbuf[LOG_BUF_SIZE];
int logbuf_len;
};
static struct platform_device *olpc_ec;
static int olpc_xo175_ec_resp_len(u8 cmd)
{
const struct ec_cmd_t *p;
for (p = olpc_xo175_ec_cmds; p->cmd; p++) {
if (p->cmd == cmd)
return p->bytes_returned;
}
return -EINVAL;
}
static void olpc_xo175_ec_flush_logbuf(struct olpc_xo175_ec *priv)
{
dev_dbg(&priv->spi->dev, "got debug string [%*pE]\n",
priv->logbuf_len, priv->logbuf);
priv->logbuf_len = 0;
}
static void olpc_xo175_ec_complete(void *arg);
static void olpc_xo175_ec_send_command(struct olpc_xo175_ec *priv, void *cmd,
size_t cmdlen)
{
int ret;
memcpy(&priv->tx_buf, cmd, cmdlen);
priv->xfer.len = cmdlen;
spi_message_init_with_transfers(&priv->msg, &priv->xfer, 1);
priv->msg.complete = olpc_xo175_ec_complete;
priv->msg.context = priv;
ret = spi_async(priv->spi, &priv->msg);
if (ret)
dev_err(&priv->spi->dev, "spi_async() failed %d\n", ret);
}
static void olpc_xo175_ec_read_packet(struct olpc_xo175_ec *priv)
{
u8 nonce[] = {0xA5, 0x5A};
olpc_xo175_ec_send_command(priv, nonce, sizeof(nonce));
}
static void olpc_xo175_ec_complete(void *arg)
{
struct olpc_xo175_ec *priv = arg;
struct device *dev = &priv->spi->dev;
struct power_supply *psy;
unsigned long flags;
u8 channel;
u8 byte;
int ret;
ret = priv->msg.status;
if (ret) {
dev_err(dev, "SPI transfer failed: %d\n", ret);
spin_lock_irqsave(&priv->cmd_state_lock, flags);
if (priv->cmd_running) {
priv->resp_len = 0;
priv->cmd_state = CMD_STATE_ERROR_RECEIVED;
complete(&priv->cmd_done);
}
spin_unlock_irqrestore(&priv->cmd_state_lock, flags);
if (ret != -EINTR)
olpc_xo175_ec_read_packet(priv);
return;
}
channel = priv->rx_buf.resp.channel;
byte = priv->rx_buf.resp.byte;
switch (channel) {
case CHAN_NONE:
spin_lock_irqsave(&priv->cmd_state_lock, flags);
if (!priv->cmd_running) {
/* We can safely ignore these */
dev_err(dev, "spurious FIFO read packet\n");
spin_unlock_irqrestore(&priv->cmd_state_lock, flags);
return;
}
priv->cmd_state = CMD_STATE_CMD_SENT;
if (!priv->expected_resp_len)
complete(&priv->cmd_done);
olpc_xo175_ec_read_packet(priv);
spin_unlock_irqrestore(&priv->cmd_state_lock, flags);
return;
case CHAN_SWITCH:
spin_lock_irqsave(&priv->cmd_state_lock, flags);
if (!priv->cmd_running) {
/* Just go with the flow */
dev_err(dev, "spurious SWITCH packet\n");
memset(&priv->cmd, 0, sizeof(priv->cmd));
priv->cmd.command = CMD_ECHO;
}
priv->cmd_state = CMD_STATE_CMD_IN_TX_FIFO;
/* Throw command into TxFIFO */
gpiod_set_value_cansleep(priv->gpio_cmd, 0);
olpc_xo175_ec_send_command(priv, &priv->cmd, sizeof(priv->cmd));
spin_unlock_irqrestore(&priv->cmd_state_lock, flags);
return;
case CHAN_CMD_RESP:
spin_lock_irqsave(&priv->cmd_state_lock, flags);
if (!priv->cmd_running) {
dev_err(dev, "spurious response packet\n");
} else if (priv->resp_len >= priv->expected_resp_len) {
dev_err(dev, "too many response packets\n");
} else {
priv->resp_data[priv->resp_len++] = byte;
if (priv->resp_len == priv->expected_resp_len) {
priv->cmd_state = CMD_STATE_RESP_RECEIVED;
complete(&priv->cmd_done);
}
}
spin_unlock_irqrestore(&priv->cmd_state_lock, flags);
break;
case CHAN_CMD_ERROR:
spin_lock_irqsave(&priv->cmd_state_lock, flags);
if (!priv->cmd_running) {
dev_err(dev, "spurious cmd error packet\n");
} else {
priv->resp_data[0] = byte;
priv->resp_len = 1;
priv->cmd_state = CMD_STATE_ERROR_RECEIVED;
complete(&priv->cmd_done);
}
spin_unlock_irqrestore(&priv->cmd_state_lock, flags);
break;
case CHAN_KEYBOARD:
dev_warn(dev, "keyboard is not supported\n");
break;
case CHAN_TOUCHPAD:
dev_warn(dev, "touchpad is not supported\n");
break;
case CHAN_EVENT:
dev_dbg(dev, "got event %.2x\n", byte);
switch (byte) {
case EVENT_AC_CHANGE:
psy = power_supply_get_by_name("olpc_ac");
if (psy) {
power_supply_changed(psy);
power_supply_put(psy);
}
break;
case EVENT_BATTERY_STATUS:
case EVENT_BATTERY_CRITICAL:
case EVENT_BATTERY_SOC_CHANGE:
case EVENT_BATTERY_ERROR:
psy = power_supply_get_by_name("olpc_battery");
if (psy) {
power_supply_changed(psy);
power_supply_put(psy);
}
break;
case EVENT_POWER_PRESSED:
input_report_key(priv->pwrbtn, KEY_POWER, 1);
input_sync(priv->pwrbtn);
input_report_key(priv->pwrbtn, KEY_POWER, 0);
input_sync(priv->pwrbtn);
fallthrough;
case EVENT_POWER_PRESS_WAKE:
case EVENT_TIMED_HOST_WAKE:
pm_wakeup_event(priv->pwrbtn->dev.parent,
PM_WAKEUP_TIME);
break;
default:
dev_dbg(dev, "ignored unknown event %.2x\n", byte);
break;
}
break;
case CHAN_DEBUG:
if (byte == '\n') {
olpc_xo175_ec_flush_logbuf(priv);
} else if (isprint(byte)) {
priv->logbuf[priv->logbuf_len++] = byte;
if (priv->logbuf_len == LOG_BUF_SIZE)
olpc_xo175_ec_flush_logbuf(priv);
}
break;
default:
dev_warn(dev, "unknown channel: %d, %.2x\n", channel, byte);
break;
}
/* Most non-command packets get the TxFIFO refilled and an ACK. */
olpc_xo175_ec_read_packet(priv);
}
/*
* This function is protected with a mutex. We can safely assume that
* there will be only one instance of this function running at a time.
* One of the ways in which we enforce this is by waiting until we get
* all response bytes back from the EC, rather than just the number that
* the caller requests (otherwise, we might start a new command while an
* old command's response bytes are still incoming).
*/
static int olpc_xo175_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *resp,
size_t resp_len, void *ec_cb_arg)
{
struct olpc_xo175_ec *priv = ec_cb_arg;
struct device *dev = &priv->spi->dev;
unsigned long flags;
size_t nr_bytes;
int ret = 0;
dev_dbg(dev, "CMD %x, %zd bytes expected\n", cmd, resp_len);
if (inlen > EC_MAX_CMD_DATA_LEN) {
dev_err(dev, "command len %zd too big!\n", inlen);
return -EOVERFLOW;
}
/* Suspending in the middle of an EC command hoses things badly! */
if (WARN_ON(priv->suspended))
return -EBUSY;
/* Ensure a valid command and return bytes */
ret = olpc_xo175_ec_resp_len(cmd);
if (ret < 0) {
dev_err_ratelimited(dev, "unknown command 0x%x\n", cmd);
/*
* Assume the best in our callers, and allow unknown commands
* through. I'm not the charitable type, but it was beaten
* into me. Just maintain a minimum standard of sanity.
*/
if (resp_len > sizeof(priv->resp_data)) {
dev_err(dev, "response too big: %zd!\n", resp_len);
return -EOVERFLOW;
}
nr_bytes = resp_len;
} else {
nr_bytes = (size_t)ret;
ret = 0;
}
resp_len = min(resp_len, nr_bytes);
spin_lock_irqsave(&priv->cmd_state_lock, flags);
/* Initialize the state machine */
init_completion(&priv->cmd_done);
priv->cmd_running = true;
priv->cmd_state = CMD_STATE_WAITING_FOR_SWITCH;
memset(&priv->cmd, 0, sizeof(priv->cmd));
priv->cmd.command = cmd;
priv->cmd.nr_args = inlen;
priv->cmd.data_len = 0;
memcpy(priv->cmd.args, inbuf, inlen);
priv->expected_resp_len = nr_bytes;
priv->resp_len = 0;
/* Tickle the cmd gpio to get things started */
gpiod_set_value_cansleep(priv->gpio_cmd, 1);
spin_unlock_irqrestore(&priv->cmd_state_lock, flags);
/* The irq handler should do the rest */
if (!wait_for_completion_timeout(&priv->cmd_done,
msecs_to_jiffies(4000))) {
dev_err(dev, "EC cmd error: timeout in STATE %d\n",
priv->cmd_state);
gpiod_set_value_cansleep(priv->gpio_cmd, 0);
spi_slave_abort(priv->spi);
olpc_xo175_ec_read_packet(priv);
return -ETIMEDOUT;
}
spin_lock_irqsave(&priv->cmd_state_lock, flags);
/* Deal with the results. */
if (priv->cmd_state == CMD_STATE_ERROR_RECEIVED) {
/* EC-provided error is in the single response byte */
dev_err(dev, "command 0x%x returned error 0x%x\n",
cmd, priv->resp_data[0]);
ret = -EREMOTEIO;
} else if (priv->resp_len != nr_bytes) {
dev_err(dev, "command 0x%x returned %d bytes, expected %zd bytes\n",
cmd, priv->resp_len, nr_bytes);
ret = -EREMOTEIO;
} else {
/*
* We may have up to EC_MAX_RESP_LEN bytes in priv->resp_data, but we
* only care about what we've been asked for. If the caller asked for
* only 2 bytes, give them that. We've guaranteed that
* resp_len <= priv->resp_len and priv->resp_len == nr_bytes.
*/
memcpy(resp, priv->resp_data, resp_len);
}
/* This should already be low, but just in case. */
gpiod_set_value_cansleep(priv->gpio_cmd, 0);
priv->cmd_running = false;
spin_unlock_irqrestore(&priv->cmd_state_lock, flags);
return ret;
}
static int olpc_xo175_ec_set_event_mask(unsigned int mask)
{
u8 args[2];
args[0] = mask >> 0;
args[1] = mask >> 8;
return olpc_ec_cmd(CMD_WRITE_EXT_SCI_MASK, args, 2, NULL, 0);
}
static void olpc_xo175_ec_power_off(void)
{
while (1) {
olpc_ec_cmd(CMD_POWER_OFF, NULL, 0, NULL, 0);
mdelay(1000);
}
}
static int __maybe_unused olpc_xo175_ec_suspend(struct device *dev)
{
struct olpc_xo175_ec *priv = dev_get_drvdata(dev);
static struct {
u8 suspend;
u32 suspend_count;
} __packed hintargs;
static unsigned int suspend_count;
/*
* SOC_SLEEP is not wired to the EC on B3 and earlier boards.
* This command lets the EC know instead. The suspend count doesn't seem
* to be used anywhere but in the EC debug output.
*/
hintargs.suspend = 1;
hintargs.suspend_count = suspend_count++;
olpc_ec_cmd(CMD_SUSPEND_HINT, (void *)&hintargs, sizeof(hintargs),
NULL, 0);
/*
* After we've sent the suspend hint, don't allow further EC commands
* to be run until we've resumed. Userspace tasks should be frozen,
* but kernel threads and interrupts could still schedule EC commands.
*/
priv->suspended = true;
return 0;
}
static int __maybe_unused olpc_xo175_ec_resume_noirq(struct device *dev)
{
struct olpc_xo175_ec *priv = dev_get_drvdata(dev);
priv->suspended = false;
return 0;
}
static int __maybe_unused olpc_xo175_ec_resume(struct device *dev)
{
u8 x = 0;
/*
* The resume hint is only needed if no other commands are
* being sent during resume. All it does is tell the EC
* the SoC is definitely awake.
*/
olpc_ec_cmd(CMD_SUSPEND_HINT, &x, 1, NULL, 0);
/* Enable all EC events while we're awake */
olpc_xo175_ec_set_event_mask(EC_ALL_EVENTS);
return 0;
}
static struct olpc_ec_driver olpc_xo175_ec_driver = {
.ec_cmd = olpc_xo175_ec_cmd,
};
static void olpc_xo175_ec_remove(struct spi_device *spi)
{
if (pm_power_off == olpc_xo175_ec_power_off)
pm_power_off = NULL;
spi_slave_abort(spi);
platform_device_unregister(olpc_ec);
olpc_ec = NULL;
}
static int olpc_xo175_ec_probe(struct spi_device *spi)
{
struct olpc_xo175_ec *priv;
int ret;
if (olpc_ec) {
dev_err(&spi->dev, "OLPC EC already registered.\n");
return -EBUSY;
}
priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->gpio_cmd = devm_gpiod_get(&spi->dev, "cmd", GPIOD_OUT_LOW);
if (IS_ERR(priv->gpio_cmd)) {
dev_err(&spi->dev, "failed to get cmd gpio: %ld\n",
PTR_ERR(priv->gpio_cmd));
return PTR_ERR(priv->gpio_cmd);
}
priv->spi = spi;
spin_lock_init(&priv->cmd_state_lock);
priv->cmd_state = CMD_STATE_IDLE;
init_completion(&priv->cmd_done);
priv->logbuf_len = 0;
/* Set up power button input device */
priv->pwrbtn = devm_input_allocate_device(&spi->dev);
if (!priv->pwrbtn)
return -ENOMEM;
priv->pwrbtn->name = "Power Button";
priv->pwrbtn->dev.parent = &spi->dev;
input_set_capability(priv->pwrbtn, EV_KEY, KEY_POWER);
ret = input_register_device(priv->pwrbtn);
if (ret) {
dev_err(&spi->dev, "error registering input device: %d\n", ret);
return ret;
}
spi_set_drvdata(spi, priv);
priv->xfer.rx_buf = &priv->rx_buf;
priv->xfer.tx_buf = &priv->tx_buf;
olpc_xo175_ec_read_packet(priv);
olpc_ec_driver_register(&olpc_xo175_ec_driver, priv);
olpc_ec = platform_device_register_resndata(&spi->dev, "olpc-ec", -1,
NULL, 0, NULL, 0);
/* Enable all EC events while we're awake */
olpc_xo175_ec_set_event_mask(EC_ALL_EVENTS);
if (pm_power_off == NULL)
pm_power_off = olpc_xo175_ec_power_off;
dev_info(&spi->dev, "OLPC XO-1.75 Embedded Controller driver\n");
return 0;
}
static const struct dev_pm_ops olpc_xo175_ec_pm_ops = {
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, olpc_xo175_ec_resume_noirq)
SET_RUNTIME_PM_OPS(olpc_xo175_ec_suspend, olpc_xo175_ec_resume, NULL)
};
static const struct of_device_id olpc_xo175_ec_of_match[] = {
{ .compatible = "olpc,xo1.75-ec" },
{ }
};
MODULE_DEVICE_TABLE(of, olpc_xo175_ec_of_match);
static const struct spi_device_id olpc_xo175_ec_id_table[] = {
{ "xo1.75-ec", 0 },
{}
};
MODULE_DEVICE_TABLE(spi, olpc_xo175_ec_id_table);
static struct spi_driver olpc_xo175_ec_spi_driver = {
.driver = {
.name = "olpc-xo175-ec",
.of_match_table = olpc_xo175_ec_of_match,
.pm = &olpc_xo175_ec_pm_ops,
},
.id_table = olpc_xo175_ec_id_table,
.probe = olpc_xo175_ec_probe,
.remove = olpc_xo175_ec_remove,
};
module_spi_driver(olpc_xo175_ec_spi_driver);
MODULE_DESCRIPTION("OLPC XO-1.75 Embedded Controller driver");
MODULE_AUTHOR("Lennert Buytenhek <[email protected]>"); /* Functionality */
MODULE_AUTHOR("Lubomir Rintel <[email protected]>"); /* Bugs */
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/olpc/olpc-xo175-ec.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Generic driver for the OLPC Embedded Controller.
*
* Author: Andres Salomon <[email protected]>
*
* Copyright (C) 2011-2012 One Laptop per Child Foundation.
*/
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/regulator/driver.h>
#include <linux/olpc-ec.h>
struct ec_cmd_desc {
u8 cmd;
u8 *inbuf, *outbuf;
size_t inlen, outlen;
int err;
struct completion finished;
struct list_head node;
void *priv;
};
struct olpc_ec_priv {
struct olpc_ec_driver *drv;
u8 version;
struct work_struct worker;
struct mutex cmd_lock;
/* DCON regulator */
bool dcon_enabled;
/* Pending EC commands */
struct list_head cmd_q;
spinlock_t cmd_q_lock;
struct dentry *dbgfs_dir;
/*
* EC event mask to be applied during suspend (defining wakeup
* sources).
*/
u16 ec_wakeup_mask;
/*
* Running an EC command while suspending means we don't always finish
* the command before the machine suspends. This means that the EC
* is expecting the command protocol to finish, but we after a period
* of time (while the OS is asleep) the EC times out and restarts its
* idle loop. Meanwhile, the OS wakes up, thinks it's still in the
* middle of the command protocol, starts throwing random things at
* the EC... and everyone's uphappy.
*/
bool suspended;
};
static struct olpc_ec_driver *ec_driver;
static struct olpc_ec_priv *ec_priv;
static void *ec_cb_arg;
void olpc_ec_driver_register(struct olpc_ec_driver *drv, void *arg)
{
ec_driver = drv;
ec_cb_arg = arg;
}
EXPORT_SYMBOL_GPL(olpc_ec_driver_register);
static void olpc_ec_worker(struct work_struct *w)
{
struct olpc_ec_priv *ec = container_of(w, struct olpc_ec_priv, worker);
struct ec_cmd_desc *desc = NULL;
unsigned long flags;
/* Grab the first pending command from the queue */
spin_lock_irqsave(&ec->cmd_q_lock, flags);
if (!list_empty(&ec->cmd_q)) {
desc = list_first_entry(&ec->cmd_q, struct ec_cmd_desc, node);
list_del(&desc->node);
}
spin_unlock_irqrestore(&ec->cmd_q_lock, flags);
/* Do we actually have anything to do? */
if (!desc)
return;
/* Protect the EC hw with a mutex; only run one cmd at a time */
mutex_lock(&ec->cmd_lock);
desc->err = ec_driver->ec_cmd(desc->cmd, desc->inbuf, desc->inlen,
desc->outbuf, desc->outlen, ec_cb_arg);
mutex_unlock(&ec->cmd_lock);
/* Finished, wake up olpc_ec_cmd() */
complete(&desc->finished);
/* Run the worker thread again in case there are more cmds pending */
schedule_work(&ec->worker);
}
/*
* Throw a cmd descriptor onto the list. We now have SMP OLPC machines, so
* locking is pretty critical.
*/
static void queue_ec_descriptor(struct ec_cmd_desc *desc,
struct olpc_ec_priv *ec)
{
unsigned long flags;
INIT_LIST_HEAD(&desc->node);
spin_lock_irqsave(&ec->cmd_q_lock, flags);
list_add_tail(&desc->node, &ec->cmd_q);
spin_unlock_irqrestore(&ec->cmd_q_lock, flags);
schedule_work(&ec->worker);
}
int olpc_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen)
{
struct olpc_ec_priv *ec = ec_priv;
struct ec_cmd_desc desc;
/* Driver not yet registered. */
if (!ec_driver)
return -EPROBE_DEFER;
if (WARN_ON(!ec_driver->ec_cmd))
return -ENODEV;
if (!ec)
return -ENOMEM;
/* Suspending in the middle of a command hoses things really badly */
if (WARN_ON(ec->suspended))
return -EBUSY;
might_sleep();
desc.cmd = cmd;
desc.inbuf = inbuf;
desc.outbuf = outbuf;
desc.inlen = inlen;
desc.outlen = outlen;
desc.err = 0;
init_completion(&desc.finished);
queue_ec_descriptor(&desc, ec);
/* Timeouts must be handled in the platform-specific EC hook */
wait_for_completion(&desc.finished);
/* The worker thread dequeues the cmd; no need to do anything here */
return desc.err;
}
EXPORT_SYMBOL_GPL(olpc_ec_cmd);
void olpc_ec_wakeup_set(u16 value)
{
struct olpc_ec_priv *ec = ec_priv;
if (WARN_ON(!ec))
return;
ec->ec_wakeup_mask |= value;
}
EXPORT_SYMBOL_GPL(olpc_ec_wakeup_set);
void olpc_ec_wakeup_clear(u16 value)
{
struct olpc_ec_priv *ec = ec_priv;
if (WARN_ON(!ec))
return;
ec->ec_wakeup_mask &= ~value;
}
EXPORT_SYMBOL_GPL(olpc_ec_wakeup_clear);
int olpc_ec_mask_write(u16 bits)
{
struct olpc_ec_priv *ec = ec_priv;
if (WARN_ON(!ec))
return -ENODEV;
/* EC version 0x5f adds support for wide SCI mask */
if (ec->version >= 0x5f) {
__be16 ec_word = cpu_to_be16(bits);
return olpc_ec_cmd(EC_WRITE_EXT_SCI_MASK, (void *)&ec_word, 2, NULL, 0);
} else {
u8 ec_byte = bits & 0xff;
return olpc_ec_cmd(EC_WRITE_SCI_MASK, &ec_byte, 1, NULL, 0);
}
}
EXPORT_SYMBOL_GPL(olpc_ec_mask_write);
/*
* Returns true if the compile and runtime configurations allow for EC events
* to wake the system.
*/
bool olpc_ec_wakeup_available(void)
{
if (WARN_ON(!ec_driver))
return false;
return ec_driver->wakeup_available;
}
EXPORT_SYMBOL_GPL(olpc_ec_wakeup_available);
int olpc_ec_sci_query(u16 *sci_value)
{
struct olpc_ec_priv *ec = ec_priv;
int ret;
if (WARN_ON(!ec))
return -ENODEV;
/* EC version 0x5f adds support for wide SCI mask */
if (ec->version >= 0x5f) {
__be16 ec_word;
ret = olpc_ec_cmd(EC_EXT_SCI_QUERY, NULL, 0, (void *)&ec_word, 2);
if (ret == 0)
*sci_value = be16_to_cpu(ec_word);
} else {
u8 ec_byte;
ret = olpc_ec_cmd(EC_SCI_QUERY, NULL, 0, &ec_byte, 1);
if (ret == 0)
*sci_value = ec_byte;
}
return ret;
}
EXPORT_SYMBOL_GPL(olpc_ec_sci_query);
#ifdef CONFIG_DEBUG_FS
/*
* debugfs support for "generic commands", to allow sending
* arbitrary EC commands from userspace.
*/
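/*
 * Write format: "cmd:response-count [arg1 [arg2 ...]]" - the command and
 * argument bytes in hex, the expected response length in decimal. For
 * example (command number chosen arbitrarily for illustration):
 *
 *   echo "a2:2 01 ff" > <debugfs>/olpc-ec/cmd
 *   cat <debugfs>/olpc-ec/cmd
 *
 * sends command 0xa2 with arguments 0x01 and 0xff and reads back the two
 * response bytes as hex values.
 */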
#define EC_MAX_CMD_ARGS (5 + 1) /* cmd byte + 5 args */
#define EC_MAX_CMD_REPLY (8)
static DEFINE_MUTEX(ec_dbgfs_lock);
static unsigned char ec_dbgfs_resp[EC_MAX_CMD_REPLY];
static unsigned int ec_dbgfs_resp_bytes;
static ssize_t ec_dbgfs_cmd_write(struct file *file, const char __user *buf,
size_t size, loff_t *ppos)
{
int i, m;
unsigned char ec_cmd[EC_MAX_CMD_ARGS];
unsigned int ec_cmd_int[EC_MAX_CMD_ARGS];
char cmdbuf[64] = "";
int ec_cmd_bytes;
mutex_lock(&ec_dbgfs_lock);
size = simple_write_to_buffer(cmdbuf, sizeof(cmdbuf), ppos, buf, size);
m = sscanf(cmdbuf, "%x:%u %x %x %x %x %x", &ec_cmd_int[0],
&ec_dbgfs_resp_bytes, &ec_cmd_int[1], &ec_cmd_int[2],
&ec_cmd_int[3], &ec_cmd_int[4], &ec_cmd_int[5]);
if (m < 2 || ec_dbgfs_resp_bytes > EC_MAX_CMD_REPLY) {
/* reset to prevent overflow on read */
ec_dbgfs_resp_bytes = 0;
pr_debug("olpc-ec: bad ec cmd: cmd:response-count [arg1 [arg2 ...]]\n");
size = -EINVAL;
goto out;
}
/* convert scanf'd ints to char */
ec_cmd_bytes = m - 2;
for (i = 0; i <= ec_cmd_bytes; i++)
ec_cmd[i] = ec_cmd_int[i];
pr_debug("olpc-ec: debugfs cmd 0x%02x with %d args %5ph, want %d returns\n",
ec_cmd[0], ec_cmd_bytes, ec_cmd + 1,
ec_dbgfs_resp_bytes);
olpc_ec_cmd(ec_cmd[0], (ec_cmd_bytes == 0) ? NULL : &ec_cmd[1],
ec_cmd_bytes, ec_dbgfs_resp, ec_dbgfs_resp_bytes);
pr_debug("olpc-ec: response %8ph (%d bytes expected)\n",
ec_dbgfs_resp, ec_dbgfs_resp_bytes);
out:
mutex_unlock(&ec_dbgfs_lock);
return size;
}
static ssize_t ec_dbgfs_cmd_read(struct file *file, char __user *buf,
size_t size, loff_t *ppos)
{
unsigned int i, r;
char *rp;
char respbuf[64];
mutex_lock(&ec_dbgfs_lock);
rp = respbuf;
rp += sprintf(rp, "%02x", ec_dbgfs_resp[0]);
for (i = 1; i < ec_dbgfs_resp_bytes; i++)
rp += sprintf(rp, ", %02x", ec_dbgfs_resp[i]);
mutex_unlock(&ec_dbgfs_lock);
rp += sprintf(rp, "\n");
r = rp - respbuf;
return simple_read_from_buffer(buf, size, ppos, respbuf, r);
}
static const struct file_operations ec_dbgfs_ops = {
.write = ec_dbgfs_cmd_write,
.read = ec_dbgfs_cmd_read,
};
static struct dentry *olpc_ec_setup_debugfs(void)
{
struct dentry *dbgfs_dir;
dbgfs_dir = debugfs_create_dir("olpc-ec", NULL);
if (IS_ERR_OR_NULL(dbgfs_dir))
return NULL;
debugfs_create_file("cmd", 0600, dbgfs_dir, NULL, &ec_dbgfs_ops);
return dbgfs_dir;
}
#else
static struct dentry *olpc_ec_setup_debugfs(void)
{
return NULL;
}
#endif /* CONFIG_DEBUG_FS */
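/* Ask the EC to turn the DCON (display controller) power on or off. */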
static int olpc_ec_set_dcon_power(struct olpc_ec_priv *ec, bool state)
{
unsigned char ec_byte = state;
int ret;
if (ec->dcon_enabled == state)
return 0;
ret = olpc_ec_cmd(EC_DCON_POWER_MODE, &ec_byte, 1, NULL, 0);
if (ret)
return ret;
ec->dcon_enabled = state;
return 0;
}
static int dcon_regulator_enable(struct regulator_dev *rdev)
{
struct olpc_ec_priv *ec = rdev_get_drvdata(rdev);
return olpc_ec_set_dcon_power(ec, true);
}
static int dcon_regulator_disable(struct regulator_dev *rdev)
{
struct olpc_ec_priv *ec = rdev_get_drvdata(rdev);
return olpc_ec_set_dcon_power(ec, false);
}
static int dcon_regulator_is_enabled(struct regulator_dev *rdev)
{
struct olpc_ec_priv *ec = rdev_get_drvdata(rdev);
return ec->dcon_enabled ? 1 : 0;
}
static const struct regulator_ops dcon_regulator_ops = {
.enable = dcon_regulator_enable,
.disable = dcon_regulator_disable,
.is_enabled = dcon_regulator_is_enabled,
};
static const struct regulator_desc dcon_desc = {
.name = "dcon",
.id = 0,
.ops = &dcon_regulator_ops,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.enable_time = 25000,
};
static int olpc_ec_probe(struct platform_device *pdev)
{
struct olpc_ec_priv *ec;
struct regulator_config config = { };
struct regulator_dev *regulator;
int err;
if (!ec_driver)
return -ENODEV;
ec = kzalloc(sizeof(*ec), GFP_KERNEL);
if (!ec)
return -ENOMEM;
ec->drv = ec_driver;
INIT_WORK(&ec->worker, olpc_ec_worker);
mutex_init(&ec->cmd_lock);
INIT_LIST_HEAD(&ec->cmd_q);
spin_lock_init(&ec->cmd_q_lock);
ec_priv = ec;
platform_set_drvdata(pdev, ec);
/* get the EC revision */
err = olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0, &ec->version, 1);
if (err)
goto error;
config.dev = pdev->dev.parent;
config.driver_data = ec;
ec->dcon_enabled = true;
regulator = devm_regulator_register(&pdev->dev, &dcon_desc, &config);
if (IS_ERR(regulator)) {
dev_err(&pdev->dev, "failed to register DCON regulator\n");
err = PTR_ERR(regulator);
goto error;
}
ec->dbgfs_dir = olpc_ec_setup_debugfs();
return 0;
error:
ec_priv = NULL;
kfree(ec);
return err;
}
static int olpc_ec_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct olpc_ec_priv *ec = platform_get_drvdata(pdev);
int err = 0;
olpc_ec_mask_write(ec->ec_wakeup_mask);
if (ec_driver->suspend)
err = ec_driver->suspend(pdev);
if (!err)
ec->suspended = true;
return err;
}
static int olpc_ec_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct olpc_ec_priv *ec = platform_get_drvdata(pdev);
ec->suspended = false;
return ec_driver->resume ? ec_driver->resume(pdev) : 0;
}
static const struct dev_pm_ops olpc_ec_pm_ops = {
.suspend_late = olpc_ec_suspend,
.resume_early = olpc_ec_resume,
};
static struct platform_driver olpc_ec_plat_driver = {
.probe = olpc_ec_probe,
.driver = {
.name = "olpc-ec",
.pm = &olpc_ec_pm_ops,
},
};
static int __init olpc_ec_init_module(void)
{
return platform_driver_register(&olpc_ec_plat_driver);
}
arch_initcall(olpc_ec_init_module);
| linux-master | drivers/platform/olpc/olpc-ec.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2012 Intel, Inc.
* Copyright (C) 2013 Intel, Inc.
* Copyright (C) 2014 Linaro Limited
* Copyright (C) 2011-2016 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
/* This source file contains the implementation of a special device driver
* that intends to provide a *very* fast communication channel between the
* guest system and the QEMU emulator.
*
* Usage from the guest is simply the following (error handling simplified):
*
* int fd = open("/dev/qemu_pipe",O_RDWR);
* .... write() or read() through the pipe.
*
* This driver doesn't deal with the exact protocol used during the session.
* It is intended to be as simple as something like:
*
* // do this _just_ after opening the fd to connect to a specific
* // emulator service.
* const char* msg = "<pipename>";
* if (write(fd, msg, strlen(msg)+1) < 0) {
* ... could not connect to <pipename> service
* close(fd);
* }
*
* // after this, simply read() and write() to communicate with the
* // service. Exact protocol details left as an exercise to the reader.
*
* This driver is very fast because it doesn't copy any data through
* intermediate buffers, since the emulator is capable of translating
* guest user addresses into host ones.
*
* Note that we must however ensure that each user page involved in the
* exchange is properly mapped during a transfer.
*/
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/acpi.h>
#include <linux/bug.h>
#include "goldfish_pipe_qemu.h"
/*
* Update this when something changes in the driver's behavior so the host
* can benefit from knowing it
*/
enum {
PIPE_DRIVER_VERSION = 2,
PIPE_CURRENT_DEVICE_VERSION = 2
};
enum {
MAX_BUFFERS_PER_COMMAND = 336,
MAX_SIGNALLED_PIPES = 64,
INITIAL_PIPES_CAPACITY = 64
};
struct goldfish_pipe_dev;
/* A per-pipe command structure, shared with the host */
struct goldfish_pipe_command {
s32 cmd; /* PipeCmdCode, guest -> host */
s32 id; /* pipe id, guest -> host */
s32 status; /* command execution status, host -> guest */
s32 reserved; /* to pad to 64-bit boundary */
union {
/* Parameters for PIPE_CMD_{READ,WRITE} */
struct {
/* number of buffers, guest -> host */
u32 buffers_count;
/* number of consumed bytes, host -> guest */
s32 consumed_size;
/* buffer pointers, guest -> host */
u64 ptrs[MAX_BUFFERS_PER_COMMAND];
/* buffer sizes, guest -> host */
u32 sizes[MAX_BUFFERS_PER_COMMAND];
} rw_params;
};
};
/* A single signalled pipe information */
struct signalled_pipe_buffer {
u32 id;
u32 flags;
};
/* Parameters for the PIPE_CMD_OPEN command */
struct open_command_param {
u64 command_buffer_ptr;
u32 rw_params_max_count;
};
/* Device-level set of buffers shared with the host */
struct goldfish_pipe_dev_buffers {
struct open_command_param open_command_params;
struct signalled_pipe_buffer
signalled_pipe_buffers[MAX_SIGNALLED_PIPES];
};
/* This data type models a given pipe instance */
struct goldfish_pipe {
/* pipe ID - index into goldfish_pipe_dev::pipes array */
u32 id;
/* The wake flags pipe is waiting for
* Note: not protected with any lock, uses atomic operations
* and barriers to make it thread-safe.
*/
unsigned long flags;
/* wake flags the host has signalled,
* - protected by goldfish_pipe_dev::lock
*/
unsigned long signalled_flags;
/* A pointer to command buffer */
struct goldfish_pipe_command *command_buffer;
/* doubly linked list of signalled pipes, protected by
* goldfish_pipe_dev::lock
*/
struct goldfish_pipe *prev_signalled;
struct goldfish_pipe *next_signalled;
/*
* A pipe's own lock. Protects the following:
* - *command_buffer - makes sure a command can safely write its
* parameters to the host and read the results back.
*/
struct mutex lock;
/* A wake queue for sleeping until host signals an event */
wait_queue_head_t wake_queue;
/* Pointer to the parent goldfish_pipe_dev instance */
struct goldfish_pipe_dev *dev;
/* A buffer of pages, too large to fit into a stack frame */
struct page *pages[MAX_BUFFERS_PER_COMMAND];
};
/* The global driver data. Holds a reference to the i/o page used to
* communicate with the emulator, and a wake queue for blocked tasks
* waiting to be awoken.
*/
struct goldfish_pipe_dev {
/* A magic number to check if this is an instance of this struct */
void *magic;
/*
* Global device spinlock. Protects the following members:
* - pipes, pipes_capacity
* - [*pipes, *pipes + pipes_capacity) - array data
* - first_signalled_pipe,
* goldfish_pipe::prev_signalled,
* goldfish_pipe::next_signalled,
 * goldfish_pipe::signalled_flags - all signalled-related fields,
* in all allocated pipes
* - open_command_params - PIPE_CMD_OPEN-related buffers
*
* It looks like a lot of different fields, but the trick is that
* the only operation that happens often is the signalled pipes array
* manipulation. That's why it's OK for now to keep the rest of the
* fields under the same lock. If we notice too much contention because
* of PIPE_CMD_OPEN, then we should add a separate lock there.
*/
spinlock_t lock;
/*
* Array of the pipes of |pipes_capacity| elements,
* indexed by goldfish_pipe::id
*/
struct goldfish_pipe **pipes;
u32 pipes_capacity;
/* Pointers to the buffers host uses for interaction with this driver */
struct goldfish_pipe_dev_buffers *buffers;
/* Head of a doubly linked list of signalled pipes */
struct goldfish_pipe *first_signalled_pipe;
/* ptr to platform device's device struct */
struct device *pdev_dev;
/* Some device-specific data */
int irq;
int version;
unsigned char __iomem *base;
struct miscdevice miscdev;
};
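/*
 * Send a command for the given pipe to the host: write the command code
 * into the shared command buffer, then kick the device by writing the
 * pipe id to PIPE_REG_CMD. The host executes the command synchronously
 * and leaves its status in the command buffer. Caller must hold
 * pipe->lock.
 */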
static int goldfish_pipe_cmd_locked(struct goldfish_pipe *pipe,
enum PipeCmdCode cmd)
{
pipe->command_buffer->cmd = cmd;
/* failure by default */
pipe->command_buffer->status = PIPE_ERROR_INVAL;
writel(pipe->id, pipe->dev->base + PIPE_REG_CMD);
return pipe->command_buffer->status;
}
static int goldfish_pipe_cmd(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
{
int status;
if (mutex_lock_interruptible(&pipe->lock))
return PIPE_ERROR_IO;
status = goldfish_pipe_cmd_locked(pipe, cmd);
mutex_unlock(&pipe->lock);
return status;
}
/*
* This function converts an error code returned by the emulator through
* the PIPE_REG_STATUS i/o register into a valid negative errno value.
*/
static int goldfish_pipe_error_convert(int status)
{
switch (status) {
case PIPE_ERROR_AGAIN:
return -EAGAIN;
case PIPE_ERROR_NOMEM:
return -ENOMEM;
case PIPE_ERROR_IO:
return -EIO;
default:
return -EINVAL;
}
}
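/*
 * Pin the user pages spanning [first_page, last_page] for a transfer,
 * clamping the count to MAX_BUFFERS_PER_COMMAND. Returns the number of
 * pages actually pinned (possibly fewer than requested) or -EFAULT.
 */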
static int goldfish_pin_pages(unsigned long first_page,
unsigned long last_page,
unsigned int last_page_size,
int is_write,
struct page *pages[MAX_BUFFERS_PER_COMMAND],
unsigned int *iter_last_page_size)
{
int ret;
int requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1;
if (requested_pages > MAX_BUFFERS_PER_COMMAND) {
requested_pages = MAX_BUFFERS_PER_COMMAND;
*iter_last_page_size = PAGE_SIZE;
} else {
*iter_last_page_size = last_page_size;
}
ret = pin_user_pages_fast(first_page, requested_pages,
!is_write ? FOLL_WRITE : 0,
pages);
if (ret <= 0)
return -EFAULT;
if (ret < requested_pages)
*iter_last_page_size = PAGE_SIZE;
return ret;
}
/* Populate the call parameters, merging adjacent pages together */
static void populate_rw_params(struct page **pages,
int pages_count,
unsigned long address,
unsigned long address_end,
unsigned long first_page,
unsigned long last_page,
unsigned int iter_last_page_size,
int is_write,
struct goldfish_pipe_command *command)
{
/*
* Process the first page separately - it's the only page that
* needs special handling for its start address.
*/
unsigned long xaddr = page_to_phys(pages[0]);
unsigned long xaddr_prev = xaddr;
int buffer_idx = 0;
int i = 1;
int size_on_page = first_page == last_page
? (int)(address_end - address)
: (PAGE_SIZE - (address & ~PAGE_MASK));
command->rw_params.ptrs[0] = (u64)(xaddr | (address & ~PAGE_MASK));
command->rw_params.sizes[0] = size_on_page;
for (; i < pages_count; ++i) {
xaddr = page_to_phys(pages[i]);
size_on_page = (i == pages_count - 1) ?
iter_last_page_size : PAGE_SIZE;
if (xaddr == xaddr_prev + PAGE_SIZE) {
command->rw_params.sizes[buffer_idx] += size_on_page;
} else {
++buffer_idx;
command->rw_params.ptrs[buffer_idx] = (u64)xaddr;
command->rw_params.sizes[buffer_idx] = size_on_page;
}
xaddr_prev = xaddr;
}
command->rw_params.buffers_count = buffer_idx + 1;
}
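/*
 * Transfer as much of the buffer as fits into one command: pin the user
 * pages, describe them to the host, issue PIPE_CMD_READ/WRITE, then unpin
 * the pages again (marking them dirty if the host wrote into them).
 */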
static int transfer_max_buffers(struct goldfish_pipe *pipe,
unsigned long address,
unsigned long address_end,
int is_write,
unsigned long last_page,
unsigned int last_page_size,
s32 *consumed_size,
int *status)
{
unsigned long first_page = address & PAGE_MASK;
unsigned int iter_last_page_size;
int pages_count;
/* Serialize access to the pipe command buffers */
if (mutex_lock_interruptible(&pipe->lock))
return -ERESTARTSYS;
pages_count = goldfish_pin_pages(first_page, last_page,
last_page_size, is_write,
pipe->pages, &iter_last_page_size);
if (pages_count < 0) {
mutex_unlock(&pipe->lock);
return pages_count;
}
populate_rw_params(pipe->pages, pages_count, address, address_end,
first_page, last_page, iter_last_page_size, is_write,
pipe->command_buffer);
/* Transfer the data */
*status = goldfish_pipe_cmd_locked(pipe,
is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ);
*consumed_size = pipe->command_buffer->rw_params.consumed_size;
unpin_user_pages_dirty_lock(pipe->pages, pages_count,
!is_write && *consumed_size > 0);
mutex_unlock(&pipe->lock);
return 0;
}
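/*
 * Tell the host to signal us when the pipe becomes readable/writable
 * again, then sleep until that wake event arrives (or the pipe is closed
 * on the host side).
 */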
static int wait_for_host_signal(struct goldfish_pipe *pipe, int is_write)
{
u32 wake_bit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
set_bit(wake_bit, &pipe->flags);
/* Tell the emulator we're going to wait for a wake event */
goldfish_pipe_cmd(pipe,
is_write ? PIPE_CMD_WAKE_ON_WRITE : PIPE_CMD_WAKE_ON_READ);
while (test_bit(wake_bit, &pipe->flags)) {
if (wait_event_interruptible(pipe->wake_queue,
!test_bit(wake_bit, &pipe->flags)))
return -ERESTARTSYS;
if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
return -EIO;
}
return 0;
}
static ssize_t goldfish_pipe_read_write(struct file *filp,
char __user *buffer,
size_t bufflen,
int is_write)
{
struct goldfish_pipe *pipe = filp->private_data;
int count = 0, ret = -EINVAL;
unsigned long address, address_end, last_page;
unsigned int last_page_size;
/* If the emulator already closed the pipe, no need to go further */
if (unlikely(test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)))
return -EIO;
/* Null reads or writes succeed */
if (unlikely(bufflen == 0))
return 0;
/* Check the buffer range for access */
if (unlikely(!access_ok(buffer, bufflen)))
return -EFAULT;
address = (unsigned long)buffer;
address_end = address + bufflen;
last_page = (address_end - 1) & PAGE_MASK;
last_page_size = ((address_end - 1) & ~PAGE_MASK) + 1;
while (address < address_end) {
s32 consumed_size;
int status;
ret = transfer_max_buffers(pipe, address, address_end, is_write,
last_page, last_page_size,
&consumed_size, &status);
if (ret < 0)
break;
if (consumed_size > 0) {
/* No matter what the status is, we've transferred
* something.
*/
count += consumed_size;
address += consumed_size;
}
if (status > 0)
continue;
if (status == 0) {
/* EOF */
ret = 0;
break;
}
if (count > 0) {
/*
* An error occurred, but we already transferred
* something on one of the previous iterations.
* Just return what we already copied and log this
 * error.
*/
if (status != PIPE_ERROR_AGAIN)
dev_err_ratelimited(pipe->dev->pdev_dev,
"backend error %d on %s\n",
status, is_write ? "write" : "read");
break;
}
/*
* If the error is not PIPE_ERROR_AGAIN, or if we are in
* non-blocking mode, just return the error code.
*/
if (status != PIPE_ERROR_AGAIN ||
(filp->f_flags & O_NONBLOCK) != 0) {
ret = goldfish_pipe_error_convert(status);
break;
}
status = wait_for_host_signal(pipe, is_write);
if (status < 0)
return status;
}
if (count > 0)
return count;
return ret;
}
static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
size_t bufflen, loff_t *ppos)
{
return goldfish_pipe_read_write(filp, buffer, bufflen,
/* is_write */ 0);
}
static ssize_t goldfish_pipe_write(struct file *filp,
const char __user *buffer, size_t bufflen,
loff_t *ppos)
{
/* cast away the const */
char __user *no_const_buffer = (char __user *)buffer;
return goldfish_pipe_read_write(filp, no_const_buffer, bufflen,
/* is_write */ 1);
}
static __poll_t goldfish_pipe_poll(struct file *filp, poll_table *wait)
{
struct goldfish_pipe *pipe = filp->private_data;
__poll_t mask = 0;
int status;
poll_wait(filp, &pipe->wake_queue, wait);
status = goldfish_pipe_cmd(pipe, PIPE_CMD_POLL);
if (status < 0)
return -ERESTARTSYS;
if (status & PIPE_POLL_IN)
mask |= EPOLLIN | EPOLLRDNORM;
if (status & PIPE_POLL_OUT)
mask |= EPOLLOUT | EPOLLWRNORM;
if (status & PIPE_POLL_HUP)
mask |= EPOLLHUP;
if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
mask |= EPOLLERR;
return mask;
}
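/*
 * Add a pipe to the device's list of signalled pipes, recording the wake
 * flags reported by the host. Caller must hold dev->lock.
 */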
static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev,
u32 id, u32 flags)
{
struct goldfish_pipe *pipe;
if (WARN_ON(id >= dev->pipes_capacity))
return;
pipe = dev->pipes[id];
if (!pipe)
return;
pipe->signalled_flags |= flags;
if (pipe->prev_signalled || pipe->next_signalled ||
dev->first_signalled_pipe == pipe)
return; /* already in the list */
pipe->next_signalled = dev->first_signalled_pipe;
if (dev->first_signalled_pipe)
dev->first_signalled_pipe->prev_signalled = pipe;
dev->first_signalled_pipe = pipe;
}
static void signalled_pipes_remove_locked(struct goldfish_pipe_dev *dev,
struct goldfish_pipe *pipe)
{
if (pipe->prev_signalled)
pipe->prev_signalled->next_signalled = pipe->next_signalled;
if (pipe->next_signalled)
pipe->next_signalled->prev_signalled = pipe->prev_signalled;
if (pipe == dev->first_signalled_pipe)
dev->first_signalled_pipe = pipe->next_signalled;
pipe->prev_signalled = NULL;
pipe->next_signalled = NULL;
}
static struct goldfish_pipe *signalled_pipes_pop_front(
struct goldfish_pipe_dev *dev, int *wakes)
{
struct goldfish_pipe *pipe;
unsigned long flags;
spin_lock_irqsave(&dev->lock, flags);
pipe = dev->first_signalled_pipe;
if (pipe) {
*wakes = pipe->signalled_flags;
pipe->signalled_flags = 0;
/*
* This is an optimized version of
* signalled_pipes_remove_locked()
 * - we want to wake the sleeping pipe operations as
 * fast as possible.
*/
dev->first_signalled_pipe = pipe->next_signalled;
if (dev->first_signalled_pipe)
dev->first_signalled_pipe->prev_signalled = NULL;
pipe->next_signalled = NULL;
}
spin_unlock_irqrestore(&dev->lock, flags);
return pipe;
}
static irqreturn_t goldfish_interrupt_task(int irq, void *dev_addr)
{
/* Iterate over the signalled pipes and wake them one by one */
struct goldfish_pipe_dev *dev = dev_addr;
struct goldfish_pipe *pipe;
int wakes;
while ((pipe = signalled_pipes_pop_front(dev, &wakes)) != NULL) {
if (wakes & PIPE_WAKE_CLOSED) {
pipe->flags = 1 << BIT_CLOSED_ON_HOST;
} else {
if (wakes & PIPE_WAKE_READ)
clear_bit(BIT_WAKE_ON_READ, &pipe->flags);
if (wakes & PIPE_WAKE_WRITE)
clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags);
}
/*
* wake_up_interruptible() implies a write barrier, so don't
* explicitly add another one here.
*/
wake_up_interruptible(&pipe->wake_queue);
}
return IRQ_HANDLED;
}
static void goldfish_pipe_device_deinit(struct platform_device *pdev,
struct goldfish_pipe_dev *dev);
/*
* The general idea of the (threaded) interrupt handling:
*
* 1. device raises an interrupt if there's at least one signalled pipe
* 2. IRQ handler reads the signalled pipes and their count from the device
* 3. device writes them into a shared buffer and returns the count
* it only resets the IRQ if it has returned all signalled pipes,
* otherwise it leaves it raised, so IRQ handler will be called
* again for the next chunk
* 4. IRQ handler adds all returned pipes to the device's signalled pipes list
* 5. IRQ handler defers processing the signalled pipes from the list in a
* separate context
*/
static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
{
u32 count;
u32 i;
unsigned long flags;
struct goldfish_pipe_dev *dev = dev_id;
if (dev->magic != &goldfish_pipe_device_deinit)
return IRQ_NONE;
/* Request the signalled pipes from the device */
spin_lock_irqsave(&dev->lock, flags);
count = readl(dev->base + PIPE_REG_GET_SIGNALLED);
if (count == 0) {
spin_unlock_irqrestore(&dev->lock, flags);
return IRQ_NONE;
}
if (count > MAX_SIGNALLED_PIPES)
count = MAX_SIGNALLED_PIPES;
for (i = 0; i < count; ++i)
signalled_pipes_add_locked(dev,
dev->buffers->signalled_pipe_buffers[i].id,
dev->buffers->signalled_pipe_buffers[i].flags);
spin_unlock_irqrestore(&dev->lock, flags);
return IRQ_WAKE_THREAD;
}
static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
{
int id;
for (id = 0; id < dev->pipes_capacity; ++id)
if (!dev->pipes[id])
return id;
{
/* Reallocate the array.
* Since get_free_pipe_id_locked runs with interrupts disabled,
* we don't want to make calls that could lead to sleep.
*/
u32 new_capacity = 2 * dev->pipes_capacity;
struct goldfish_pipe **pipes =
kcalloc(new_capacity, sizeof(*pipes), GFP_ATOMIC);
if (!pipes)
return -ENOMEM;
memcpy(pipes, dev->pipes, sizeof(*pipes) * dev->pipes_capacity);
kfree(dev->pipes);
dev->pipes = pipes;
id = dev->pipes_capacity;
dev->pipes_capacity = new_capacity;
}
return id;
}
/* A helper function to get the instance of goldfish_pipe_dev from file */
static struct goldfish_pipe_dev *to_goldfish_pipe_dev(struct file *file)
{
struct miscdevice *miscdev = file->private_data;
return container_of(miscdev, struct goldfish_pipe_dev, miscdev);
}
/**
* goldfish_pipe_open - open a channel to the AVD
* @inode: inode of device
* @file: file struct of opener
*
 * Create a new pipe link between the emulator and the user application.
* Each new request produces a new pipe.
*
* Note: we use the pipe ID as a mux. All goldfish emulations are 32bit
 * right now so this is fine. A move to 64bit will need to revisit this
 * addressing scheme.
*/
static int goldfish_pipe_open(struct inode *inode, struct file *file)
{
struct goldfish_pipe_dev *dev = to_goldfish_pipe_dev(file);
unsigned long flags;
int id;
int status;
/* Allocate new pipe kernel object */
struct goldfish_pipe *pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
if (!pipe)
return -ENOMEM;
pipe->dev = dev;
mutex_init(&pipe->lock);
init_waitqueue_head(&pipe->wake_queue);
/*
* Command buffer needs to be allocated on its own page to make sure
* it is physically contiguous in host's address space.
*/
BUILD_BUG_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE);
pipe->command_buffer =
(struct goldfish_pipe_command *)__get_free_page(GFP_KERNEL);
if (!pipe->command_buffer) {
status = -ENOMEM;
goto err_pipe;
}
spin_lock_irqsave(&dev->lock, flags);
id = get_free_pipe_id_locked(dev);
if (id < 0) {
status = id;
goto err_id_locked;
}
dev->pipes[id] = pipe;
pipe->id = id;
pipe->command_buffer->id = id;
/* Now tell the emulator we're opening a new pipe. */
dev->buffers->open_command_params.rw_params_max_count =
MAX_BUFFERS_PER_COMMAND;
dev->buffers->open_command_params.command_buffer_ptr =
(u64)(unsigned long)__pa(pipe->command_buffer);
status = goldfish_pipe_cmd_locked(pipe, PIPE_CMD_OPEN);
spin_unlock_irqrestore(&dev->lock, flags);
if (status < 0)
goto err_cmd;
/* All is done, save the pipe into the file's private data field */
file->private_data = pipe;
return 0;
err_cmd:
spin_lock_irqsave(&dev->lock, flags);
dev->pipes[id] = NULL;
err_id_locked:
spin_unlock_irqrestore(&dev->lock, flags);
free_page((unsigned long)pipe->command_buffer);
err_pipe:
kfree(pipe);
return status;
}
static int goldfish_pipe_release(struct inode *inode, struct file *filp)
{
unsigned long flags;
struct goldfish_pipe *pipe = filp->private_data;
struct goldfish_pipe_dev *dev = pipe->dev;
/* The guest is closing the channel, so tell the emulator right now */
goldfish_pipe_cmd(pipe, PIPE_CMD_CLOSE);
spin_lock_irqsave(&dev->lock, flags);
dev->pipes[pipe->id] = NULL;
signalled_pipes_remove_locked(dev, pipe);
spin_unlock_irqrestore(&dev->lock, flags);
filp->private_data = NULL;
free_page((unsigned long)pipe->command_buffer);
kfree(pipe);
return 0;
}
static const struct file_operations goldfish_pipe_fops = {
.owner = THIS_MODULE,
.read = goldfish_pipe_read,
.write = goldfish_pipe_write,
.poll = goldfish_pipe_poll,
.open = goldfish_pipe_open,
.release = goldfish_pipe_release,
};
static void init_miscdevice(struct miscdevice *miscdev)
{
memset(miscdev, 0, sizeof(*miscdev));
miscdev->minor = MISC_DYNAMIC_MINOR;
miscdev->name = "goldfish_pipe";
miscdev->fops = &goldfish_pipe_fops;
}
static void write_pa_addr(void *addr, void __iomem *portl, void __iomem *porth)
{
const unsigned long paddr = __pa(addr);
writel(upper_32_bits(paddr), porth);
writel(lower_32_bits(paddr), portl);
}
static int goldfish_pipe_device_init(struct platform_device *pdev,
struct goldfish_pipe_dev *dev)
{
int err;
err = devm_request_threaded_irq(&pdev->dev, dev->irq,
goldfish_pipe_interrupt,
goldfish_interrupt_task,
IRQF_SHARED, "goldfish_pipe", dev);
if (err) {
dev_err(&pdev->dev, "unable to allocate IRQ for v2\n");
return err;
}
init_miscdevice(&dev->miscdev);
err = misc_register(&dev->miscdev);
if (err) {
dev_err(&pdev->dev, "unable to register v2 device\n");
return err;
}
dev->pdev_dev = &pdev->dev;
dev->first_signalled_pipe = NULL;
dev->pipes_capacity = INITIAL_PIPES_CAPACITY;
dev->pipes = kcalloc(dev->pipes_capacity, sizeof(*dev->pipes),
GFP_KERNEL);
if (!dev->pipes) {
misc_deregister(&dev->miscdev);
return -ENOMEM;
}
/*
* We're going to pass two buffers, open_command_params and
* signalled_pipe_buffers, to the host. This means each of those buffers
* needs to be contained in a single physical page. The easiest choice
* is to just allocate a page and place the buffers in it.
*/
BUILD_BUG_ON(sizeof(struct goldfish_pipe_dev_buffers) > PAGE_SIZE);
dev->buffers = (struct goldfish_pipe_dev_buffers *)
__get_free_page(GFP_KERNEL);
if (!dev->buffers) {
kfree(dev->pipes);
misc_deregister(&dev->miscdev);
return -ENOMEM;
}
/* Send the buffer addresses to the host */
write_pa_addr(&dev->buffers->signalled_pipe_buffers,
dev->base + PIPE_REG_SIGNAL_BUFFER,
dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH);
writel(MAX_SIGNALLED_PIPES,
dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT);
write_pa_addr(&dev->buffers->open_command_params,
dev->base + PIPE_REG_OPEN_BUFFER,
dev->base + PIPE_REG_OPEN_BUFFER_HIGH);
platform_set_drvdata(pdev, dev);
return 0;
}
static void goldfish_pipe_device_deinit(struct platform_device *pdev,
struct goldfish_pipe_dev *dev)
{
misc_deregister(&dev->miscdev);
kfree(dev->pipes);
free_page((unsigned long)dev->buffers);
}
static int goldfish_pipe_probe(struct platform_device *pdev)
{
struct resource *r;
struct goldfish_pipe_dev *dev;
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
dev->magic = &goldfish_pipe_device_deinit;
spin_lock_init(&dev->lock);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r || resource_size(r) < PAGE_SIZE) {
dev_err(&pdev->dev, "can't allocate i/o page\n");
return -EINVAL;
}
dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
if (!dev->base) {
dev_err(&pdev->dev, "ioremap failed\n");
return -EINVAL;
}
dev->irq = platform_get_irq(pdev, 0);
if (dev->irq < 0)
return dev->irq;
/*
* Exchange the versions with the host device
*
 * Note: the v1 driver did not report its version, so we write ours before
* reading device version back: this allows the host implementation to
* detect the old driver (if there was no version write before read).
*/
writel(PIPE_DRIVER_VERSION, dev->base + PIPE_REG_VERSION);
dev->version = readl(dev->base + PIPE_REG_VERSION);
if (WARN_ON(dev->version < PIPE_CURRENT_DEVICE_VERSION))
return -EINVAL;
return goldfish_pipe_device_init(pdev, dev);
}
static int goldfish_pipe_remove(struct platform_device *pdev)
{
struct goldfish_pipe_dev *dev = platform_get_drvdata(pdev);
goldfish_pipe_device_deinit(pdev, dev);
return 0;
}
static const struct acpi_device_id goldfish_pipe_acpi_match[] = {
{ "GFSH0003", 0 },
{ },
};
MODULE_DEVICE_TABLE(acpi, goldfish_pipe_acpi_match);
static const struct of_device_id goldfish_pipe_of_match[] = {
{ .compatible = "google,android-pipe", },
{},
};
MODULE_DEVICE_TABLE(of, goldfish_pipe_of_match);
static struct platform_driver goldfish_pipe_driver = {
.probe = goldfish_pipe_probe,
.remove = goldfish_pipe_remove,
.driver = {
.name = "goldfish_pipe",
.of_match_table = goldfish_pipe_of_match,
.acpi_match_table = ACPI_PTR(goldfish_pipe_acpi_match),
}
};
module_platform_driver(goldfish_pipe_driver);
MODULE_AUTHOR("David Turner <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/platform/goldfish/goldfish_pipe.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Surface System Aggregator Module (SSAM) subsystem device hubs.
*
 * Provides a driver for SSAM subsystem device hubs. This driver performs
* instantiation of the devices managed by said hubs and takes care of
* (hot-)removal.
*
* Copyright (C) 2020-2022 Maximilian Luz <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/surface_aggregator/device.h>
/* -- SSAM generic subsystem hub driver framework. -------------------------- */
enum ssam_hub_state {
SSAM_HUB_UNINITIALIZED, /* Only set during initialization. */
SSAM_HUB_CONNECTED,
SSAM_HUB_DISCONNECTED,
};
enum ssam_hub_flags {
SSAM_HUB_HOT_REMOVED,
};
struct ssam_hub;
struct ssam_hub_ops {
int (*get_state)(struct ssam_hub *hub, enum ssam_hub_state *state);
};
struct ssam_hub {
struct ssam_device *sdev;
enum ssam_hub_state state;
unsigned long flags;
struct delayed_work update_work;
unsigned long connect_delay;
struct ssam_event_notifier notif;
struct ssam_hub_ops ops;
};
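/*
 * Static description of a hub type: the event source to subscribe to,
 * the notifier and state-query callbacks, and the delay to apply before
 * (re-)registering child devices after a connect event.
 */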
struct ssam_hub_desc {
struct {
struct ssam_event_registry reg;
struct ssam_event_id id;
enum ssam_event_mask mask;
} event;
struct {
u32 (*notify)(struct ssam_event_notifier *nf, const struct ssam_event *event);
int (*get_state)(struct ssam_hub *hub, enum ssam_hub_state *state);
} ops;
unsigned long connect_delay_ms;
};
static void ssam_hub_update_workfn(struct work_struct *work)
{
struct ssam_hub *hub = container_of(work, struct ssam_hub, update_work.work);
enum ssam_hub_state state;
int status = 0;
status = hub->ops.get_state(hub, &state);
if (status)
return;
/*
* There is a small possibility that hub devices were hot-removed and
* re-added before we were able to remove them here. In that case, both
* the state returned by get_state() and the state of the hub will
* equal SSAM_HUB_CONNECTED and we would bail early below, which would
* leave child devices without proper (re-)initialization and the
* hot-remove flag set.
*
* Therefore, we check whether devices have been hot-removed via an
* additional flag on the hub and, in this case, override the returned
* hub state. In case of a missed disconnect (i.e. get_state returned
* "connected"), we further need to re-schedule this work (with the
* appropriate delay) as the actual connect work submission might have
* been merged with this one.
*
* This then leads to one of two cases: Either we submit an unnecessary
* work item (which will get ignored via either the queue or the state
* checks) or, in the unlikely case that the work is actually required,
* double the normal connect delay.
*/
if (test_and_clear_bit(SSAM_HUB_HOT_REMOVED, &hub->flags)) {
if (state == SSAM_HUB_CONNECTED)
schedule_delayed_work(&hub->update_work, hub->connect_delay);
state = SSAM_HUB_DISCONNECTED;
}
if (hub->state == state)
return;
hub->state = state;
if (hub->state == SSAM_HUB_CONNECTED)
status = ssam_device_register_clients(hub->sdev);
else
ssam_remove_clients(&hub->sdev->dev);
if (status)
dev_err(&hub->sdev->dev, "failed to update hub child devices: %d\n", status);
}
static int ssam_hub_mark_hot_removed(struct device *dev, void *_data)
{
struct ssam_device *sdev = to_ssam_device(dev);
if (is_ssam_device(dev))
ssam_device_mark_hot_removed(sdev);
return 0;
}
static void ssam_hub_update(struct ssam_hub *hub, bool connected)
{
unsigned long delay;
/* Mark devices as hot-removed before we remove any. */
if (!connected) {
set_bit(SSAM_HUB_HOT_REMOVED, &hub->flags);
device_for_each_child_reverse(&hub->sdev->dev, NULL, ssam_hub_mark_hot_removed);
}
/*
* Delay update when the base/keyboard cover is being connected to give
* devices/EC some time to set up.
*/
delay = connected ? hub->connect_delay : 0;
schedule_delayed_work(&hub->update_work, delay);
}
static int __maybe_unused ssam_hub_resume(struct device *dev)
{
struct ssam_hub *hub = dev_get_drvdata(dev);
schedule_delayed_work(&hub->update_work, 0);
return 0;
}
static SIMPLE_DEV_PM_OPS(ssam_hub_pm_ops, NULL, ssam_hub_resume);
static int ssam_hub_probe(struct ssam_device *sdev)
{
const struct ssam_hub_desc *desc;
struct ssam_hub *hub;
int status;
desc = ssam_device_get_match_data(sdev);
if (!desc) {
WARN(1, "no driver match data specified");
return -EINVAL;
}
hub = devm_kzalloc(&sdev->dev, sizeof(*hub), GFP_KERNEL);
if (!hub)
return -ENOMEM;
hub->sdev = sdev;
hub->state = SSAM_HUB_UNINITIALIZED;
hub->notif.base.priority = INT_MAX; /* This notifier should run first. */
hub->notif.base.fn = desc->ops.notify;
hub->notif.event.reg = desc->event.reg;
hub->notif.event.id = desc->event.id;
hub->notif.event.mask = desc->event.mask;
hub->notif.event.flags = SSAM_EVENT_SEQUENCED;
hub->connect_delay = msecs_to_jiffies(desc->connect_delay_ms);
hub->ops.get_state = desc->ops.get_state;
INIT_DELAYED_WORK(&hub->update_work, ssam_hub_update_workfn);
ssam_device_set_drvdata(sdev, hub);
status = ssam_device_notifier_register(sdev, &hub->notif);
if (status)
return status;
schedule_delayed_work(&hub->update_work, 0);
return 0;
}
static void ssam_hub_remove(struct ssam_device *sdev)
{
struct ssam_hub *hub = ssam_device_get_drvdata(sdev);
ssam_device_notifier_unregister(sdev, &hub->notif);
cancel_delayed_work_sync(&hub->update_work);
ssam_remove_clients(&sdev->dev);
}
/* -- SSAM base-subsystem hub driver. --------------------------------------- */
/*
* Some devices (especially battery) may need a bit of time to be fully usable
* after being (re-)connected. This delay has been determined via
* experimentation.
*/
#define SSAM_BASE_UPDATE_CONNECT_DELAY 2500
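/*
 * Query the base attachment state (opmode) from the EC. The
 * SSAM_DEFINE_SYNC_REQUEST_R() macro expands to a synchronous request
 * function of (roughly) the form
 *
 *   static int ssam_bas_query_opmode(struct ssam_controller *ctrl, u8 *ret);
 */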
SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_query_opmode, u8, {
.target_category = SSAM_SSH_TC_BAS,
.target_id = SSAM_SSH_TID_SAM,
.command_id = 0x0d,
.instance_id = 0x00,
});
#define SSAM_BAS_OPMODE_TABLET 0x00
#define SSAM_EVENT_BAS_CID_CONNECTION 0x0c
static int ssam_base_hub_query_state(struct ssam_hub *hub, enum ssam_hub_state *state)
{
u8 opmode;
int status;
status = ssam_retry(ssam_bas_query_opmode, hub->sdev->ctrl, &opmode);
if (status < 0) {
dev_err(&hub->sdev->dev, "failed to query base state: %d\n", status);
return status;
}
if (opmode != SSAM_BAS_OPMODE_TABLET)
*state = SSAM_HUB_CONNECTED;
else
*state = SSAM_HUB_DISCONNECTED;
return 0;
}
static u32 ssam_base_hub_notif(struct ssam_event_notifier *nf, const struct ssam_event *event)
{
struct ssam_hub *hub = container_of(nf, struct ssam_hub, notif);
if (event->command_id != SSAM_EVENT_BAS_CID_CONNECTION)
return 0;
if (event->length < 1) {
dev_err(&hub->sdev->dev, "unexpected payload size: %u\n", event->length);
return 0;
}
ssam_hub_update(hub, event->data[0]);
/*
* Do not return SSAM_NOTIF_HANDLED: The event should be picked up and
* consumed by the detachment system driver. We're just a (more or less)
* silent observer.
*/
return 0;
}
static const struct ssam_hub_desc base_hub = {
.event = {
.reg = SSAM_EVENT_REGISTRY_SAM,
.id = {
.target_category = SSAM_SSH_TC_BAS,
.instance = 0,
},
.mask = SSAM_EVENT_MASK_NONE,
},
.ops = {
.notify = ssam_base_hub_notif,
.get_state = ssam_base_hub_query_state,
},
.connect_delay_ms = SSAM_BASE_UPDATE_CONNECT_DELAY,
};
/* -- SSAM KIP-subsystem hub driver. ---------------------------------------- */
/*
* Some devices may need a bit of time to be fully usable after being
* (re-)connected. This delay has been determined via experimentation.
*/
#define SSAM_KIP_UPDATE_CONNECT_DELAY 250
#define SSAM_EVENT_KIP_CID_CONNECTION 0x2c
SSAM_DEFINE_SYNC_REQUEST_R(__ssam_kip_query_state, u8, {
.target_category = SSAM_SSH_TC_KIP,
.target_id = SSAM_SSH_TID_SAM,
.command_id = 0x2c,
.instance_id = 0x00,
});
static int ssam_kip_hub_query_state(struct ssam_hub *hub, enum ssam_hub_state *state)
{
int status;
u8 connected;
status = ssam_retry(__ssam_kip_query_state, hub->sdev->ctrl, &connected);
if (status < 0) {
dev_err(&hub->sdev->dev, "failed to query KIP connection state: %d\n", status);
return status;
}
*state = connected ? SSAM_HUB_CONNECTED : SSAM_HUB_DISCONNECTED;
return 0;
}
static u32 ssam_kip_hub_notif(struct ssam_event_notifier *nf, const struct ssam_event *event)
{
struct ssam_hub *hub = container_of(nf, struct ssam_hub, notif);
if (event->command_id != SSAM_EVENT_KIP_CID_CONNECTION)
return 0; /* Return "unhandled". */
if (event->length < 1) {
dev_err(&hub->sdev->dev, "unexpected payload size: %u\n", event->length);
return 0;
}
ssam_hub_update(hub, event->data[0]);
return SSAM_NOTIF_HANDLED;
}
static const struct ssam_hub_desc kip_hub = {
.event = {
.reg = SSAM_EVENT_REGISTRY_SAM,
.id = {
.target_category = SSAM_SSH_TC_KIP,
.instance = 0,
},
.mask = SSAM_EVENT_MASK_TARGET,
},
.ops = {
.notify = ssam_kip_hub_notif,
.get_state = ssam_kip_hub_query_state,
},
.connect_delay_ms = SSAM_KIP_UPDATE_CONNECT_DELAY,
};
/* -- Driver registration. -------------------------------------------------- */
static const struct ssam_device_id ssam_hub_match[] = {
{ SSAM_VDEV(HUB, SAM, SSAM_SSH_TC_KIP, 0x00), (unsigned long)&kip_hub },
{ SSAM_VDEV(HUB, SAM, SSAM_SSH_TC_BAS, 0x00), (unsigned long)&base_hub },
{ }
};
MODULE_DEVICE_TABLE(ssam, ssam_hub_match);
static struct ssam_device_driver ssam_subsystem_hub_driver = {
.probe = ssam_hub_probe,
.remove = ssam_hub_remove,
.match_table = ssam_hub_match,
.driver = {
.name = "surface_aggregator_subsystem_hub",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &ssam_hub_pm_ops,
},
};
module_ssam_device_driver(ssam_subsystem_hub_driver);
MODULE_AUTHOR("Maximilian Luz <[email protected]>");
MODULE_DESCRIPTION("Subsystem device hub driver for Surface System Aggregator Module");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/surface/surface_aggregator_hub.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Surface Platform Profile / Performance Mode driver for Surface System
* Aggregator Module (thermal subsystem).
*
* Copyright (C) 2021-2022 Maximilian Luz <[email protected]>
*/
#include <asm/unaligned.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_profile.h>
#include <linux/types.h>
#include <linux/surface_aggregator/device.h>
enum ssam_tmp_profile {
SSAM_TMP_PROFILE_NORMAL = 1,
SSAM_TMP_PROFILE_BATTERY_SAVER = 2,
SSAM_TMP_PROFILE_BETTER_PERFORMANCE = 3,
SSAM_TMP_PROFILE_BEST_PERFORMANCE = 4,
};
struct ssam_tmp_profile_info {
__le32 profile;
__le16 unknown1;
__le16 unknown2;
} __packed;
struct ssam_tmp_profile_device {
struct ssam_device *sdev;
struct platform_profile_handler handler;
};
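/*
 * EC requests to get and set the current thermal profile. These use the
 * client-device (_CL) macro variants, which take the ssam_device itself
 * and derive target and instance IDs from it instead of hard-coding them.
 */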
SSAM_DEFINE_SYNC_REQUEST_CL_R(__ssam_tmp_profile_get, struct ssam_tmp_profile_info, {
.target_category = SSAM_SSH_TC_TMP,
.command_id = 0x02,
});
SSAM_DEFINE_SYNC_REQUEST_CL_W(__ssam_tmp_profile_set, __le32, {
.target_category = SSAM_SSH_TC_TMP,
.command_id = 0x03,
});
static int ssam_tmp_profile_get(struct ssam_device *sdev, enum ssam_tmp_profile *p)
{
struct ssam_tmp_profile_info info;
int status;
status = ssam_retry(__ssam_tmp_profile_get, sdev, &info);
if (status < 0)
return status;
*p = le32_to_cpu(info.profile);
return 0;
}
static int ssam_tmp_profile_set(struct ssam_device *sdev, enum ssam_tmp_profile p)
{
__le32 profile_le = cpu_to_le32(p);
return ssam_retry(__ssam_tmp_profile_set, sdev, &profile_le);
}
static int convert_ssam_to_profile(struct ssam_device *sdev, enum ssam_tmp_profile p)
{
switch (p) {
case SSAM_TMP_PROFILE_NORMAL:
return PLATFORM_PROFILE_BALANCED;
case SSAM_TMP_PROFILE_BATTERY_SAVER:
return PLATFORM_PROFILE_LOW_POWER;
case SSAM_TMP_PROFILE_BETTER_PERFORMANCE:
return PLATFORM_PROFILE_BALANCED_PERFORMANCE;
case SSAM_TMP_PROFILE_BEST_PERFORMANCE:
return PLATFORM_PROFILE_PERFORMANCE;
default:
dev_err(&sdev->dev, "invalid performance profile: %d", p);
return -EINVAL;
}
}
static int convert_profile_to_ssam(struct ssam_device *sdev, enum platform_profile_option p)
{
switch (p) {
case PLATFORM_PROFILE_LOW_POWER:
return SSAM_TMP_PROFILE_BATTERY_SAVER;
case PLATFORM_PROFILE_BALANCED:
return SSAM_TMP_PROFILE_NORMAL;
case PLATFORM_PROFILE_BALANCED_PERFORMANCE:
return SSAM_TMP_PROFILE_BETTER_PERFORMANCE;
case PLATFORM_PROFILE_PERFORMANCE:
return SSAM_TMP_PROFILE_BEST_PERFORMANCE;
default:
/* This should have already been caught by platform_profile_store(). */
WARN(true, "unsupported platform profile");
return -EOPNOTSUPP;
}
}
static int ssam_platform_profile_get(struct platform_profile_handler *pprof,
enum platform_profile_option *profile)
{
struct ssam_tmp_profile_device *tpd;
enum ssam_tmp_profile tp;
int status;
tpd = container_of(pprof, struct ssam_tmp_profile_device, handler);
status = ssam_tmp_profile_get(tpd->sdev, &tp);
if (status)
return status;
status = convert_ssam_to_profile(tpd->sdev, tp);
if (status < 0)
return status;
*profile = status;
return 0;
}
static int ssam_platform_profile_set(struct platform_profile_handler *pprof,
enum platform_profile_option profile)
{
struct ssam_tmp_profile_device *tpd;
int tp;
tpd = container_of(pprof, struct ssam_tmp_profile_device, handler);
tp = convert_profile_to_ssam(tpd->sdev, profile);
if (tp < 0)
return tp;
return ssam_tmp_profile_set(tpd->sdev, tp);
}
static int surface_platform_profile_probe(struct ssam_device *sdev)
{
struct ssam_tmp_profile_device *tpd;
tpd = devm_kzalloc(&sdev->dev, sizeof(*tpd), GFP_KERNEL);
if (!tpd)
return -ENOMEM;
tpd->sdev = sdev;
tpd->handler.profile_get = ssam_platform_profile_get;
tpd->handler.profile_set = ssam_platform_profile_set;
set_bit(PLATFORM_PROFILE_LOW_POWER, tpd->handler.choices);
set_bit(PLATFORM_PROFILE_BALANCED, tpd->handler.choices);
set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, tpd->handler.choices);
set_bit(PLATFORM_PROFILE_PERFORMANCE, tpd->handler.choices);
platform_profile_register(&tpd->handler);
return 0;
}
static void surface_platform_profile_remove(struct ssam_device *sdev)
{
platform_profile_remove();
}
static const struct ssam_device_id ssam_platform_profile_match[] = {
{ SSAM_SDEV(TMP, SAM, 0x00, 0x01) },
{ },
};
MODULE_DEVICE_TABLE(ssam, ssam_platform_profile_match);
static struct ssam_device_driver surface_platform_profile = {
.probe = surface_platform_profile_probe,
.remove = surface_platform_profile_remove,
.match_table = ssam_platform_profile_match,
.driver = {
.name = "surface_platform_profile",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
module_ssam_device_driver(surface_platform_profile);
MODULE_AUTHOR("Maximilian Luz <[email protected]>");
MODULE_DESCRIPTION("Platform Profile Support for Surface System Aggregator Module");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/surface/surface_platform_profile.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Surface Book (gen. 2 and later) detachment system (DTX) driver.
*
* Provides a user-space interface to properly handle clipboard/tablet
* (containing screen and processor) detachment from the base of the device
 * (containing the keyboard and optionally a discrete GPU). Allows
 * user-space to acknowledge (to speed things up), abort (e.g. in case the
 * dGPU is still in use), or request detachment.
*
* Copyright (C) 2019-2022 Maximilian Luz <[email protected]>
*/
#include <linux/fs.h>
#include <linux/input.h>
#include <linux/ioctl.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/surface_aggregator/controller.h>
#include <linux/surface_aggregator/device.h>
#include <linux/surface_aggregator/dtx.h>
/* -- SSAM interface. ------------------------------------------------------- */
enum sam_event_cid_bas {
SAM_EVENT_CID_DTX_CONNECTION = 0x0c,
SAM_EVENT_CID_DTX_REQUEST = 0x0e,
SAM_EVENT_CID_DTX_CANCEL = 0x0f,
SAM_EVENT_CID_DTX_LATCH_STATUS = 0x11,
};
enum ssam_bas_base_state {
SSAM_BAS_BASE_STATE_DETACH_SUCCESS = 0x00,
SSAM_BAS_BASE_STATE_ATTACHED = 0x01,
SSAM_BAS_BASE_STATE_NOT_FEASIBLE = 0x02,
};
enum ssam_bas_latch_status {
SSAM_BAS_LATCH_STATUS_CLOSED = 0x00,
SSAM_BAS_LATCH_STATUS_OPENED = 0x01,
SSAM_BAS_LATCH_STATUS_FAILED_TO_OPEN = 0x02,
SSAM_BAS_LATCH_STATUS_FAILED_TO_REMAIN_OPEN = 0x03,
SSAM_BAS_LATCH_STATUS_FAILED_TO_CLOSE = 0x04,
};
enum ssam_bas_cancel_reason {
SSAM_BAS_CANCEL_REASON_NOT_FEASIBLE = 0x00, /* Low battery. */
SSAM_BAS_CANCEL_REASON_TIMEOUT = 0x02,
SSAM_BAS_CANCEL_REASON_FAILED_TO_OPEN = 0x03,
SSAM_BAS_CANCEL_REASON_FAILED_TO_REMAIN_OPEN = 0x04,
SSAM_BAS_CANCEL_REASON_FAILED_TO_CLOSE = 0x05,
};
struct ssam_bas_base_info {
u8 state;
u8 base_id;
} __packed;
static_assert(sizeof(struct ssam_bas_base_info) == 2);
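/*
 * Synchronous SAM requests for latch control and state queries. The _N
 * macro variants neither take a payload nor return data; the _R variants
 * read back a value of the given type.
 */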
SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_lock, {
.target_category = SSAM_SSH_TC_BAS,
.target_id = SSAM_SSH_TID_SAM,
.command_id = 0x06,
.instance_id = 0x00,
});
SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_unlock, {
.target_category = SSAM_SSH_TC_BAS,
.target_id = SSAM_SSH_TID_SAM,
.command_id = 0x07,
.instance_id = 0x00,
});
SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_request, {
.target_category = SSAM_SSH_TC_BAS,
.target_id = SSAM_SSH_TID_SAM,
.command_id = 0x08,
.instance_id = 0x00,
});
SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_confirm, {
.target_category = SSAM_SSH_TC_BAS,
.target_id = SSAM_SSH_TID_SAM,
.command_id = 0x09,
.instance_id = 0x00,
});
SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_heartbeat, {
.target_category = SSAM_SSH_TC_BAS,
.target_id = SSAM_SSH_TID_SAM,
.command_id = 0x0a,
.instance_id = 0x00,
});
SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_cancel, {
.target_category = SSAM_SSH_TC_BAS,
.target_id = SSAM_SSH_TID_SAM,
.command_id = 0x0b,
.instance_id = 0x00,
});
SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_base, struct ssam_bas_base_info, {
.target_category = SSAM_SSH_TC_BAS,
.target_id = SSAM_SSH_TID_SAM,
.command_id = 0x0c,
.instance_id = 0x00,
});
SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_device_mode, u8, {
.target_category = SSAM_SSH_TC_BAS,
.target_id = SSAM_SSH_TID_SAM,
.command_id = 0x0d,
.instance_id = 0x00,
});
SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_latch_status, u8, {
.target_category = SSAM_SSH_TC_BAS,
.target_id = SSAM_SSH_TID_SAM,
.command_id = 0x11,
.instance_id = 0x00,
});
/* -- Main structures. ------------------------------------------------------ */
enum sdtx_device_state {
SDTX_DEVICE_SHUTDOWN_BIT = BIT(0),
SDTX_DEVICE_DIRTY_BASE_BIT = BIT(1),
SDTX_DEVICE_DIRTY_MODE_BIT = BIT(2),
SDTX_DEVICE_DIRTY_LATCH_BIT = BIT(3),
};
struct sdtx_device {
struct kref kref;
struct rw_semaphore lock; /* Guards device and controller reference. */
struct device *dev;
struct ssam_controller *ctrl;
unsigned long flags;
struct miscdevice mdev;
wait_queue_head_t waitq;
struct mutex write_lock; /* Guards order of events/notifications. */
struct rw_semaphore client_lock; /* Guards client list. */
struct list_head client_list;
struct delayed_work state_work;
struct {
struct ssam_bas_base_info base;
u8 device_mode;
u8 latch_status;
} state;
struct delayed_work mode_work;
struct input_dev *mode_switch;
struct ssam_event_notifier notif;
};
enum sdtx_client_state {
SDTX_CLIENT_EVENTS_ENABLED_BIT = BIT(0),
};
struct sdtx_client {
struct sdtx_device *ddev;
struct list_head node;
unsigned long flags;
struct fasync_struct *fasync;
struct mutex read_lock; /* Guards FIFO buffer read access. */
DECLARE_KFIFO(buffer, u8, 512);
};
static void __sdtx_device_release(struct kref *kref)
{
struct sdtx_device *ddev = container_of(kref, struct sdtx_device, kref);
mutex_destroy(&ddev->write_lock);
kfree(ddev);
}
static struct sdtx_device *sdtx_device_get(struct sdtx_device *ddev)
{
if (ddev)
kref_get(&ddev->kref);
return ddev;
}
static void sdtx_device_put(struct sdtx_device *ddev)
{
if (ddev)
kref_put(&ddev->kref, __sdtx_device_release);
}
/* -- Firmware value translations. ------------------------------------------ */
static u16 sdtx_translate_base_state(struct sdtx_device *ddev, u8 state)
{
switch (state) {
case SSAM_BAS_BASE_STATE_ATTACHED:
return SDTX_BASE_ATTACHED;
case SSAM_BAS_BASE_STATE_DETACH_SUCCESS:
return SDTX_BASE_DETACHED;
case SSAM_BAS_BASE_STATE_NOT_FEASIBLE:
return SDTX_DETACH_NOT_FEASIBLE;
default:
dev_err(ddev->dev, "unknown base state: %#04x\n", state);
return SDTX_UNKNOWN(state);
}
}
static u16 sdtx_translate_latch_status(struct sdtx_device *ddev, u8 status)
{
switch (status) {
case SSAM_BAS_LATCH_STATUS_CLOSED:
return SDTX_LATCH_CLOSED;
case SSAM_BAS_LATCH_STATUS_OPENED:
return SDTX_LATCH_OPENED;
case SSAM_BAS_LATCH_STATUS_FAILED_TO_OPEN:
return SDTX_ERR_FAILED_TO_OPEN;
case SSAM_BAS_LATCH_STATUS_FAILED_TO_REMAIN_OPEN:
return SDTX_ERR_FAILED_TO_REMAIN_OPEN;
case SSAM_BAS_LATCH_STATUS_FAILED_TO_CLOSE:
return SDTX_ERR_FAILED_TO_CLOSE;
default:
dev_err(ddev->dev, "unknown latch status: %#04x\n", status);
return SDTX_UNKNOWN(status);
}
}
static u16 sdtx_translate_cancel_reason(struct sdtx_device *ddev, u8 reason)
{
switch (reason) {
case SSAM_BAS_CANCEL_REASON_NOT_FEASIBLE:
return SDTX_DETACH_NOT_FEASIBLE;
case SSAM_BAS_CANCEL_REASON_TIMEOUT:
return SDTX_DETACH_TIMEDOUT;
case SSAM_BAS_CANCEL_REASON_FAILED_TO_OPEN:
return SDTX_ERR_FAILED_TO_OPEN;
case SSAM_BAS_CANCEL_REASON_FAILED_TO_REMAIN_OPEN:
return SDTX_ERR_FAILED_TO_REMAIN_OPEN;
case SSAM_BAS_CANCEL_REASON_FAILED_TO_CLOSE:
return SDTX_ERR_FAILED_TO_CLOSE;
default:
dev_err(ddev->dev, "unknown cancel reason: %#04x\n", reason);
return SDTX_UNKNOWN(reason);
}
}
/* -- IOCTLs. --------------------------------------------------------------- */
static int sdtx_ioctl_get_base_info(struct sdtx_device *ddev,
struct sdtx_base_info __user *buf)
{
struct ssam_bas_base_info raw;
struct sdtx_base_info info;
int status;
lockdep_assert_held_read(&ddev->lock);
status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &raw);
if (status < 0)
return status;
info.state = sdtx_translate_base_state(ddev, raw.state);
info.base_id = SDTX_BASE_TYPE_SSH(raw.base_id);
if (copy_to_user(buf, &info, sizeof(info)))
return -EFAULT;
return 0;
}
static int sdtx_ioctl_get_device_mode(struct sdtx_device *ddev, u16 __user *buf)
{
u8 mode;
int status;
lockdep_assert_held_read(&ddev->lock);
status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
if (status < 0)
return status;
return put_user(mode, buf);
}
static int sdtx_ioctl_get_latch_status(struct sdtx_device *ddev, u16 __user *buf)
{
u8 latch;
int status;
lockdep_assert_held_read(&ddev->lock);
status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &latch);
if (status < 0)
return status;
return put_user(sdtx_translate_latch_status(ddev, latch), buf);
}
static long __surface_dtx_ioctl(struct sdtx_client *client, unsigned int cmd, unsigned long arg)
{
struct sdtx_device *ddev = client->ddev;
lockdep_assert_held_read(&ddev->lock);
switch (cmd) {
case SDTX_IOCTL_EVENTS_ENABLE:
set_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags);
return 0;
case SDTX_IOCTL_EVENTS_DISABLE:
clear_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags);
return 0;
case SDTX_IOCTL_LATCH_LOCK:
return ssam_retry(ssam_bas_latch_lock, ddev->ctrl);
case SDTX_IOCTL_LATCH_UNLOCK:
return ssam_retry(ssam_bas_latch_unlock, ddev->ctrl);
case SDTX_IOCTL_LATCH_REQUEST:
return ssam_retry(ssam_bas_latch_request, ddev->ctrl);
case SDTX_IOCTL_LATCH_CONFIRM:
return ssam_retry(ssam_bas_latch_confirm, ddev->ctrl);
case SDTX_IOCTL_LATCH_HEARTBEAT:
return ssam_retry(ssam_bas_latch_heartbeat, ddev->ctrl);
case SDTX_IOCTL_LATCH_CANCEL:
return ssam_retry(ssam_bas_latch_cancel, ddev->ctrl);
case SDTX_IOCTL_GET_BASE_INFO:
return sdtx_ioctl_get_base_info(ddev, (struct sdtx_base_info __user *)arg);
case SDTX_IOCTL_GET_DEVICE_MODE:
return sdtx_ioctl_get_device_mode(ddev, (u16 __user *)arg);
case SDTX_IOCTL_GET_LATCH_STATUS:
return sdtx_ioctl_get_latch_status(ddev, (u16 __user *)arg);
default:
return -EINVAL;
}
}
static long surface_dtx_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct sdtx_client *client = file->private_data;
long status;
if (down_read_killable(&client->ddev->lock))
return -ERESTARTSYS;
if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags)) {
up_read(&client->ddev->lock);
return -ENODEV;
}
status = __surface_dtx_ioctl(client, cmd, arg);
up_read(&client->ddev->lock);
return status;
}
/* -- File operations. ------------------------------------------------------ */
static int surface_dtx_open(struct inode *inode, struct file *file)
{
struct sdtx_device *ddev = container_of(file->private_data, struct sdtx_device, mdev);
struct sdtx_client *client;
/* Initialize client. */
client = kzalloc(sizeof(*client), GFP_KERNEL);
if (!client)
return -ENOMEM;
client->ddev = sdtx_device_get(ddev);
INIT_LIST_HEAD(&client->node);
mutex_init(&client->read_lock);
INIT_KFIFO(client->buffer);
file->private_data = client;
/* Attach client. */
down_write(&ddev->client_lock);
/*
* Do not add a new client if the device has been shut down. Note that
* it's enough to hold the client_lock here as, during shutdown, we
* only acquire that lock and remove clients after marking the device
* as shut down.
*/
if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
up_write(&ddev->client_lock);
mutex_destroy(&client->read_lock);
sdtx_device_put(client->ddev);
kfree(client);
return -ENODEV;
}
list_add_tail(&client->node, &ddev->client_list);
up_write(&ddev->client_lock);
stream_open(inode, file);
return 0;
}
static int surface_dtx_release(struct inode *inode, struct file *file)
{
struct sdtx_client *client = file->private_data;
/* Detach client. */
down_write(&client->ddev->client_lock);
list_del(&client->node);
up_write(&client->ddev->client_lock);
/* Free client. */
sdtx_device_put(client->ddev);
mutex_destroy(&client->read_lock);
kfree(client);
return 0;
}
static ssize_t surface_dtx_read(struct file *file, char __user *buf, size_t count, loff_t *offs)
{
struct sdtx_client *client = file->private_data;
struct sdtx_device *ddev = client->ddev;
unsigned int copied;
int status = 0;
if (down_read_killable(&ddev->lock))
return -ERESTARTSYS;
/* Make sure we're not shut down. */
if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
up_read(&ddev->lock);
return -ENODEV;
}
do {
/* Check availability, wait if necessary. */
if (kfifo_is_empty(&client->buffer)) {
up_read(&ddev->lock);
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
status = wait_event_interruptible(ddev->waitq,
!kfifo_is_empty(&client->buffer) ||
test_bit(SDTX_DEVICE_SHUTDOWN_BIT,
&ddev->flags));
if (status < 0)
return status;
if (down_read_killable(&ddev->lock))
return -ERESTARTSYS;
/* Need to check that we're not shut down again. */
if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags)) {
up_read(&ddev->lock);
return -ENODEV;
}
}
/* Try to read from FIFO. */
if (mutex_lock_interruptible(&client->read_lock)) {
up_read(&ddev->lock);
return -ERESTARTSYS;
}
status = kfifo_to_user(&client->buffer, buf, count, &copied);
mutex_unlock(&client->read_lock);
if (status < 0) {
up_read(&ddev->lock);
return status;
}
/* We might not have gotten anything, check this here. */
if (copied == 0 && (file->f_flags & O_NONBLOCK)) {
up_read(&ddev->lock);
return -EAGAIN;
}
} while (copied == 0);
up_read(&ddev->lock);
return copied;
}
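/*
 * Illustrative sketch (not part of this driver): events returned by the
 * read() implementation above are variable-length records, each prefixed by
 * a struct sdtx_event header. A user-space reader could walk a filled buffer
 * like this (error handling omitted, handle_event() is a hypothetical
 * helper):
 *
 *	char buf[512];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *	char *p = buf;
 *
 *	while (p < buf + n) {
 *		struct sdtx_event *e = (struct sdtx_event *)p;
 *
 *		handle_event(e->code, e->data, e->length);
 *		p += sizeof(*e) + e->length;
 *	}
 */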
static __poll_t surface_dtx_poll(struct file *file, struct poll_table_struct *pt)
{
struct sdtx_client *client = file->private_data;
__poll_t events = 0;
if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags))
return EPOLLHUP | EPOLLERR;
poll_wait(file, &client->ddev->waitq, pt);
if (!kfifo_is_empty(&client->buffer))
events |= EPOLLIN | EPOLLRDNORM;
return events;
}
static int surface_dtx_fasync(int fd, struct file *file, int on)
{
struct sdtx_client *client = file->private_data;
return fasync_helper(fd, file, on, &client->fasync);
}
static const struct file_operations surface_dtx_fops = {
.owner = THIS_MODULE,
.open = surface_dtx_open,
.release = surface_dtx_release,
.read = surface_dtx_read,
.poll = surface_dtx_poll,
.fasync = surface_dtx_fasync,
.unlocked_ioctl = surface_dtx_ioctl,
.compat_ioctl = surface_dtx_ioctl,
.llseek = no_llseek,
};
/* -- Event handling/forwarding. -------------------------------------------- */
/*
* The device operation mode is not immediately updated on the EC when the
* base has been connected, i.e. querying the device mode inside the
* connection event callback yields an outdated value. Thus, we can only
* determine the new tablet-mode switch and device mode values after some
* time.
*
 * These delays have been determined experimentally. We first delay on
 * connect events, then check the device mode against the base state and,
 * if the combination is invalid, retry after the "recheck" delay.
*/
#define SDTX_DEVICE_MODE_DELAY_CONNECT msecs_to_jiffies(100)
#define SDTX_DEVICE_MODE_DELAY_RECHECK msecs_to_jiffies(100)
struct sdtx_status_event {
struct sdtx_event e;
__u16 v;
} __packed;
struct sdtx_base_info_event {
struct sdtx_event e;
struct sdtx_base_info v;
} __packed;
union sdtx_generic_event {
struct sdtx_event common;
struct sdtx_status_event status;
struct sdtx_base_info_event base;
};
static void sdtx_update_device_mode(struct sdtx_device *ddev, unsigned long delay);
/* Must be executed with ddev->write_lock held. */
static void sdtx_push_event(struct sdtx_device *ddev, struct sdtx_event *evt)
{
const size_t len = sizeof(struct sdtx_event) + evt->length;
struct sdtx_client *client;
lockdep_assert_held(&ddev->write_lock);
down_read(&ddev->client_lock);
list_for_each_entry(client, &ddev->client_list, node) {
if (!test_bit(SDTX_CLIENT_EVENTS_ENABLED_BIT, &client->flags))
continue;
if (likely(kfifo_avail(&client->buffer) >= len))
kfifo_in(&client->buffer, (const u8 *)evt, len);
else
dev_warn(ddev->dev, "event buffer overrun\n");
kill_fasync(&client->fasync, SIGIO, POLL_IN);
}
up_read(&ddev->client_lock);
wake_up_interruptible(&ddev->waitq);
}
static u32 sdtx_notifier(struct ssam_event_notifier *nf, const struct ssam_event *in)
{
struct sdtx_device *ddev = container_of(nf, struct sdtx_device, notif);
union sdtx_generic_event event;
size_t len;
/* Validate event payload length. */
switch (in->command_id) {
case SAM_EVENT_CID_DTX_CONNECTION:
len = 2 * sizeof(u8);
break;
case SAM_EVENT_CID_DTX_REQUEST:
len = 0;
break;
case SAM_EVENT_CID_DTX_CANCEL:
len = sizeof(u8);
break;
case SAM_EVENT_CID_DTX_LATCH_STATUS:
len = sizeof(u8);
break;
default:
return 0;
}
if (in->length != len) {
dev_err(ddev->dev,
"unexpected payload size for event %#04x: got %u, expected %zu\n",
in->command_id, in->length, len);
return 0;
}
mutex_lock(&ddev->write_lock);
/* Translate event. */
switch (in->command_id) {
case SAM_EVENT_CID_DTX_CONNECTION:
clear_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags);
/* If state has not changed: do not send new event. */
if (ddev->state.base.state == in->data[0] &&
ddev->state.base.base_id == in->data[1])
goto out;
ddev->state.base.state = in->data[0];
ddev->state.base.base_id = in->data[1];
event.base.e.length = sizeof(struct sdtx_base_info);
event.base.e.code = SDTX_EVENT_BASE_CONNECTION;
event.base.v.state = sdtx_translate_base_state(ddev, in->data[0]);
event.base.v.base_id = SDTX_BASE_TYPE_SSH(in->data[1]);
break;
case SAM_EVENT_CID_DTX_REQUEST:
event.common.code = SDTX_EVENT_REQUEST;
event.common.length = 0;
break;
case SAM_EVENT_CID_DTX_CANCEL:
event.status.e.length = sizeof(u16);
event.status.e.code = SDTX_EVENT_CANCEL;
event.status.v = sdtx_translate_cancel_reason(ddev, in->data[0]);
break;
case SAM_EVENT_CID_DTX_LATCH_STATUS:
clear_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags);
/* If state has not changed: do not send new event. */
if (ddev->state.latch_status == in->data[0])
goto out;
ddev->state.latch_status = in->data[0];
event.status.e.length = sizeof(u16);
event.status.e.code = SDTX_EVENT_LATCH_STATUS;
event.status.v = sdtx_translate_latch_status(ddev, in->data[0]);
break;
}
sdtx_push_event(ddev, &event.common);
/* Update device mode on base connection change. */
if (in->command_id == SAM_EVENT_CID_DTX_CONNECTION) {
unsigned long delay;
delay = in->data[0] ? SDTX_DEVICE_MODE_DELAY_CONNECT : 0;
sdtx_update_device_mode(ddev, delay);
}
out:
mutex_unlock(&ddev->write_lock);
return SSAM_NOTIF_HANDLED;
}
/* -- State update functions. ----------------------------------------------- */
static bool sdtx_device_mode_invalid(u8 mode, u8 base_state)
{
return ((base_state == SSAM_BAS_BASE_STATE_ATTACHED) &&
(mode == SDTX_DEVICE_MODE_TABLET)) ||
((base_state == SSAM_BAS_BASE_STATE_DETACH_SUCCESS) &&
(mode != SDTX_DEVICE_MODE_TABLET));
}
static void sdtx_device_mode_workfn(struct work_struct *work)
{
struct sdtx_device *ddev = container_of(work, struct sdtx_device, mode_work.work);
struct sdtx_status_event event;
struct ssam_bas_base_info base;
int status, tablet;
u8 mode;
/* Get operation mode. */
status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
if (status) {
dev_err(ddev->dev, "failed to get device mode: %d\n", status);
return;
}
/* Get base info. */
status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &base);
if (status) {
dev_err(ddev->dev, "failed to get base info: %d\n", status);
return;
}
/*
* In some cases (specifically when attaching the base), the device
* mode isn't updated right away. Thus we check if the device mode
* makes sense for the given base state and try again later if it
* doesn't.
*/
if (sdtx_device_mode_invalid(mode, base.state)) {
dev_dbg(ddev->dev, "device mode is invalid, trying again\n");
sdtx_update_device_mode(ddev, SDTX_DEVICE_MODE_DELAY_RECHECK);
return;
}
mutex_lock(&ddev->write_lock);
clear_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags);
/* Avoid sending duplicate device-mode events. */
if (ddev->state.device_mode == mode) {
mutex_unlock(&ddev->write_lock);
return;
}
ddev->state.device_mode = mode;
event.e.length = sizeof(u16);
event.e.code = SDTX_EVENT_DEVICE_MODE;
event.v = mode;
sdtx_push_event(ddev, &event.e);
/* Send SW_TABLET_MODE event. */
tablet = mode != SDTX_DEVICE_MODE_LAPTOP;
input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet);
input_sync(ddev->mode_switch);
mutex_unlock(&ddev->write_lock);
}
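/* Schedule a (delayed) device-mode update. */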
static void sdtx_update_device_mode(struct sdtx_device *ddev, unsigned long delay)
{
schedule_delayed_work(&ddev->mode_work, delay);
}
/* Must be executed with ddev->write_lock held. */
static void __sdtx_device_state_update_base(struct sdtx_device *ddev,
struct ssam_bas_base_info info)
{
struct sdtx_base_info_event event;
lockdep_assert_held(&ddev->write_lock);
/* Prevent duplicate events. */
if (ddev->state.base.state == info.state &&
ddev->state.base.base_id == info.base_id)
return;
ddev->state.base = info;
event.e.length = sizeof(struct sdtx_base_info);
event.e.code = SDTX_EVENT_BASE_CONNECTION;
event.v.state = sdtx_translate_base_state(ddev, info.state);
event.v.base_id = SDTX_BASE_TYPE_SSH(info.base_id);
sdtx_push_event(ddev, &event.e);
}
/* Must be executed with ddev->write_lock held. */
static void __sdtx_device_state_update_mode(struct sdtx_device *ddev, u8 mode)
{
struct sdtx_status_event event;
int tablet;
/*
* Note: This function must be called after updating the base state
* via __sdtx_device_state_update_base(), as we rely on the updated
* base state value in the validity check below.
*/
lockdep_assert_held(&ddev->write_lock);
if (sdtx_device_mode_invalid(mode, ddev->state.base.state)) {
dev_dbg(ddev->dev, "device mode is invalid, trying again\n");
sdtx_update_device_mode(ddev, SDTX_DEVICE_MODE_DELAY_RECHECK);
return;
}
/* Prevent duplicate events. */
if (ddev->state.device_mode == mode)
return;
ddev->state.device_mode = mode;
/* Send event. */
event.e.length = sizeof(u16);
event.e.code = SDTX_EVENT_DEVICE_MODE;
event.v = mode;
sdtx_push_event(ddev, &event.e);
/* Send SW_TABLET_MODE event. */
tablet = mode != SDTX_DEVICE_MODE_LAPTOP;
input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet);
input_sync(ddev->mode_switch);
}
/* Must be executed with ddev->write_lock held. */
static void __sdtx_device_state_update_latch(struct sdtx_device *ddev, u8 status)
{
struct sdtx_status_event event;
lockdep_assert_held(&ddev->write_lock);
/* Prevent duplicate events. */
if (ddev->state.latch_status == status)
return;
ddev->state.latch_status = status;
	event.e.length = sizeof(u16);
	event.e.code = SDTX_EVENT_LATCH_STATUS;
event.v = sdtx_translate_latch_status(ddev, status);
sdtx_push_event(ddev, &event.e);
}
static void sdtx_device_state_workfn(struct work_struct *work)
{
struct sdtx_device *ddev = container_of(work, struct sdtx_device, state_work.work);
struct ssam_bas_base_info base;
u8 mode, latch;
int status;
/* Mark everything as dirty. */
set_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags);
set_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags);
set_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags);
/*
* Ensure that the state gets marked as dirty before continuing to
* query it. Necessary to ensure that clear_bit() calls in
* sdtx_notifier() and sdtx_device_mode_workfn() actually clear these
* bits if an event is received while updating the state here.
*/
smp_mb__after_atomic();
status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &base);
if (status) {
dev_err(ddev->dev, "failed to get base state: %d\n", status);
return;
}
status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &mode);
if (status) {
dev_err(ddev->dev, "failed to get device mode: %d\n", status);
return;
}
status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &latch);
if (status) {
dev_err(ddev->dev, "failed to get latch status: %d\n", status);
return;
}
mutex_lock(&ddev->write_lock);
/*
* If the respective dirty-bit has been cleared, an event has been
* received, updating this state. The queried state may thus be out of
* date. At this point, we can safely assume that the state provided
* by the event is either up to date, or we're about to receive
* another event updating it.
*/
if (test_and_clear_bit(SDTX_DEVICE_DIRTY_BASE_BIT, &ddev->flags))
__sdtx_device_state_update_base(ddev, base);
if (test_and_clear_bit(SDTX_DEVICE_DIRTY_MODE_BIT, &ddev->flags))
__sdtx_device_state_update_mode(ddev, mode);
if (test_and_clear_bit(SDTX_DEVICE_DIRTY_LATCH_BIT, &ddev->flags))
__sdtx_device_state_update_latch(ddev, latch);
mutex_unlock(&ddev->write_lock);
}
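/* Schedule a (delayed) full device-state update. */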
static void sdtx_update_device_state(struct sdtx_device *ddev, unsigned long delay)
{
schedule_delayed_work(&ddev->state_work, delay);
}
/* -- Common device initialization. ----------------------------------------- */
static int sdtx_device_init(struct sdtx_device *ddev, struct device *dev,
struct ssam_controller *ctrl)
{
int status, tablet_mode;
/* Basic initialization. */
kref_init(&ddev->kref);
init_rwsem(&ddev->lock);
ddev->dev = dev;
ddev->ctrl = ctrl;
ddev->mdev.minor = MISC_DYNAMIC_MINOR;
ddev->mdev.name = "surface_dtx";
ddev->mdev.nodename = "surface/dtx";
ddev->mdev.fops = &surface_dtx_fops;
ddev->notif.base.priority = 1;
ddev->notif.base.fn = sdtx_notifier;
ddev->notif.event.reg = SSAM_EVENT_REGISTRY_SAM;
ddev->notif.event.id.target_category = SSAM_SSH_TC_BAS;
ddev->notif.event.id.instance = 0;
ddev->notif.event.mask = SSAM_EVENT_MASK_NONE;
ddev->notif.event.flags = SSAM_EVENT_SEQUENCED;
init_waitqueue_head(&ddev->waitq);
mutex_init(&ddev->write_lock);
init_rwsem(&ddev->client_lock);
INIT_LIST_HEAD(&ddev->client_list);
INIT_DELAYED_WORK(&ddev->mode_work, sdtx_device_mode_workfn);
INIT_DELAYED_WORK(&ddev->state_work, sdtx_device_state_workfn);
/*
* Get current device state. We want to guarantee that events are only
* sent when state actually changes. Thus we cannot use special
* "uninitialized" values, as that would cause problems when manually
* querying the state in surface_dtx_pm_complete(). I.e. we would not
* be able to detect state changes there if no change event has been
* received between driver initialization and first device suspension.
*
* Note that we also need to do this before registering the event
* notifier, as that may access the state values.
*/
status = ssam_retry(ssam_bas_get_base, ddev->ctrl, &ddev->state.base);
if (status)
return status;
status = ssam_retry(ssam_bas_get_device_mode, ddev->ctrl, &ddev->state.device_mode);
if (status)
return status;
status = ssam_retry(ssam_bas_get_latch_status, ddev->ctrl, &ddev->state.latch_status);
if (status)
return status;
/* Set up tablet mode switch. */
ddev->mode_switch = input_allocate_device();
if (!ddev->mode_switch)
return -ENOMEM;
ddev->mode_switch->name = "Microsoft Surface DTX Device Mode Switch";
ddev->mode_switch->phys = "ssam/01:11:01:00:00/input0";
ddev->mode_switch->id.bustype = BUS_HOST;
ddev->mode_switch->dev.parent = ddev->dev;
tablet_mode = (ddev->state.device_mode != SDTX_DEVICE_MODE_LAPTOP);
input_set_capability(ddev->mode_switch, EV_SW, SW_TABLET_MODE);
input_report_switch(ddev->mode_switch, SW_TABLET_MODE, tablet_mode);
status = input_register_device(ddev->mode_switch);
if (status) {
input_free_device(ddev->mode_switch);
return status;
}
/* Set up event notifier. */
status = ssam_notifier_register(ddev->ctrl, &ddev->notif);
if (status)
goto err_notif;
/* Register miscdevice. */
status = misc_register(&ddev->mdev);
if (status)
goto err_mdev;
/*
* Update device state in case it has changed between getting the
* initial mode and registering the event notifier.
*/
sdtx_update_device_state(ddev, 0);
return 0;
err_mdev:
ssam_notifier_unregister(ddev->ctrl, &ddev->notif);
cancel_delayed_work_sync(&ddev->mode_work);
err_notif:
input_unregister_device(ddev->mode_switch);
return status;
}
static struct sdtx_device *sdtx_device_create(struct device *dev, struct ssam_controller *ctrl)
{
struct sdtx_device *ddev;
int status;
ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
if (!ddev)
return ERR_PTR(-ENOMEM);
status = sdtx_device_init(ddev, dev, ctrl);
if (status) {
sdtx_device_put(ddev);
return ERR_PTR(status);
}
return ddev;
}
static void sdtx_device_destroy(struct sdtx_device *ddev)
{
struct sdtx_client *client;
/*
* Mark device as shut-down. Prevent new clients from being added and
* new operations from being executed.
*/
set_bit(SDTX_DEVICE_SHUTDOWN_BIT, &ddev->flags);
/* Disable notifiers, prevent new events from arriving. */
ssam_notifier_unregister(ddev->ctrl, &ddev->notif);
/* Stop mode_work, prevent access to mode_switch. */
cancel_delayed_work_sync(&ddev->mode_work);
/* Stop state_work. */
cancel_delayed_work_sync(&ddev->state_work);
/* With mode_work canceled, we can unregister the mode_switch. */
input_unregister_device(ddev->mode_switch);
/* Wake up async clients. */
down_write(&ddev->client_lock);
list_for_each_entry(client, &ddev->client_list, node) {
kill_fasync(&client->fasync, SIGIO, POLL_HUP);
}
up_write(&ddev->client_lock);
/* Wake up blocking clients. */
wake_up_interruptible(&ddev->waitq);
/*
* Wait for clients to finish their current operation. After this, the
* controller and device references are guaranteed to be no longer in
* use.
*/
down_write(&ddev->lock);
ddev->dev = NULL;
ddev->ctrl = NULL;
up_write(&ddev->lock);
/* Finally remove the misc-device. */
misc_deregister(&ddev->mdev);
/*
* We're now guaranteed that sdtx_device_open() won't be called any
 * more, so we can now safely drop our reference.
*/
sdtx_device_put(ddev);
}
/* -- PM ops. --------------------------------------------------------------- */
#ifdef CONFIG_PM_SLEEP
static void surface_dtx_pm_complete(struct device *dev)
{
struct sdtx_device *ddev = dev_get_drvdata(dev);
/*
* Normally, the EC will store events while suspended (i.e. in
* display-off state) and release them when resumed (i.e. transitioned
* to display-on state). During hibernation, however, the EC will be
* shut down and does not store events. Furthermore, events might be
* dropped during prolonged suspension (it is currently unknown how
* big this event buffer is and how it behaves on overruns).
*
* To prevent any problems, we update the device state here. We do
* this delayed to ensure that any events sent by the EC directly
* after resuming will be handled first. The delay below has been
* chosen (experimentally), so that there should be ample time for
* these events to be handled, before we check and, if necessary,
* update the state.
*/
sdtx_update_device_state(ddev, msecs_to_jiffies(1000));
}
static const struct dev_pm_ops surface_dtx_pm_ops = {
.complete = surface_dtx_pm_complete,
};
#else /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops surface_dtx_pm_ops = {};
#endif /* CONFIG_PM_SLEEP */
/* -- Platform driver. ------------------------------------------------------ */
static int surface_dtx_platform_probe(struct platform_device *pdev)
{
struct ssam_controller *ctrl;
struct sdtx_device *ddev;
/* Link to EC. */
ctrl = ssam_client_bind(&pdev->dev);
if (IS_ERR(ctrl))
return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
ddev = sdtx_device_create(&pdev->dev, ctrl);
if (IS_ERR(ddev))
return PTR_ERR(ddev);
platform_set_drvdata(pdev, ddev);
return 0;
}
static int surface_dtx_platform_remove(struct platform_device *pdev)
{
sdtx_device_destroy(platform_get_drvdata(pdev));
return 0;
}
static const struct acpi_device_id surface_dtx_acpi_match[] = {
{ "MSHW0133", 0 },
{ },
};
MODULE_DEVICE_TABLE(acpi, surface_dtx_acpi_match);
static struct platform_driver surface_dtx_platform_driver = {
.probe = surface_dtx_platform_probe,
.remove = surface_dtx_platform_remove,
.driver = {
.name = "surface_dtx_pltf",
.acpi_match_table = surface_dtx_acpi_match,
.pm = &surface_dtx_pm_ops,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
/* -- SSAM device driver. --------------------------------------------------- */
#ifdef CONFIG_SURFACE_AGGREGATOR_BUS
static int surface_dtx_ssam_probe(struct ssam_device *sdev)
{
struct sdtx_device *ddev;
ddev = sdtx_device_create(&sdev->dev, sdev->ctrl);
if (IS_ERR(ddev))
return PTR_ERR(ddev);
ssam_device_set_drvdata(sdev, ddev);
return 0;
}
static void surface_dtx_ssam_remove(struct ssam_device *sdev)
{
sdtx_device_destroy(ssam_device_get_drvdata(sdev));
}
static const struct ssam_device_id surface_dtx_ssam_match[] = {
{ SSAM_SDEV(BAS, SAM, 0x00, 0x00) },
{ },
};
MODULE_DEVICE_TABLE(ssam, surface_dtx_ssam_match);
static struct ssam_device_driver surface_dtx_ssam_driver = {
.probe = surface_dtx_ssam_probe,
.remove = surface_dtx_ssam_remove,
.match_table = surface_dtx_ssam_match,
.driver = {
.name = "surface_dtx",
.pm = &surface_dtx_pm_ops,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
static int ssam_dtx_driver_register(void)
{
return ssam_device_driver_register(&surface_dtx_ssam_driver);
}
static void ssam_dtx_driver_unregister(void)
{
ssam_device_driver_unregister(&surface_dtx_ssam_driver);
}
#else /* CONFIG_SURFACE_AGGREGATOR_BUS */
static int ssam_dtx_driver_register(void)
{
return 0;
}
static void ssam_dtx_driver_unregister(void)
{
}
#endif /* CONFIG_SURFACE_AGGREGATOR_BUS */
/* -- Module setup. --------------------------------------------------------- */
static int __init surface_dtx_init(void)
{
int status;
status = ssam_dtx_driver_register();
if (status)
return status;
status = platform_driver_register(&surface_dtx_platform_driver);
if (status)
ssam_dtx_driver_unregister();
return status;
}
module_init(surface_dtx_init);
static void __exit surface_dtx_exit(void)
{
platform_driver_unregister(&surface_dtx_platform_driver);
ssam_dtx_driver_unregister();
}
module_exit(surface_dtx_exit);
MODULE_AUTHOR("Maximilian Luz <[email protected]>");
MODULE_DESCRIPTION("Detachment-system driver for Surface System Aggregator Module");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/surface/surface_dtx.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for the Surface ACPI Notify (SAN) interface/shim.
*
* Translates communication from ACPI to Surface System Aggregator Module
* (SSAM/SAM) requests and back, specifically SAM-over-SSH. Translates SSAM
* events back to ACPI notifications. Allows handling of discrete GPU
* notifications sent from ACPI via the SAN interface by providing them to any
* registered external driver.
*
* Copyright (C) 2019-2022 Maximilian Luz <[email protected]>
*/
#include <asm/unaligned.h>
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/platform_device.h>
#include <linux/rwsem.h>
#include <linux/surface_aggregator/controller.h>
#include <linux/surface_acpi_notify.h>
struct san_data {
struct device *dev;
struct ssam_controller *ctrl;
struct acpi_connection_info info;
struct ssam_event_notifier nf_bat;
struct ssam_event_notifier nf_tmp;
};
#define to_san_data(ptr, member) \
container_of(ptr, struct san_data, member)
static struct workqueue_struct *san_wq;
/* -- dGPU notifier interface. ---------------------------------------------- */
struct san_rqsg_if {
struct rw_semaphore lock;
struct device *dev;
struct blocking_notifier_head nh;
};
static struct san_rqsg_if san_rqsg_if = {
.lock = __RWSEM_INITIALIZER(san_rqsg_if.lock),
.dev = NULL,
.nh = BLOCKING_NOTIFIER_INIT(san_rqsg_if.nh),
};
static int san_set_rqsg_interface_device(struct device *dev)
{
int status = 0;
down_write(&san_rqsg_if.lock);
if (!san_rqsg_if.dev && dev)
san_rqsg_if.dev = dev;
else
status = -EBUSY;
up_write(&san_rqsg_if.lock);
return status;
}
/**
* san_client_link() - Link client as consumer to SAN device.
* @client: The client to link.
*
* Sets up a device link between the provided client device as consumer and
* the SAN device as provider. This function can be used to ensure that the
* SAN interface has been set up and will be set up for as long as the driver
* of the client device is bound. This guarantees that, during that time, all
* dGPU events will be received by any registered notifier.
*
* The link will be automatically removed once the client device's driver is
* unbound.
*
* Return: Returns zero on success, %-ENXIO if the SAN interface has not been
* set up yet, and %-ENOMEM if device link creation failed.
*/
int san_client_link(struct device *client)
{
const u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER;
struct device_link *link;
down_read(&san_rqsg_if.lock);
if (!san_rqsg_if.dev) {
up_read(&san_rqsg_if.lock);
return -ENXIO;
}
link = device_link_add(client, san_rqsg_if.dev, flags);
if (!link) {
up_read(&san_rqsg_if.lock);
return -ENOMEM;
}
if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND) {
up_read(&san_rqsg_if.lock);
return -ENXIO;
}
up_read(&san_rqsg_if.lock);
return 0;
}
EXPORT_SYMBOL_GPL(san_client_link);
/**
* san_dgpu_notifier_register() - Register a SAN dGPU notifier.
* @nb: The notifier-block to register.
*
* Registers a SAN dGPU notifier, receiving any new SAN dGPU events sent from
* ACPI. The registered notifier will be called with &struct san_dgpu_event
* as notifier data and the command ID of that event as notifier action.
*/
int san_dgpu_notifier_register(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&san_rqsg_if.nh, nb);
}
EXPORT_SYMBOL_GPL(san_dgpu_notifier_register);
/**
* san_dgpu_notifier_unregister() - Unregister a SAN dGPU notifier.
* @nb: The notifier-block to unregister.
*/
int san_dgpu_notifier_unregister(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&san_rqsg_if.nh, nb);
}
EXPORT_SYMBOL_GPL(san_dgpu_notifier_unregister);
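/*
 * Illustrative sketch (not part of this driver): a hypothetical client
 * driver consuming the dGPU notifier interface above could look roughly
 * like this, typically from its probe()/remove() callbacks:
 *
 *	static int my_dgpu_notify(struct notifier_block *nb,
 *				  unsigned long action, void *data)
 *	{
 *		struct san_dgpu_event *evt = data;
 *
 *		// action == evt->command; handle the event here
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_dgpu_notify,
 *	};
 *
 *	// probe: ensure the SAN interface is up, then subscribe
 *	san_client_link(dev);
 *	san_dgpu_notifier_register(&my_nb);
 *
 *	// remove: unsubscribe
 *	san_dgpu_notifier_unregister(&my_nb);
 */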
static int san_dgpu_notifier_call(struct san_dgpu_event *evt)
{
int ret;
ret = blocking_notifier_call_chain(&san_rqsg_if.nh, evt->command, evt);
return notifier_to_errno(ret);
}
/* -- ACPI _DSM event relay. ------------------------------------------------ */
#define SAN_DSM_REVISION 0
/* 93b666c5-70c6-469f-a215-3d487c91ab3c */
static const guid_t SAN_DSM_UUID =
GUID_INIT(0x93b666c5, 0x70c6, 0x469f, 0xa2, 0x15, 0x3d,
0x48, 0x7c, 0x91, 0xab, 0x3c);
enum san_dsm_event_fn {
SAN_DSM_EVENT_FN_BAT1_STAT = 0x03,
SAN_DSM_EVENT_FN_BAT1_INFO = 0x04,
SAN_DSM_EVENT_FN_ADP1_STAT = 0x05,
SAN_DSM_EVENT_FN_ADP1_INFO = 0x06,
SAN_DSM_EVENT_FN_BAT2_STAT = 0x07,
SAN_DSM_EVENT_FN_BAT2_INFO = 0x08,
SAN_DSM_EVENT_FN_THERMAL = 0x09,
SAN_DSM_EVENT_FN_DPTF = 0x0a,
};
enum sam_event_cid_bat {
SAM_EVENT_CID_BAT_BIX = 0x15,
SAM_EVENT_CID_BAT_BST = 0x16,
SAM_EVENT_CID_BAT_ADP = 0x17,
SAM_EVENT_CID_BAT_PROT = 0x18,
SAM_EVENT_CID_BAT_DPTF = 0x4f,
};
enum sam_event_cid_tmp {
SAM_EVENT_CID_TMP_TRIP = 0x0b,
};
struct san_event_work {
struct delayed_work work;
struct device *dev;
struct ssam_event event; /* must be last */
};
static int san_acpi_notify_event(struct device *dev, u64 func,
union acpi_object *param)
{
acpi_handle san = ACPI_HANDLE(dev);
union acpi_object *obj;
int status = 0;
if (!acpi_check_dsm(san, &SAN_DSM_UUID, SAN_DSM_REVISION, BIT_ULL(func)))
return 0;
dev_dbg(dev, "notify event %#04llx\n", func);
obj = acpi_evaluate_dsm_typed(san, &SAN_DSM_UUID, SAN_DSM_REVISION,
func, param, ACPI_TYPE_BUFFER);
if (!obj)
return -EFAULT;
if (obj->buffer.length != 1 || obj->buffer.pointer[0] != 0) {
dev_err(dev, "got unexpected result from _DSM\n");
status = -EPROTO;
}
ACPI_FREE(obj);
return status;
}
static int san_evt_bat_adp(struct device *dev, const struct ssam_event *event)
{
int status;
status = san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_ADP1_STAT, NULL);
if (status)
return status;
/*
* Ensure that the battery states get updated correctly. When the
* battery is fully charged and an adapter is plugged in, it sometimes
* is not updated correctly, instead showing it as charging.
* Explicitly trigger battery updates to fix this.
*/
status = san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_BAT1_STAT, NULL);
if (status)
return status;
return san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_BAT2_STAT, NULL);
}
static int san_evt_bat_bix(struct device *dev, const struct ssam_event *event)
{
enum san_dsm_event_fn fn;
if (event->instance_id == 0x02)
fn = SAN_DSM_EVENT_FN_BAT2_INFO;
else
fn = SAN_DSM_EVENT_FN_BAT1_INFO;
return san_acpi_notify_event(dev, fn, NULL);
}
static int san_evt_bat_bst(struct device *dev, const struct ssam_event *event)
{
enum san_dsm_event_fn fn;
if (event->instance_id == 0x02)
fn = SAN_DSM_EVENT_FN_BAT2_STAT;
else
fn = SAN_DSM_EVENT_FN_BAT1_STAT;
return san_acpi_notify_event(dev, fn, NULL);
}
static int san_evt_bat_dptf(struct device *dev, const struct ssam_event *event)
{
union acpi_object payload;
/*
* The Surface ACPI expects a buffer and not a package. It specifically
* checks for ObjectType (Arg3) == 0x03. This will cause a warning in
* acpica/nsarguments.c, but that warning can be safely ignored.
*/
payload.type = ACPI_TYPE_BUFFER;
payload.buffer.length = event->length;
payload.buffer.pointer = (u8 *)&event->data[0];
return san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_DPTF, &payload);
}
static unsigned long san_evt_bat_delay(u8 cid)
{
switch (cid) {
case SAM_EVENT_CID_BAT_ADP:
/*
* Wait for battery state to update before signaling adapter
* change.
*/
return msecs_to_jiffies(5000);
case SAM_EVENT_CID_BAT_BST:
/* Ensure we do not miss anything important due to caching. */
return msecs_to_jiffies(2000);
default:
return 0;
}
}
static bool san_evt_bat(const struct ssam_event *event, struct device *dev)
{
int status;
switch (event->command_id) {
case SAM_EVENT_CID_BAT_BIX:
status = san_evt_bat_bix(dev, event);
break;
case SAM_EVENT_CID_BAT_BST:
status = san_evt_bat_bst(dev, event);
break;
case SAM_EVENT_CID_BAT_ADP:
status = san_evt_bat_adp(dev, event);
break;
case SAM_EVENT_CID_BAT_PROT:
/*
* TODO: Implement support for battery protection status change
* event.
*/
return true;
case SAM_EVENT_CID_BAT_DPTF:
status = san_evt_bat_dptf(dev, event);
break;
default:
return false;
}
if (status) {
dev_err(dev, "error handling power event (cid = %#04x)\n",
event->command_id);
}
return true;
}
static void san_evt_bat_workfn(struct work_struct *work)
{
struct san_event_work *ev;
ev = container_of(work, struct san_event_work, work.work);
san_evt_bat(&ev->event, ev->dev);
kfree(ev);
}
static u32 san_evt_bat_nf(struct ssam_event_notifier *nf,
const struct ssam_event *event)
{
struct san_data *d = to_san_data(nf, nf_bat);
struct san_event_work *work;
unsigned long delay = san_evt_bat_delay(event->command_id);
if (delay == 0)
return san_evt_bat(event, d->dev) ? SSAM_NOTIF_HANDLED : 0;
work = kzalloc(sizeof(*work) + event->length, GFP_KERNEL);
if (!work)
return ssam_notifier_from_errno(-ENOMEM);
INIT_DELAYED_WORK(&work->work, san_evt_bat_workfn);
work->dev = d->dev;
work->event = *event;
memcpy(work->event.data, event->data, event->length);
queue_delayed_work(san_wq, &work->work, delay);
return SSAM_NOTIF_HANDLED;
}
static int san_evt_tmp_trip(struct device *dev, const struct ssam_event *event)
{
union acpi_object param;
/*
* The Surface ACPI expects an integer and not a package. This will
* cause a warning in acpica/nsarguments.c, but that warning can be
* safely ignored.
*/
param.type = ACPI_TYPE_INTEGER;
param.integer.value = event->instance_id;
return san_acpi_notify_event(dev, SAN_DSM_EVENT_FN_THERMAL, ¶m);
}
static bool san_evt_tmp(const struct ssam_event *event, struct device *dev)
{
int status;
switch (event->command_id) {
case SAM_EVENT_CID_TMP_TRIP:
status = san_evt_tmp_trip(dev, event);
break;
default:
return false;
}
if (status) {
dev_err(dev, "error handling thermal event (cid = %#04x)\n",
event->command_id);
}
return true;
}
static u32 san_evt_tmp_nf(struct ssam_event_notifier *nf,
const struct ssam_event *event)
{
struct san_data *d = to_san_data(nf, nf_tmp);
return san_evt_tmp(event, d->dev) ? SSAM_NOTIF_HANDLED : 0;
}
/* -- ACPI GSB OperationRegion handler -------------------------------------- */
struct gsb_data_in {
u8 cv;
} __packed;
struct gsb_data_rqsx {
u8 cv; /* Command value (san_gsb_request_cv). */
u8 tc; /* Target category. */
u8 tid; /* Target ID. */
u8 iid; /* Instance ID. */
u8 snc; /* Expect-response-flag. */
u8 cid; /* Command ID. */
u16 cdl; /* Payload length. */
u8 pld[]; /* Payload. */
} __packed;
struct gsb_data_etwl {
u8 cv; /* Command value (should be 0x02). */
u8 etw3; /* Unknown. */
u8 etw4; /* Unknown. */
u8 msg[]; /* Error message (ASCIIZ). */
} __packed;
struct gsb_data_out {
u8 status; /* _SSH communication status. */
u8 len; /* _SSH payload length. */
u8 pld[]; /* _SSH payload. */
} __packed;
union gsb_buffer_data {
struct gsb_data_in in; /* Common input. */
struct gsb_data_rqsx rqsx; /* RQSX input. */
struct gsb_data_etwl etwl; /* ETWL input. */
struct gsb_data_out out; /* Output. */
};
struct gsb_buffer {
u8 status; /* GSB AttribRawProcess status. */
u8 len; /* GSB AttribRawProcess length. */
union gsb_buffer_data data;
} __packed;
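/*
 * Illustrative layout, derived from the structs above: an RQSX package
 * carrying a two-byte payload occupies the GSB buffer as
 *
 *	b->len = 10,
 *	b->data.rqsx = { .cv, .tc, .tid, .iid, .snc, .cid,
 *			 .cdl = 2, .pld = { p0, p1 } }
 *
 * i.e. b->len counts everything following the two GSB header bytes, so
 * cdl == b->len - sizeof(struct gsb_data_rqsx), which is exactly what
 * san_validate_rqsx() below checks.
 */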
#define SAN_GSB_MAX_RQSX_PAYLOAD (U8_MAX - 2 - sizeof(struct gsb_data_rqsx))
#define SAN_GSB_MAX_RESPONSE (U8_MAX - 2 - sizeof(struct gsb_data_out))
#define SAN_GSB_COMMAND 0
enum san_gsb_request_cv {
SAN_GSB_REQUEST_CV_RQST = 0x01,
SAN_GSB_REQUEST_CV_ETWL = 0x02,
SAN_GSB_REQUEST_CV_RQSG = 0x03,
};
#define SAN_REQUEST_NUM_TRIES 5
static acpi_status san_etwl(struct san_data *d, struct gsb_buffer *b)
{
struct gsb_data_etwl *etwl = &b->data.etwl;
if (b->len < sizeof(struct gsb_data_etwl)) {
dev_err(d->dev, "invalid ETWL package (len = %d)\n", b->len);
return AE_OK;
}
dev_err(d->dev, "ETWL(%#04x, %#04x): %.*s\n", etwl->etw3, etwl->etw4,
(unsigned int)(b->len - sizeof(struct gsb_data_etwl)),
(char *)etwl->msg);
/* Indicate success. */
b->status = 0x00;
b->len = 0x00;
return AE_OK;
}
static
struct gsb_data_rqsx *san_validate_rqsx(struct device *dev, const char *type,
struct gsb_buffer *b)
{
struct gsb_data_rqsx *rqsx = &b->data.rqsx;
if (b->len < sizeof(struct gsb_data_rqsx)) {
dev_err(dev, "invalid %s package (len = %d)\n", type, b->len);
return NULL;
}
if (get_unaligned(&rqsx->cdl) != b->len - sizeof(struct gsb_data_rqsx)) {
dev_err(dev, "bogus %s package (len = %d, cdl = %d)\n",
type, b->len, get_unaligned(&rqsx->cdl));
return NULL;
}
if (get_unaligned(&rqsx->cdl) > SAN_GSB_MAX_RQSX_PAYLOAD) {
dev_err(dev, "payload for %s package too large (cdl = %d)\n",
type, get_unaligned(&rqsx->cdl));
return NULL;
}
return rqsx;
}
static void gsb_rqsx_response_error(struct gsb_buffer *gsb, int status)
{
gsb->status = 0x00;
gsb->len = 0x02;
gsb->data.out.status = (u8)(-status);
gsb->data.out.len = 0x00;
}
static void gsb_rqsx_response_success(struct gsb_buffer *gsb, u8 *ptr, size_t len)
{
gsb->status = 0x00;
gsb->len = len + 2;
gsb->data.out.status = 0x00;
gsb->data.out.len = len;
if (len)
memcpy(&gsb->data.out.pld[0], ptr, len);
}
static acpi_status san_rqst_fixup_suspended(struct san_data *d,
struct ssam_request *rqst,
struct gsb_buffer *gsb)
{
if (rqst->target_category == SSAM_SSH_TC_BAS && rqst->command_id == 0x0D) {
u8 base_state = 1;
/* Base state quirk:
* The base state may be queried from ACPI when the EC is still
* suspended. In this case it will return '-EPERM'. This query
* will only be triggered from the ACPI lid GPE interrupt, thus
* we are either in laptop or studio mode (base status 0x01 or
* 0x02). Furthermore, we will only get here if the device (and
* EC) have been suspended.
*
* We now assume that the device is in laptop mode (0x01). This
* has the drawback that it will wake the device when unfolding
* it in studio mode, but it also allows us to avoid actively
* waiting for the EC to wake up, which may incur a notable
* delay.
*/
dev_dbg(d->dev, "rqst: fixup: base-state quirk\n");
gsb_rqsx_response_success(gsb, &base_state, sizeof(base_state));
return AE_OK;
}
gsb_rqsx_response_error(gsb, -ENXIO);
return AE_OK;
}
static acpi_status san_rqst(struct san_data *d, struct gsb_buffer *buffer)
{
u8 rspbuf[SAN_GSB_MAX_RESPONSE];
struct gsb_data_rqsx *gsb_rqst;
struct ssam_request rqst;
struct ssam_response rsp;
int status = 0;
gsb_rqst = san_validate_rqsx(d->dev, "RQST", buffer);
if (!gsb_rqst)
return AE_OK;
rqst.target_category = gsb_rqst->tc;
rqst.target_id = gsb_rqst->tid;
rqst.command_id = gsb_rqst->cid;
rqst.instance_id = gsb_rqst->iid;
rqst.flags = gsb_rqst->snc ? SSAM_REQUEST_HAS_RESPONSE : 0;
rqst.length = get_unaligned(&gsb_rqst->cdl);
rqst.payload = &gsb_rqst->pld[0];
rsp.capacity = ARRAY_SIZE(rspbuf);
rsp.length = 0;
rsp.pointer = &rspbuf[0];
/* Handle suspended device. */
if (d->dev->power.is_suspended) {
dev_warn(d->dev, "rqst: device is suspended, not executing\n");
return san_rqst_fixup_suspended(d, &rqst, buffer);
}
status = __ssam_retry(ssam_request_do_sync_onstack, SAN_REQUEST_NUM_TRIES,
d->ctrl, &rqst, &rsp, SAN_GSB_MAX_RQSX_PAYLOAD);
if (!status) {
gsb_rqsx_response_success(buffer, rsp.pointer, rsp.length);
} else {
dev_err(d->dev, "rqst: failed with error %d\n", status);
gsb_rqsx_response_error(buffer, status);
}
return AE_OK;
}
static acpi_status san_rqsg(struct san_data *d, struct gsb_buffer *buffer)
{
struct gsb_data_rqsx *gsb_rqsg;
struct san_dgpu_event evt;
int status;
gsb_rqsg = san_validate_rqsx(d->dev, "RQSG", buffer);
if (!gsb_rqsg)
return AE_OK;
evt.category = gsb_rqsg->tc;
evt.target = gsb_rqsg->tid;
evt.command = gsb_rqsg->cid;
evt.instance = gsb_rqsg->iid;
evt.length = get_unaligned(&gsb_rqsg->cdl);
evt.payload = &gsb_rqsg->pld[0];
status = san_dgpu_notifier_call(&evt);
if (!status) {
gsb_rqsx_response_success(buffer, NULL, 0);
} else {
dev_err(d->dev, "rqsg: failed with error %d\n", status);
gsb_rqsx_response_error(buffer, status);
}
return AE_OK;
}
static acpi_status san_opreg_handler(u32 function, acpi_physical_address command,
u32 bits, u64 *value64, void *opreg_context,
void *region_context)
{
struct san_data *d = to_san_data(opreg_context, info);
struct gsb_buffer *buffer = (struct gsb_buffer *)value64;
int accessor_type = (function & 0xFFFF0000) >> 16;
if (command != SAN_GSB_COMMAND) {
dev_warn(d->dev, "unsupported command: %#04llx\n", command);
return AE_OK;
}
if (accessor_type != ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS) {
dev_err(d->dev, "invalid access type: %#04x\n", accessor_type);
return AE_OK;
}
	/* The buffer must at least contain the command value. */
if (buffer->len == 0) {
dev_err(d->dev, "request-package too small\n");
return AE_OK;
}
switch (buffer->data.in.cv) {
case SAN_GSB_REQUEST_CV_RQST:
return san_rqst(d, buffer);
case SAN_GSB_REQUEST_CV_ETWL:
return san_etwl(d, buffer);
case SAN_GSB_REQUEST_CV_RQSG:
return san_rqsg(d, buffer);
default:
dev_warn(d->dev, "unsupported SAN0 request (cv: %#04x)\n",
buffer->data.in.cv);
return AE_OK;
}
}
/* -- Driver setup. --------------------------------------------------------- */
static int san_events_register(struct platform_device *pdev)
{
struct san_data *d = platform_get_drvdata(pdev);
int status;
d->nf_bat.base.priority = 1;
d->nf_bat.base.fn = san_evt_bat_nf;
d->nf_bat.event.reg = SSAM_EVENT_REGISTRY_SAM;
d->nf_bat.event.id.target_category = SSAM_SSH_TC_BAT;
d->nf_bat.event.id.instance = 0;
d->nf_bat.event.mask = SSAM_EVENT_MASK_TARGET;
d->nf_bat.event.flags = SSAM_EVENT_SEQUENCED;
d->nf_tmp.base.priority = 1;
d->nf_tmp.base.fn = san_evt_tmp_nf;
d->nf_tmp.event.reg = SSAM_EVENT_REGISTRY_SAM;
d->nf_tmp.event.id.target_category = SSAM_SSH_TC_TMP;
d->nf_tmp.event.id.instance = 0;
d->nf_tmp.event.mask = SSAM_EVENT_MASK_TARGET;
d->nf_tmp.event.flags = SSAM_EVENT_SEQUENCED;
status = ssam_notifier_register(d->ctrl, &d->nf_bat);
if (status)
return status;
status = ssam_notifier_register(d->ctrl, &d->nf_tmp);
if (status)
ssam_notifier_unregister(d->ctrl, &d->nf_bat);
return status;
}
static void san_events_unregister(struct platform_device *pdev)
{
struct san_data *d = platform_get_drvdata(pdev);
ssam_notifier_unregister(d->ctrl, &d->nf_bat);
ssam_notifier_unregister(d->ctrl, &d->nf_tmp);
}
#define san_consumer_printk(level, dev, handle, fmt, ...) \
do { \
char *path = "<error getting consumer path>"; \
struct acpi_buffer buffer = { \
.length = ACPI_ALLOCATE_BUFFER, \
.pointer = NULL, \
}; \
\
if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer))) \
path = buffer.pointer; \
\
dev_##level(dev, "[%s]: " fmt, path, ##__VA_ARGS__); \
kfree(buffer.pointer); \
} while (0)
#define san_consumer_dbg(dev, handle, fmt, ...) \
san_consumer_printk(dbg, dev, handle, fmt, ##__VA_ARGS__)
#define san_consumer_warn(dev, handle, fmt, ...) \
san_consumer_printk(warn, dev, handle, fmt, ##__VA_ARGS__)
static bool is_san_consumer(struct platform_device *pdev, acpi_handle handle)
{
struct acpi_handle_list dep_devices;
acpi_handle supplier = ACPI_HANDLE(&pdev->dev);
acpi_status status;
int i;
if (!acpi_has_method(handle, "_DEP"))
return false;
status = acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices);
if (ACPI_FAILURE(status)) {
san_consumer_dbg(&pdev->dev, handle, "failed to evaluate _DEP\n");
return false;
}
for (i = 0; i < dep_devices.count; i++) {
if (dep_devices.handles[i] == supplier)
return true;
}
return false;
}
static acpi_status san_consumer_setup(acpi_handle handle, u32 lvl,
void *context, void **rv)
{
const u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER;
struct platform_device *pdev = context;
struct acpi_device *adev;
struct device_link *link;
if (!is_san_consumer(pdev, handle))
return AE_OK;
/* Ignore ACPI devices that are not present. */
adev = acpi_fetch_acpi_dev(handle);
if (!adev)
return AE_OK;
san_consumer_dbg(&pdev->dev, handle, "creating device link\n");
/* Try to set up device links, ignore but log errors. */
link = device_link_add(&adev->dev, &pdev->dev, flags);
if (!link) {
san_consumer_warn(&pdev->dev, handle, "failed to create device link\n");
return AE_OK;
}
return AE_OK;
}
static int san_consumer_links_setup(struct platform_device *pdev)
{
acpi_status status;
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, san_consumer_setup, NULL,
pdev, NULL);
return status ? -EFAULT : 0;
}
static int san_probe(struct platform_device *pdev)
{
struct acpi_device *san = ACPI_COMPANION(&pdev->dev);
struct ssam_controller *ctrl;
struct san_data *data;
acpi_status astatus;
int status;
ctrl = ssam_client_bind(&pdev->dev);
if (IS_ERR(ctrl))
return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
status = san_consumer_links_setup(pdev);
if (status)
return status;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->dev = &pdev->dev;
data->ctrl = ctrl;
platform_set_drvdata(pdev, data);
astatus = acpi_install_address_space_handler(san->handle,
ACPI_ADR_SPACE_GSBUS,
&san_opreg_handler, NULL,
&data->info);
if (ACPI_FAILURE(astatus))
return -ENXIO;
status = san_events_register(pdev);
if (status)
goto err_enable_events;
status = san_set_rqsg_interface_device(&pdev->dev);
if (status)
goto err_install_dev;
acpi_dev_clear_dependencies(san);
return 0;
err_install_dev:
san_events_unregister(pdev);
err_enable_events:
	acpi_remove_address_space_handler(san->handle, ACPI_ADR_SPACE_GSBUS,
&san_opreg_handler);
return status;
}
static int san_remove(struct platform_device *pdev)
{
acpi_handle san = ACPI_HANDLE(&pdev->dev);
san_set_rqsg_interface_device(NULL);
acpi_remove_address_space_handler(san, ACPI_ADR_SPACE_GSBUS,
&san_opreg_handler);
san_events_unregister(pdev);
/*
* We have unregistered our event sources. Now we need to ensure that
* all delayed works they may have spawned are run to completion.
*/
flush_workqueue(san_wq);
return 0;
}
static const struct acpi_device_id san_match[] = {
{ "MSHW0091" },
{ },
};
MODULE_DEVICE_TABLE(acpi, san_match);
static struct platform_driver surface_acpi_notify = {
.probe = san_probe,
.remove = san_remove,
.driver = {
.name = "surface_acpi_notify",
.acpi_match_table = san_match,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
static int __init san_init(void)
{
int ret;
san_wq = alloc_workqueue("san_wq", 0, 0);
if (!san_wq)
return -ENOMEM;
ret = platform_driver_register(&surface_acpi_notify);
if (ret)
destroy_workqueue(san_wq);
return ret;
}
module_init(san_init);
static void __exit san_exit(void)
{
platform_driver_unregister(&surface_acpi_notify);
destroy_workqueue(san_wq);
}
module_exit(san_exit);
MODULE_AUTHOR("Maximilian Luz <[email protected]>");
MODULE_DESCRIPTION("Surface ACPI Notify driver for Surface System Aggregator Module");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/surface/surface_acpi_notify.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for the LID cover switch of the Surface 3
*
* Copyright (c) 2016 Red Hat Inc.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/input.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
MODULE_AUTHOR("Benjamin Tissoires <[email protected]>");
MODULE_DESCRIPTION("Surface 3 platform driver");
MODULE_LICENSE("GPL");
#define ACPI_BUTTON_HID_LID "PNP0C0D"
#define SPI_CTL_OBJ_NAME "SPI"
#define SPI_TS_OBJ_NAME "NTRG"
#define SURFACE3_LID_GUID "F7CC25EC-D20B-404C-8903-0ED4359C18AE"
MODULE_ALIAS("wmi:" SURFACE3_LID_GUID);
static const struct dmi_system_id surface3_dmi_table[] = {
#if defined(CONFIG_X86)
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"),
},
},
#endif
{ }
};
struct surface3_wmi {
struct acpi_device *touchscreen_adev;
struct acpi_device *pnp0c0d_adev;
struct acpi_hotplug_context hp;
struct input_dev *input;
};
static struct platform_device *s3_wmi_pdev;
static struct surface3_wmi s3_wmi;
static DEFINE_MUTEX(s3_wmi_lock);
static int s3_wmi_query_block(const char *guid, int instance, int *ret)
{
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj = NULL;
acpi_status status;
int error = 0;
mutex_lock(&s3_wmi_lock);
status = wmi_query_block(guid, instance, &output);
if (ACPI_FAILURE(status)) {
error = -EIO;
goto out_free_unlock;
}
obj = output.pointer;
if (!obj || obj->type != ACPI_TYPE_INTEGER) {
if (obj) {
pr_err("query block returned object type: %d - buffer length:%d\n",
obj->type,
obj->type == ACPI_TYPE_BUFFER ?
obj->buffer.length : 0);
}
error = -EINVAL;
goto out_free_unlock;
}
*ret = obj->integer.value;
out_free_unlock:
kfree(obj);
mutex_unlock(&s3_wmi_lock);
return error;
}
static inline int s3_wmi_query_lid(int *ret)
{
return s3_wmi_query_block(SURFACE3_LID_GUID, 0, ret);
}
static int s3_wmi_send_lid_state(void)
{
int ret, lid_sw;
ret = s3_wmi_query_lid(&lid_sw);
if (ret)
return ret;
input_report_switch(s3_wmi.input, SW_LID, lid_sw);
input_sync(s3_wmi.input);
return 0;
}
static int s3_wmi_hp_notify(struct acpi_device *adev, u32 value)
{
return s3_wmi_send_lid_state();
}
static acpi_status s3_wmi_attach_spi_device(acpi_handle handle,
u32 level,
void *data,
void **return_value)
{
struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
struct acpi_device **ts_adev = data;
if (!adev || strncmp(acpi_device_bid(adev), SPI_TS_OBJ_NAME,
strlen(SPI_TS_OBJ_NAME)))
return AE_OK;
if (*ts_adev) {
pr_err("duplicate entry %s\n", SPI_TS_OBJ_NAME);
return AE_OK;
}
*ts_adev = adev;
return AE_OK;
}
static int s3_wmi_check_platform_device(struct device *dev, void *data)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
struct acpi_device *ts_adev = NULL;
acpi_status status;
	/* ignore non-ACPI devices */
if (!adev)
return 0;
/* check for LID ACPI switch */
if (!strcmp(ACPI_BUTTON_HID_LID, acpi_device_hid(adev))) {
s3_wmi.pnp0c0d_adev = adev;
return 0;
}
	/* ignore non-SPI controllers */
if (strncmp(acpi_device_bid(adev), SPI_CTL_OBJ_NAME,
strlen(SPI_CTL_OBJ_NAME)))
return 0;
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, adev->handle, 1,
s3_wmi_attach_spi_device, NULL,
&ts_adev, NULL);
if (ACPI_FAILURE(status))
dev_warn(dev, "failed to enumerate SPI slaves\n");
if (!ts_adev)
return 0;
s3_wmi.touchscreen_adev = ts_adev;
return 0;
}
static int s3_wmi_create_and_register_input(struct platform_device *pdev)
{
struct input_dev *input;
int error;
input = devm_input_allocate_device(&pdev->dev);
if (!input)
return -ENOMEM;
input->name = "Lid Switch";
input->phys = "button/input0";
input->id.bustype = BUS_HOST;
input->id.product = 0x0005;
input_set_capability(input, EV_SW, SW_LID);
error = input_register_device(input);
if (error)
return error;
s3_wmi.input = input;
return 0;
}
static int __init s3_wmi_probe(struct platform_device *pdev)
{
int error;
if (!dmi_check_system(surface3_dmi_table))
return -ENODEV;
memset(&s3_wmi, 0, sizeof(s3_wmi));
bus_for_each_dev(&platform_bus_type, NULL, NULL,
s3_wmi_check_platform_device);
if (!s3_wmi.touchscreen_adev)
return -ENODEV;
acpi_bus_trim(s3_wmi.pnp0c0d_adev);
error = s3_wmi_create_and_register_input(pdev);
if (error)
goto restore_acpi_lid;
acpi_initialize_hp_context(s3_wmi.touchscreen_adev, &s3_wmi.hp,
s3_wmi_hp_notify, NULL);
s3_wmi_send_lid_state();
return 0;
restore_acpi_lid:
acpi_bus_scan(s3_wmi.pnp0c0d_adev->handle);
return error;
}
static int s3_wmi_remove(struct platform_device *device)
{
/* remove the hotplug context from the acpi device */
s3_wmi.touchscreen_adev->hp = NULL;
	/* reinstall the actual PNP0C0D LID default handler */
acpi_bus_scan(s3_wmi.pnp0c0d_adev->handle);
return 0;
}
static int __maybe_unused s3_wmi_resume(struct device *dev)
{
s3_wmi_send_lid_state();
return 0;
}
static SIMPLE_DEV_PM_OPS(s3_wmi_pm, NULL, s3_wmi_resume);
static struct platform_driver s3_wmi_driver = {
.driver = {
.name = "surface3-wmi",
.pm = &s3_wmi_pm,
},
.remove = s3_wmi_remove,
};
static int __init s3_wmi_init(void)
{
int error;
s3_wmi_pdev = platform_device_alloc("surface3-wmi", -1);
if (!s3_wmi_pdev)
return -ENOMEM;
error = platform_device_add(s3_wmi_pdev);
if (error)
goto err_device_put;
error = platform_driver_probe(&s3_wmi_driver, s3_wmi_probe);
if (error)
goto err_device_del;
pr_info("Surface 3 WMI Extras loaded\n");
return 0;
err_device_del:
platform_device_del(s3_wmi_pdev);
err_device_put:
platform_device_put(s3_wmi_pdev);
return error;
}
static void __exit s3_wmi_exit(void)
{
platform_device_unregister(s3_wmi_pdev);
platform_driver_unregister(&s3_wmi_driver);
}
module_init(s3_wmi_init);
module_exit(s3_wmi_exit);
| linux-master | drivers/platform/surface/surface3-wmi.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Surface System Aggregator Module (SSAM) client device registry.
*
* Registry for non-platform/non-ACPI SSAM client devices, i.e. devices that
* cannot be auto-detected. Provides device-hubs and performs instantiation
* for these devices.
*
* Copyright (C) 2020-2022 Maximilian Luz <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/types.h>
#include <linux/surface_aggregator/device.h>
/* -- Device registry. ------------------------------------------------------ */
/*
* SSAM device names follow the SSAM module alias, meaning they are prefixed
* with 'ssam:', followed by domain, category, target ID, instance ID, and
* function, each encoded as two-digit hexadecimal, separated by ':'. In other
* words, it follows the scheme
*
* ssam:dd:cc:tt:ii:ff
*
* Where, 'dd', 'cc', 'tt', 'ii', and 'ff' are the two-digit hexadecimal
* values mentioned above, respectively.
*/
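/*
 * For example, "ssam:01:15:02:01:00" below decodes to domain 0x01 (SSH),
 * target category 0x15 (HID), target ID 0x02, instance ID 0x01, and
 * function 0x00 -- i.e. the HID keyboard device.
 */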
/* Root node. */
static const struct software_node ssam_node_root = {
.name = "ssam_platform_hub",
};
/* KIP device hub (connects keyboard cover devices on Surface Pro 8). */
static const struct software_node ssam_node_hub_kip = {
.name = "ssam:00:00:01:0e:00",
.parent = &ssam_node_root,
};
/* Base device hub (devices attached to Surface Book 3 base). */
static const struct software_node ssam_node_hub_base = {
.name = "ssam:00:00:01:11:00",
.parent = &ssam_node_root,
};
/* AC adapter. */
static const struct software_node ssam_node_bat_ac = {
.name = "ssam:01:02:01:01:01",
.parent = &ssam_node_root,
};
/* Primary battery. */
static const struct software_node ssam_node_bat_main = {
.name = "ssam:01:02:01:01:00",
.parent = &ssam_node_root,
};
/* Secondary battery (Surface Book 3). */
static const struct software_node ssam_node_bat_sb3base = {
.name = "ssam:01:02:02:01:00",
.parent = &ssam_node_hub_base,
};
/* Platform profile / performance-mode device. */
static const struct software_node ssam_node_tmp_pprof = {
.name = "ssam:01:03:01:00:01",
.parent = &ssam_node_root,
};
/* Tablet-mode switch via KIP subsystem. */
static const struct software_node ssam_node_kip_tablet_switch = {
.name = "ssam:01:0e:01:00:01",
.parent = &ssam_node_root,
};
/* DTX / detachment-system device (Surface Book 3). */
static const struct software_node ssam_node_bas_dtx = {
.name = "ssam:01:11:01:00:00",
.parent = &ssam_node_root,
};
/* HID keyboard (SAM, TID=1). */
static const struct software_node ssam_node_hid_sam_keyboard = {
.name = "ssam:01:15:01:01:00",
.parent = &ssam_node_root,
};
/* HID pen stash (SAM, TID=1; pen taken / stashed away events). */
static const struct software_node ssam_node_hid_sam_penstash = {
.name = "ssam:01:15:01:02:00",
.parent = &ssam_node_root,
};
/* HID touchpad (SAM, TID=1). */
static const struct software_node ssam_node_hid_sam_touchpad = {
.name = "ssam:01:15:01:03:00",
.parent = &ssam_node_root,
};
/* HID device instance 6 (SAM, TID=1, HID sensor collection). */
static const struct software_node ssam_node_hid_sam_sensors = {
.name = "ssam:01:15:01:06:00",
.parent = &ssam_node_root,
};
/* HID device instance 7 (SAM, TID=1, UCM UCSI HID client). */
static const struct software_node ssam_node_hid_sam_ucm_ucsi = {
.name = "ssam:01:15:01:07:00",
.parent = &ssam_node_root,
};
/* HID system controls (SAM, TID=1). */
static const struct software_node ssam_node_hid_sam_sysctrl = {
.name = "ssam:01:15:01:08:00",
.parent = &ssam_node_root,
};
/* HID keyboard. */
static const struct software_node ssam_node_hid_main_keyboard = {
.name = "ssam:01:15:02:01:00",
.parent = &ssam_node_root,
};
/* HID touchpad. */
static const struct software_node ssam_node_hid_main_touchpad = {
.name = "ssam:01:15:02:03:00",
.parent = &ssam_node_root,
};
/* HID device instance 5 (unknown HID device). */
static const struct software_node ssam_node_hid_main_iid5 = {
.name = "ssam:01:15:02:05:00",
.parent = &ssam_node_root,
};
/* HID keyboard (base hub). */
static const struct software_node ssam_node_hid_base_keyboard = {
.name = "ssam:01:15:02:01:00",
.parent = &ssam_node_hub_base,
};
/* HID touchpad (base hub). */
static const struct software_node ssam_node_hid_base_touchpad = {
.name = "ssam:01:15:02:03:00",
.parent = &ssam_node_hub_base,
};
/* HID device instance 5 (unknown HID device, base hub). */
static const struct software_node ssam_node_hid_base_iid5 = {
.name = "ssam:01:15:02:05:00",
.parent = &ssam_node_hub_base,
};
/* HID device instance 6 (unknown HID device, base hub). */
static const struct software_node ssam_node_hid_base_iid6 = {
.name = "ssam:01:15:02:06:00",
.parent = &ssam_node_hub_base,
};
/* HID keyboard (KIP hub). */
static const struct software_node ssam_node_hid_kip_keyboard = {
.name = "ssam:01:15:02:01:00",
.parent = &ssam_node_hub_kip,
};
/* HID pen stash (KIP hub; pen taken / stashed away evens). */
static const struct software_node ssam_node_hid_kip_penstash = {
.name = "ssam:01:15:02:02:00",
.parent = &ssam_node_hub_kip,
};
/* HID touchpad (KIP hub). */
static const struct software_node ssam_node_hid_kip_touchpad = {
.name = "ssam:01:15:02:03:00",
.parent = &ssam_node_hub_kip,
};
/* HID device instance 5 (KIP hub, type-cover firmware update). */
static const struct software_node ssam_node_hid_kip_fwupd = {
.name = "ssam:01:15:02:05:00",
.parent = &ssam_node_hub_kip,
};
/* Tablet-mode switch via POS subsystem. */
static const struct software_node ssam_node_pos_tablet_switch = {
.name = "ssam:01:26:01:00:01",
.parent = &ssam_node_root,
};
/*
* Devices for 5th- and 6th-generations models:
* - Surface Book 2,
* - Surface Laptop 1 and 2,
* - Surface Pro 5 and 6.
*/
static const struct software_node *ssam_node_group_gen5[] = {
&ssam_node_root,
&ssam_node_tmp_pprof,
NULL,
};
/* Devices for Surface Book 3. */
static const struct software_node *ssam_node_group_sb3[] = {
&ssam_node_root,
&ssam_node_hub_base,
&ssam_node_bat_ac,
&ssam_node_bat_main,
&ssam_node_bat_sb3base,
&ssam_node_tmp_pprof,
&ssam_node_bas_dtx,
&ssam_node_hid_base_keyboard,
&ssam_node_hid_base_touchpad,
&ssam_node_hid_base_iid5,
&ssam_node_hid_base_iid6,
NULL,
};
/* Devices for Surface Laptop 3 and 4. */
static const struct software_node *ssam_node_group_sl3[] = {
&ssam_node_root,
&ssam_node_bat_ac,
&ssam_node_bat_main,
&ssam_node_tmp_pprof,
&ssam_node_hid_main_keyboard,
&ssam_node_hid_main_touchpad,
&ssam_node_hid_main_iid5,
NULL,
};
/* Devices for Surface Laptop 5. */
static const struct software_node *ssam_node_group_sl5[] = {
&ssam_node_root,
&ssam_node_bat_ac,
&ssam_node_bat_main,
&ssam_node_tmp_pprof,
&ssam_node_hid_main_keyboard,
&ssam_node_hid_main_touchpad,
&ssam_node_hid_main_iid5,
&ssam_node_hid_sam_ucm_ucsi,
NULL,
};
/* Devices for Surface Laptop Studio. */
static const struct software_node *ssam_node_group_sls[] = {
&ssam_node_root,
&ssam_node_bat_ac,
&ssam_node_bat_main,
&ssam_node_tmp_pprof,
&ssam_node_pos_tablet_switch,
&ssam_node_hid_sam_keyboard,
&ssam_node_hid_sam_penstash,
&ssam_node_hid_sam_touchpad,
&ssam_node_hid_sam_sensors,
&ssam_node_hid_sam_ucm_ucsi,
&ssam_node_hid_sam_sysctrl,
NULL,
};
/* Devices for Surface Laptop Go. */
static const struct software_node *ssam_node_group_slg1[] = {
&ssam_node_root,
&ssam_node_bat_ac,
&ssam_node_bat_main,
&ssam_node_tmp_pprof,
NULL,
};
/* Devices for Surface Pro 7 and Surface Pro 7+. */
static const struct software_node *ssam_node_group_sp7[] = {
&ssam_node_root,
&ssam_node_bat_ac,
&ssam_node_bat_main,
&ssam_node_tmp_pprof,
NULL,
};
/* Devices for Surface Pro 8. */
static const struct software_node *ssam_node_group_sp8[] = {
&ssam_node_root,
&ssam_node_hub_kip,
&ssam_node_bat_ac,
&ssam_node_bat_main,
&ssam_node_tmp_pprof,
&ssam_node_kip_tablet_switch,
&ssam_node_hid_kip_keyboard,
&ssam_node_hid_kip_penstash,
&ssam_node_hid_kip_touchpad,
&ssam_node_hid_kip_fwupd,
&ssam_node_hid_sam_sensors,
&ssam_node_hid_sam_ucm_ucsi,
NULL,
};
/* Devices for Surface Pro 9. */
static const struct software_node *ssam_node_group_sp9[] = {
&ssam_node_root,
&ssam_node_hub_kip,
&ssam_node_bat_ac,
&ssam_node_bat_main,
&ssam_node_tmp_pprof,
&ssam_node_pos_tablet_switch,
&ssam_node_hid_kip_keyboard,
&ssam_node_hid_kip_penstash,
&ssam_node_hid_kip_touchpad,
&ssam_node_hid_kip_fwupd,
&ssam_node_hid_sam_sensors,
&ssam_node_hid_sam_ucm_ucsi,
NULL,
};
/* -- SSAM platform/meta-hub driver. ---------------------------------------- */
static const struct acpi_device_id ssam_platform_hub_match[] = {
/* Surface Pro 4, 5, and 6 (OMBR < 0x10) */
{ "MSHW0081", (unsigned long)ssam_node_group_gen5 },
/* Surface Pro 6 (OMBR >= 0x10) */
{ "MSHW0111", (unsigned long)ssam_node_group_gen5 },
/* Surface Pro 7 */
{ "MSHW0116", (unsigned long)ssam_node_group_sp7 },
/* Surface Pro 7+ */
{ "MSHW0119", (unsigned long)ssam_node_group_sp7 },
/* Surface Pro 8 */
{ "MSHW0263", (unsigned long)ssam_node_group_sp8 },
/* Surface Pro 9 */
{ "MSHW0343", (unsigned long)ssam_node_group_sp9 },
/* Surface Book 2 */
{ "MSHW0107", (unsigned long)ssam_node_group_gen5 },
/* Surface Book 3 */
{ "MSHW0117", (unsigned long)ssam_node_group_sb3 },
/* Surface Laptop 1 */
{ "MSHW0086", (unsigned long)ssam_node_group_gen5 },
/* Surface Laptop 2 */
{ "MSHW0112", (unsigned long)ssam_node_group_gen5 },
/* Surface Laptop 3 (13", Intel) */
{ "MSHW0114", (unsigned long)ssam_node_group_sl3 },
/* Surface Laptop 3 (15", AMD) and 4 (15", AMD) */
{ "MSHW0110", (unsigned long)ssam_node_group_sl3 },
/* Surface Laptop 4 (13", Intel) */
{ "MSHW0250", (unsigned long)ssam_node_group_sl3 },
/* Surface Laptop 5 */
{ "MSHW0350", (unsigned long)ssam_node_group_sl5 },
/* Surface Laptop Go 1 */
{ "MSHW0118", (unsigned long)ssam_node_group_slg1 },
/* Surface Laptop Go 2 */
{ "MSHW0290", (unsigned long)ssam_node_group_slg1 },
/* Surface Laptop Studio */
{ "MSHW0123", (unsigned long)ssam_node_group_sls },
{ },
};
MODULE_DEVICE_TABLE(acpi, ssam_platform_hub_match);
static int ssam_platform_hub_probe(struct platform_device *pdev)
{
const struct software_node **nodes;
struct ssam_controller *ctrl;
struct fwnode_handle *root;
int status;
nodes = (const struct software_node **)acpi_device_get_match_data(&pdev->dev);
if (!nodes)
return -ENODEV;
/*
* As we're adding the SSAM client devices as children under this device
* and not the SSAM controller, we need to add a device link to the
* controller to ensure that we remove all of our devices before the
* controller is removed. This also guarantees proper ordering for
* suspend/resume of the devices on this hub.
*/
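/*
* Note: ssam_client_bind() returns -ENODEV when the controller is not
* (yet) available; translate that into -EPROBE_DEFER below so that
* probing is retried once the controller is up.
*/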
ctrl = ssam_client_bind(&pdev->dev);
if (IS_ERR(ctrl))
return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
status = software_node_register_node_group(nodes);
if (status)
return status;
root = software_node_fwnode(&ssam_node_root);
if (!root) {
software_node_unregister_node_group(nodes);
return -ENOENT;
}
set_secondary_fwnode(&pdev->dev, root);
status = __ssam_register_clients(&pdev->dev, ctrl, root);
if (status) {
set_secondary_fwnode(&pdev->dev, NULL);
software_node_unregister_node_group(nodes);
}
platform_set_drvdata(pdev, nodes);
return status;
}
static int ssam_platform_hub_remove(struct platform_device *pdev)
{
const struct software_node **nodes = platform_get_drvdata(pdev);
ssam_remove_clients(&pdev->dev);
set_secondary_fwnode(&pdev->dev, NULL);
software_node_unregister_node_group(nodes);
return 0;
}
static struct platform_driver ssam_platform_hub_driver = {
.probe = ssam_platform_hub_probe,
.remove = ssam_platform_hub_remove,
.driver = {
.name = "surface_aggregator_platform_hub",
.acpi_match_table = ssam_platform_hub_match,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
module_platform_driver(ssam_platform_hub_driver);
MODULE_AUTHOR("Maximilian Luz <[email protected]>");
MODULE_DESCRIPTION("Device-registry for Surface System Aggregator Module");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/surface/surface_aggregator_registry.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Support for the power IC on the Surface 3 tablet.
*
* (C) Copyright 2016-2018 Red Hat, Inc
* (C) Copyright 2016-2018 Benjamin Tissoires <[email protected]>
* (C) Copyright 2016 Stephen Just <[email protected]>
*
* This driver has been reverse-engineered by parsing the DSDT of the Surface 3
* and looking at the registers of the chips.
*
* The DSDT allowed us to find out that:
* - the driver is required for the ACPI BAT0 device to communicate to the chip
* through an operation region.
* - the various defines for the operation region functions to communicate with
* this driver
* - the DSM 3f99e367-6220-4955-8b0f-06ef2ae79412 allows triggering ACPI
* events to BAT0 (the code is all available in the DSDT).
*
* Further findings regarding the 2 chips declared in the MSHW0011 are:
* - there are 2 chips declared:
* . 0x22 seems to control the ADP1 line status (and probably the charger)
* . 0x55 controls the battery directly
* - the battery chip uses an SMBus protocol (using plain SMBus allows
* non-destructive commands):
* . the commands/registers used are in the range 0x00..0x7F
* . if bit 8 (0x80) is set in the SMBus command, the returned value is the
* same as when it is not set; there is a high chance this bit is the
* read/write bit
* . the semantics of the various registers have been deduced by observing
* the register dumps.
*/
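/*
* For reference, registers can be probed non-destructively from user space
* via the SMBus commands described above. A hypothetical example using
* i2c-tools (the bus number and availability of i2c-dev are assumptions):
*
*   i2cget -y 1 0x55 0x0c w   # BAT0: remaining capacity (word read)
*   i2cget -y 1 0x22 0x04 b   # ADP1: PSR / line status (byte read)
*/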
#include <linux/acpi.h>
#include <linux/bits.h>
#include <linux/freezer.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uuid.h>
#include <asm/unaligned.h>
#define SURFACE_3_POLL_INTERVAL (2 * HZ)
#define SURFACE_3_STRLEN 10
struct mshw0011_data {
struct i2c_client *adp1;
struct i2c_client *bat0;
unsigned short notify_mask;
struct task_struct *poll_task;
bool kthread_running;
bool charging;
bool bat_charging;
u8 trip_point;
s32 full_capacity;
};
struct mshw0011_handler_data {
struct acpi_connection_info info;
struct i2c_client *client;
};
struct bix {
u32 revision;
u32 power_unit;
u32 design_capacity;
u32 last_full_charge_capacity;
u32 battery_technology;
u32 design_voltage;
u32 design_capacity_of_warning;
u32 design_capacity_of_low;
u32 cycle_count;
u32 measurement_accuracy;
u32 max_sampling_time;
u32 min_sampling_time;
u32 max_average_interval;
u32 min_average_interval;
u32 battery_capacity_granularity_1;
u32 battery_capacity_granularity_2;
char model[SURFACE_3_STRLEN];
char serial[SURFACE_3_STRLEN];
char type[SURFACE_3_STRLEN];
char OEM[SURFACE_3_STRLEN];
} __packed;
struct bst {
u32 battery_state;
s32 battery_present_rate;
u32 battery_remaining_capacity;
u32 battery_present_voltage;
} __packed;
struct gsb_command {
u8 arg0;
u8 arg1;
u8 arg2;
} __packed;
struct gsb_buffer {
u8 status;
u8 len;
u8 ret;
union {
struct gsb_command cmd;
struct bst bst;
struct bix bix;
} __packed;
} __packed;
#define ACPI_BATTERY_STATE_DISCHARGING BIT(0)
#define ACPI_BATTERY_STATE_CHARGING BIT(1)
#define ACPI_BATTERY_STATE_CRITICAL BIT(2)
#define MSHW0011_CMD_DEST_BAT0 0x01
#define MSHW0011_CMD_DEST_ADP1 0x03
#define MSHW0011_CMD_BAT0_STA 0x01
#define MSHW0011_CMD_BAT0_BIX 0x02
#define MSHW0011_CMD_BAT0_BCT 0x03
#define MSHW0011_CMD_BAT0_BTM 0x04
#define MSHW0011_CMD_BAT0_BST 0x05
#define MSHW0011_CMD_BAT0_BTP 0x06
#define MSHW0011_CMD_ADP1_PSR 0x07
#define MSHW0011_CMD_BAT0_PSOC 0x09
#define MSHW0011_CMD_BAT0_PMAX 0x0a
#define MSHW0011_CMD_BAT0_PSRC 0x0b
#define MSHW0011_CMD_BAT0_CHGI 0x0c
#define MSHW0011_CMD_BAT0_ARTG 0x0d
#define MSHW0011_NOTIFY_GET_VERSION 0x00
#define MSHW0011_NOTIFY_ADP1 0x01
#define MSHW0011_NOTIFY_BAT0_BST 0x02
#define MSHW0011_NOTIFY_BAT0_BIX 0x05
#define MSHW0011_ADP1_REG_PSR 0x04
#define MSHW0011_BAT0_REG_CAPACITY 0x0c
#define MSHW0011_BAT0_REG_FULL_CHG_CAPACITY 0x0e
#define MSHW0011_BAT0_REG_DESIGN_CAPACITY 0x40
#define MSHW0011_BAT0_REG_VOLTAGE 0x08
#define MSHW0011_BAT0_REG_RATE 0x14
#define MSHW0011_BAT0_REG_OEM 0x45
#define MSHW0011_BAT0_REG_TYPE 0x4e
#define MSHW0011_BAT0_REG_SERIAL_NO 0x56
#define MSHW0011_BAT0_REG_CYCLE_CNT 0x6e
#define MSHW0011_EV_2_5_MASK GENMASK(8, 0)
/* 3f99e367-6220-4955-8b0f-06ef2ae79412 */
static const guid_t mshw0011_guid =
GUID_INIT(0x3F99E367, 0x6220, 0x4955, 0x8B, 0x0F, 0x06, 0xEF,
0x2A, 0xE7, 0x94, 0x12);
static int
mshw0011_notify(struct mshw0011_data *cdata, u8 arg1, u8 arg2,
unsigned int *ret_value)
{
union acpi_object *obj;
acpi_handle handle;
unsigned int i;
handle = ACPI_HANDLE(&cdata->adp1->dev);
if (!handle)
return -ENODEV;
obj = acpi_evaluate_dsm_typed(handle, &mshw0011_guid, arg1, arg2, NULL,
ACPI_TYPE_BUFFER);
if (!obj) {
dev_err(&cdata->adp1->dev, "device _DSM execution failed\n");
return -ENODEV;
}
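/* Assemble the little-endian result from the returned buffer bytes. */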
*ret_value = 0;
for (i = 0; i < obj->buffer.length; i++)
*ret_value |= obj->buffer.pointer[i] << (i * 8);
ACPI_FREE(obj);
return 0;
}
static const struct bix default_bix = {
.revision = 0x00,
.power_unit = 0x01,
.design_capacity = 0x1dca,
.last_full_charge_capacity = 0x1dca,
.battery_technology = 0x01,
.design_voltage = 0x10df,
.design_capacity_of_warning = 0x8f,
.design_capacity_of_low = 0x47,
.cycle_count = 0xffffffff,
.measurement_accuracy = 0x00015f90,
.max_sampling_time = 0x03e8,
.min_sampling_time = 0x03e8,
.max_average_interval = 0x03e8,
.min_average_interval = 0x03e8,
.battery_capacity_granularity_1 = 0x45,
.battery_capacity_granularity_2 = 0x11,
.model = "P11G8M",
.serial = "",
.type = "LION",
.OEM = "",
};
static int mshw0011_bix(struct mshw0011_data *cdata, struct bix *bix)
{
struct i2c_client *client = cdata->bat0;
char buf[SURFACE_3_STRLEN];
int ret;
*bix = default_bix;
/* get design capacity */
ret = i2c_smbus_read_word_data(client,
MSHW0011_BAT0_REG_DESIGN_CAPACITY);
if (ret < 0) {
dev_err(&client->dev, "Error reading design capacity: %d\n",
ret);
return ret;
}
bix->design_capacity = ret;
/* get last full charge capacity */
ret = i2c_smbus_read_word_data(client,
MSHW0011_BAT0_REG_FULL_CHG_CAPACITY);
if (ret < 0) {
dev_err(&client->dev,
"Error reading last full charge capacity: %d\n", ret);
return ret;
}
bix->last_full_charge_capacity = ret;
/*
* Get the serial number. On some devices (with an unofficial replacement
* battery?), reading any of the serial number range addresses gets
* NACKed; in that case, just leave the serial number empty.
*/
ret = i2c_smbus_read_i2c_block_data(client, MSHW0011_BAT0_REG_SERIAL_NO,
sizeof(buf), buf);
if (ret == -EREMOTEIO) {
/* no serial number available */
} else if (ret != sizeof(buf)) {
dev_err(&client->dev, "Error reading serial no: %d\n", ret);
return ret;
} else {
snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf);
}
/* get cycle count */
ret = i2c_smbus_read_word_data(client, MSHW0011_BAT0_REG_CYCLE_CNT);
if (ret < 0) {
dev_err(&client->dev, "Error reading cycle count: %d\n", ret);
return ret;
}
bix->cycle_count = ret;
/* get OEM name */
ret = i2c_smbus_read_i2c_block_data(client, MSHW0011_BAT0_REG_OEM,
4, buf);
if (ret != 4) {
dev_err(&client->dev, "Error reading cycle count: %d\n", ret);
return ret;
}
snprintf(bix->OEM, ARRAY_SIZE(bix->OEM), "%3pE", buf);
return 0;
}
static int mshw0011_bst(struct mshw0011_data *cdata, struct bst *bst)
{
struct i2c_client *client = cdata->bat0;
int rate, capacity, voltage, state;
s16 tmp;
rate = i2c_smbus_read_word_data(client, MSHW0011_BAT0_REG_RATE);
if (rate < 0)
return rate;
capacity = i2c_smbus_read_word_data(client, MSHW0011_BAT0_REG_CAPACITY);
if (capacity < 0)
return capacity;
voltage = i2c_smbus_read_word_data(client, MSHW0011_BAT0_REG_VOLTAGE);
if (voltage < 0)
return voltage;
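/*
* The rate register holds a signed 16-bit value: positive while charging,
* negative while discharging (as deduced from register dumps; see the
* header comment).
*/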
tmp = rate;
bst->battery_present_rate = abs((s32)tmp);
state = 0;
if ((s32) tmp > 0)
state |= ACPI_BATTERY_STATE_CHARGING;
else if ((s32) tmp < 0)
state |= ACPI_BATTERY_STATE_DISCHARGING;
bst->battery_state = state;
bst->battery_remaining_capacity = capacity;
bst->battery_present_voltage = voltage;
return 0;
}
static int mshw0011_adp_psr(struct mshw0011_data *cdata)
{
return i2c_smbus_read_byte_data(cdata->adp1, MSHW0011_ADP1_REG_PSR);
}
static int mshw0011_isr(struct mshw0011_data *cdata)
{
struct bst bst;
struct bix bix;
int ret;
bool status, bat_status;
ret = mshw0011_adp_psr(cdata);
if (ret < 0)
return ret;
status = ret;
if (status != cdata->charging)
mshw0011_notify(cdata, cdata->notify_mask,
MSHW0011_NOTIFY_ADP1, &ret);
cdata->charging = status;
ret = mshw0011_bst(cdata, &bst);
if (ret < 0)
return ret;
bat_status = bst.battery_state;
if (bat_status != cdata->bat_charging)
mshw0011_notify(cdata, cdata->notify_mask,
MSHW0011_NOTIFY_BAT0_BST, &ret);
cdata->bat_charging = bat_status;
ret = mshw0011_bix(cdata, &bix);
if (ret < 0)
return ret;
if (bix.last_full_charge_capacity != cdata->full_capacity)
mshw0011_notify(cdata, cdata->notify_mask,
MSHW0011_NOTIFY_BAT0_BIX, &ret);
cdata->full_capacity = bix.last_full_charge_capacity;
return 0;
}
static int mshw0011_poll_task(void *data)
{
struct mshw0011_data *cdata = data;
int ret = 0;
cdata->kthread_running = true;
set_freezable();
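/*
* Poll the EC every SURFACE_3_POLL_INTERVAL and let mshw0011_isr() emit
* ACPI notifications whenever the charger or battery state changes.
*/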
while (!kthread_should_stop()) {
schedule_timeout_interruptible(SURFACE_3_POLL_INTERVAL);
try_to_freeze();
ret = mshw0011_isr(data);
if (ret)
break;
}
cdata->kthread_running = false;
return ret;
}
static acpi_status
mshw0011_space_handler(u32 function, acpi_physical_address command,
u32 bits, u64 *value64,
void *handler_context, void *region_context)
{
struct gsb_buffer *gsb = (struct gsb_buffer *)value64;
struct mshw0011_handler_data *data = handler_context;
struct acpi_connection_info *info = &data->info;
struct acpi_resource_i2c_serialbus *sb;
struct i2c_client *client = data->client;
struct mshw0011_data *cdata = i2c_get_clientdata(client);
struct acpi_resource *ares;
u32 accessor_type = function >> 16;
acpi_status ret;
int status = 1;
ret = acpi_buffer_to_resource(info->connection, info->length, &ares);
if (ACPI_FAILURE(ret))
return ret;
if (!value64 || !i2c_acpi_get_i2c_resource(ares, &sb)) {
ret = AE_BAD_PARAMETER;
goto err;
}
if (accessor_type != ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS) {
ret = AE_BAD_PARAMETER;
goto err;
}
if (gsb->cmd.arg0 == MSHW0011_CMD_DEST_ADP1 &&
gsb->cmd.arg1 == MSHW0011_CMD_ADP1_PSR) {
status = mshw0011_adp_psr(cdata);
if (status >= 0) {
ret = AE_OK;
goto out;
} else {
ret = AE_ERROR;
goto err;
}
}
if (gsb->cmd.arg0 != MSHW0011_CMD_DEST_BAT0) {
ret = AE_BAD_PARAMETER;
goto err;
}
switch (gsb->cmd.arg1) {
case MSHW0011_CMD_BAT0_STA:
break;
case MSHW0011_CMD_BAT0_BIX:
ret = mshw0011_bix(cdata, &gsb->bix);
break;
case MSHW0011_CMD_BAT0_BTP:
cdata->trip_point = gsb->cmd.arg2;
break;
case MSHW0011_CMD_BAT0_BST:
ret = mshw0011_bst(cdata, &gsb->bst);
break;
default:
dev_info(&cdata->bat0->dev, "command(0x%02x) is not supported.\n", gsb->cmd.arg1);
ret = AE_BAD_PARAMETER;
goto err;
}
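/* Report the result in the GSB buffer: 'ret' carries the last command value, a zero 'status' signals success. */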
out:
gsb->ret = status;
gsb->status = 0;
err:
ACPI_FREE(ares);
return ret;
}
static int mshw0011_install_space_handler(struct i2c_client *client)
{
struct acpi_device *adev;
struct mshw0011_handler_data *data;
acpi_status status;
adev = ACPI_COMPANION(&client->dev);
if (!adev)
return -ENODEV;
data = kzalloc(sizeof(struct mshw0011_handler_data),
GFP_KERNEL);
if (!data)
return -ENOMEM;
data->client = client;
status = acpi_bus_attach_private_data(adev->handle, (void *)data);
if (ACPI_FAILURE(status)) {
kfree(data);
return -ENOMEM;
}
status = acpi_install_address_space_handler(adev->handle,
ACPI_ADR_SPACE_GSBUS,
&mshw0011_space_handler,
NULL,
data);
if (ACPI_FAILURE(status)) {
dev_err(&client->dev, "Error installing i2c space handler\n");
acpi_bus_detach_private_data(adev->handle);
kfree(data);
return -ENOMEM;
}
acpi_dev_clear_dependencies(adev);
return 0;
}
static void mshw0011_remove_space_handler(struct i2c_client *client)
{
struct mshw0011_handler_data *data;
acpi_handle handle;
acpi_status status;
handle = ACPI_HANDLE(&client->dev);
if (!handle)
return;
acpi_remove_address_space_handler(handle,
ACPI_ADR_SPACE_GSBUS,
&mshw0011_space_handler);
status = acpi_bus_get_private_data(handle, (void **)&data);
if (ACPI_SUCCESS(status))
kfree(data);
acpi_bus_detach_private_data(handle);
}
static int mshw0011_probe(struct i2c_client *client)
{
struct i2c_board_info board_info;
struct device *dev = &client->dev;
struct i2c_client *bat0;
struct mshw0011_data *data;
int error, mask;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->adp1 = client;
i2c_set_clientdata(client, data);
memset(&board_info, 0, sizeof(board_info));
strscpy(board_info.type, "MSHW0011-bat0", I2C_NAME_SIZE);
bat0 = i2c_acpi_new_device(dev, 1, &board_info);
if (IS_ERR(bat0))
return PTR_ERR(bat0);
data->bat0 = bat0;
i2c_set_clientdata(bat0, data);
error = mshw0011_notify(data, 1, MSHW0011_NOTIFY_GET_VERSION, &mask);
if (error)
goto out_err;
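/*
* The returned mask decides the revision used for subsequent notify
* _DSM calls: 1 if the EC reports the 2.5 event mask, 0 otherwise.
*/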
data->notify_mask = mask == MSHW0011_EV_2_5_MASK;
data->poll_task = kthread_run(mshw0011_poll_task, data, "mshw0011_adp");
if (IS_ERR(data->poll_task)) {
error = PTR_ERR(data->poll_task);
dev_err(&client->dev, "Unable to run kthread err %d\n", error);
goto out_err;
}
error = mshw0011_install_space_handler(client);
if (error)
goto out_err;
return 0;
out_err:
if (data->kthread_running)
kthread_stop(data->poll_task);
i2c_unregister_device(data->bat0);
return error;
}
static void mshw0011_remove(struct i2c_client *client)
{
struct mshw0011_data *cdata = i2c_get_clientdata(client);
mshw0011_remove_space_handler(client);
if (cdata->kthread_running)
kthread_stop(cdata->poll_task);
i2c_unregister_device(cdata->bat0);
}
static const struct acpi_device_id mshw0011_acpi_match[] = {
{ "MSHW0011", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, mshw0011_acpi_match);
static struct i2c_driver mshw0011_driver = {
.probe = mshw0011_probe,
.remove = mshw0011_remove,
.driver = {
.name = "mshw0011",
.acpi_match_table = mshw0011_acpi_match,
},
};
module_i2c_driver(mshw0011_driver);
MODULE_AUTHOR("Benjamin Tissoires <[email protected]>");
MODULE_DESCRIPTION("mshw0011 driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/platform/surface/surface3_power.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* power/home/volume button support for
* Microsoft Surface Pro 3/4 tablet.
*
* Copyright (c) 2015 Intel Corporation.
* All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/input.h>
#include <linux/acpi.h>
#include <acpi/button.h>
#define SURFACE_PRO3_BUTTON_HID "MSHW0028"
#define SURFACE_PRO4_BUTTON_HID "MSHW0040"
#define SURFACE_BUTTON_OBJ_NAME "VGBI"
#define SURFACE_BUTTON_DEVICE_NAME "Surface Pro 3/4 Buttons"
#define MSHW0040_DSM_REVISION 0x01
#define MSHW0040_DSM_GET_OMPR 0x02 // get OEM Platform Revision
static const guid_t MSHW0040_DSM_UUID =
GUID_INIT(0x6fd05c69, 0xcde3, 0x49f4, 0x95, 0xed, 0xab, 0x16, 0x65,
0x49, 0x80, 0x35);
#define SURFACE_BUTTON_NOTIFY_TABLET_MODE 0xc8
#define SURFACE_BUTTON_NOTIFY_PRESS_POWER 0xc6
#define SURFACE_BUTTON_NOTIFY_RELEASE_POWER 0xc7
#define SURFACE_BUTTON_NOTIFY_PRESS_HOME 0xc4
#define SURFACE_BUTTON_NOTIFY_RELEASE_HOME 0xc5
#define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_UP 0xc0
#define SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_UP 0xc1
#define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_DOWN 0xc2
#define SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN 0xc3
MODULE_AUTHOR("Chen Yu");
MODULE_DESCRIPTION("Surface Pro3 Button Driver");
MODULE_LICENSE("GPL v2");
/*
* Support for the power, home, and volume buttons is supposed to be
* covered by drivers/input/misc/soc_button_array.c, which is implemented
* according to the "Windows ACPI Design Guide for SoC Platforms".
* However, the Surface Pro 3 does not obey the spec; instead, it uses the
* VGBI (MSHW0028) device to dispatch the events.
* We choose acpi_driver rather than platform_driver/i2c_driver because,
* although VGBI has an i2c resource connected to an i2c controller, it is
* not embedded in any i2c controller's scope; thus neither a
* platform_device will be created nor an i2c_client enumerated, and we
* have to use acpi_driver.
*/
static const struct acpi_device_id surface_button_device_ids[] = {
{SURFACE_PRO3_BUTTON_HID, 0},
{SURFACE_PRO4_BUTTON_HID, 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, surface_button_device_ids);
struct surface_button {
unsigned int type;
struct input_dev *input;
char phys[32]; /* for input device */
unsigned long pushed;
bool suspended;
};
static void surface_button_notify(struct acpi_device *device, u32 event)
{
struct surface_button *button = acpi_driver_data(device);
struct input_dev *input;
int key_code = KEY_RESERVED;
bool pressed = false;
switch (event) {
/* Power button press,release handle */
case SURFACE_BUTTON_NOTIFY_PRESS_POWER:
pressed = true;
fallthrough;
case SURFACE_BUTTON_NOTIFY_RELEASE_POWER:
key_code = KEY_POWER;
break;
/* Home button press,release handle */
case SURFACE_BUTTON_NOTIFY_PRESS_HOME:
pressed = true;
fallthrough;
case SURFACE_BUTTON_NOTIFY_RELEASE_HOME:
key_code = KEY_LEFTMETA;
break;
/* Volume up button press,release handle */
case SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_UP:
pressed = true;
fallthrough;
case SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_UP:
key_code = KEY_VOLUMEUP;
break;
/* Volume down button press,release handle */
case SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_DOWN:
pressed = true;
fallthrough;
case SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN:
key_code = KEY_VOLUMEDOWN;
break;
case SURFACE_BUTTON_NOTIFY_TABLET_MODE:
dev_warn_once(&device->dev, "Tablet mode is not supported\n");
break;
default:
dev_info_ratelimited(&device->dev,
"Unsupported event [0x%x]\n", event);
break;
}
input = button->input;
if (key_code == KEY_RESERVED)
return;
if (pressed)
pm_wakeup_dev_event(&device->dev, 0, button->suspended);
if (button->suspended)
return;
input_report_key(input, key_code, pressed ? 1 : 0);
input_sync(input);
}
#ifdef CONFIG_PM_SLEEP
static int surface_button_suspend(struct device *dev)
{
struct acpi_device *device = to_acpi_device(dev);
struct surface_button *button = acpi_driver_data(device);
button->suspended = true;
return 0;
}
static int surface_button_resume(struct device *dev)
{
struct acpi_device *device = to_acpi_device(dev);
struct surface_button *button = acpi_driver_data(device);
button->suspended = false;
return 0;
}
#endif
/*
* Surface Pro 4 and Surface Book 2 / Surface Pro 2017 use the same device
* ID (MSHW0040) for the power/volume buttons. Make sure this is the right
* device by checking for the _DSM method and OEM Platform Revision.
*
* Returns true if the driver should bind to this device, i.e. the device is
* either MSHW0028 (Pro 3) or MSHW0040 on a Pro 4 or Book 1.
*/
static bool surface_button_check_MSHW0040(struct acpi_device *dev)
{
acpi_handle handle = dev->handle;
union acpi_object *result;
u64 oem_platform_rev = 0; // valid revisions are nonzero
// get OEM platform revision
result = acpi_evaluate_dsm_typed(handle, &MSHW0040_DSM_UUID,
MSHW0040_DSM_REVISION,
MSHW0040_DSM_GET_OMPR,
NULL, ACPI_TYPE_INTEGER);
/*
* If evaluating the _DSM fails, the method is not present. This means
* that we have either MSHW0028, or MSHW0040 on a Pro 4 or Book 1, so we
* should use this driver. We keep the revision at 0 to indicate that it
* is unavailable.
*/
if (result) {
oem_platform_rev = result->integer.value;
ACPI_FREE(result);
}
dev_dbg(&dev->dev, "OEM Platform Revision %llu\n", oem_platform_rev);
return oem_platform_rev == 0;
}
static int surface_button_add(struct acpi_device *device)
{
struct surface_button *button;
struct input_dev *input;
const char *hid = acpi_device_hid(device);
char *name;
int error;
if (strncmp(acpi_device_bid(device), SURFACE_BUTTON_OBJ_NAME,
strlen(SURFACE_BUTTON_OBJ_NAME)))
return -ENODEV;
if (!surface_button_check_MSHW0040(device))
return -ENODEV;
button = kzalloc(sizeof(struct surface_button), GFP_KERNEL);
if (!button)
return -ENOMEM;
device->driver_data = button;
button->input = input = input_allocate_device();
if (!input) {
error = -ENOMEM;
goto err_free_button;
}
name = acpi_device_name(device);
strcpy(name, SURFACE_BUTTON_DEVICE_NAME);
snprintf(button->phys, sizeof(button->phys), "%s/buttons", hid);
input->name = name;
input->phys = button->phys;
input->id.bustype = BUS_HOST;
input->dev.parent = &device->dev;
input_set_capability(input, EV_KEY, KEY_POWER);
input_set_capability(input, EV_KEY, KEY_LEFTMETA);
input_set_capability(input, EV_KEY, KEY_VOLUMEUP);
input_set_capability(input, EV_KEY, KEY_VOLUMEDOWN);
error = input_register_device(input);
if (error)
goto err_free_input;
device_init_wakeup(&device->dev, true);
dev_info(&device->dev,
"%s [%s]\n", name, acpi_device_bid(device));
return 0;
err_free_input:
input_free_device(input);
err_free_button:
kfree(button);
return error;
}
static void surface_button_remove(struct acpi_device *device)
{
struct surface_button *button = acpi_driver_data(device);
input_unregister_device(button->input);
kfree(button);
}
static SIMPLE_DEV_PM_OPS(surface_button_pm,
surface_button_suspend, surface_button_resume);
static struct acpi_driver surface_button_driver = {
.name = "surface_pro3_button",
.class = "SurfacePro3",
.ids = surface_button_device_ids,
.ops = {
.add = surface_button_add,
.remove = surface_button_remove,
.notify = surface_button_notify,
},
.drv.pm = &surface_button_pm,
};
module_acpi_driver(surface_button_driver);
| linux-master | drivers/platform/surface/surfacepro3_button.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Provides user-space access to the SSAM EC via the /dev/surface/aggregator
* misc device. Intended for debugging and development.
*
* Copyright (C) 2020-2022 Maximilian Luz <[email protected]>
*/
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/surface_aggregator/cdev.h>
#include <linux/surface_aggregator/controller.h>
#include <linux/surface_aggregator/serial_hub.h>
#define SSAM_CDEV_DEVICE_NAME "surface_aggregator_cdev"
/* -- Main structures. ------------------------------------------------------ */
enum ssam_cdev_device_state {
SSAM_CDEV_DEVICE_SHUTDOWN_BIT = BIT(0),
};
struct ssam_cdev {
struct kref kref;
struct rw_semaphore lock;
struct device *dev;
struct ssam_controller *ctrl;
struct miscdevice mdev;
unsigned long flags;
struct rw_semaphore client_lock; /* Guards client list. */
struct list_head client_list;
};
struct ssam_cdev_client;
struct ssam_cdev_notifier {
struct ssam_cdev_client *client;
struct ssam_event_notifier nf;
};
struct ssam_cdev_client {
struct ssam_cdev *cdev;
struct list_head node;
struct mutex notifier_lock; /* Guards notifier access for registration */
struct ssam_cdev_notifier *notifier[SSH_NUM_EVENTS];
struct mutex read_lock; /* Guards FIFO buffer read access */
struct mutex write_lock; /* Guards FIFO buffer write access */
DECLARE_KFIFO(buffer, u8, 4096);
wait_queue_head_t waitq;
struct fasync_struct *fasync;
};
static void __ssam_cdev_release(struct kref *kref)
{
kfree(container_of(kref, struct ssam_cdev, kref));
}
static struct ssam_cdev *ssam_cdev_get(struct ssam_cdev *cdev)
{
if (cdev)
kref_get(&cdev->kref);
return cdev;
}
static void ssam_cdev_put(struct ssam_cdev *cdev)
{
if (cdev)
kref_put(&cdev->kref, __ssam_cdev_release);
}
/* -- Notifier handling. ---------------------------------------------------- */
static u32 ssam_cdev_notifier(struct ssam_event_notifier *nf, const struct ssam_event *in)
{
struct ssam_cdev_notifier *cdev_nf = container_of(nf, struct ssam_cdev_notifier, nf);
struct ssam_cdev_client *client = cdev_nf->client;
struct ssam_cdev_event event;
size_t n = struct_size(&event, data, in->length);
/* Translate event. */
event.target_category = in->target_category;
event.target_id = in->target_id;
event.command_id = in->command_id;
event.instance_id = in->instance_id;
event.length = in->length;
mutex_lock(&client->write_lock);
/* Make sure we have enough space. */
if (kfifo_avail(&client->buffer) < n) {
dev_warn(client->cdev->dev,
"buffer full, dropping event (tc: %#04x, tid: %#04x, cid: %#04x, iid: %#04x)\n",
in->target_category, in->target_id, in->command_id, in->instance_id);
mutex_unlock(&client->write_lock);
return 0;
}
/* Copy event header and payload. */
kfifo_in(&client->buffer, (const u8 *)&event, struct_size(&event, data, 0));
kfifo_in(&client->buffer, &in->data[0], in->length);
mutex_unlock(&client->write_lock);
/* Notify waiting readers. */
kill_fasync(&client->fasync, SIGIO, POLL_IN);
wake_up_interruptible(&client->waitq);
/*
* Don't mark events as handled, this is the job of a proper driver and
* not the debugging interface.
*/
return 0;
}
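/*
* Note: Each event is streamed to user space as a struct ssam_cdev_event
* header immediately followed by event.length payload bytes. A minimal
* (hypothetical) reader sketch, assuming a notifier has already been
* registered via SSAM_CDEV_NOTIF_REGISTER and ignoring short reads:
*
*   struct ssam_cdev_event ev;
*   read(fd, &ev, sizeof(ev));    // fixed-size event header
*   read(fd, buf, ev.length);     // variable-size event payload
*/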
static int ssam_cdev_notifier_register(struct ssam_cdev_client *client, u8 tc, int priority)
{
const u16 rqid = ssh_tc_to_rqid(tc);
const u16 event = ssh_rqid_to_event(rqid);
struct ssam_cdev_notifier *nf;
int status;
lockdep_assert_held_read(&client->cdev->lock);
/* Validate notifier target category. */
if (!ssh_rqid_is_event(rqid))
return -EINVAL;
mutex_lock(&client->notifier_lock);
/* Check if the notifier has already been registered. */
if (client->notifier[event]) {
mutex_unlock(&client->notifier_lock);
return -EEXIST;
}
/* Allocate new notifier. */
nf = kzalloc(sizeof(*nf), GFP_KERNEL);
if (!nf) {
mutex_unlock(&client->notifier_lock);
return -ENOMEM;
}
/*
* Create a dummy notifier with the minimal required fields for
* observer registration. Note that we can skip fully specifying event
* and registry here as we do not need any matching and use silent
* registration, which does not enable the corresponding event.
*/
nf->client = client;
nf->nf.base.fn = ssam_cdev_notifier;
nf->nf.base.priority = priority;
nf->nf.event.id.target_category = tc;
nf->nf.event.mask = 0; /* Do not do any matching. */
nf->nf.flags = SSAM_EVENT_NOTIFIER_OBSERVER;
/* Register notifier. */
status = ssam_notifier_register(client->cdev->ctrl, &nf->nf);
if (status)
kfree(nf);
else
client->notifier[event] = nf;
mutex_unlock(&client->notifier_lock);
return status;
}
static int ssam_cdev_notifier_unregister(struct ssam_cdev_client *client, u8 tc)
{
const u16 rqid = ssh_tc_to_rqid(tc);
const u16 event = ssh_rqid_to_event(rqid);
int status;
lockdep_assert_held_read(&client->cdev->lock);
/* Validate notifier target category. */
if (!ssh_rqid_is_event(rqid))
return -EINVAL;
mutex_lock(&client->notifier_lock);
/* Check if the notifier is currently registered. */
if (!client->notifier[event]) {
mutex_unlock(&client->notifier_lock);
return -ENOENT;
}
/* Unregister and free notifier. */
status = ssam_notifier_unregister(client->cdev->ctrl, &client->notifier[event]->nf);
kfree(client->notifier[event]);
client->notifier[event] = NULL;
mutex_unlock(&client->notifier_lock);
return status;
}
static void ssam_cdev_notifier_unregister_all(struct ssam_cdev_client *client)
{
int i;
down_read(&client->cdev->lock);
/*
* This function may be used during shutdown, thus we need to test for
* cdev->ctrl instead of the SSAM_CDEV_DEVICE_SHUTDOWN_BIT bit.
*/
if (client->cdev->ctrl) {
for (i = 0; i < SSH_NUM_EVENTS; i++)
ssam_cdev_notifier_unregister(client, i + 1);
} else {
int count = 0;
/*
* Device has been shut down. Any notifier remaining is a bug,
* so warn about that as this would otherwise hardly be
* noticeable. Nevertheless, free them as well.
*/
mutex_lock(&client->notifier_lock);
for (i = 0; i < SSH_NUM_EVENTS; i++) {
count += !!(client->notifier[i]);
kfree(client->notifier[i]);
client->notifier[i] = NULL;
}
mutex_unlock(&client->notifier_lock);
WARN_ON(count > 0);
}
up_read(&client->cdev->lock);
}
/* -- IOCTL functions. ------------------------------------------------------ */
static long ssam_cdev_request(struct ssam_cdev_client *client, struct ssam_cdev_request __user *r)
{
struct ssam_cdev_request rqst;
struct ssam_request spec = {};
struct ssam_response rsp = {};
const void __user *plddata;
void __user *rspdata;
int status = 0, ret = 0, tmp;
lockdep_assert_held_read(&client->cdev->lock);
ret = copy_struct_from_user(&rqst, sizeof(rqst), r, sizeof(*r));
if (ret)
goto out;
plddata = u64_to_user_ptr(rqst.payload.data);
rspdata = u64_to_user_ptr(rqst.response.data);
/* Setup basic request fields. */
spec.target_category = rqst.target_category;
spec.target_id = rqst.target_id;
spec.command_id = rqst.command_id;
spec.instance_id = rqst.instance_id;
spec.flags = 0;
spec.length = rqst.payload.length;
spec.payload = NULL;
if (rqst.flags & SSAM_CDEV_REQUEST_HAS_RESPONSE)
spec.flags |= SSAM_REQUEST_HAS_RESPONSE;
if (rqst.flags & SSAM_CDEV_REQUEST_UNSEQUENCED)
spec.flags |= SSAM_REQUEST_UNSEQUENCED;
rsp.capacity = rqst.response.length;
rsp.length = 0;
rsp.pointer = NULL;
/* Get request payload from user-space. */
if (spec.length) {
if (!plddata) {
ret = -EINVAL;
goto out;
}
/*
* Note: spec.length is limited to U16_MAX bytes via struct
* ssam_cdev_request. This is slightly larger than the
* theoretical maximum (SSH_COMMAND_MAX_PAYLOAD_SIZE) of the
* underlying protocol (note that nothing remotely this size
* should ever be allocated in any normal case). This size is
* validated later in ssam_request_do_sync(), for allocation
* the bound imposed by u16 should be enough.
*/
spec.payload = kzalloc(spec.length, GFP_KERNEL);
if (!spec.payload) {
ret = -ENOMEM;
goto out;
}
if (copy_from_user((void *)spec.payload, plddata, spec.length)) {
ret = -EFAULT;
goto out;
}
}
/* Allocate response buffer. */
if (rsp.capacity) {
if (!rspdata) {
ret = -EINVAL;
goto out;
}
/*
* Note: rsp.capacity is limited to U16_MAX bytes via struct
* ssam_cdev_request. This is slightly larger than the
* theoretical maximum (SSH_COMMAND_MAX_PAYLOAD_SIZE) of the
* underlying protocol (note that nothing remotely this size
* should ever be allocated in any normal case). In later use,
* this capacity does not have to be strictly bounded, as it
* is only used as an output buffer to be written to. For
* allocation the bound imposed by u16 should be enough.
*/
rsp.pointer = kzalloc(rsp.capacity, GFP_KERNEL);
if (!rsp.pointer) {
ret = -ENOMEM;
goto out;
}
}
/* Perform request. */
status = ssam_request_do_sync(client->cdev->ctrl, &spec, &rsp);
if (status)
goto out;
/* Copy response to user-space. */
if (rsp.length && copy_to_user(rspdata, rsp.pointer, rsp.length))
ret = -EFAULT;
out:
/* Always try to set response-length and status. */
tmp = put_user(rsp.length, &r->response.length);
if (tmp)
ret = tmp;
tmp = put_user(status, &r->status);
if (tmp)
ret = tmp;
/* Cleanup. */
kfree(spec.payload);
kfree(rsp.pointer);
return ret;
}
static long ssam_cdev_notif_register(struct ssam_cdev_client *client,
const struct ssam_cdev_notifier_desc __user *d)
{
struct ssam_cdev_notifier_desc desc;
long ret;
lockdep_assert_held_read(&client->cdev->lock);
ret = copy_struct_from_user(&desc, sizeof(desc), d, sizeof(*d));
if (ret)
return ret;
return ssam_cdev_notifier_register(client, desc.target_category, desc.priority);
}
static long ssam_cdev_notif_unregister(struct ssam_cdev_client *client,
const struct ssam_cdev_notifier_desc __user *d)
{
struct ssam_cdev_notifier_desc desc;
long ret;
lockdep_assert_held_read(&client->cdev->lock);
ret = copy_struct_from_user(&desc, sizeof(desc), d, sizeof(*d));
if (ret)
return ret;
return ssam_cdev_notifier_unregister(client, desc.target_category);
}
static long ssam_cdev_event_enable(struct ssam_cdev_client *client,
const struct ssam_cdev_event_desc __user *d)
{
struct ssam_cdev_event_desc desc;
struct ssam_event_registry reg;
struct ssam_event_id id;
long ret;
lockdep_assert_held_read(&client->cdev->lock);
/* Read descriptor from user-space. */
ret = copy_struct_from_user(&desc, sizeof(desc), d, sizeof(*d));
if (ret)
return ret;
/* Translate descriptor. */
reg.target_category = desc.reg.target_category;
reg.target_id = desc.reg.target_id;
reg.cid_enable = desc.reg.cid_enable;
reg.cid_disable = desc.reg.cid_disable;
id.target_category = desc.id.target_category;
id.instance = desc.id.instance;
/* Enable event. */
return ssam_controller_event_enable(client->cdev->ctrl, reg, id, desc.flags);
}
static long ssam_cdev_event_disable(struct ssam_cdev_client *client,
const struct ssam_cdev_event_desc __user *d)
{
struct ssam_cdev_event_desc desc;
struct ssam_event_registry reg;
struct ssam_event_id id;
long ret;
lockdep_assert_held_read(&client->cdev->lock);
/* Read descriptor from user-space. */
ret = copy_struct_from_user(&desc, sizeof(desc), d, sizeof(*d));
if (ret)
return ret;
/* Translate descriptor. */
reg.target_category = desc.reg.target_category;
reg.target_id = desc.reg.target_id;
reg.cid_enable = desc.reg.cid_enable;
reg.cid_disable = desc.reg.cid_disable;
id.target_category = desc.id.target_category;
id.instance = desc.id.instance;
/* Disable event. */
return ssam_controller_event_disable(client->cdev->ctrl, reg, id, desc.flags);
}
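/*
* For reference, a hypothetical user-space sketch for issuing a request
* through this interface (all field values below are placeholders, not a
* real command):
*
*   struct ssam_cdev_request rqst = {
*           .target_category = 0x01,
*           .target_id       = 0x01,
*           .command_id      = 0x01,
*           .instance_id     = 0x00,
*           .flags           = SSAM_CDEV_REQUEST_HAS_RESPONSE,
*   };
*   ioctl(fd, SSAM_CDEV_REQUEST, &rqst);
*/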
/* -- File operations. ------------------------------------------------------ */
static int ssam_cdev_device_open(struct inode *inode, struct file *filp)
{
struct miscdevice *mdev = filp->private_data;
struct ssam_cdev_client *client;
struct ssam_cdev *cdev = container_of(mdev, struct ssam_cdev, mdev);
/* Initialize client */
client = vzalloc(sizeof(*client));
if (!client)
return -ENOMEM;
client->cdev = ssam_cdev_get(cdev);
INIT_LIST_HEAD(&client->node);
mutex_init(&client->notifier_lock);
mutex_init(&client->read_lock);
mutex_init(&client->write_lock);
INIT_KFIFO(client->buffer);
init_waitqueue_head(&client->waitq);
filp->private_data = client;
/* Attach client. */
down_write(&cdev->client_lock);
if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &cdev->flags)) {
up_write(&cdev->client_lock);
mutex_destroy(&client->write_lock);
mutex_destroy(&client->read_lock);
mutex_destroy(&client->notifier_lock);
ssam_cdev_put(client->cdev);
vfree(client);
return -ENODEV;
}
list_add_tail(&client->node, &cdev->client_list);
up_write(&cdev->client_lock);
stream_open(inode, filp);
return 0;
}
static int ssam_cdev_device_release(struct inode *inode, struct file *filp)
{
struct ssam_cdev_client *client = filp->private_data;
/* Force-unregister all remaining notifiers of this client. */
ssam_cdev_notifier_unregister_all(client);
/* Detach client. */
down_write(&client->cdev->client_lock);
list_del(&client->node);
up_write(&client->cdev->client_lock);
/* Free client. */
mutex_destroy(&client->write_lock);
mutex_destroy(&client->read_lock);
mutex_destroy(&client->notifier_lock);
ssam_cdev_put(client->cdev);
vfree(client);
return 0;
}
static long __ssam_cdev_device_ioctl(struct ssam_cdev_client *client, unsigned int cmd,
unsigned long arg)
{
lockdep_assert_held_read(&client->cdev->lock);
switch (cmd) {
case SSAM_CDEV_REQUEST:
return ssam_cdev_request(client, (struct ssam_cdev_request __user *)arg);
case SSAM_CDEV_NOTIF_REGISTER:
return ssam_cdev_notif_register(client,
(struct ssam_cdev_notifier_desc __user *)arg);
case SSAM_CDEV_NOTIF_UNREGISTER:
return ssam_cdev_notif_unregister(client,
(struct ssam_cdev_notifier_desc __user *)arg);
case SSAM_CDEV_EVENT_ENABLE:
return ssam_cdev_event_enable(client, (struct ssam_cdev_event_desc __user *)arg);
case SSAM_CDEV_EVENT_DISABLE:
return ssam_cdev_event_disable(client, (struct ssam_cdev_event_desc __user *)arg);
default:
return -ENOTTY;
}
}
static long ssam_cdev_device_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct ssam_cdev_client *client = file->private_data;
long status;
/* Ensure that controller is valid for as long as we need it. */
if (down_read_killable(&client->cdev->lock))
return -ERESTARTSYS;
if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &client->cdev->flags)) {
up_read(&client->cdev->lock);
return -ENODEV;
}
status = __ssam_cdev_device_ioctl(client, cmd, arg);
up_read(&client->cdev->lock);
return status;
}
static ssize_t ssam_cdev_read(struct file *file, char __user *buf, size_t count, loff_t *offs)
{
struct ssam_cdev_client *client = file->private_data;
struct ssam_cdev *cdev = client->cdev;
unsigned int copied;
int status = 0;
if (down_read_killable(&cdev->lock))
return -ERESTARTSYS;
/* Make sure we're not shut down. */
if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &cdev->flags)) {
up_read(&cdev->lock);
return -ENODEV;
}
do {
/* Check availability, wait if necessary. */
if (kfifo_is_empty(&client->buffer)) {
up_read(&cdev->lock);
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
status = wait_event_interruptible(client->waitq,
!kfifo_is_empty(&client->buffer) ||
test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT,
&cdev->flags));
if (status < 0)
return status;
if (down_read_killable(&cdev->lock))
return -ERESTARTSYS;
/* Need to check that we're not shut down again. */
if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &cdev->flags)) {
up_read(&cdev->lock);
return -ENODEV;
}
}
/* Try to read from FIFO. */
if (mutex_lock_interruptible(&client->read_lock)) {
up_read(&cdev->lock);
return -ERESTARTSYS;
}
status = kfifo_to_user(&client->buffer, buf, count, &copied);
mutex_unlock(&client->read_lock);
if (status < 0) {
up_read(&cdev->lock);
return status;
}
/* We might not have gotten anything, check this here. */
if (copied == 0 && (file->f_flags & O_NONBLOCK)) {
up_read(&cdev->lock);
return -EAGAIN;
}
} while (copied == 0);
up_read(&cdev->lock);
return copied;
}
static __poll_t ssam_cdev_poll(struct file *file, struct poll_table_struct *pt)
{
struct ssam_cdev_client *client = file->private_data;
__poll_t events = 0;
if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &client->cdev->flags))
return EPOLLHUP | EPOLLERR;
poll_wait(file, &client->waitq, pt);
if (!kfifo_is_empty(&client->buffer))
events |= EPOLLIN | EPOLLRDNORM;
return events;
}
static int ssam_cdev_fasync(int fd, struct file *file, int on)
{
struct ssam_cdev_client *client = file->private_data;
return fasync_helper(fd, file, on, &client->fasync);
}
static const struct file_operations ssam_controller_fops = {
.owner = THIS_MODULE,
.open = ssam_cdev_device_open,
.release = ssam_cdev_device_release,
.read = ssam_cdev_read,
.poll = ssam_cdev_poll,
.fasync = ssam_cdev_fasync,
.unlocked_ioctl = ssam_cdev_device_ioctl,
.compat_ioctl = ssam_cdev_device_ioctl,
.llseek = no_llseek,
};
/* -- Device and driver setup ----------------------------------------------- */
static int ssam_dbg_device_probe(struct platform_device *pdev)
{
struct ssam_controller *ctrl;
struct ssam_cdev *cdev;
int status;
ctrl = ssam_client_bind(&pdev->dev);
if (IS_ERR(ctrl))
return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);
cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
if (!cdev)
return -ENOMEM;
kref_init(&cdev->kref);
init_rwsem(&cdev->lock);
cdev->ctrl = ctrl;
cdev->dev = &pdev->dev;
cdev->mdev.parent = &pdev->dev;
cdev->mdev.minor = MISC_DYNAMIC_MINOR;
cdev->mdev.name = "surface_aggregator";
cdev->mdev.nodename = "surface/aggregator";
cdev->mdev.fops = &ssam_controller_fops;
init_rwsem(&cdev->client_lock);
INIT_LIST_HEAD(&cdev->client_list);
status = misc_register(&cdev->mdev);
if (status) {
kfree(cdev);
return status;
}
platform_set_drvdata(pdev, cdev);
return 0;
}
static int ssam_dbg_device_remove(struct platform_device *pdev)
{
struct ssam_cdev *cdev = platform_get_drvdata(pdev);
struct ssam_cdev_client *client;
/*
* Mark device as shut-down. Prevent new clients from being added and
* new operations from being executed.
*/
set_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &cdev->flags);
down_write(&cdev->client_lock);
/* Remove all notifiers registered by us. */
list_for_each_entry(client, &cdev->client_list, node) {
ssam_cdev_notifier_unregister_all(client);
}
/* Wake up async clients. */
list_for_each_entry(client, &cdev->client_list, node) {
kill_fasync(&client->fasync, SIGIO, POLL_HUP);
}
/* Wake up blocking clients. */
list_for_each_entry(client, &cdev->client_list, node) {
wake_up_interruptible(&client->waitq);
}
up_write(&cdev->client_lock);
/*
* The controller is only guaranteed to be valid for as long as the
* driver is bound. Remove controller so that any lingering open files
* cannot access it any more after we're gone.
*/
down_write(&cdev->lock);
cdev->ctrl = NULL;
cdev->dev = NULL;
up_write(&cdev->lock);
misc_deregister(&cdev->mdev);
ssam_cdev_put(cdev);
return 0;
}
static struct platform_device *ssam_cdev_device;
static struct platform_driver ssam_cdev_driver = {
.probe = ssam_dbg_device_probe,
.remove = ssam_dbg_device_remove,
.driver = {
.name = SSAM_CDEV_DEVICE_NAME,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
static int __init ssam_debug_init(void)
{
int status;
ssam_cdev_device = platform_device_alloc(SSAM_CDEV_DEVICE_NAME,
PLATFORM_DEVID_NONE);
if (!ssam_cdev_device)
return -ENOMEM;
status = platform_device_add(ssam_cdev_device);
if (status)
goto err_device;
status = platform_driver_register(&ssam_cdev_driver);
if (status)
goto err_driver;
return 0;
err_driver:
platform_device_del(ssam_cdev_device);
err_device:
platform_device_put(ssam_cdev_device);
return status;
}
module_init(ssam_debug_init);
static void __exit ssam_debug_exit(void)
{
platform_driver_unregister(&ssam_cdev_driver);
platform_device_unregister(ssam_cdev_device);
}
module_exit(ssam_debug_exit);
MODULE_AUTHOR("Maximilian Luz <[email protected]>");
MODULE_DESCRIPTION("User-space interface for Surface System Aggregator Module");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/surface/surface_aggregator_cdev.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Surface GPE/Lid driver to enable wakeup from suspend via the lid by
* properly configuring the respective GPEs. Required for wakeup via lid on
* newer Intel-based Microsoft Surface devices.
*
* Copyright (C) 2020-2022 Maximilian Luz <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
/*
* Note: The GPE numbers for the lid devices found below have been obtained
* from ACPI/the DSDT table, specifically from the GPE handler for the
* lid.
*/
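/*
* GPE handlers live under the \_GPE scope as methods named _Lxx or _Exx,
* where xx is the (hexadecimal) GPE number; searching the decompiled DSDT
* for the method notifying the lid device yields the values used below.
*/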
static const struct property_entry lid_device_props_l17[] = {
PROPERTY_ENTRY_U32("gpe", 0x17),
{},
};
static const struct property_entry lid_device_props_l4B[] = {
PROPERTY_ENTRY_U32("gpe", 0x4B),
{},
};
static const struct property_entry lid_device_props_l4D[] = {
PROPERTY_ENTRY_U32("gpe", 0x4D),
{},
};
static const struct property_entry lid_device_props_l4F[] = {
PROPERTY_ENTRY_U32("gpe", 0x4F),
{},
};
static const struct property_entry lid_device_props_l57[] = {
PROPERTY_ENTRY_U32("gpe", 0x57),
{},
};
/*
* Note: When changing this, don't forget to check that the MODULE_ALIAS below
* still fits.
*/
static const struct dmi_system_id dmi_lid_device_table[] = {
{
.ident = "Surface Pro 4",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 4"),
},
.driver_data = (void *)lid_device_props_l17,
},
{
.ident = "Surface Pro 5",
.matches = {
/*
* We match for SKU here due to generic product name
* "Surface Pro".
*/
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1796"),
},
.driver_data = (void *)lid_device_props_l4F,
},
{
.ident = "Surface Pro 5 (LTE)",
.matches = {
/*
* We match for SKU here due to generic product name
* "Surface Pro"
*/
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1807"),
},
.driver_data = (void *)lid_device_props_l4F,
},
{
.ident = "Surface Pro 6",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 6"),
},
.driver_data = (void *)lid_device_props_l4F,
},
{
.ident = "Surface Pro 7",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 7"),
},
.driver_data = (void *)lid_device_props_l4D,
},
{
.ident = "Surface Pro 8",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 8"),
},
.driver_data = (void *)lid_device_props_l4B,
},
{
.ident = "Surface Book 1",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book"),
},
.driver_data = (void *)lid_device_props_l17,
},
{
.ident = "Surface Book 2",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book 2"),
},
.driver_data = (void *)lid_device_props_l17,
},
{
.ident = "Surface Book 3",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book 3"),
},
.driver_data = (void *)lid_device_props_l4D,
},
{
.ident = "Surface Laptop 1",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop"),
},
.driver_data = (void *)lid_device_props_l57,
},
{
.ident = "Surface Laptop 2",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop 2"),
},
.driver_data = (void *)lid_device_props_l57,
},
{
.ident = "Surface Laptop 3 (Intel 13\")",
.matches = {
/*
* We match for SKU here due to different variants: The
* AMD (15") version does not rely on GPEs.
*/
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Laptop_3_1867:1868"),
},
.driver_data = (void *)lid_device_props_l4D,
},
{
.ident = "Surface Laptop 3 (Intel 15\")",
.matches = {
/*
* We match for SKU here due to different variants: The
* AMD (15") version does not rely on GPEs.
*/
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Laptop_3_1872"),
},
.driver_data = (void *)lid_device_props_l4D,
},
{
.ident = "Surface Laptop 4 (Intel 13\")",
.matches = {
/*
* We match for SKU here due to different variants: The
* AMD (15") version does not rely on GPEs.
*/
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Laptop_4_1950:1951"),
},
.driver_data = (void *)lid_device_props_l4B,
},
{
.ident = "Surface Laptop Studio",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop Studio"),
},
.driver_data = (void *)lid_device_props_l4B,
},
{ }
};
struct surface_lid_device {
u32 gpe_number;
};
static int surface_lid_enable_wakeup(struct device *dev, bool enable)
{
const struct surface_lid_device *lid = dev_get_drvdata(dev);
int action = enable ? ACPI_GPE_ENABLE : ACPI_GPE_DISABLE;
acpi_status status;
status = acpi_set_gpe_wake_mask(NULL, lid->gpe_number, action);
if (ACPI_FAILURE(status)) {
dev_err(dev, "failed to set GPE wake mask: %s\n",
acpi_format_exception(status));
return -EINVAL;
}
return 0;
}
static int __maybe_unused surface_gpe_suspend(struct device *dev)
{
return surface_lid_enable_wakeup(dev, true);
}
static int __maybe_unused surface_gpe_resume(struct device *dev)
{
return surface_lid_enable_wakeup(dev, false);
}
static SIMPLE_DEV_PM_OPS(surface_gpe_pm, surface_gpe_suspend, surface_gpe_resume);
static int surface_gpe_probe(struct platform_device *pdev)
{
struct surface_lid_device *lid;
u32 gpe_number;
acpi_status status;
int ret;
ret = device_property_read_u32(&pdev->dev, "gpe", &gpe_number);
if (ret) {
dev_err(&pdev->dev, "failed to read 'gpe' property: %d\n", ret);
return ret;
}
lid = devm_kzalloc(&pdev->dev, sizeof(*lid), GFP_KERNEL);
if (!lid)
return -ENOMEM;
lid->gpe_number = gpe_number;
platform_set_drvdata(pdev, lid);
status = acpi_mark_gpe_for_wake(NULL, gpe_number);
if (ACPI_FAILURE(status)) {
dev_err(&pdev->dev, "failed to mark GPE for wake: %s\n",
acpi_format_exception(status));
return -EINVAL;
}
status = acpi_enable_gpe(NULL, gpe_number);
if (ACPI_FAILURE(status)) {
dev_err(&pdev->dev, "failed to enable GPE: %s\n",
acpi_format_exception(status));
return -EINVAL;
}
ret = surface_lid_enable_wakeup(&pdev->dev, false);
if (ret)
acpi_disable_gpe(NULL, gpe_number);
return ret;
}
static int surface_gpe_remove(struct platform_device *pdev)
{
struct surface_lid_device *lid = dev_get_drvdata(&pdev->dev);
/* restore default behavior without this module */
surface_lid_enable_wakeup(&pdev->dev, false);
acpi_disable_gpe(NULL, lid->gpe_number);
return 0;
}
static struct platform_driver surface_gpe_driver = {
.probe = surface_gpe_probe,
.remove = surface_gpe_remove,
.driver = {
.name = "surface_gpe",
.pm = &surface_gpe_pm,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
static struct platform_device *surface_gpe_device;
static int __init surface_gpe_init(void)
{
const struct dmi_system_id *match;
struct platform_device *pdev;
struct fwnode_handle *fwnode;
int status;
match = dmi_first_match(dmi_lid_device_table);
if (!match) {
pr_info("no compatible Microsoft Surface device found, exiting\n");
return -ENODEV;
}
status = platform_driver_register(&surface_gpe_driver);
if (status)
return status;
fwnode = fwnode_create_software_node(match->driver_data, NULL);
if (IS_ERR(fwnode)) {
status = PTR_ERR(fwnode);
goto err_node;
}
pdev = platform_device_alloc("surface_gpe", PLATFORM_DEVID_NONE);
if (!pdev) {
status = -ENOMEM;
goto err_alloc;
}
pdev->dev.fwnode = fwnode;
status = platform_device_add(pdev);
if (status)
goto err_add;
surface_gpe_device = pdev;
return 0;
err_add:
platform_device_put(pdev);
err_alloc:
fwnode_remove_software_node(fwnode);
err_node:
platform_driver_unregister(&surface_gpe_driver);
return status;
}
module_init(surface_gpe_init);
static void __exit surface_gpe_exit(void)
{
struct fwnode_handle *fwnode = surface_gpe_device->dev.fwnode;
platform_device_unregister(surface_gpe_device);
platform_driver_unregister(&surface_gpe_driver);
fwnode_remove_software_node(fwnode);
}
module_exit(surface_gpe_exit);
MODULE_AUTHOR("Maximilian Luz <[email protected]>");
MODULE_DESCRIPTION("Surface GPE/Lid Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("dmi:*:svnMicrosoftCorporation:pnSurface*:*");
| linux-master | drivers/platform/surface/surface_gpe.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Surface System Aggregator Module (SSAM) tablet mode switch driver.
*
* Copyright (C) 2022 Maximilian Luz <[email protected]>
*/
#include <asm/unaligned.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/surface_aggregator/controller.h>
#include <linux/surface_aggregator/device.h>
/* -- SSAM generic tablet switch driver framework. -------------------------- */
struct ssam_tablet_sw;
struct ssam_tablet_sw_state {
u32 source;
u32 state;
};
struct ssam_tablet_sw_ops {
int (*get_state)(struct ssam_tablet_sw *sw, struct ssam_tablet_sw_state *state);
const char *(*state_name)(struct ssam_tablet_sw *sw,
const struct ssam_tablet_sw_state *state);
bool (*state_is_tablet_mode)(struct ssam_tablet_sw *sw,
const struct ssam_tablet_sw_state *state);
};
struct ssam_tablet_sw {
struct ssam_device *sdev;
struct ssam_tablet_sw_state state;
struct work_struct update_work;
struct input_dev *mode_switch;
struct ssam_tablet_sw_ops ops;
struct ssam_event_notifier notif;
};
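/*
* Static description of one tablet-switch variant; provided as driver
* match data and consumed in ssam_tablet_sw_probe().
*/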
struct ssam_tablet_sw_desc {
struct {
const char *name;
const char *phys;
} dev;
struct {
u32 (*notify)(struct ssam_event_notifier *nf, const struct ssam_event *event);
int (*get_state)(struct ssam_tablet_sw *sw, struct ssam_tablet_sw_state *state);
const char *(*state_name)(struct ssam_tablet_sw *sw,
const struct ssam_tablet_sw_state *state);
bool (*state_is_tablet_mode)(struct ssam_tablet_sw *sw,
const struct ssam_tablet_sw_state *state);
} ops;
struct {
struct ssam_event_registry reg;
struct ssam_event_id id;
enum ssam_event_mask mask;
u8 flags;
} event;
};
static ssize_t state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct ssam_tablet_sw *sw = dev_get_drvdata(dev);
const char *state = sw->ops.state_name(sw, &sw->state);
return sysfs_emit(buf, "%s\n", state);
}
static DEVICE_ATTR_RO(state);
static struct attribute *ssam_tablet_sw_attrs[] = {
&dev_attr_state.attr,
NULL,
};
static const struct attribute_group ssam_tablet_sw_group = {
.attrs = ssam_tablet_sw_attrs,
};
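/*
 * Usage sketch (illustration only; the exact sysfs path depends on the SSAM
 * device name, e.g. "01:0e:01:00:01" for the KIP switch defined below):
 *
 *	$ cat /sys/bus/surface_aggregator/devices/01:0e:01:00:01/state
 *	laptop
 */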
static void ssam_tablet_sw_update_workfn(struct work_struct *work)
{
struct ssam_tablet_sw *sw = container_of(work, struct ssam_tablet_sw, update_work);
struct ssam_tablet_sw_state state;
int tablet, status;
status = sw->ops.get_state(sw, &state);
if (status)
return;
if (sw->state.source == state.source && sw->state.state == state.state)
return;
sw->state = state;
/* Send SW_TABLET_MODE event. */
tablet = sw->ops.state_is_tablet_mode(sw, &state);
input_report_switch(sw->mode_switch, SW_TABLET_MODE, tablet);
input_sync(sw->mode_switch);
}
static int __maybe_unused ssam_tablet_sw_resume(struct device *dev)
{
struct ssam_tablet_sw *sw = dev_get_drvdata(dev);
schedule_work(&sw->update_work);
return 0;
}
static SIMPLE_DEV_PM_OPS(ssam_tablet_sw_pm_ops, NULL, ssam_tablet_sw_resume);
static int ssam_tablet_sw_probe(struct ssam_device *sdev)
{
const struct ssam_tablet_sw_desc *desc;
struct ssam_tablet_sw *sw;
int tablet, status;
desc = ssam_device_get_match_data(sdev);
if (!desc) {
WARN(1, "no driver match data specified");
return -EINVAL;
}
sw = devm_kzalloc(&sdev->dev, sizeof(*sw), GFP_KERNEL);
if (!sw)
return -ENOMEM;
sw->sdev = sdev;
sw->ops.get_state = desc->ops.get_state;
sw->ops.state_name = desc->ops.state_name;
sw->ops.state_is_tablet_mode = desc->ops.state_is_tablet_mode;
INIT_WORK(&sw->update_work, ssam_tablet_sw_update_workfn);
ssam_device_set_drvdata(sdev, sw);
/* Get initial state. */
status = sw->ops.get_state(sw, &sw->state);
if (status)
return status;
/* Set up tablet mode switch. */
sw->mode_switch = devm_input_allocate_device(&sdev->dev);
if (!sw->mode_switch)
return -ENOMEM;
sw->mode_switch->name = desc->dev.name;
sw->mode_switch->phys = desc->dev.phys;
sw->mode_switch->id.bustype = BUS_HOST;
sw->mode_switch->dev.parent = &sdev->dev;
tablet = sw->ops.state_is_tablet_mode(sw, &sw->state);
input_set_capability(sw->mode_switch, EV_SW, SW_TABLET_MODE);
input_report_switch(sw->mode_switch, SW_TABLET_MODE, tablet);
status = input_register_device(sw->mode_switch);
if (status)
return status;
/* Set up notifier. */
sw->notif.base.priority = 0;
sw->notif.base.fn = desc->ops.notify;
sw->notif.event.reg = desc->event.reg;
sw->notif.event.id = desc->event.id;
sw->notif.event.mask = desc->event.mask;
sw->notif.event.flags = SSAM_EVENT_SEQUENCED;
status = ssam_device_notifier_register(sdev, &sw->notif);
if (status)
return status;
status = sysfs_create_group(&sdev->dev.kobj, &ssam_tablet_sw_group);
if (status)
goto err;
/* We might have missed events during setup, so check again. */
schedule_work(&sw->update_work);
return 0;
err:
ssam_device_notifier_unregister(sdev, &sw->notif);
cancel_work_sync(&sw->update_work);
return status;
}
static void ssam_tablet_sw_remove(struct ssam_device *sdev)
{
struct ssam_tablet_sw *sw = ssam_device_get_drvdata(sdev);
sysfs_remove_group(&sdev->dev.kobj, &ssam_tablet_sw_group);
ssam_device_notifier_unregister(sdev, &sw->notif);
cancel_work_sync(&sw->update_work);
}
/* -- SSAM KIP tablet switch implementation. -------------------------------- */
#define SSAM_EVENT_KIP_CID_COVER_STATE_CHANGED 0x1d
enum ssam_kip_cover_state {
SSAM_KIP_COVER_STATE_DISCONNECTED = 0x01,
SSAM_KIP_COVER_STATE_CLOSED = 0x02,
SSAM_KIP_COVER_STATE_LAPTOP = 0x03,
SSAM_KIP_COVER_STATE_FOLDED_CANVAS = 0x04,
SSAM_KIP_COVER_STATE_FOLDED_BACK = 0x05,
SSAM_KIP_COVER_STATE_BOOK = 0x06,
};
static const char *ssam_kip_cover_state_name(struct ssam_tablet_sw *sw,
const struct ssam_tablet_sw_state *state)
{
switch (state->state) {
case SSAM_KIP_COVER_STATE_DISCONNECTED:
return "disconnected";
case SSAM_KIP_COVER_STATE_CLOSED:
return "closed";
case SSAM_KIP_COVER_STATE_LAPTOP:
return "laptop";
case SSAM_KIP_COVER_STATE_FOLDED_CANVAS:
return "folded-canvas";
case SSAM_KIP_COVER_STATE_FOLDED_BACK:
return "folded-back";
case SSAM_KIP_COVER_STATE_BOOK:
return "book";
default:
dev_warn(&sw->sdev->dev, "unknown KIP cover state: %u\n", state->state);
return "<unknown>";
}
}
static bool ssam_kip_cover_state_is_tablet_mode(struct ssam_tablet_sw *sw,
const struct ssam_tablet_sw_state *state)
{
switch (state->state) {
case SSAM_KIP_COVER_STATE_DISCONNECTED:
case SSAM_KIP_COVER_STATE_FOLDED_CANVAS:
case SSAM_KIP_COVER_STATE_FOLDED_BACK:
case SSAM_KIP_COVER_STATE_BOOK:
return true;
case SSAM_KIP_COVER_STATE_CLOSED:
case SSAM_KIP_COVER_STATE_LAPTOP:
return false;
default:
dev_warn(&sw->sdev->dev, "unknown KIP cover state: %d\n", state->state);
return true;
}
}
SSAM_DEFINE_SYNC_REQUEST_R(__ssam_kip_get_cover_state, u8, {
.target_category = SSAM_SSH_TC_KIP,
.target_id = SSAM_SSH_TID_SAM,
.command_id = 0x1d,
.instance_id = 0x00,
});
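/*
 * Note: The macro above generates a synchronous request function; assuming
 * the semantics of SSAM_DEFINE_SYNC_REQUEST_R() from
 * <linux/surface_aggregator/controller.h>, its signature is roughly:
 *
 *	static int __ssam_kip_get_cover_state(struct ssam_controller *ctrl,
 *					      u8 *ret);
 */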
static int ssam_kip_get_cover_state(struct ssam_tablet_sw *sw, struct ssam_tablet_sw_state *state)
{
int status;
u8 raw;
status = ssam_retry(__ssam_kip_get_cover_state, sw->sdev->ctrl, &raw);
if (status < 0) {
dev_err(&sw->sdev->dev, "failed to query KIP lid state: %d\n", status);
return status;
}
state->source = 0; /* Unused for KIP switch. */
state->state = raw;
return 0;
}
static u32 ssam_kip_sw_notif(struct ssam_event_notifier *nf, const struct ssam_event *event)
{
struct ssam_tablet_sw *sw = container_of(nf, struct ssam_tablet_sw, notif);
if (event->command_id != SSAM_EVENT_KIP_CID_COVER_STATE_CHANGED)
return 0; /* Return "unhandled". */
if (event->length < 1)
dev_warn(&sw->sdev->dev, "unexpected payload size: %u\n", event->length);
schedule_work(&sw->update_work);
return SSAM_NOTIF_HANDLED;
}
static const struct ssam_tablet_sw_desc ssam_kip_sw_desc = {
.dev = {
.name = "Microsoft Surface KIP Tablet Mode Switch",
.phys = "ssam/01:0e:01:00:01/input0",
},
.ops = {
.notify = ssam_kip_sw_notif,
.get_state = ssam_kip_get_cover_state,
.state_name = ssam_kip_cover_state_name,
.state_is_tablet_mode = ssam_kip_cover_state_is_tablet_mode,
},
.event = {
.reg = SSAM_EVENT_REGISTRY_SAM,
.id = {
.target_category = SSAM_SSH_TC_KIP,
.instance = 0,
},
.mask = SSAM_EVENT_MASK_TARGET,
},
};
/* -- SSAM POS tablet switch implementation. -------------------------------- */
static bool tablet_mode_in_slate_state = true;
module_param(tablet_mode_in_slate_state, bool, 0644);
MODULE_PARM_DESC(tablet_mode_in_slate_state, "Enable tablet mode in slate device posture, default is 'true'");
#define SSAM_EVENT_POS_CID_POSTURE_CHANGED 0x03
#define SSAM_POS_MAX_SOURCES 4
enum ssam_pos_source_id {
SSAM_POS_SOURCE_COVER = 0x00,
SSAM_POS_SOURCE_SLS = 0x03,
};
enum ssam_pos_state_cover {
SSAM_POS_COVER_DISCONNECTED = 0x01,
SSAM_POS_COVER_CLOSED = 0x02,
SSAM_POS_COVER_LAPTOP = 0x03,
SSAM_POS_COVER_FOLDED_CANVAS = 0x04,
SSAM_POS_COVER_FOLDED_BACK = 0x05,
SSAM_POS_COVER_BOOK = 0x06,
};
enum ssam_pos_state_sls {
SSAM_POS_SLS_LID_CLOSED = 0x00,
SSAM_POS_SLS_LAPTOP = 0x01,
SSAM_POS_SLS_SLATE = 0x02,
SSAM_POS_SLS_TABLET = 0x03,
};
struct ssam_sources_list {
__le32 count;
__le32 id[SSAM_POS_MAX_SOURCES];
} __packed;
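/*
 * Layout sketch of the response buffer parsed into struct ssam_sources_list
 * (all fields little-endian; validated in ssam_pos_get_sources_list() below):
 *
 *	offset 0:          __le32 count    -- number of valid entries
 *	offset 4 + 4 * n:  __le32 id[n]    -- posture source IDs
 */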
static const char *ssam_pos_state_name_cover(struct ssam_tablet_sw *sw, u32 state)
{
switch (state) {
case SSAM_POS_COVER_DISCONNECTED:
return "disconnected";
case SSAM_POS_COVER_CLOSED:
return "closed";
case SSAM_POS_COVER_LAPTOP:
return "laptop";
case SSAM_POS_COVER_FOLDED_CANVAS:
return "folded-canvas";
case SSAM_POS_COVER_FOLDED_BACK:
return "folded-back";
case SSAM_POS_COVER_BOOK:
return "book";
default:
dev_warn(&sw->sdev->dev, "unknown device posture for type-cover: %u\n", state);
return "<unknown>";
}
}
static const char *ssam_pos_state_name_sls(struct ssam_tablet_sw *sw, u32 state)
{
switch (state) {
case SSAM_POS_SLS_LID_CLOSED:
return "closed";
case SSAM_POS_SLS_LAPTOP:
return "laptop";
case SSAM_POS_SLS_SLATE:
return "slate";
case SSAM_POS_SLS_TABLET:
return "tablet";
default:
dev_warn(&sw->sdev->dev, "unknown device posture for SLS: %u\n", state);
return "<unknown>";
}
}
static const char *ssam_pos_state_name(struct ssam_tablet_sw *sw,
const struct ssam_tablet_sw_state *state)
{
switch (state->source) {
case SSAM_POS_SOURCE_COVER:
return ssam_pos_state_name_cover(sw, state->state);
case SSAM_POS_SOURCE_SLS:
return ssam_pos_state_name_sls(sw, state->state);
default:
dev_warn(&sw->sdev->dev, "unknown device posture source: %u\n", state->source);
return "<unknown>";
}
}
static bool ssam_pos_state_is_tablet_mode_cover(struct ssam_tablet_sw *sw, u32 state)
{
switch (state) {
case SSAM_POS_COVER_DISCONNECTED:
case SSAM_POS_COVER_FOLDED_CANVAS:
case SSAM_POS_COVER_FOLDED_BACK:
case SSAM_POS_COVER_BOOK:
return true;
case SSAM_POS_COVER_CLOSED:
case SSAM_POS_COVER_LAPTOP:
return false;
default:
dev_warn(&sw->sdev->dev, "unknown device posture for type-cover: %u\n", state);
return true;
}
}
static bool ssam_pos_state_is_tablet_mode_sls(struct ssam_tablet_sw *sw, u32 state)
{
switch (state) {
case SSAM_POS_SLS_LAPTOP:
case SSAM_POS_SLS_LID_CLOSED:
return false;
case SSAM_POS_SLS_SLATE:
return tablet_mode_in_slate_state;
case SSAM_POS_SLS_TABLET:
return true;
default:
dev_warn(&sw->sdev->dev, "unknown device posture for SLS: %u\n", state);
return true;
}
}
static bool ssam_pos_state_is_tablet_mode(struct ssam_tablet_sw *sw,
const struct ssam_tablet_sw_state *state)
{
switch (state->source) {
case SSAM_POS_SOURCE_COVER:
return ssam_pos_state_is_tablet_mode_cover(sw, state->state);
case SSAM_POS_SOURCE_SLS:
return ssam_pos_state_is_tablet_mode_sls(sw, state->state);
default:
dev_warn(&sw->sdev->dev, "unknown device posture source: %u\n", state->source);
return true;
}
}
static int ssam_pos_get_sources_list(struct ssam_tablet_sw *sw, struct ssam_sources_list *sources)
{
struct ssam_request rqst;
struct ssam_response rsp;
int status;
rqst.target_category = SSAM_SSH_TC_POS;
rqst.target_id = SSAM_SSH_TID_SAM;
rqst.command_id = 0x01;
rqst.instance_id = 0x00;
rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
rqst.length = 0;
rqst.payload = NULL;
rsp.capacity = sizeof(*sources);
rsp.length = 0;
rsp.pointer = (u8 *)sources;
status = ssam_retry(ssam_request_do_sync_onstack, sw->sdev->ctrl, &rqst, &rsp, 0);
if (status)
return status;
/* We need at least the 'sources->count' field. */
if (rsp.length < sizeof(__le32)) {
dev_err(&sw->sdev->dev, "received source list response is too small\n");
return -EPROTO;
}
/* Make sure 'sources->count' matches with the response length. */
if (get_unaligned_le32(&sources->count) * sizeof(__le32) + sizeof(__le32) != rsp.length) {
dev_err(&sw->sdev->dev, "mismatch between number of sources and response size\n");
return -EPROTO;
}
return 0;
}
static int ssam_pos_get_source(struct ssam_tablet_sw *sw, u32 *source_id)
{
struct ssam_sources_list sources = {};
int status;
status = ssam_pos_get_sources_list(sw, &sources);
if (status)
return status;
if (get_unaligned_le32(&sources.count) == 0) {
dev_err(&sw->sdev->dev, "no posture sources found\n");
return -ENODEV;
}
/*
* We currently don't know what to do with more than one posture
* source. At the moment, only one source seems to be used/provided.
* The WARN_ON() here should hopefully let us know quickly once there
* is a device that provides multiple sources, at which point we can
* then try to figure out how to handle them.
*/
WARN_ON(get_unaligned_le32(&sources.count) > 1);
*source_id = get_unaligned_le32(&sources.id[0]);
return 0;
}
SSAM_DEFINE_SYNC_REQUEST_WR(__ssam_pos_get_posture_for_source, __le32, __le32, {
.target_category = SSAM_SSH_TC_POS,
.target_id = SSAM_SSH_TID_SAM,
.command_id = 0x02,
.instance_id = 0x00,
});
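/*
 * Note: Analogous to the KIP request above, this macro generates a
 * write-read request function; assuming the semantics of
 * SSAM_DEFINE_SYNC_REQUEST_WR(), its signature is roughly:
 *
 *	static int __ssam_pos_get_posture_for_source(struct ssam_controller *ctrl,
 *						     const __le32 *arg, __le32 *ret);
 */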
static int ssam_pos_get_posture_for_source(struct ssam_tablet_sw *sw, u32 source_id, u32 *posture)
{
__le32 source_le = cpu_to_le32(source_id);
__le32 rspval_le = 0;
int status;
status = ssam_retry(__ssam_pos_get_posture_for_source, sw->sdev->ctrl,
&source_le, &rspval_le);
if (status)
return status;
*posture = le32_to_cpu(rspval_le);
return 0;
}
static int ssam_pos_get_posture(struct ssam_tablet_sw *sw, struct ssam_tablet_sw_state *state)
{
u32 source_id;
u32 source_state;
int status;
status = ssam_pos_get_source(sw, &source_id);
if (status) {
dev_err(&sw->sdev->dev, "failed to get posture source ID: %d\n", status);
return status;
}
status = ssam_pos_get_posture_for_source(sw, source_id, &source_state);
if (status) {
dev_err(&sw->sdev->dev, "failed to get posture value for source %u: %d\n",
source_id, status);
return status;
}
state->source = source_id;
state->state = source_state;
return 0;
}
static u32 ssam_pos_sw_notif(struct ssam_event_notifier *nf, const struct ssam_event *event)
{
struct ssam_tablet_sw *sw = container_of(nf, struct ssam_tablet_sw, notif);
if (event->command_id != SSAM_EVENT_POS_CID_POSTURE_CHANGED)
return 0; /* Return "unhandled". */
if (event->length != sizeof(__le32) * 3)
dev_warn(&sw->sdev->dev, "unexpected payload size: %u\n", event->length);
schedule_work(&sw->update_work);
return SSAM_NOTIF_HANDLED;
}
static const struct ssam_tablet_sw_desc ssam_pos_sw_desc = {
.dev = {
.name = "Microsoft Surface POS Tablet Mode Switch",
.phys = "ssam/01:26:01:00:01/input0",
},
.ops = {
.notify = ssam_pos_sw_notif,
.get_state = ssam_pos_get_posture,
.state_name = ssam_pos_state_name,
.state_is_tablet_mode = ssam_pos_state_is_tablet_mode,
},
.event = {
.reg = SSAM_EVENT_REGISTRY_SAM,
.id = {
.target_category = SSAM_SSH_TC_POS,
.instance = 0,
},
.mask = SSAM_EVENT_MASK_TARGET,
},
};
/* -- Driver registration. -------------------------------------------------- */
static const struct ssam_device_id ssam_tablet_sw_match[] = {
{ SSAM_SDEV(KIP, SAM, 0x00, 0x01), (unsigned long)&ssam_kip_sw_desc },
{ SSAM_SDEV(POS, SAM, 0x00, 0x01), (unsigned long)&ssam_pos_sw_desc },
{ },
};
MODULE_DEVICE_TABLE(ssam, ssam_tablet_sw_match);
static struct ssam_device_driver ssam_tablet_sw_driver = {
.probe = ssam_tablet_sw_probe,
.remove = ssam_tablet_sw_remove,
.match_table = ssam_tablet_sw_match,
.driver = {
.name = "surface_aggregator_tablet_mode_switch",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &ssam_tablet_sw_pm_ops,
},
};
module_ssam_device_driver(ssam_tablet_sw_driver);
MODULE_AUTHOR("Maximilian Luz <[email protected]>");
MODULE_DESCRIPTION("Tablet mode switch driver for Surface devices using the Surface Aggregator Module");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/surface/surface_aggregator_tabletsw.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Surface Book (2 and later) hot-plug driver.
*
* Surface Book devices (can) have a hot-pluggable discrete GPU (dGPU). This
* driver is responsible for out-of-band hot-plug event signaling on these
* devices. It is specifically required when the hot-plug device is in D3cold
 * and thus cannot generate PCIe hot-plug events itself.
*
* Event signaling is handled via ACPI, which will generate the appropriate
* device-check notifications to be picked up by the PCIe hot-plug driver.
*
* Copyright (C) 2019-2022 Maximilian Luz <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
static const struct acpi_gpio_params shps_base_presence_int = { 0, 0, false };
static const struct acpi_gpio_params shps_base_presence = { 1, 0, false };
static const struct acpi_gpio_params shps_device_power_int = { 2, 0, false };
static const struct acpi_gpio_params shps_device_power = { 3, 0, false };
static const struct acpi_gpio_params shps_device_presence_int = { 4, 0, false };
static const struct acpi_gpio_params shps_device_presence = { 5, 0, false };
static const struct acpi_gpio_mapping shps_acpi_gpios[] = {
{ "base_presence-int-gpio", &shps_base_presence_int, 1 },
{ "base_presence-gpio", &shps_base_presence, 1 },
{ "device_power-int-gpio", &shps_device_power_int, 1 },
{ "device_power-gpio", &shps_device_power, 1 },
{ "device_presence-int-gpio", &shps_device_presence_int, 1 },
{ "device_presence-gpio", &shps_device_presence, 1 },
{ },
};
/* 5515a847-ed55-4b27-8352-cd320e10360a */
static const guid_t shps_dsm_guid =
GUID_INIT(0x5515a847, 0xed55, 0x4b27, 0x83, 0x52, 0xcd, 0x32, 0x0e, 0x10, 0x36, 0x0a);
#define SHPS_DSM_REVISION 1
enum shps_dsm_fn {
SHPS_DSM_FN_PCI_NUM_ENTRIES = 0x01,
SHPS_DSM_FN_PCI_GET_ENTRIES = 0x02,
SHPS_DSM_FN_IRQ_BASE_PRESENCE = 0x03,
SHPS_DSM_FN_IRQ_DEVICE_POWER = 0x04,
SHPS_DSM_FN_IRQ_DEVICE_PRESENCE = 0x05,
};
enum shps_irq_type {
/* NOTE: Must be in order of enum shps_dsm_fn above. */
SHPS_IRQ_TYPE_BASE_PRESENCE = 0,
SHPS_IRQ_TYPE_DEVICE_POWER = 1,
SHPS_IRQ_TYPE_DEVICE_PRESENCE = 2,
SHPS_NUM_IRQS,
};
static const char *const shps_gpio_names[] = {
[SHPS_IRQ_TYPE_BASE_PRESENCE] = "base_presence",
[SHPS_IRQ_TYPE_DEVICE_POWER] = "device_power",
[SHPS_IRQ_TYPE_DEVICE_PRESENCE] = "device_presence",
};
struct shps_device {
struct mutex lock[SHPS_NUM_IRQS]; /* Protects update in shps_dsm_notify_irq() */
struct gpio_desc *gpio[SHPS_NUM_IRQS];
unsigned int irq[SHPS_NUM_IRQS];
};
#define SHPS_IRQ_NOT_PRESENT ((unsigned int)-1)
static enum shps_dsm_fn shps_dsm_fn_for_irq(enum shps_irq_type type)
{
return SHPS_DSM_FN_IRQ_BASE_PRESENCE + type;
}
static void shps_dsm_notify_irq(struct platform_device *pdev, enum shps_irq_type type)
{
struct shps_device *sdev = platform_get_drvdata(pdev);
acpi_handle handle = ACPI_HANDLE(&pdev->dev);
union acpi_object *result;
union acpi_object param;
int value;
mutex_lock(&sdev->lock[type]);
value = gpiod_get_value_cansleep(sdev->gpio[type]);
if (value < 0) {
mutex_unlock(&sdev->lock[type]);
dev_err(&pdev->dev, "failed to get gpio: %d (irq=%d)\n", type, value);
return;
}
dev_dbg(&pdev->dev, "IRQ notification via DSM (irq=%d, value=%d)\n", type, value);
param.type = ACPI_TYPE_INTEGER;
param.integer.value = value;
result = acpi_evaluate_dsm_typed(handle, &shps_dsm_guid, SHPS_DSM_REVISION,
shps_dsm_fn_for_irq(type), ¶m, ACPI_TYPE_BUFFER);
if (!result) {
dev_err(&pdev->dev, "IRQ notification via DSM failed (irq=%d, gpio=%d)\n",
type, value);
} else if (result->buffer.length != 1 || result->buffer.pointer[0] != 0) {
dev_err(&pdev->dev,
"IRQ notification via DSM failed: unexpected result value (irq=%d, gpio=%d)\n",
type, value);
}
mutex_unlock(&sdev->lock[type]);
ACPI_FREE(result);
}
static irqreturn_t shps_handle_irq(int irq, void *data)
{
struct platform_device *pdev = data;
struct shps_device *sdev = platform_get_drvdata(pdev);
int type;
/* Figure out which IRQ we're handling. */
for (type = 0; type < SHPS_NUM_IRQS; type++)
if (irq == sdev->irq[type])
break;
/* We should have found our interrupt, if not: this is a bug. */
if (WARN(type >= SHPS_NUM_IRQS, "invalid IRQ number: %d\n", irq))
return IRQ_HANDLED;
/* Forward interrupt to ACPI via DSM. */
shps_dsm_notify_irq(pdev, type);
return IRQ_HANDLED;
}
static int shps_setup_irq(struct platform_device *pdev, enum shps_irq_type type)
{
unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
struct shps_device *sdev = platform_get_drvdata(pdev);
struct gpio_desc *gpiod;
acpi_handle handle = ACPI_HANDLE(&pdev->dev);
const char *irq_name;
const int dsm = shps_dsm_fn_for_irq(type);
int status, irq;
/*
* Only set up interrupts that we actually need: The Surface Book 3
* does not have a DSM for base presence, so don't set up an interrupt
* for that.
*/
if (!acpi_check_dsm(handle, &shps_dsm_guid, SHPS_DSM_REVISION, BIT(dsm))) {
dev_dbg(&pdev->dev, "IRQ notification via DSM not present (irq=%d)\n", type);
return 0;
}
gpiod = devm_gpiod_get(&pdev->dev, shps_gpio_names[type], GPIOD_ASIS);
if (IS_ERR(gpiod))
return PTR_ERR(gpiod);
irq = gpiod_to_irq(gpiod);
if (irq < 0)
return irq;
irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "shps-irq-%d", type);
if (!irq_name)
return -ENOMEM;
status = devm_request_threaded_irq(&pdev->dev, irq, NULL, shps_handle_irq,
flags, irq_name, pdev);
if (status)
return status;
dev_dbg(&pdev->dev, "set up irq %d as type %d\n", irq, type);
sdev->gpio[type] = gpiod;
sdev->irq[type] = irq;
return 0;
}
static int surface_hotplug_remove(struct platform_device *pdev)
{
struct shps_device *sdev = platform_get_drvdata(pdev);
int i;
/* Ensure that IRQs have been fully handled and won't trigger any more. */
for (i = 0; i < SHPS_NUM_IRQS; i++) {
if (sdev->irq[i] != SHPS_IRQ_NOT_PRESENT)
disable_irq(sdev->irq[i]);
mutex_destroy(&sdev->lock[i]);
}
return 0;
}
static int surface_hotplug_probe(struct platform_device *pdev)
{
struct shps_device *sdev;
int status, i;
/*
* The MSHW0153 device is also present on the Surface Laptop 3,
* however that doesn't have a hot-pluggable PCIe device. It also
* doesn't have any GPIO interrupts/pins under the MSHW0153, so filter
* it out here.
*/
if (gpiod_count(&pdev->dev, NULL) < 0)
return -ENODEV;
status = devm_acpi_dev_add_driver_gpios(&pdev->dev, shps_acpi_gpios);
if (status)
return status;
sdev = devm_kzalloc(&pdev->dev, sizeof(*sdev), GFP_KERNEL);
if (!sdev)
return -ENOMEM;
platform_set_drvdata(pdev, sdev);
/*
* Initialize IRQs so that we can safely call surface_hotplug_remove()
* on errors.
*/
for (i = 0; i < SHPS_NUM_IRQS; i++)
sdev->irq[i] = SHPS_IRQ_NOT_PRESENT;
/* Set up IRQs. */
for (i = 0; i < SHPS_NUM_IRQS; i++) {
mutex_init(&sdev->lock[i]);
status = shps_setup_irq(pdev, i);
if (status) {
dev_err(&pdev->dev, "failed to set up IRQ %d: %d\n", i, status);
goto err;
}
}
/* Ensure everything is up-to-date. */
for (i = 0; i < SHPS_NUM_IRQS; i++)
if (sdev->irq[i] != SHPS_IRQ_NOT_PRESENT)
shps_dsm_notify_irq(pdev, i);
return 0;
err:
surface_hotplug_remove(pdev);
return status;
}
static const struct acpi_device_id surface_hotplug_acpi_match[] = {
{ "MSHW0153", 0 },
{ },
};
MODULE_DEVICE_TABLE(acpi, surface_hotplug_acpi_match);
static struct platform_driver surface_hotplug_driver = {
.probe = surface_hotplug_probe,
.remove = surface_hotplug_remove,
.driver = {
.name = "surface_hotplug",
.acpi_match_table = surface_hotplug_acpi_match,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
module_platform_driver(surface_hotplug_driver);
MODULE_AUTHOR("Maximilian Luz <[email protected]>");
MODULE_DESCRIPTION("Surface Hot-Plug Signaling Driver for Surface Book Devices");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/surface/surface_hotplug.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Main SSAM/SSH controller structure and functionality.
*
* Copyright (C) 2019-2022 Maximilian Luz <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/serdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/surface_aggregator/controller.h>
#include <linux/surface_aggregator/serial_hub.h>
#include "controller.h"
#include "ssh_msgb.h"
#include "ssh_request_layer.h"
#include "trace.h"
/* -- Safe counters. -------------------------------------------------------- */
/**
* ssh_seq_reset() - Reset/initialize sequence ID counter.
* @c: The counter to reset.
*/
static void ssh_seq_reset(struct ssh_seq_counter *c)
{
WRITE_ONCE(c->value, 0);
}
/**
* ssh_seq_next() - Get next sequence ID.
* @c: The counter providing the sequence IDs.
*
* Return: Returns the next sequence ID of the counter.
*/
static u8 ssh_seq_next(struct ssh_seq_counter *c)
{
u8 old = READ_ONCE(c->value);
u8 new = old + 1;
u8 ret;
while (unlikely((ret = cmpxchg(&c->value, old, new)) != old)) {
old = ret;
new = old + 1;
}
return old;
}
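/*
 * Note: The cmpxchg() loop above is a lock-free fetch-and-increment. It is
 * semantically equivalent to the following sketch (illustration only), minus
 * the lock:
 *
 *	lock();
 *	ret = c->value;
 *	c->value = ret + 1;
 *	unlock();
 *	return ret;
 */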
/**
* ssh_rqid_reset() - Reset/initialize request ID counter.
* @c: The counter to reset.
*/
static void ssh_rqid_reset(struct ssh_rqid_counter *c)
{
WRITE_ONCE(c->value, 0);
}
/**
* ssh_rqid_next() - Get next request ID.
* @c: The counter providing the request IDs.
*
* Return: Returns the next request ID of the counter, skipping any reserved
* request IDs.
*/
static u16 ssh_rqid_next(struct ssh_rqid_counter *c)
{
u16 old = READ_ONCE(c->value);
u16 new = ssh_rqid_next_valid(old);
u16 ret;
while (unlikely((ret = cmpxchg(&c->value, old, new)) != old)) {
old = ret;
new = ssh_rqid_next_valid(old);
}
return old;
}
/* -- Event notifier/callbacks. --------------------------------------------- */
/*
* The notifier system is based on linux/notifier.h, specifically the SRCU
* implementation. The difference to that is, that some bits of the notifier
* call return value can be tracked across multiple calls. This is done so
* that handling of events can be tracked and a warning can be issued in case
* an event goes unhandled. The idea of that warning is that it should help
* discover and identify new/currently unimplemented features.
*/
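/*
 * Sketch of a client notifier callback, as registered via the notifier
 * blocks managed below (illustration only; MY_EXPECTED_CID is a hypothetical
 * constant; see e.g. the tablet mode switch driver for a real example):
 *
 *	static u32 my_notif(struct ssam_event_notifier *nf,
 *			    const struct ssam_event *event)
 *	{
 *		if (event->command_id != MY_EXPECTED_CID)
 *			return 0;                  // unhandled, try next notifier
 *
 *		// ... process event payload ...
 *		return SSAM_NOTIF_HANDLED;         // mark event as handled
 *	}
 *
 * Returning a value with SSAM_NOTIF_STOP set (e.g. via
 * ssam_notifier_from_errno() on error) aborts the remaining chain.
 */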
/**
* ssam_event_matches_notifier() - Test if an event matches a notifier.
* @n: The event notifier to test against.
* @event: The event to test.
*
* Return: Returns %true if the given event matches the given notifier
* according to the rules set in the notifier's event mask, %false otherwise.
*/
static bool ssam_event_matches_notifier(const struct ssam_event_notifier *n,
const struct ssam_event *event)
{
bool match = n->event.id.target_category == event->target_category;
if (n->event.mask & SSAM_EVENT_MASK_TARGET)
match &= n->event.reg.target_id == event->target_id;
if (n->event.mask & SSAM_EVENT_MASK_INSTANCE)
match &= n->event.id.instance == event->instance_id;
return match;
}
/**
* ssam_nfblk_call_chain() - Call event notifier callbacks of the given chain.
* @nh: The notifier head for which the notifier callbacks should be called.
* @event: The event data provided to the callbacks.
*
* Call all registered notifier callbacks in order of their priority until
* either no notifier is left or a notifier returns a value with the
* %SSAM_NOTIF_STOP bit set. Note that this bit is automatically set via
* ssam_notifier_from_errno() on any non-zero error value.
*
* Return: Returns the notifier status value, which contains the notifier
* status bits (%SSAM_NOTIF_HANDLED and %SSAM_NOTIF_STOP) as well as a
* potential error value returned from the last executed notifier callback.
* Use ssam_notifier_to_errno() to convert this value to the original error
* value.
*/
static int ssam_nfblk_call_chain(struct ssam_nf_head *nh, struct ssam_event *event)
{
struct ssam_event_notifier *nf;
int ret = 0, idx;
idx = srcu_read_lock(&nh->srcu);
list_for_each_entry_rcu(nf, &nh->head, base.node,
srcu_read_lock_held(&nh->srcu)) {
if (ssam_event_matches_notifier(nf, event)) {
ret = (ret & SSAM_NOTIF_STATE_MASK) | nf->base.fn(nf, event);
if (ret & SSAM_NOTIF_STOP)
break;
}
}
srcu_read_unlock(&nh->srcu, idx);
return ret;
}
/**
* ssam_nfblk_insert() - Insert a new notifier block into the given notifier
* list.
* @nh: The notifier head into which the block should be inserted.
* @nb: The notifier block to add.
*
* Note: This function must be synchronized by the caller with respect to other
* insert, find, and/or remove calls by holding ``struct ssam_nf.lock``.
*
* Return: Returns zero on success, %-EEXIST if the notifier block has already
* been registered.
*/
static int ssam_nfblk_insert(struct ssam_nf_head *nh, struct ssam_notifier_block *nb)
{
struct ssam_notifier_block *p;
struct list_head *h;
/* Runs under lock, no need for RCU variant. */
list_for_each(h, &nh->head) {
p = list_entry(h, struct ssam_notifier_block, node);
if (unlikely(p == nb)) {
WARN(1, "double register detected");
return -EEXIST;
}
if (nb->priority > p->priority)
break;
}
list_add_tail_rcu(&nb->node, h);
return 0;
}
/**
* ssam_nfblk_find() - Check if a notifier block is registered on the given
* notifier head.
* @nh: The notifier head on which to search.
* @nb: The notifier block to search for.
*
* Note: This function must be synchronized by the caller with respect to other
* insert, find, and/or remove calls by holding ``struct ssam_nf.lock``.
*
* Return: Returns true if the given notifier block is registered on the given
* notifier head, false otherwise.
*/
static bool ssam_nfblk_find(struct ssam_nf_head *nh, struct ssam_notifier_block *nb)
{
struct ssam_notifier_block *p;
/* Runs under lock, no need for RCU variant. */
list_for_each_entry(p, &nh->head, node) {
if (p == nb)
return true;
}
return false;
}
/**
* ssam_nfblk_remove() - Remove a notifier block from its notifier list.
* @nb: The notifier block to be removed.
*
* Note: This function must be synchronized by the caller with respect to
* other insert, find, and/or remove calls by holding ``struct ssam_nf.lock``.
* Furthermore, the caller _must_ ensure SRCU synchronization by calling
* synchronize_srcu() with ``nh->srcu`` after leaving the critical section, to
* ensure that the removed notifier block is not in use any more.
*/
static void ssam_nfblk_remove(struct ssam_notifier_block *nb)
{
list_del_rcu(&nb->node);
}
/**
* ssam_nf_head_init() - Initialize the given notifier head.
 * @nh: The notifier head to initialize.
 *
 * Return: Returns zero on success, or the status of init_srcu_struct() on
 * failure.
 */
static int ssam_nf_head_init(struct ssam_nf_head *nh)
{
int status;
status = init_srcu_struct(&nh->srcu);
if (status)
return status;
INIT_LIST_HEAD(&nh->head);
return 0;
}
/**
* ssam_nf_head_destroy() - Deinitialize the given notifier head.
* @nh: The notifier head to deinitialize.
*/
static void ssam_nf_head_destroy(struct ssam_nf_head *nh)
{
cleanup_srcu_struct(&nh->srcu);
}
/* -- Event/notification registry. ------------------------------------------ */
/**
* struct ssam_nf_refcount_key - Key used for event activation reference
* counting.
* @reg: The registry via which the event is enabled/disabled.
* @id: The ID uniquely describing the event.
*/
struct ssam_nf_refcount_key {
struct ssam_event_registry reg;
struct ssam_event_id id;
};
/**
* struct ssam_nf_refcount_entry - RB-tree entry for reference counting event
* activations.
* @node: The node of this entry in the rb-tree.
* @key: The key of the event.
* @refcount: The reference-count of the event.
* @flags: The flags used when enabling the event.
*/
struct ssam_nf_refcount_entry {
struct rb_node node;
struct ssam_nf_refcount_key key;
int refcount;
u8 flags;
};
/**
* ssam_nf_refcount_inc() - Increment reference-/activation-count of the given
* event.
* @nf: The notifier system reference.
* @reg: The registry used to enable/disable the event.
* @id: The event ID.
*
* Increments the reference-/activation-count associated with the specified
* event type/ID, allocating a new entry for this event ID if necessary. A
* newly allocated entry will have a refcount of one.
*
* Note: ``nf->lock`` must be held when calling this function.
*
* Return: Returns the refcount entry on success. Returns an error pointer
* with %-ENOSPC if there have already been %INT_MAX events of the specified
* ID and type registered, or %-ENOMEM if the entry could not be allocated.
*/
static struct ssam_nf_refcount_entry *
ssam_nf_refcount_inc(struct ssam_nf *nf, struct ssam_event_registry reg,
struct ssam_event_id id)
{
struct ssam_nf_refcount_entry *entry;
struct ssam_nf_refcount_key key;
struct rb_node **link = &nf->refcount.rb_node;
struct rb_node *parent = NULL;
int cmp;
lockdep_assert_held(&nf->lock);
key.reg = reg;
key.id = id;
while (*link) {
entry = rb_entry(*link, struct ssam_nf_refcount_entry, node);
parent = *link;
cmp = memcmp(&key, &entry->key, sizeof(key));
if (cmp < 0) {
link = &(*link)->rb_left;
} else if (cmp > 0) {
link = &(*link)->rb_right;
} else if (entry->refcount < INT_MAX) {
entry->refcount++;
return entry;
} else {
WARN_ON(1);
return ERR_PTR(-ENOSPC);
}
}
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return ERR_PTR(-ENOMEM);
entry->key = key;
entry->refcount = 1;
rb_link_node(&entry->node, parent, link);
rb_insert_color(&entry->node, &nf->refcount);
return entry;
}
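/*
 * Note: Entries are ordered via memcmp() over the raw key bytes. The
 * resulting order is arbitrary but total and consistent, which is all the
 * rb-tree needs since it is only ever used for exact-match lookups.
 */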
/**
* ssam_nf_refcount_dec() - Decrement reference-/activation-count of the given
* event.
* @nf: The notifier system reference.
* @reg: The registry used to enable/disable the event.
* @id: The event ID.
*
* Decrements the reference-/activation-count of the specified event,
* returning its entry. If the returned entry has a refcount of zero, the
* caller is responsible for freeing it using kfree().
*
* Note: ``nf->lock`` must be held when calling this function.
*
* Return: Returns the refcount entry on success or %NULL if the entry has not
* been found.
*/
static struct ssam_nf_refcount_entry *
ssam_nf_refcount_dec(struct ssam_nf *nf, struct ssam_event_registry reg,
struct ssam_event_id id)
{
struct ssam_nf_refcount_entry *entry;
struct ssam_nf_refcount_key key;
struct rb_node *node = nf->refcount.rb_node;
int cmp;
lockdep_assert_held(&nf->lock);
key.reg = reg;
key.id = id;
while (node) {
entry = rb_entry(node, struct ssam_nf_refcount_entry, node);
cmp = memcmp(&key, &entry->key, sizeof(key));
if (cmp < 0) {
node = node->rb_left;
} else if (cmp > 0) {
node = node->rb_right;
} else {
entry->refcount--;
if (entry->refcount == 0)
rb_erase(&entry->node, &nf->refcount);
return entry;
}
}
return NULL;
}
/**
* ssam_nf_refcount_dec_free() - Decrement reference-/activation-count of the
* given event and free its entry if the reference count reaches zero.
* @nf: The notifier system reference.
* @reg: The registry used to enable/disable the event.
* @id: The event ID.
*
* Decrements the reference-/activation-count of the specified event, freeing
* its entry if it reaches zero.
*
* Note: ``nf->lock`` must be held when calling this function.
*/
static void ssam_nf_refcount_dec_free(struct ssam_nf *nf,
struct ssam_event_registry reg,
struct ssam_event_id id)
{
struct ssam_nf_refcount_entry *entry;
lockdep_assert_held(&nf->lock);
entry = ssam_nf_refcount_dec(nf, reg, id);
if (entry && entry->refcount == 0)
kfree(entry);
}
/**
* ssam_nf_refcount_empty() - Test if the notification system has any
* enabled/active events.
* @nf: The notification system.
*/
static bool ssam_nf_refcount_empty(struct ssam_nf *nf)
{
return RB_EMPTY_ROOT(&nf->refcount);
}
/**
* ssam_nf_call() - Call notification callbacks for the provided event.
* @nf: The notifier system
* @dev: The associated device, only used for logging.
* @rqid: The request ID of the event.
* @event: The event provided to the callbacks.
*
* Execute registered callbacks in order of their priority until either no
* callback is left or a callback returns a value with the %SSAM_NOTIF_STOP
* bit set. Note that this bit is set automatically when converting non-zero
* error values via ssam_notifier_from_errno() to notifier values.
*
* Also note that any callback that could handle an event should return a value
* with bit %SSAM_NOTIF_HANDLED set, indicating that the event does not go
* unhandled/ignored. In case no registered callback could handle an event,
* this function will emit a warning.
*
* In case a callback failed, this function will emit an error message.
*/
static void ssam_nf_call(struct ssam_nf *nf, struct device *dev, u16 rqid,
struct ssam_event *event)
{
struct ssam_nf_head *nf_head;
int status, nf_ret;
if (!ssh_rqid_is_event(rqid)) {
dev_warn(dev, "event: unsupported rqid: %#06x\n", rqid);
return;
}
nf_head = &nf->head[ssh_rqid_to_event(rqid)];
nf_ret = ssam_nfblk_call_chain(nf_head, event);
status = ssam_notifier_to_errno(nf_ret);
if (status < 0) {
dev_err(dev,
"event: error handling event: %d (tc: %#04x, tid: %#04x, cid: %#04x, iid: %#04x)\n",
status, event->target_category, event->target_id,
event->command_id, event->instance_id);
} else if (!(nf_ret & SSAM_NOTIF_HANDLED)) {
dev_warn(dev,
"event: unhandled event (rqid: %#04x, tc: %#04x, tid: %#04x, cid: %#04x, iid: %#04x)\n",
rqid, event->target_category, event->target_id,
event->command_id, event->instance_id);
}
}
/**
* ssam_nf_init() - Initialize the notifier system.
 * @nf: The notifier system to initialize.
 *
 * Return: Returns zero on success, a negative error code on failure.
 */
static int ssam_nf_init(struct ssam_nf *nf)
{
	int i, status = 0;
for (i = 0; i < SSH_NUM_EVENTS; i++) {
status = ssam_nf_head_init(&nf->head[i]);
if (status)
break;
}
if (status) {
while (i--)
ssam_nf_head_destroy(&nf->head[i]);
return status;
}
mutex_init(&nf->lock);
return 0;
}
/**
* ssam_nf_destroy() - Deinitialize the notifier system.
* @nf: The notifier system to deinitialize.
*/
static void ssam_nf_destroy(struct ssam_nf *nf)
{
int i;
for (i = 0; i < SSH_NUM_EVENTS; i++)
ssam_nf_head_destroy(&nf->head[i]);
mutex_destroy(&nf->lock);
}
/* -- Event/async request completion system. -------------------------------- */
#define SSAM_CPLT_WQ_NAME "ssam_cpltq"
/*
* SSAM_CPLT_WQ_BATCH - Maximum number of event item completions executed per
* work execution. Used to prevent livelocking of the workqueue. Value chosen
* via educated guess, may be adjusted.
*/
#define SSAM_CPLT_WQ_BATCH 10
/*
* SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN - Maximum payload length for a cached
* &struct ssam_event_item.
*
 * This length has been chosen to accommodate standard touchpad and
* keyboard input events. Events with larger payloads will be allocated
* separately.
*/
#define SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN 32
static struct kmem_cache *ssam_event_item_cache;
/**
* ssam_event_item_cache_init() - Initialize the event item cache.
*/
int ssam_event_item_cache_init(void)
{
const unsigned int size = sizeof(struct ssam_event_item)
+ SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN;
const unsigned int align = __alignof__(struct ssam_event_item);
struct kmem_cache *cache;
cache = kmem_cache_create("ssam_event_item", size, align, 0, NULL);
if (!cache)
return -ENOMEM;
ssam_event_item_cache = cache;
return 0;
}
/**
* ssam_event_item_cache_destroy() - Deinitialize the event item cache.
*/
void ssam_event_item_cache_destroy(void)
{
kmem_cache_destroy(ssam_event_item_cache);
ssam_event_item_cache = NULL;
}
static void __ssam_event_item_free_cached(struct ssam_event_item *item)
{
kmem_cache_free(ssam_event_item_cache, item);
}
static void __ssam_event_item_free_generic(struct ssam_event_item *item)
{
kfree(item);
}
/**
* ssam_event_item_free() - Free the provided event item.
* @item: The event item to free.
*/
static void ssam_event_item_free(struct ssam_event_item *item)
{
trace_ssam_event_item_free(item);
item->ops.free(item);
}
/**
* ssam_event_item_alloc() - Allocate an event item with the given payload size.
* @len: The event payload length.
* @flags: The flags used for allocation.
*
* Allocate an event item with the given payload size, preferring allocation
* from the event item cache if the payload is small enough (i.e. smaller than
* %SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN). Sets the item operations and payload
* length values. The item free callback (``ops.free``) should not be
* overwritten after this call.
*
* Return: Returns the newly allocated event item.
*/
static struct ssam_event_item *ssam_event_item_alloc(size_t len, gfp_t flags)
{
struct ssam_event_item *item;
if (len <= SSAM_EVENT_ITEM_CACHE_PAYLOAD_LEN) {
item = kmem_cache_alloc(ssam_event_item_cache, flags);
if (!item)
return NULL;
item->ops.free = __ssam_event_item_free_cached;
} else {
item = kzalloc(struct_size(item, event.data, len), flags);
if (!item)
return NULL;
item->ops.free = __ssam_event_item_free_generic;
}
item->event.length = len;
trace_ssam_event_item_alloc(item, len);
return item;
}
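/*
 * Note: Unlike the generic path, the cache path above does not zero the
 * allocation. This is fine since all fields (ops.free and event.length here,
 * rqid and the remaining event fields in the caller) are initialized
 * explicitly before use.
 */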
/**
* ssam_event_queue_push() - Push an event item to the event queue.
* @q: The event queue.
* @item: The item to add.
*/
static void ssam_event_queue_push(struct ssam_event_queue *q,
struct ssam_event_item *item)
{
spin_lock(&q->lock);
list_add_tail(&item->node, &q->head);
spin_unlock(&q->lock);
}
/**
* ssam_event_queue_pop() - Pop the next event item from the event queue.
* @q: The event queue.
*
 * Returns and removes the next event item from the queue. Returns %NULL if
* there is no event item left.
*/
static struct ssam_event_item *ssam_event_queue_pop(struct ssam_event_queue *q)
{
struct ssam_event_item *item;
spin_lock(&q->lock);
item = list_first_entry_or_null(&q->head, struct ssam_event_item, node);
if (item)
list_del(&item->node);
spin_unlock(&q->lock);
return item;
}
/**
* ssam_event_queue_is_empty() - Check if the event queue is empty.
* @q: The event queue.
*/
static bool ssam_event_queue_is_empty(struct ssam_event_queue *q)
{
bool empty;
spin_lock(&q->lock);
empty = list_empty(&q->head);
spin_unlock(&q->lock);
return empty;
}
/**
* ssam_cplt_get_event_queue() - Get the event queue for the given parameters.
* @cplt: The completion system on which to look for the queue.
* @tid: The target ID of the queue.
* @rqid: The request ID representing the event ID for which to get the queue.
*
* Return: Returns the event queue corresponding to the event type described
* by the given parameters. If the request ID does not represent an event,
* this function returns %NULL. If the target ID is not supported, this
* function will fall back to the default target ID (``tid = 1``).
*/
static
struct ssam_event_queue *ssam_cplt_get_event_queue(struct ssam_cplt *cplt,
u8 tid, u16 rqid)
{
u16 event = ssh_rqid_to_event(rqid);
u16 tidx = ssh_tid_to_index(tid);
if (!ssh_rqid_is_event(rqid)) {
dev_err(cplt->dev, "event: unsupported request ID: %#06x\n", rqid);
return NULL;
}
if (!ssh_tid_is_valid(tid)) {
dev_warn(cplt->dev, "event: unsupported target ID: %u\n", tid);
tidx = 0;
}
return &cplt->event.target[tidx].queue[event];
}
/**
* ssam_cplt_submit() - Submit a work item to the completion system workqueue.
* @cplt: The completion system.
* @work: The work item to submit.
*/
static bool ssam_cplt_submit(struct ssam_cplt *cplt, struct work_struct *work)
{
return queue_work(cplt->wq, work);
}
/**
* ssam_cplt_submit_event() - Submit an event to the completion system.
* @cplt: The completion system.
* @item: The event item to submit.
*
* Submits the event to the completion system by queuing it on the event item
* queue and queuing the respective event queue work item on the completion
* workqueue, which will eventually complete the event.
*
* Return: Returns zero on success, %-EINVAL if there is no event queue that
* can handle the given event item.
*/
static int ssam_cplt_submit_event(struct ssam_cplt *cplt,
struct ssam_event_item *item)
{
struct ssam_event_queue *evq;
evq = ssam_cplt_get_event_queue(cplt, item->event.target_id, item->rqid);
if (!evq)
return -EINVAL;
ssam_event_queue_push(evq, item);
ssam_cplt_submit(cplt, &evq->work);
return 0;
}
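/*
 * Note: Since each queue is a FIFO processed by a single work item, events
 * with the same target ID and event ID are completed in submission order.
 */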
/**
* ssam_cplt_flush() - Flush the completion system.
* @cplt: The completion system.
*
* Flush the completion system by waiting until all currently submitted work
* items have been completed.
*
* Note: This function does not guarantee that all events will have been
* handled once this call terminates. In case of a larger number of
* to-be-completed events, the event queue work function may re-schedule its
* work item, which this flush operation will ignore.
*
* This operation is only intended to, during normal operation prior to
* shutdown, try to complete most events and requests to get them out of the
* system while the system is still fully operational. It does not aim to
* provide any guarantee that all of them have been handled.
*/
static void ssam_cplt_flush(struct ssam_cplt *cplt)
{
flush_workqueue(cplt->wq);
}
static void ssam_event_queue_work_fn(struct work_struct *work)
{
struct ssam_event_queue *queue;
struct ssam_event_item *item;
struct ssam_nf *nf;
struct device *dev;
unsigned int iterations = SSAM_CPLT_WQ_BATCH;
queue = container_of(work, struct ssam_event_queue, work);
nf = &queue->cplt->event.notif;
dev = queue->cplt->dev;
/* Limit number of processed events to avoid livelocking. */
do {
item = ssam_event_queue_pop(queue);
if (!item)
return;
ssam_nf_call(nf, dev, item->rqid, &item->event);
ssam_event_item_free(item);
} while (--iterations);
if (!ssam_event_queue_is_empty(queue))
ssam_cplt_submit(queue->cplt, &queue->work);
}
/**
* ssam_event_queue_init() - Initialize an event queue.
* @cplt: The completion system on which the queue resides.
* @evq: The event queue to initialize.
*/
static void ssam_event_queue_init(struct ssam_cplt *cplt,
struct ssam_event_queue *evq)
{
evq->cplt = cplt;
spin_lock_init(&evq->lock);
INIT_LIST_HEAD(&evq->head);
INIT_WORK(&evq->work, ssam_event_queue_work_fn);
}
/**
* ssam_cplt_init() - Initialize completion system.
* @cplt: The completion system to initialize.
* @dev: The device used for logging.
*/
static int ssam_cplt_init(struct ssam_cplt *cplt, struct device *dev)
{
struct ssam_event_target *target;
int status, c, i;
cplt->dev = dev;
cplt->wq = alloc_workqueue(SSAM_CPLT_WQ_NAME, WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
if (!cplt->wq)
return -ENOMEM;
for (c = 0; c < ARRAY_SIZE(cplt->event.target); c++) {
target = &cplt->event.target[c];
for (i = 0; i < ARRAY_SIZE(target->queue); i++)
ssam_event_queue_init(cplt, &target->queue[i]);
}
status = ssam_nf_init(&cplt->event.notif);
if (status)
destroy_workqueue(cplt->wq);
return status;
}
/**
* ssam_cplt_destroy() - Deinitialize the completion system.
* @cplt: The completion system to deinitialize.
*
* Deinitialize the given completion system and ensure that all pending, i.e.
* yet-to-be-completed, event items and requests have been handled.
*/
static void ssam_cplt_destroy(struct ssam_cplt *cplt)
{
/*
* Note: destroy_workqueue ensures that all currently queued work will
* be fully completed and the workqueue drained. This means that this
* call will inherently also free any queued ssam_event_items, thus we
* don't have to take care of that here explicitly.
*/
destroy_workqueue(cplt->wq);
ssam_nf_destroy(&cplt->event.notif);
}
/* -- Main SSAM device structures. ------------------------------------------ */
/**
* ssam_controller_device() - Get the &struct device associated with this
* controller.
* @c: The controller for which to get the device.
*
* Return: Returns the &struct device associated with this controller,
* providing its lower-level transport.
*/
struct device *ssam_controller_device(struct ssam_controller *c)
{
return ssh_rtl_get_device(&c->rtl);
}
EXPORT_SYMBOL_GPL(ssam_controller_device);
static void __ssam_controller_release(struct kref *kref)
{
struct ssam_controller *ctrl = to_ssam_controller(kref, kref);
/*
* The lock-call here is to satisfy lockdep. At this point we really
* expect this to be the last remaining reference to the controller.
* Anything else is a bug.
*/
ssam_controller_lock(ctrl);
ssam_controller_destroy(ctrl);
ssam_controller_unlock(ctrl);
kfree(ctrl);
}
/**
* ssam_controller_get() - Increment reference count of controller.
* @c: The controller.
*
* Return: Returns the controller provided as input.
*/
struct ssam_controller *ssam_controller_get(struct ssam_controller *c)
{
if (c)
kref_get(&c->kref);
return c;
}
EXPORT_SYMBOL_GPL(ssam_controller_get);
/**
* ssam_controller_put() - Decrement reference count of controller.
* @c: The controller.
*/
void ssam_controller_put(struct ssam_controller *c)
{
if (c)
kref_put(&c->kref, __ssam_controller_release);
}
EXPORT_SYMBOL_GPL(ssam_controller_put);
/**
* ssam_controller_statelock() - Lock the controller against state transitions.
* @c: The controller to lock.
*
* Lock the controller against state transitions. Holding this lock guarantees
* that the controller will not transition between states, i.e. if the
* controller is in state "started", when this lock has been acquired, it will
* remain in this state at least until the lock has been released.
*
* Multiple clients may concurrently hold this lock. In other words: The
* ``statelock`` functions represent the read-lock part of a r/w-semaphore.
* Actions causing state transitions of the controller must be executed while
* holding the write-part of this r/w-semaphore (see ssam_controller_lock()
* and ssam_controller_unlock() for that).
*
* See ssam_controller_stateunlock() for the corresponding unlock function.
*/
void ssam_controller_statelock(struct ssam_controller *c)
{
down_read(&c->lock);
}
EXPORT_SYMBOL_GPL(ssam_controller_statelock);
/**
* ssam_controller_stateunlock() - Unlock controller state transitions.
* @c: The controller to unlock.
*
* See ssam_controller_statelock() for the corresponding lock function.
*/
void ssam_controller_stateunlock(struct ssam_controller *c)
{
up_read(&c->lock);
}
EXPORT_SYMBOL_GPL(ssam_controller_stateunlock);
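/*
 * Usage sketch (illustration only; do_work() is a hypothetical helper):
 * guard an operation against concurrent controller state transitions.
 *
 *	ssam_controller_statelock(ctrl);
 *
 *	if (ctrl->state == SSAM_CONTROLLER_STARTED)
 *		status = do_work(ctrl);
 *	else
 *		status = -ENODEV;
 *
 *	ssam_controller_stateunlock(ctrl);
 */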
/**
* ssam_controller_lock() - Acquire the main controller lock.
* @c: The controller to lock.
*
* This lock must be held for any state transitions, including transition to
* suspend/resumed states and during shutdown. See ssam_controller_statelock()
* for more details on controller locking.
*
* See ssam_controller_unlock() for the corresponding unlock function.
*/
void ssam_controller_lock(struct ssam_controller *c)
{
down_write(&c->lock);
}
/**
* ssam_controller_unlock() - Release the main controller lock.
* @c: The controller to unlock.
*
* See ssam_controller_lock() for the corresponding lock function.
*/
void ssam_controller_unlock(struct ssam_controller *c)
{
up_write(&c->lock);
}
static void ssam_handle_event(struct ssh_rtl *rtl,
const struct ssh_command *cmd,
const struct ssam_span *data)
{
struct ssam_controller *ctrl = to_ssam_controller(rtl, rtl);
struct ssam_event_item *item;
item = ssam_event_item_alloc(data->len, GFP_KERNEL);
if (!item)
return;
item->rqid = get_unaligned_le16(&cmd->rqid);
item->event.target_category = cmd->tc;
item->event.target_id = cmd->sid;
item->event.command_id = cmd->cid;
item->event.instance_id = cmd->iid;
memcpy(&item->event.data[0], data->ptr, data->len);
if (WARN_ON(ssam_cplt_submit_event(&ctrl->cplt, item)))
ssam_event_item_free(item);
}
static const struct ssh_rtl_ops ssam_rtl_ops = {
.handle_event = ssam_handle_event,
};
static bool ssam_notifier_is_empty(struct ssam_controller *ctrl);
static void ssam_notifier_unregister_all(struct ssam_controller *ctrl);
#define SSAM_SSH_DSM_REVISION 0
/* d5e383e1-d892-4a76-89fc-f6aaae7ed5b5 */
static const guid_t SSAM_SSH_DSM_GUID =
GUID_INIT(0xd5e383e1, 0xd892, 0x4a76,
0x89, 0xfc, 0xf6, 0xaa, 0xae, 0x7e, 0xd5, 0xb5);
enum ssh_dsm_fn {
SSH_DSM_FN_SSH_POWER_PROFILE = 0x05,
SSH_DSM_FN_SCREEN_ON_SLEEP_IDLE_TIMEOUT = 0x06,
SSH_DSM_FN_SCREEN_OFF_SLEEP_IDLE_TIMEOUT = 0x07,
SSH_DSM_FN_D3_CLOSES_HANDLE = 0x08,
SSH_DSM_FN_SSH_BUFFER_SIZE = 0x09,
};
static int ssam_dsm_get_functions(acpi_handle handle, u64 *funcs)
{
union acpi_object *obj;
u64 mask = 0;
int i;
*funcs = 0;
/*
* The _DSM function is only present on newer models. It is not
* present on 5th and 6th generation devices (i.e. up to and including
* Surface Pro 6, Surface Laptop 2, Surface Book 2).
*
* If the _DSM is not present, indicate that no function is supported.
* This will result in default values being set.
*/
if (!acpi_has_method(handle, "_DSM"))
return 0;
obj = acpi_evaluate_dsm_typed(handle, &SSAM_SSH_DSM_GUID,
SSAM_SSH_DSM_REVISION, 0, NULL,
ACPI_TYPE_BUFFER);
if (!obj)
return -EIO;
for (i = 0; i < obj->buffer.length && i < 8; i++)
mask |= (((u64)obj->buffer.pointer[i]) << (i * 8));
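	/*
	 * Per the ACPI _DSM convention, function 0 returns a bitmask of
	 * supported functions; bit 0 set indicates that the _DSM implements
	 * at least one function.
	 */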
if (mask & BIT(0))
*funcs = mask;
ACPI_FREE(obj);
return 0;
}
static int ssam_dsm_load_u32(acpi_handle handle, u64 funcs, u64 func, u32 *ret)
{
union acpi_object *obj;
u64 val;
if (!(funcs & BIT_ULL(func)))
return 0; /* Not supported, leave *ret at its default value */
obj = acpi_evaluate_dsm_typed(handle, &SSAM_SSH_DSM_GUID,
SSAM_SSH_DSM_REVISION, func, NULL,
ACPI_TYPE_INTEGER);
if (!obj)
return -EIO;
val = obj->integer.value;
ACPI_FREE(obj);
if (val > U32_MAX)
return -ERANGE;
*ret = val;
return 0;
}
/**
* ssam_controller_caps_load_from_acpi() - Load controller capabilities from
* ACPI _DSM.
* @handle: The handle of the ACPI controller/SSH device.
* @caps: Where to store the capabilities in.
*
* Initializes the given controller capabilities with default values, then
* checks and, if the respective _DSM functions are available, loads the
* actual capabilities from the _DSM.
*
* Return: Returns zero on success, a negative error code on failure.
*/
static
int ssam_controller_caps_load_from_acpi(acpi_handle handle,
struct ssam_controller_caps *caps)
{
u32 d3_closes_handle = false;
u64 funcs;
int status;
/* Set defaults. */
caps->ssh_power_profile = U32_MAX;
caps->screen_on_sleep_idle_timeout = U32_MAX;
caps->screen_off_sleep_idle_timeout = U32_MAX;
caps->d3_closes_handle = false;
caps->ssh_buffer_size = U32_MAX;
/* Pre-load supported DSM functions. */
status = ssam_dsm_get_functions(handle, &funcs);
if (status)
return status;
/* Load actual values from ACPI, if present. */
status = ssam_dsm_load_u32(handle, funcs, SSH_DSM_FN_SSH_POWER_PROFILE,
&caps->ssh_power_profile);
if (status)
return status;
status = ssam_dsm_load_u32(handle, funcs,
SSH_DSM_FN_SCREEN_ON_SLEEP_IDLE_TIMEOUT,
&caps->screen_on_sleep_idle_timeout);
if (status)
return status;
status = ssam_dsm_load_u32(handle, funcs,
SSH_DSM_FN_SCREEN_OFF_SLEEP_IDLE_TIMEOUT,
&caps->screen_off_sleep_idle_timeout);
if (status)
return status;
status = ssam_dsm_load_u32(handle, funcs, SSH_DSM_FN_D3_CLOSES_HANDLE,
&d3_closes_handle);
if (status)
return status;
caps->d3_closes_handle = !!d3_closes_handle;
status = ssam_dsm_load_u32(handle, funcs, SSH_DSM_FN_SSH_BUFFER_SIZE,
&caps->ssh_buffer_size);
if (status)
return status;
return 0;
}
/**
* ssam_controller_init() - Initialize SSAM controller.
* @ctrl: The controller to initialize.
* @serdev: The serial device representing the underlying data transport.
*
* Initializes the given controller. Does neither start receiver nor
* transmitter threads. After this call, the controller has to be hooked up to
* the serdev core separately via &struct serdev_device_ops, relaying calls to
* ssam_controller_receive_buf() and ssam_controller_write_wakeup(). Once the
* controller has been hooked up, transmitter and receiver threads may be
* started via ssam_controller_start(). These setup steps need to be completed
 * before the controller can be used for requests.
*/
int ssam_controller_init(struct ssam_controller *ctrl,
struct serdev_device *serdev)
{
acpi_handle handle = ACPI_HANDLE(&serdev->dev);
int status;
init_rwsem(&ctrl->lock);
kref_init(&ctrl->kref);
status = ssam_controller_caps_load_from_acpi(handle, &ctrl->caps);
if (status)
return status;
dev_dbg(&serdev->dev,
"device capabilities:\n"
" ssh_power_profile: %u\n"
" ssh_buffer_size: %u\n"
" screen_on_sleep_idle_timeout: %u\n"
" screen_off_sleep_idle_timeout: %u\n"
" d3_closes_handle: %u\n",
ctrl->caps.ssh_power_profile,
ctrl->caps.ssh_buffer_size,
ctrl->caps.screen_on_sleep_idle_timeout,
ctrl->caps.screen_off_sleep_idle_timeout,
ctrl->caps.d3_closes_handle);
ssh_seq_reset(&ctrl->counter.seq);
ssh_rqid_reset(&ctrl->counter.rqid);
/* Initialize event/request completion system. */
status = ssam_cplt_init(&ctrl->cplt, &serdev->dev);
if (status)
return status;
/* Initialize request and packet transport layers. */
status = ssh_rtl_init(&ctrl->rtl, serdev, &ssam_rtl_ops);
if (status) {
ssam_cplt_destroy(&ctrl->cplt);
return status;
}
/*
* Set state via write_once even though we expect to be in an
* exclusive context, due to smoke-testing in
* ssam_request_sync_submit().
*/
WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_INITIALIZED);
return 0;
}
/**
* ssam_controller_start() - Start the receiver and transmitter threads of the
* controller.
* @ctrl: The controller.
*
* Note: When this function is called, the controller should be properly
* hooked up to the serdev core via &struct serdev_device_ops. Please refer
* to ssam_controller_init() for more details on controller initialization.
*
* This function must be called with the main controller lock held (i.e. by
* calling ssam_controller_lock()).
*/
int ssam_controller_start(struct ssam_controller *ctrl)
{
int status;
lockdep_assert_held_write(&ctrl->lock);
if (ctrl->state != SSAM_CONTROLLER_INITIALIZED)
return -EINVAL;
status = ssh_rtl_start(&ctrl->rtl);
if (status)
return status;
/*
* Set state via write_once even though we expect to be locked/in an
* exclusive context, due to smoke-testing in
* ssam_request_sync_submit().
*/
WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_STARTED);
return 0;
}
/*
* SSAM_CTRL_SHUTDOWN_FLUSH_TIMEOUT - Timeout for flushing requests during
* shutdown.
*
* Chosen to be larger than one full request timeout, including packets timing
* out. This value should give ample time to complete any outstanding requests
* during normal operation and account for the odd package timeout.
*/
#define SSAM_CTRL_SHUTDOWN_FLUSH_TIMEOUT msecs_to_jiffies(5000)
/**
* ssam_controller_shutdown() - Shut down the controller.
* @ctrl: The controller.
*
* Shuts down the controller by flushing all pending requests and stopping the
* transmitter and receiver threads. All requests submitted after this call
* will fail with %-ESHUTDOWN. While it is discouraged to do so, this function
* is safe to use in parallel with ongoing request submission.
*
* In the course of this shutdown procedure, all currently registered
* notifiers will be unregistered. It is, however, strongly recommended to not
* rely on this behavior, and instead the party registering the notifier
* should unregister it before the controller gets shut down, e.g. via the
* SSAM bus, which guarantees that client devices are removed before a shutdown.
*
* Note that events may still be pending after this call, but, due to the
* notifiers being unregistered, these events will be dropped when the
* controller is subsequently destroyed via ssam_controller_destroy().
*
* This function must be called with the main controller lock held (i.e. by
* calling ssam_controller_lock()).
*/
void ssam_controller_shutdown(struct ssam_controller *ctrl)
{
enum ssam_controller_state s = ctrl->state;
int status;
lockdep_assert_held_write(&ctrl->lock);
if (s == SSAM_CONTROLLER_UNINITIALIZED || s == SSAM_CONTROLLER_STOPPED)
return;
/*
* Try to flush pending events and requests while everything still
* works. Note: There may still be packets and/or requests in the
* system after this call (e.g. via control packets submitted by the
* packet transport layer or flush timeout / failure, ...). Those will
* be handled with the ssh_rtl_shutdown() call below.
*/
status = ssh_rtl_flush(&ctrl->rtl, SSAM_CTRL_SHUTDOWN_FLUSH_TIMEOUT);
if (status) {
ssam_err(ctrl, "failed to flush request transport layer: %d\n",
status);
}
/* Try to flush all currently completing requests and events. */
ssam_cplt_flush(&ctrl->cplt);
/*
* We expect all notifiers to have been removed by the respective client
* driver that set them up at this point. If this warning occurs, some
* client driver has not done that...
*/
WARN_ON(!ssam_notifier_is_empty(ctrl));
/*
* Nevertheless, we should still take care of drivers that don't behave
* well. Thus disable all enabled events, unregister all notifiers.
*/
ssam_notifier_unregister_all(ctrl);
/*
* Cancel remaining requests. Ensure no new ones can be queued and stop
* threads.
*/
ssh_rtl_shutdown(&ctrl->rtl);
/*
* Set state via write_once even though we expect to be locked/in an
* exclusive context, due to smoke-testing in
* ssam_request_sync_submit().
*/
WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_STOPPED);
ctrl->rtl.ptl.serdev = NULL;
}
/**
* ssam_controller_destroy() - Destroy the controller and free its resources.
* @ctrl: The controller.
*
* Ensures that all resources associated with the controller get freed. This
* function should only be called after the controller has been stopped via
* ssam_controller_shutdown(). In general, this function should not be called
* directly. The only valid place to call this function directly is during
* initialization, before the controller has been fully initialized and passed
* to other processes. This function is called automatically when the
* reference count of the controller reaches zero.
*
* This function must be called with the main controller lock held (i.e. by
* calling ssam_controller_lock()).
*/
void ssam_controller_destroy(struct ssam_controller *ctrl)
{
lockdep_assert_held_write(&ctrl->lock);
if (ctrl->state == SSAM_CONTROLLER_UNINITIALIZED)
return;
WARN_ON(ctrl->state != SSAM_CONTROLLER_STOPPED);
/*
* Note: New events could still have been received after the previous
* flush in ssam_controller_shutdown, before the request transport layer
* has been shut down. At this point, after the shutdown, we can be sure
* that no new events will be queued. The call to ssam_cplt_destroy will
* ensure that those remaining are being completed and freed.
*/
/* Actually free resources. */
ssam_cplt_destroy(&ctrl->cplt);
ssh_rtl_destroy(&ctrl->rtl);
/*
* Set state via write_once even though we expect to be locked/in an
* exclusive context, due to smoke-testing in
* ssam_request_sync_submit().
*/
WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_UNINITIALIZED);
}
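/*
 * Example (illustrative sketch, modeled after the expected kref release
 * path): tear down a previously shut-down controller. Shutdown must have
 * completed beforehand, as ssam_controller_destroy() expects a stopped
 * controller; the lock is taken to satisfy the locking contract above.
 */
static void __maybe_unused ssam_example_release(struct ssam_controller *ctrl)
{
	ssam_controller_lock(ctrl);
	ssam_controller_destroy(ctrl);
	ssam_controller_unlock(ctrl);
}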
/**
* ssam_controller_suspend() - Suspend the controller.
* @ctrl: The controller to suspend.
*
* Marks the controller as suspended. Note that display-off and D0-exit
* notifications have to be sent manually before transitioning the controller
* into the suspended state via this function.
*
* See ssam_controller_resume() for the corresponding resume function.
*
* Return: Returns %-EINVAL if the controller is currently not in the
* "started" state.
*/
int ssam_controller_suspend(struct ssam_controller *ctrl)
{
ssam_controller_lock(ctrl);
if (ctrl->state != SSAM_CONTROLLER_STARTED) {
ssam_controller_unlock(ctrl);
return -EINVAL;
}
ssam_dbg(ctrl, "pm: suspending controller\n");
/*
* Set state via write_once even though we're locked, due to
* smoke-testing in ssam_request_sync_submit().
*/
WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_SUSPENDED);
ssam_controller_unlock(ctrl);
return 0;
}
/**
* ssam_controller_resume() - Resume the controller from suspend.
* @ctrl: The controller to resume.
*
* Resume the controller from the suspended state it was put into via
* ssam_controller_suspend(). This function does not issue display-on and
* D0-entry notifications. If required, those have to be sent manually after
* this call.
*
* Return: Returns %-EINVAL if the controller is currently not suspended.
*/
int ssam_controller_resume(struct ssam_controller *ctrl)
{
ssam_controller_lock(ctrl);
if (ctrl->state != SSAM_CONTROLLER_SUSPENDED) {
ssam_controller_unlock(ctrl);
return -EINVAL;
}
ssam_dbg(ctrl, "pm: resuming controller\n");
/*
* Set state via write_once even though we're locked, due to
* smoke-testing in ssam_request_sync_submit().
*/
WRITE_ONCE(ctrl->state, SSAM_CONTROLLER_STARTED);
ssam_controller_unlock(ctrl);
return 0;
}
/* -- Top-level request interface ------------------------------------------- */
/**
* ssam_request_write_data() - Construct and write SAM request message to
* buffer.
* @buf: The buffer to write the data to.
* @ctrl: The controller via which the request will be sent.
* @spec: The request data and specification.
*
* Constructs a SAM/SSH request message and writes it to the provided buffer.
* The request and transport counters, specifically RQID and SEQ, will be set
* in this call. These counters are obtained from the controller. It is thus
* only valid to send the resulting message via the controller specified here.
*
* For calculation of the required buffer size, refer to the
* SSH_COMMAND_MESSAGE_LENGTH() macro.
*
* Return: Returns the number of bytes used in the buffer on success. Returns
* %-EINVAL if the payload length provided in the request specification is too
* large (larger than %SSH_COMMAND_MAX_PAYLOAD_SIZE) or if the provided buffer
* is too small.
*/
ssize_t ssam_request_write_data(struct ssam_span *buf,
struct ssam_controller *ctrl,
const struct ssam_request *spec)
{
struct msgbuf msgb;
u16 rqid;
u8 seq;
if (spec->length > SSH_COMMAND_MAX_PAYLOAD_SIZE)
return -EINVAL;
if (SSH_COMMAND_MESSAGE_LENGTH(spec->length) > buf->len)
return -EINVAL;
msgb_init(&msgb, buf->ptr, buf->len);
seq = ssh_seq_next(&ctrl->counter.seq);
rqid = ssh_rqid_next(&ctrl->counter.rqid);
msgb_push_cmd(&msgb, seq, rqid, spec);
return msgb_bytes_used(&msgb);
}
EXPORT_SYMBOL_GPL(ssam_request_write_data);
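/*
 * Example (illustrative sketch): sizing and filling a message buffer for
 * ssam_request_write_data(). The request specification is assumed to be set
 * up completely by the caller; only the buffer handling is shown here.
 */
static ssize_t __maybe_unused ssam_example_write_msg(struct ssam_controller *ctrl,
						     const struct ssam_request *spec,
						     u8 *raw, size_t raw_len)
{
	struct ssam_span buf;

	/* The buffer must hold the full message, not just the payload. */
	if (raw_len < SSH_COMMAND_MESSAGE_LENGTH(spec->length))
		return -EINVAL;

	buf.ptr = raw;
	buf.len = raw_len;

	/* Returns the number of message bytes written on success. */
	return ssam_request_write_data(&buf, ctrl, spec);
}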
static void ssam_request_sync_complete(struct ssh_request *rqst,
const struct ssh_command *cmd,
const struct ssam_span *data, int status)
{
struct ssh_rtl *rtl = ssh_request_rtl(rqst);
struct ssam_request_sync *r;
r = container_of(rqst, struct ssam_request_sync, base);
r->status = status;
if (r->resp)
r->resp->length = 0;
if (status) {
rtl_dbg_cond(rtl, "rsp: request failed: %d\n", status);
return;
}
if (!data) /* Handle requests without a response. */
return;
if (!r->resp || !r->resp->pointer) {
if (data->len)
rtl_warn(rtl, "rsp: no response buffer provided, dropping data\n");
return;
}
if (data->len > r->resp->capacity) {
rtl_err(rtl,
"rsp: response buffer too small, capacity: %zu bytes, got: %zu bytes\n",
r->resp->capacity, data->len);
r->status = -ENOSPC;
return;
}
r->resp->length = data->len;
memcpy(r->resp->pointer, data->ptr, data->len);
}
static void ssam_request_sync_release(struct ssh_request *rqst)
{
complete_all(&container_of(rqst, struct ssam_request_sync, base)->comp);
}
static const struct ssh_request_ops ssam_request_sync_ops = {
.release = ssam_request_sync_release,
.complete = ssam_request_sync_complete,
};
/**
* ssam_request_sync_alloc() - Allocate a synchronous request.
* @payload_len: The length of the request payload.
* @flags: Flags used for allocation.
* @rqst: Where to store the pointer to the allocated request.
* @buffer: Where to store the buffer descriptor for the message buffer of
* the request.
*
* Allocates a synchronous request with corresponding message buffer. The
* request still needs to be initialized via ssam_request_sync_init() before
* it can be submitted, and the message buffer data must still be set to the
* returned buffer via ssam_request_sync_set_data() after it has been filled,
* if need be with adjusted message length.
*
* After use, the request and its corresponding message buffer should be freed
* via ssam_request_sync_free(). The buffer must not be freed separately.
*
* Return: Returns zero on success, %-ENOMEM if the request could not be
* allocated.
*/
int ssam_request_sync_alloc(size_t payload_len, gfp_t flags,
struct ssam_request_sync **rqst,
struct ssam_span *buffer)
{
size_t msglen = SSH_COMMAND_MESSAGE_LENGTH(payload_len);
*rqst = kzalloc(sizeof(**rqst) + msglen, flags);
if (!*rqst)
return -ENOMEM;
buffer->ptr = (u8 *)(*rqst + 1);
buffer->len = msglen;
return 0;
}
EXPORT_SYMBOL_GPL(ssam_request_sync_alloc);
/**
* ssam_request_sync_free() - Free a synchronous request.
* @rqst: The request to be freed.
*
* Free a synchronous request and its corresponding buffer allocated with
* ssam_request_sync_alloc(). Do not use for requests allocated on the stack
* or via any other function.
*
* Warning: The caller must ensure that the request is not in use any more.
* I.e. the caller must ensure that it has the only reference to the request
* and the request is not currently pending. This means that the caller has
* either never submitted the request, request submission has failed, or the
* caller has waited until the submitted request has been completed via
* ssam_request_sync_wait().
*/
void ssam_request_sync_free(struct ssam_request_sync *rqst)
{
kfree(rqst);
}
EXPORT_SYMBOL_GPL(ssam_request_sync_free);
/**
* ssam_request_sync_init() - Initialize a synchronous request struct.
* @rqst: The request to initialize.
* @flags: The request flags.
*
* Initializes the given request struct. Does not initialize the request
* message data. This has to be done explicitly after this call via
* ssam_request_sync_set_data() and the actual message data has to be written
* via ssam_request_write_data().
*
* Return: Returns zero on success or %-EINVAL if the given flags are invalid.
*/
int ssam_request_sync_init(struct ssam_request_sync *rqst,
enum ssam_request_flags flags)
{
int status;
status = ssh_request_init(&rqst->base, flags, &ssam_request_sync_ops);
if (status)
return status;
init_completion(&rqst->comp);
rqst->resp = NULL;
rqst->status = 0;
return 0;
}
EXPORT_SYMBOL_GPL(ssam_request_sync_init);
/**
* ssam_request_sync_submit() - Submit a synchronous request.
* @ctrl: The controller with which to submit the request.
* @rqst: The request to submit.
*
* Submit a synchronous request. The request has to be initialized and
* properly set up, including response buffer (may be %NULL if no response is
* expected) and command message data. This function does not wait for the
* request to be completed.
*
* If this function succeeds, ssam_request_sync_wait() must be used to ensure
* that the request has been completed before the response data can be
* accessed and/or the request can be freed. On failure, the request may
* immediately be freed.
*
* This function may only be used if the controller is active, i.e. has been
* initialized and not suspended.
*/
int ssam_request_sync_submit(struct ssam_controller *ctrl,
struct ssam_request_sync *rqst)
{
int status;
/*
* This is only a superficial check. In general, the caller needs to
* ensure that the controller is initialized and is not (and does not
* get) suspended during use, i.e. until the request has been completed
* (if _absolutely_ necessary, by use of ssam_controller_statelock/
* ssam_controller_stateunlock, but something like ssam_client_link
* should be preferred as this needs to last until the request has been
* completed).
*
* Note that it is actually safe to use this function while the
* controller is in the process of being shut down (as ssh_rtl_submit
* is safe with regards to this), but it is generally discouraged to do
* so.
*/
if (WARN_ON(READ_ONCE(ctrl->state) != SSAM_CONTROLLER_STARTED)) {
ssh_request_put(&rqst->base);
return -ENODEV;
}
status = ssh_rtl_submit(&ctrl->rtl, &rqst->base);
ssh_request_put(&rqst->base);
return status;
}
EXPORT_SYMBOL_GPL(ssam_request_sync_submit);
/**
* ssam_request_do_sync() - Execute a synchronous request.
* @ctrl: The controller via which the request will be submitted.
* @spec: The request specification and payload.
* @rsp: The response buffer.
*
* Allocates a synchronous request with its message data buffer on the heap
* via ssam_request_sync_alloc(), fully initializes it via the provided
* request specification, submits it, and finally waits for its completion
* before freeing it and returning its status.
*
* Return: Returns the status of the request or any failure during setup.
*/
int ssam_request_do_sync(struct ssam_controller *ctrl,
const struct ssam_request *spec,
struct ssam_response *rsp)
{
struct ssam_request_sync *rqst;
struct ssam_span buf;
ssize_t len;
int status;
status = ssam_request_sync_alloc(spec->length, GFP_KERNEL, &rqst, &buf);
if (status)
return status;
status = ssam_request_sync_init(rqst, spec->flags);
if (status) {
ssam_request_sync_free(rqst);
return status;
}
ssam_request_sync_set_resp(rqst, rsp);
len = ssam_request_write_data(&buf, ctrl, spec);
if (len < 0) {
ssam_request_sync_free(rqst);
return len;
}
ssam_request_sync_set_data(rqst, buf.ptr, len);
status = ssam_request_sync_submit(ctrl, rqst);
if (!status)
status = ssam_request_sync_wait(rqst);
ssam_request_sync_free(rqst);
return status;
}
EXPORT_SYMBOL_GPL(ssam_request_do_sync);
/**
* ssam_request_do_sync_with_buffer() - Execute a synchronous request with the
* provided buffer as back-end for the message buffer.
* @ctrl: The controller via which the request will be submitted.
* @spec: The request specification and payload.
* @rsp: The response buffer.
* @buf: The buffer for the request message data.
*
* Allocates a synchronous request struct on the stack, fully initializes it
* using the provided buffer as message data buffer, submits it, and then
* waits for its completion before returning its status. The
* SSH_COMMAND_MESSAGE_LENGTH() macro can be used to compute the required
* message buffer size.
*
* This function does essentially the same as ssam_request_do_sync(), but
* instead of dynamically allocating the request and message data buffer, it
* uses the provided message data buffer and stores the (small) request struct
* on the stack.
*
* Return: Returns the status of the request or any failure during setup.
*/
int ssam_request_do_sync_with_buffer(struct ssam_controller *ctrl,
const struct ssam_request *spec,
struct ssam_response *rsp,
struct ssam_span *buf)
{
struct ssam_request_sync rqst;
ssize_t len;
int status;
status = ssam_request_sync_init(&rqst, spec->flags);
if (status)
return status;
ssam_request_sync_set_resp(&rqst, rsp);
len = ssam_request_write_data(buf, ctrl, spec);
if (len < 0)
return len;
ssam_request_sync_set_data(&rqst, buf->ptr, len);
status = ssam_request_sync_submit(ctrl, &rqst);
if (!status)
status = ssam_request_sync_wait(&rqst);
return status;
}
EXPORT_SYMBOL_GPL(ssam_request_do_sync_with_buffer);
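/*
 * Example (illustrative sketch): execute a request with an on-stack message
 * buffer via ssam_request_do_sync_with_buffer(). The command ID below is a
 * placeholder for illustration and does not refer to a real EC command.
 */
static int __maybe_unused ssam_example_request_onstack(struct ssam_controller *ctrl,
							u8 *out, size_t out_cap)
{
	u8 msgbuf[SSH_COMMAND_MESSAGE_LENGTH(0)];
	struct ssam_request rqst;
	struct ssam_response rsp;
	struct ssam_span buf;

	rqst.target_category = SSAM_SSH_TC_SAM;
	rqst.target_id = SSAM_SSH_TID_SAM;
	rqst.command_id = 0x01;		/* Placeholder command ID. */
	rqst.instance_id = 0x00;
	rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
	rqst.length = 0;		/* No payload in this sketch. */
	rqst.payload = NULL;

	rsp.capacity = out_cap;
	rsp.length = 0;
	rsp.pointer = out;

	buf.ptr = msgbuf;
	buf.len = sizeof(msgbuf);

	return ssam_request_do_sync_with_buffer(ctrl, &rqst, &rsp, &buf);
}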
/* -- Internal SAM requests. ------------------------------------------------ */
SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_get_firmware_version, __le32, {
.target_category = SSAM_SSH_TC_SAM,
.target_id = SSAM_SSH_TID_SAM,
.command_id = 0x13,
.instance_id = 0x00,
});
SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_off, u8, {
.target_category = SSAM_SSH_TC_SAM,
.target_id = SSAM_SSH_TID_SAM,
.command_id = 0x15,
.instance_id = 0x00,
});
SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_on, u8, {
.target_category = SSAM_SSH_TC_SAM,
.target_id = SSAM_SSH_TID_SAM,
.command_id = 0x16,
.instance_id = 0x00,
});
SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_exit, u8, {
.target_category = SSAM_SSH_TC_SAM,
.target_id = SSAM_SSH_TID_SAM,
.command_id = 0x33,
.instance_id = 0x00,
});
SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_entry, u8, {
.target_category = SSAM_SSH_TC_SAM,
.target_id = SSAM_SSH_TID_SAM,
.command_id = 0x34,
.instance_id = 0x00,
});
/**
* struct ssh_notification_params - Command payload to enable/disable SSH
* notifications.
* @target_category: The target category for which notifications should be
* enabled/disabled.
* @flags: Flags determining how notifications are being sent.
* @request_id: The request ID that is used to send these notifications.
* @instance_id: The specific instance in the given target category for
* which notifications should be enabled.
*/
struct ssh_notification_params {
u8 target_category;
u8 flags;
__le16 request_id;
u8 instance_id;
} __packed;
static_assert(sizeof(struct ssh_notification_params) == 5);
static int __ssam_ssh_event_request(struct ssam_controller *ctrl,
struct ssam_event_registry reg, u8 cid,
struct ssam_event_id id, u8 flags)
{
struct ssh_notification_params params;
struct ssam_request rqst;
struct ssam_response result;
int status;
u16 rqid = ssh_tc_to_rqid(id.target_category);
u8 buf = 0;
/* Only allow RQIDs that lie within the event spectrum. */
if (!ssh_rqid_is_event(rqid))
return -EINVAL;
params.target_category = id.target_category;
params.instance_id = id.instance;
params.flags = flags;
put_unaligned_le16(rqid, &params.request_id);
rqst.target_category = reg.target_category;
rqst.target_id = reg.target_id;
rqst.command_id = cid;
rqst.instance_id = 0x00;
rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
rqst.length = sizeof(params);
rqst.payload = (u8 *)&params;
result.capacity = sizeof(buf);
result.length = 0;
result.pointer = &buf;
status = ssam_retry(ssam_request_do_sync_onstack, ctrl, &rqst, &result,
sizeof(params));
return status < 0 ? status : buf;
}
/**
* ssam_ssh_event_enable() - Enable SSH event.
* @ctrl: The controller for which to enable the event.
* @reg: The event registry describing what request to use for enabling and
* disabling the event.
* @id: The event identifier.
* @flags: The event flags.
*
* Enables the specified event on the EC. This function does not manage
* reference counting of enabled events and is basically only a wrapper for
* the raw EC request. If the specified event is already enabled, the EC will
* ignore this request.
*
* Return: Returns the status of the executed SAM request (zero on success and
* negative on direct failure) or %-EPROTO if the request response indicates a
* failure.
*/
static int ssam_ssh_event_enable(struct ssam_controller *ctrl,
struct ssam_event_registry reg,
struct ssam_event_id id, u8 flags)
{
int status;
status = __ssam_ssh_event_request(ctrl, reg, reg.cid_enable, id, flags);
if (status < 0 && status != -EINVAL) {
ssam_err(ctrl,
"failed to enable event source (tc: %#04x, iid: %#04x, reg: %#04x)\n",
id.target_category, id.instance, reg.target_category);
}
if (status > 0) {
ssam_err(ctrl,
"unexpected result while enabling event source: %#04x (tc: %#04x, iid: %#04x, reg: %#04x)\n",
status, id.target_category, id.instance, reg.target_category);
return -EPROTO;
}
return status;
}
/**
* ssam_ssh_event_disable() - Disable SSH event.
* @ctrl: The controller for which to disable the event.
* @reg: The event registry describing what request to use for enabling and
* disabling the event (must be same as used when enabling the event).
* @id: The event identifier.
* @flags: The event flags (likely ignored for disabling of events).
*
* Disables the specified event on the EC. This function does not manage
* reference counting of enabled events and is basically only a wrapper for
* the raw EC request. If the specified event is already disabled, the EC will
* ignore this request.
*
* Return: Returns the status of the executed SAM request (zero on success and
* negative on direct failure) or %-EPROTO if the request response indicates a
* failure.
*/
static int ssam_ssh_event_disable(struct ssam_controller *ctrl,
struct ssam_event_registry reg,
struct ssam_event_id id, u8 flags)
{
int status;
status = __ssam_ssh_event_request(ctrl, reg, reg.cid_disable, id, flags);
if (status < 0 && status != -EINVAL) {
ssam_err(ctrl,
"failed to disable event source (tc: %#04x, iid: %#04x, reg: %#04x)\n",
id.target_category, id.instance, reg.target_category);
}
if (status > 0) {
ssam_err(ctrl,
"unexpected result while disabling event source: %#04x (tc: %#04x, iid: %#04x, reg: %#04x)\n",
status, id.target_category, id.instance, reg.target_category);
return -EPROTO;
}
return status;
}
/* -- Wrappers for internal SAM requests. ----------------------------------- */
/**
* ssam_get_firmware_version() - Get the SAM/EC firmware version.
* @ctrl: The controller.
* @version: Where to store the version number.
*
* Return: Returns zero on success or the status of the executed SAM request
* if that request failed.
*/
int ssam_get_firmware_version(struct ssam_controller *ctrl, u32 *version)
{
__le32 __version;
int status;
status = ssam_retry(ssam_ssh_get_firmware_version, ctrl, &__version);
if (status)
return status;
*version = le32_to_cpu(__version);
return 0;
}
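/*
 * Example (illustrative sketch): query and log the SAM firmware version via
 * the wrapper above. Errors are already logged by the request machinery and
 * are simply ignored here.
 */
static void __maybe_unused ssam_example_log_fw_version(struct ssam_controller *ctrl)
{
	u32 version;

	if (!ssam_get_firmware_version(ctrl, &version))
		ssam_dbg(ctrl, "SAM firmware version: %#010x\n", version);
}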
/**
* ssam_ctrl_notif_display_off() - Notify EC that the display has been turned
* off.
* @ctrl: The controller.
*
* Notify the EC that the display has been turned off and the driver may enter
* a lower-power state. This will prevent events from being sent directly.
* Rather, the EC signals an event by pulling the wakeup GPIO high for as long
* as there are pending events. The events then need to be manually released,
* one by one, via the GPIO callback request. All pending events accumulated
* during this state can also be released by issuing the display-on
* notification, e.g. via ssam_ctrl_notif_display_on(), which will also reset
* the GPIO.
*
* On some devices, specifically ones with an integrated keyboard, the keyboard
* backlight will be turned off by this call.
*
* This function will only send the display-off notification command if
* display notifications are supported by the EC. Currently all known devices
* support these notifications.
*
* Use ssam_ctrl_notif_display_on() to reverse the effects of this function.
*
* Return: Returns zero on success or if no request has been executed, the
* status of the executed SAM request if that request failed, or %-EPROTO if
* an unexpected response has been received.
*/
int ssam_ctrl_notif_display_off(struct ssam_controller *ctrl)
{
int status;
u8 response;
ssam_dbg(ctrl, "pm: notifying display off\n");
status = ssam_retry(ssam_ssh_notif_display_off, ctrl, &response);
if (status)
return status;
if (response != 0) {
ssam_err(ctrl, "unexpected response from display-off notification: %#04x\n",
response);
return -EPROTO;
}
return 0;
}
/**
* ssam_ctrl_notif_display_on() - Notify EC that the display has been turned on.
* @ctrl: The controller.
*
* Notify the EC that the display has been turned back on and the driver has
* exited its lower-power state. This notification is the counterpart to the
* display-off notification sent via ssam_ctrl_notif_display_off() and will
* reverse its effects, including resetting events to their default behavior.
*
* This function will only send the display-on notification command if display
* notifications are supported by the EC. Currently all known devices support
* these notifications.
*
* See ssam_ctrl_notif_display_off() for more details.
*
* Return: Returns zero on success or if no request has been executed, the
* status of the executed SAM request if that request failed, or %-EPROTO if
* an unexpected response has been received.
*/
int ssam_ctrl_notif_display_on(struct ssam_controller *ctrl)
{
int status;
u8 response;
ssam_dbg(ctrl, "pm: notifying display on\n");
status = ssam_retry(ssam_ssh_notif_display_on, ctrl, &response);
if (status)
return status;
if (response != 0) {
ssam_err(ctrl, "unexpected response from display-on notification: %#04x\n",
response);
return -EPROTO;
}
return 0;
}
/**
* ssam_ctrl_notif_d0_exit() - Notify EC that the driver/device exits the D0
* power state.
* @ctrl: The controller
*
* Notifies the EC that the driver prepares to exit the D0 power state in
* favor of a lower-power state. Exact effects of this function related to the
* EC are currently unknown.
*
* This function will only send the D0-exit notification command if D0-state
* notifications are supported by the EC. Only newer Surface generations
* support these notifications.
*
* Use ssam_ctrl_notif_d0_entry() to reverse the effects of this function.
*
* Return: Returns zero on success or if no request has been executed, the
* status of the executed SAM request if that request failed, or %-EPROTO if
* an unexpected response has been received.
*/
int ssam_ctrl_notif_d0_exit(struct ssam_controller *ctrl)
{
int status;
u8 response;
if (!ctrl->caps.d3_closes_handle)
return 0;
ssam_dbg(ctrl, "pm: notifying D0 exit\n");
status = ssam_retry(ssam_ssh_notif_d0_exit, ctrl, &response);
if (status)
return status;
if (response != 0) {
ssam_err(ctrl, "unexpected response from D0-exit notification: %#04x\n",
response);
return -EPROTO;
}
return 0;
}
/**
* ssam_ctrl_notif_d0_entry() - Notify EC that the driver/device enters the D0
* power state.
* @ctrl: The controller
*
* Notifies the EC that the driver has exited a lower-power state and entered
* the D0 power state. Exact effects of this function related to the EC are
* currently unknown.
*
* This function will only send the D0-entry notification command if D0-state
* notifications are supported by the EC. Only newer Surface generations
* support these notifications.
*
* See ssam_ctrl_notif_d0_exit() for more details.
*
* Return: Returns zero on success or if no request has been executed, the
* status of the executed SAM request if that request failed, or %-EPROTO if
* an unexpected response has been received.
*/
int ssam_ctrl_notif_d0_entry(struct ssam_controller *ctrl)
{
int status;
u8 response;
if (!ctrl->caps.d3_closes_handle)
return 0;
ssam_dbg(ctrl, "pm: notifying D0 entry\n");
status = ssam_retry(ssam_ssh_notif_d0_entry, ctrl, &response);
if (status)
return status;
if (response != 0) {
ssam_err(ctrl, "unexpected response from D0-entry notification: %#04x\n",
response);
return -EPROTO;
}
return 0;
}
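/*
 * Example (illustrative sketch): a suspend sequence combining the
 * notification helpers above with the controller state transition, roughly
 * mirroring what the driver's PM callbacks are expected to do. On failure,
 * previously sent notifications are reverted.
 */
static int __maybe_unused ssam_example_pm_suspend(struct ssam_controller *ctrl)
{
	int status;

	/* Inform the EC before transitioning into the suspended state. */
	status = ssam_ctrl_notif_display_off(ctrl);
	if (status)
		return status;

	status = ssam_ctrl_notif_d0_exit(ctrl);
	if (status)
		goto err_d0;

	status = ssam_controller_suspend(ctrl);
	if (status)
		goto err_suspend;

	return 0;

err_suspend:
	ssam_ctrl_notif_d0_entry(ctrl);
err_d0:
	ssam_ctrl_notif_display_on(ctrl);
	return status;
}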
/* -- Top-level event registry interface. ----------------------------------- */
/**
* ssam_nf_refcount_enable() - Enable event for reference count entry if it has
* not already been enabled.
* @ctrl: The controller to enable the event on.
* @entry: The reference count entry for the event to be enabled.
* @flags: The flags used for enabling the event on the EC.
*
* Enable the event associated with the given reference count entry if the
* reference count equals one, i.e. the event has not previously been enabled.
* If the event has already been enabled (i.e. reference count not equal to
* one), check that the flags used for enabling match and warn about this if
* they do not.
*
* This does not modify the reference count itself, which is done with
* ssam_nf_refcount_inc() / ssam_nf_refcount_dec().
*
* Note: ``nf->lock`` must be held when calling this function.
*
* Return: Returns zero on success. If the event is enabled by this call,
* returns the status of the event-enable EC command.
*/
static int ssam_nf_refcount_enable(struct ssam_controller *ctrl,
struct ssam_nf_refcount_entry *entry, u8 flags)
{
const struct ssam_event_registry reg = entry->key.reg;
const struct ssam_event_id id = entry->key.id;
struct ssam_nf *nf = &ctrl->cplt.event.notif;
int status;
lockdep_assert_held(&nf->lock);
ssam_dbg(ctrl, "enabling event (reg: %#04x, tc: %#04x, iid: %#04x, rc: %d)\n",
reg.target_category, id.target_category, id.instance, entry->refcount);
if (entry->refcount == 1) {
status = ssam_ssh_event_enable(ctrl, reg, id, flags);
if (status)
return status;
entry->flags = flags;
} else if (entry->flags != flags) {
ssam_warn(ctrl,
"inconsistent flags when enabling event: got %#04x, expected %#04x (reg: %#04x, tc: %#04x, iid: %#04x)\n",
flags, entry->flags, reg.target_category, id.target_category,
id.instance);
}
return 0;
}
/**
* ssam_nf_refcount_disable_free() - Disable event for reference count entry if
* it is no longer in use and free the corresponding entry.
* @ctrl: The controller to disable the event on.
* @entry: The reference count entry for the event to be disabled.
* @flags: The flags used for enabling the event on the EC.
* @ec: Flag specifying if the event should actually be disabled on the EC.
*
* If ``ec`` equals ``true`` and the reference count equals zero (i.e. the
* event is no longer requested by any client), the specified event will be
* disabled on the EC via the corresponding request.
*
* If ``ec`` equals ``false``, no request will be sent to the EC and the event
* can be considered in a detached state (i.e. no longer used but still
* enabled). Disabling an event via this method may be required for
* hot-removable devices, where event disable requests may time out after the
* device has been physically removed.
*
* In both cases, if the reference count equals zero, the corresponding
* reference count entry will be freed. The reference count entry must not be
* used any more after a call to this function.
*
* Also checks if the flags used for disabling the event match the flags used
* for enabling the event and warns if they do not (regardless of reference
* count).
*
* This does not modify the reference count itself, which is done with
* ssam_nf_refcount_inc() / ssam_nf_refcount_dec().
*
* Note: ``nf->lock`` must be held when calling this function.
*
* Return: Returns zero on success. If the event is disabled by this call,
* returns the status of the event-disable EC command.
*/
static int ssam_nf_refcount_disable_free(struct ssam_controller *ctrl,
struct ssam_nf_refcount_entry *entry, u8 flags, bool ec)
{
const struct ssam_event_registry reg = entry->key.reg;
const struct ssam_event_id id = entry->key.id;
struct ssam_nf *nf = &ctrl->cplt.event.notif;
int status = 0;
lockdep_assert_held(&nf->lock);
ssam_dbg(ctrl, "%s event (reg: %#04x, tc: %#04x, iid: %#04x, rc: %d)\n",
ec ? "disabling" : "detaching", reg.target_category, id.target_category,
id.instance, entry->refcount);
if (entry->flags != flags) {
ssam_warn(ctrl,
"inconsistent flags when disabling event: got %#04x, expected %#04x (reg: %#04x, tc: %#04x, iid: %#04x)\n",
flags, entry->flags, reg.target_category, id.target_category,
id.instance);
}
if (ec && entry->refcount == 0) {
status = ssam_ssh_event_disable(ctrl, reg, id, flags);
kfree(entry);
}
return status;
}
/**
* ssam_notifier_register() - Register an event notifier.
* @ctrl: The controller to register the notifier on.
* @n: The event notifier to register.
*
* Register an event notifier. Increment the usage counter of the associated
* SAM event if the notifier is not marked as an observer. If the event is not
* marked as an observer and is currently not enabled, it will be enabled
* during this call. If the notifier is marked as an observer, no attempt will
* be made at enabling any event and no reference count will be modified.
*
* Notifiers marked as observers do not need to be associated with one specific
* event, i.e. as long as no event matching is performed, only the event target
* category needs to be set.
*
* Return: Returns zero on success, %-ENOSPC if there have already been
* %INT_MAX notifiers for the event ID/type associated with the notifier block
* registered, %-ENOMEM if the corresponding event entry could not be
* allocated. If this is the first time that a notifier block is registered
* for the specific associated event, returns the status of the event-enable
* EC-command.
*/
int ssam_notifier_register(struct ssam_controller *ctrl, struct ssam_event_notifier *n)
{
u16 rqid = ssh_tc_to_rqid(n->event.id.target_category);
struct ssam_nf_refcount_entry *entry = NULL;
struct ssam_nf_head *nf_head;
struct ssam_nf *nf;
int status;
if (!ssh_rqid_is_event(rqid))
return -EINVAL;
nf = &ctrl->cplt.event.notif;
nf_head = &nf->head[ssh_rqid_to_event(rqid)];
mutex_lock(&nf->lock);
if (!(n->flags & SSAM_EVENT_NOTIFIER_OBSERVER)) {
entry = ssam_nf_refcount_inc(nf, n->event.reg, n->event.id);
if (IS_ERR(entry)) {
mutex_unlock(&nf->lock);
return PTR_ERR(entry);
}
}
status = ssam_nfblk_insert(nf_head, &n->base);
if (status) {
if (entry)
ssam_nf_refcount_dec_free(nf, n->event.reg, n->event.id);
mutex_unlock(&nf->lock);
return status;
}
if (entry) {
status = ssam_nf_refcount_enable(ctrl, entry, n->event.flags);
if (status) {
ssam_nfblk_remove(&n->base);
ssam_nf_refcount_dec_free(nf, n->event.reg, n->event.id);
mutex_unlock(&nf->lock);
synchronize_srcu(&nf_head->srcu);
return status;
}
}
mutex_unlock(&nf->lock);
return 0;
}
EXPORT_SYMBOL_GPL(ssam_notifier_register);
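/*
 * Example (illustrative sketch): setting up and registering an event
 * notifier. The callback signature, SSAM_EVENT_REGISTRY_SAM,
 * SSAM_EVENT_MASK_NONE, and SSAM_NOTIF_HANDLED are assumed from the public
 * controller interface; the target category below is only a placeholder.
 */
static u32 ssam_example_notif_fn(struct ssam_notifier_block *nb,
				 const struct ssam_event *event)
{
	/* Inspect event->command_id / event->data here. */
	return SSAM_NOTIF_HANDLED;
}

static int __maybe_unused ssam_example_register_notifier(struct ssam_controller *ctrl,
							 struct ssam_event_notifier *n)
{
	n->base.priority = 0;
	n->base.fn = ssam_example_notif_fn;

	n->event.reg = SSAM_EVENT_REGISTRY_SAM;
	n->event.id.target_category = SSAM_SSH_TC_SAM;	/* Placeholder. */
	n->event.id.instance = 0x00;
	n->event.mask = SSAM_EVENT_MASK_NONE;
	n->event.flags = SSAM_EVENT_SEQUENCED;
	n->flags = 0;

	return ssam_notifier_register(ctrl, n);
}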
/**
* __ssam_notifier_unregister() - Unregister an event notifier.
* @ctrl: The controller the notifier has been registered on.
* @n: The event notifier to unregister.
* @disable: Whether to disable the corresponding event on the EC.
*
* Unregister an event notifier. Decrement the usage counter of the associated
* SAM event if the notifier is not marked as an observer. If the usage counter
* reaches zero and ``disable`` equals ``true``, the event will be disabled.
*
* Useful for hot-removable devices, where communication may fail once the
* device has been physically removed. In that case, specifying ``disable`` as
* ``false`` avoids communication with the EC.
*
* Return: Returns zero on success, %-ENOENT if the given notifier block has
* not been registered on the controller. If the given notifier block was the
* last one associated with its specific event, returns the status of the
* event-disable EC-command.
*/
int __ssam_notifier_unregister(struct ssam_controller *ctrl, struct ssam_event_notifier *n,
bool disable)
{
u16 rqid = ssh_tc_to_rqid(n->event.id.target_category);
struct ssam_nf_refcount_entry *entry;
struct ssam_nf_head *nf_head;
struct ssam_nf *nf;
int status = 0;
if (!ssh_rqid_is_event(rqid))
return -EINVAL;
nf = &ctrl->cplt.event.notif;
nf_head = &nf->head[ssh_rqid_to_event(rqid)];
mutex_lock(&nf->lock);
if (!ssam_nfblk_find(nf_head, &n->base)) {
mutex_unlock(&nf->lock);
return -ENOENT;
}
/*
* If this is an observer notifier, do not attempt to disable the
* event, just remove it.
*/
if (!(n->flags & SSAM_EVENT_NOTIFIER_OBSERVER)) {
entry = ssam_nf_refcount_dec(nf, n->event.reg, n->event.id);
if (WARN_ON(!entry)) {
/*
* If this does not return an entry, there's a logic
* error somewhere: The notifier block is registered,
* but the event refcount entry is not there. Remove
* the notifier block anyway.
*/
status = -ENOENT;
goto remove;
}
status = ssam_nf_refcount_disable_free(ctrl, entry, n->event.flags, disable);
}
remove:
ssam_nfblk_remove(&n->base);
mutex_unlock(&nf->lock);
synchronize_srcu(&nf_head->srcu);
return status;
}
EXPORT_SYMBOL_GPL(__ssam_notifier_unregister);
/**
* ssam_controller_event_enable() - Enable the specified event.
* @ctrl: The controller to enable the event for.
* @reg: The event registry to use for enabling the event.
* @id: The event ID specifying the event to be enabled.
* @flags: The SAM event flags used for enabling the event.
*
* Increment the event reference count of the specified event. If the event has
* not been enabled previously, it will be enabled by this call.
*
* Note: In general, ssam_notifier_register() with a non-observer notifier
* should be preferred for enabling/disabling events, as this will guarantee
* proper ordering and event forwarding in case of errors during event
* enabling/disabling.
*
* Return: Returns zero on success, %-ENOSPC if the reference count for the
* specified event has reached its maximum, %-ENOMEM if the corresponding event
* entry could not be allocated. If this is the first time that this event has
* been enabled (i.e. the reference count was incremented from zero to one by
* this call), returns the status of the event-enable EC-command.
*/
int ssam_controller_event_enable(struct ssam_controller *ctrl,
struct ssam_event_registry reg,
struct ssam_event_id id, u8 flags)
{
u16 rqid = ssh_tc_to_rqid(id.target_category);
struct ssam_nf *nf = &ctrl->cplt.event.notif;
struct ssam_nf_refcount_entry *entry;
int status;
if (!ssh_rqid_is_event(rqid))
return -EINVAL;
mutex_lock(&nf->lock);
entry = ssam_nf_refcount_inc(nf, reg, id);
if (IS_ERR(entry)) {
mutex_unlock(&nf->lock);
return PTR_ERR(entry);
}
status = ssam_nf_refcount_enable(ctrl, entry, flags);
if (status) {
ssam_nf_refcount_dec_free(nf, reg, id);
mutex_unlock(&nf->lock);
return status;
}
mutex_unlock(&nf->lock);
return 0;
}
EXPORT_SYMBOL_GPL(ssam_controller_event_enable);
/**
* ssam_controller_event_disable() - Disable the specified event.
* @ctrl: The controller to disable the event for.
* @reg: The event registry to use for disabling the event.
* @id: The event ID specifying the event to be disabled.
* @flags: The flags used when enabling the event.
*
* Decrement the reference count of the specified event. If the reference count
* reaches zero, the event will be disabled.
*
* Note: In general, ssam_notifier_register()/ssam_notifier_unregister() with a
* non-observer notifier should be preferred for enabling/disabling events, as
* this will guarantee proper ordering and event forwarding in case of errors
* during event enabling/disabling.
*
* Return: Returns zero on success, %-ENOENT if the given event has not been
* enabled on the controller. If the reference count of the event reaches zero
* during this call, returns the status of the event-disable EC-command.
*/
int ssam_controller_event_disable(struct ssam_controller *ctrl,
struct ssam_event_registry reg,
struct ssam_event_id id, u8 flags)
{
u16 rqid = ssh_tc_to_rqid(id.target_category);
struct ssam_nf *nf = &ctrl->cplt.event.notif;
struct ssam_nf_refcount_entry *entry;
int status;
if (!ssh_rqid_is_event(rqid))
return -EINVAL;
mutex_lock(&nf->lock);
entry = ssam_nf_refcount_dec(nf, reg, id);
if (!entry) {
mutex_unlock(&nf->lock);
return -ENOENT;
}
status = ssam_nf_refcount_disable_free(ctrl, entry, flags, true);
mutex_unlock(&nf->lock);
return status;
}
EXPORT_SYMBOL_GPL(ssam_controller_event_disable);
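/*
 * Example (illustrative sketch): manually enabling and later disabling an
 * event via the reference-counted interface above. Registry, ID, and flags
 * are supplied by the caller; enable and disable must use matching values.
 */
static int __maybe_unused ssam_example_event_toggle(struct ssam_controller *ctrl,
						    struct ssam_event_registry reg,
						    struct ssam_event_id id, u8 flags)
{
	int status;

	status = ssam_controller_event_enable(ctrl, reg, id, flags);
	if (status)
		return status;

	/* ... consume events, e.g. via an observer notifier ... */

	return ssam_controller_event_disable(ctrl, reg, id, flags);
}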
/**
* ssam_notifier_disable_registered() - Disable events for all registered
* notifiers.
* @ctrl: The controller for which to disable the notifiers/events.
*
* Disables events for all currently registered notifiers. In case of an error
* (EC command failing), all previously disabled events will be restored and
* the error code returned.
*
* This function is intended to disable all events prior to hibernation entry.
* See ssam_notifier_restore_registered() to restore/re-enable all events
* disabled with this function.
*
* Note that this function will not disable events for notifiers registered
* after calling this function. It should thus be made sure that no new
* notifiers are going to be added after this call and before the corresponding
* call to ssam_notifier_restore_registered().
*
* Return: Returns zero on success. In case of failure returns the error code
* returned by the failed EC command to disable an event.
*/
int ssam_notifier_disable_registered(struct ssam_controller *ctrl)
{
struct ssam_nf *nf = &ctrl->cplt.event.notif;
struct rb_node *n;
int status;
mutex_lock(&nf->lock);
for (n = rb_first(&nf->refcount); n; n = rb_next(n)) {
struct ssam_nf_refcount_entry *e;
e = rb_entry(n, struct ssam_nf_refcount_entry, node);
status = ssam_ssh_event_disable(ctrl, e->key.reg,
e->key.id, e->flags);
if (status)
goto err;
}
mutex_unlock(&nf->lock);
return 0;
err:
for (n = rb_prev(n); n; n = rb_prev(n)) {
struct ssam_nf_refcount_entry *e;
e = rb_entry(n, struct ssam_nf_refcount_entry, node);
ssam_ssh_event_enable(ctrl, e->key.reg, e->key.id, e->flags);
}
mutex_unlock(&nf->lock);
return status;
}
/**
* ssam_notifier_restore_registered() - Restore/re-enable events for all
* registered notifiers.
* @ctrl: The controller for which to restore the notifiers/events.
*
* Restores/re-enables all events for which notifiers have been registered on
* the given controller. In case of a failure, the error is logged and the
* function continues to try and enable the remaining events.
*
* This function is intended to restore/re-enable all registered events after
* hibernation. See ssam_notifier_disable_registered() for the counterpart
* disabling the events and more details.
*/
void ssam_notifier_restore_registered(struct ssam_controller *ctrl)
{
struct ssam_nf *nf = &ctrl->cplt.event.notif;
struct rb_node *n;
mutex_lock(&nf->lock);
for (n = rb_first(&nf->refcount); n; n = rb_next(n)) {
struct ssam_nf_refcount_entry *e;
e = rb_entry(n, struct ssam_nf_refcount_entry, node);
/* Ignore errors, will get logged in call. */
ssam_ssh_event_enable(ctrl, e->key.reg, e->key.id, e->flags);
}
mutex_unlock(&nf->lock);
}
/**
* ssam_notifier_is_empty() - Check if there are any registered notifiers.
* @ctrl: The controller to check on.
*
* Return: Returns %true if there are currently no notifiers registered on the
* controller, %false otherwise.
*/
static bool ssam_notifier_is_empty(struct ssam_controller *ctrl)
{
struct ssam_nf *nf = &ctrl->cplt.event.notif;
bool result;
mutex_lock(&nf->lock);
result = ssam_nf_refcount_empty(nf);
mutex_unlock(&nf->lock);
return result;
}
/**
* ssam_notifier_unregister_all() - Unregister all currently registered
* notifiers.
* @ctrl: The controller to unregister the notifiers on.
*
* Unregisters all currently registered notifiers. This function is used to
* ensure that all notifiers will be unregistered and associated
* entries/resources freed when the controller is being shut down.
*/
static void ssam_notifier_unregister_all(struct ssam_controller *ctrl)
{
struct ssam_nf *nf = &ctrl->cplt.event.notif;
struct ssam_nf_refcount_entry *e, *n;
mutex_lock(&nf->lock);
rbtree_postorder_for_each_entry_safe(e, n, &nf->refcount, node) {
/* Ignore errors, will get logged in call. */
ssam_ssh_event_disable(ctrl, e->key.reg, e->key.id, e->flags);
kfree(e);
}
nf->refcount = RB_ROOT;
mutex_unlock(&nf->lock);
}
/* -- Wakeup IRQ. ----------------------------------------------------------- */
static irqreturn_t ssam_irq_handle(int irq, void *dev_id)
{
struct ssam_controller *ctrl = dev_id;
ssam_dbg(ctrl, "pm: wake irq triggered\n");
/*
* Note: Proper wakeup detection is currently unimplemented.
* When the EC is in display-off or any other non-D0 state, it
* does not send events/notifications to the host. Instead it
* signals that there are events available via the wakeup IRQ.
* This driver is responsible for calling back to the EC to
* release these events one-by-one.
*
* This IRQ should not cause a full system resume by its own.
* Instead, events should be handled by their respective subsystem
* drivers, which in turn should signal whether a full system
* resume should be performed.
*
* TODO: Send GPIO callback command repeatedly to EC until callback
* returns 0x00. Return flag of callback is "has more events".
* Each time the command is sent, one event is "released". Once
* all events have been released (return = 0x00), the GPIO is
* re-armed. Detect wakeup events during this process, go back to
* sleep if no wakeup event has been received.
*/
return IRQ_HANDLED;
}
/**
* ssam_irq_setup() - Set up SAM EC wakeup-GPIO interrupt.
* @ctrl: The controller for which the IRQ should be set up.
*
* Set up an IRQ for the wakeup-GPIO pin of the SAM EC. This IRQ can be used
* to wake the device from a low power state.
*
* Note that this IRQ can only be triggered while the EC is in the display-off
* state. In this state, events are not sent to the host in the usual way.
* Instead the wakeup-GPIO gets pulled to "high" as long as there are pending
* events and these events need to be released one-by-one via the GPIO
* callback request, either until there are no events left and the GPIO is
* reset, or all at once by transitioning the EC out of the display-off state,
* which will also clear the GPIO.
*
* Not all events, however, should trigger a full system wakeup. Instead the
* driver should, if necessary, inspect and forward each event to the
* corresponding subsystem, which in turn should decide if the system needs to
* be woken up. This logic has not been implemented yet, thus wakeup by this
* IRQ should be disabled by default to avoid spurious wake-ups, caused, for
* example, by the remaining battery percentage changing. Refer to comments in
* this function and comments in the corresponding IRQ handler for more
* details on how this should be implemented.
*
* See also ssam_ctrl_notif_display_off() and ssam_ctrl_notif_display_on()
* for functions to transition the EC into and out of the display-off state as
* well as more details on it.
*
* The IRQ is disabled by default and has to be enabled before it can wake up
* the device from suspend via ssam_irq_arm_for_wakeup(). On teardown, the IRQ
* should be freed via ssam_irq_free().
*/
int ssam_irq_setup(struct ssam_controller *ctrl)
{
struct device *dev = ssam_controller_device(ctrl);
struct gpio_desc *gpiod;
int irq;
int status;
/*
* The actual GPIO interrupt is declared in ACPI as TRIGGER_HIGH.
* However, the GPIO line only gets reset by sending the GPIO callback
* command to SAM (or alternatively the display-on notification). As
* proper handling for this interrupt is not implemented yet, leaving
* the IRQ at TRIGGER_HIGH would cause an IRQ storm (as the callback
* never gets sent and thus the line never gets reset). To avoid this,
* mark the IRQ as TRIGGER_RISING for now, only creating a single
* interrupt, and let the SAM resume callback during the controller
* resume process clear it.
*/
const int irqf = IRQF_ONESHOT | IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN;
gpiod = gpiod_get(dev, "ssam_wakeup-int", GPIOD_ASIS);
if (IS_ERR(gpiod))
return PTR_ERR(gpiod);
irq = gpiod_to_irq(gpiod);
gpiod_put(gpiod);
if (irq < 0)
return irq;
status = request_threaded_irq(irq, NULL, ssam_irq_handle, irqf,
"ssam_wakeup", ctrl);
if (status)
return status;
ctrl->irq.num = irq;
return 0;
}
/**
* ssam_irq_free() - Free SAM EC wakeup-GPIO interrupt.
* @ctrl: The controller for which the IRQ should be freed.
*
* Free the wakeup-GPIO IRQ previously set-up via ssam_irq_setup().
*/
void ssam_irq_free(struct ssam_controller *ctrl)
{
free_irq(ctrl->irq.num, ctrl);
ctrl->irq.num = -1;
}
/**
* ssam_irq_arm_for_wakeup() - Arm the EC IRQ for wakeup, if enabled.
* @ctrl: The controller for which the IRQ should be armed.
*
* Sets up the IRQ so that it can be used to wake the device. Specifically,
* this function enables the irq and then, if the device is allowed to wake up
* the system, calls enable_irq_wake(). See ssam_irq_disarm_wakeup() for the
* corresponding function to disable the IRQ.
*
* This function is intended to arm the IRQ before entering S2idle suspend.
*
* Note: calls to ssam_irq_arm_for_wakeup() and ssam_irq_disarm_wakeup() must
* be balanced.
*/
int ssam_irq_arm_for_wakeup(struct ssam_controller *ctrl)
{
struct device *dev = ssam_controller_device(ctrl);
int status;
enable_irq(ctrl->irq.num);
if (device_may_wakeup(dev)) {
status = enable_irq_wake(ctrl->irq.num);
if (status) {
ssam_err(ctrl, "failed to enable wake IRQ: %d\n", status);
disable_irq(ctrl->irq.num);
return status;
}
ctrl->irq.wakeup_enabled = true;
} else {
ctrl->irq.wakeup_enabled = false;
}
return 0;
}
/**
* ssam_irq_disarm_wakeup() - Disarm the wakeup IRQ.
* @ctrl: The controller for which the IRQ should be disarmed.
*
* Disarm the IRQ previously set up for wake via ssam_irq_arm_for_wakeup().
*
* This function is intended to disarm the IRQ after exiting S2idle suspend.
*
* Note: calls to ssam_irq_arm_for_wakeup() and ssam_irq_disarm_wakeup() must
* be balanced.
*/
void ssam_irq_disarm_wakeup(struct ssam_controller *ctrl)
{
int status;
if (ctrl->irq.wakeup_enabled) {
status = disable_irq_wake(ctrl->irq.num);
if (status)
ssam_err(ctrl, "failed to disable wake IRQ: %d\n", status);
ctrl->irq.wakeup_enabled = false;
}
disable_irq(ctrl->irq.num);
}
| linux-master | drivers/platform/surface/aggregator/controller.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* SSH request transport layer.
*
* Copyright (C) 2019-2022 Maximilian Luz <[email protected]>
*/
#include <asm/unaligned.h>
#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/error-injection.h>
#include <linux/ktime.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/surface_aggregator/serial_hub.h>
#include <linux/surface_aggregator/controller.h>
#include "ssh_packet_layer.h"
#include "ssh_request_layer.h"
#include "trace.h"
/*
* SSH_RTL_REQUEST_TIMEOUT - Request timeout.
*
* Timeout as ktime_t delta for request responses. If we have not received a
* response in this time-frame after finishing the underlying packet
* transmission, the request will be completed with %-ETIMEDOUT as status
* code.
*/
#define SSH_RTL_REQUEST_TIMEOUT ms_to_ktime(3000)
/*
* SSH_RTL_REQUEST_TIMEOUT_RESOLUTION - Request timeout granularity.
*
* Time-resolution for timeouts. Should be larger than one jiffy to avoid
* direct re-scheduling of reaper work_struct.
*/
#define SSH_RTL_REQUEST_TIMEOUT_RESOLUTION ms_to_ktime(max(2000 / HZ, 50))
/*
* SSH_RTL_MAX_PENDING - Maximum number of pending requests.
*
* Maximum number of requests concurrently waiting to be completed (i.e.
* waiting for the corresponding packet transmission to finish if they don't
* have a response or waiting for a response if they have one).
*/
#define SSH_RTL_MAX_PENDING 3
/*
* SSH_RTL_TX_BATCH - Maximum number of requests processed per work execution.
* Used to prevent livelocking of the workqueue. Value chosen via educated
* guess, may be adjusted.
*/
#define SSH_RTL_TX_BATCH 10
#ifdef CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION
/**
* ssh_rtl_should_drop_response() - Error injection hook to drop request
* responses.
*
* Useful to cause request transmission timeouts in the driver by dropping the
* response to a request.
*/
static noinline bool ssh_rtl_should_drop_response(void)
{
return false;
}
ALLOW_ERROR_INJECTION(ssh_rtl_should_drop_response, TRUE);
#else
static inline bool ssh_rtl_should_drop_response(void)
{
return false;
}
#endif
static u16 ssh_request_get_rqid(struct ssh_request *rqst)
{
return get_unaligned_le16(rqst->packet.data.ptr
+ SSH_MSGOFFSET_COMMAND(rqid));
}
static u32 ssh_request_get_rqid_safe(struct ssh_request *rqst)
{
if (!rqst->packet.data.ptr)
return U32_MAX;
return ssh_request_get_rqid(rqst);
}
static void ssh_rtl_queue_remove(struct ssh_request *rqst)
{
struct ssh_rtl *rtl = ssh_request_rtl(rqst);
spin_lock(&rtl->queue.lock);
if (!test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state)) {
spin_unlock(&rtl->queue.lock);
return;
}
list_del(&rqst->node);
spin_unlock(&rtl->queue.lock);
ssh_request_put(rqst);
}
static bool ssh_rtl_queue_empty(struct ssh_rtl *rtl)
{
bool empty;
spin_lock(&rtl->queue.lock);
empty = list_empty(&rtl->queue.head);
spin_unlock(&rtl->queue.lock);
return empty;
}
static void ssh_rtl_pending_remove(struct ssh_request *rqst)
{
struct ssh_rtl *rtl = ssh_request_rtl(rqst);
spin_lock(&rtl->pending.lock);
if (!test_and_clear_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state)) {
spin_unlock(&rtl->pending.lock);
return;
}
atomic_dec(&rtl->pending.count);
list_del(&rqst->node);
spin_unlock(&rtl->pending.lock);
ssh_request_put(rqst);
}
static int ssh_rtl_tx_pending_push(struct ssh_request *rqst)
{
struct ssh_rtl *rtl = ssh_request_rtl(rqst);
spin_lock(&rtl->pending.lock);
if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) {
spin_unlock(&rtl->pending.lock);
return -EINVAL;
}
if (test_and_set_bit(SSH_REQUEST_SF_PENDING_BIT, &rqst->state)) {
spin_unlock(&rtl->pending.lock);
return -EALREADY;
}
atomic_inc(&rtl->pending.count);
list_add_tail(&ssh_request_get(rqst)->node, &rtl->pending.head);
spin_unlock(&rtl->pending.lock);
return 0;
}
static void ssh_rtl_complete_with_status(struct ssh_request *rqst, int status)
{
struct ssh_rtl *rtl = ssh_request_rtl(rqst);
trace_ssam_request_complete(rqst, status);
/* rtl/ptl may not be set if we're canceling before submitting. */
rtl_dbg_cond(rtl, "rtl: completing request (rqid: %#06x, status: %d)\n",
ssh_request_get_rqid_safe(rqst), status);
rqst->ops->complete(rqst, NULL, NULL, status);
}
static void ssh_rtl_complete_with_rsp(struct ssh_request *rqst,
const struct ssh_command *cmd,
const struct ssam_span *data)
{
struct ssh_rtl *rtl = ssh_request_rtl(rqst);
trace_ssam_request_complete(rqst, 0);
rtl_dbg(rtl, "rtl: completing request with response (rqid: %#06x)\n",
ssh_request_get_rqid(rqst));
rqst->ops->complete(rqst, cmd, data, 0);
}
static bool ssh_rtl_tx_can_process(struct ssh_request *rqst)
{
struct ssh_rtl *rtl = ssh_request_rtl(rqst);
if (test_bit(SSH_REQUEST_TY_FLUSH_BIT, &rqst->state))
return !atomic_read(&rtl->pending.count);
return atomic_read(&rtl->pending.count) < SSH_RTL_MAX_PENDING;
}
static struct ssh_request *ssh_rtl_tx_next(struct ssh_rtl *rtl)
{
struct ssh_request *rqst = ERR_PTR(-ENOENT);
struct ssh_request *p, *n;
spin_lock(&rtl->queue.lock);
/* Find first non-locked request and remove it. */
list_for_each_entry_safe(p, n, &rtl->queue.head, node) {
if (unlikely(test_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state)))
continue;
if (!ssh_rtl_tx_can_process(p)) {
rqst = ERR_PTR(-EBUSY);
break;
}
/* Remove from queue and mark as transmitting. */
set_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &p->state);
/* Ensure state never gets zero. */
smp_mb__before_atomic();
clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &p->state);
list_del(&p->node);
rqst = p;
break;
}
spin_unlock(&rtl->queue.lock);
return rqst;
}
static int ssh_rtl_tx_try_process_one(struct ssh_rtl *rtl)
{
struct ssh_request *rqst;
int status;
/* Get and prepare next request for transmit. */
rqst = ssh_rtl_tx_next(rtl);
if (IS_ERR(rqst))
return PTR_ERR(rqst);
/* Add it to/mark it as pending. */
status = ssh_rtl_tx_pending_push(rqst);
if (status) {
ssh_request_put(rqst);
return -EAGAIN;
}
/* Submit packet. */
status = ssh_ptl_submit(&rtl->ptl, &rqst->packet);
if (status == -ESHUTDOWN) {
/*
* Packet has been refused due to the packet layer shutting
* down. Complete it here.
*/
set_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state);
/*
* Note: A barrier is not required here, as there are only two
* references in the system at this point: The one that we have,
* and the other one that belongs to the pending set. Due to the
* request being marked as "transmitting", our process is the
* only one allowed to remove the pending node and change the
* state. Normally, the task would fall to the packet callback,
* but as this is a path where submission failed, this callback
* will never be executed.
*/
ssh_rtl_pending_remove(rqst);
ssh_rtl_complete_with_status(rqst, -ESHUTDOWN);
ssh_request_put(rqst);
return -ESHUTDOWN;
} else if (status) {
/*
* If submitting the packet failed and the packet layer isn't
* shutting down, the packet has either been submitted/queued
* before (-EALREADY, which cannot happen as we have
* guaranteed that requests cannot be re-submitted), or the
* packet was marked as locked (-EINVAL). To mark the packet
* locked at this stage, the request, and thus the packet
* itself, had to have been canceled. Simply drop the
* reference. Cancellation itself will remove it from the set
* of pending requests.
*/
WARN_ON(status != -EINVAL);
ssh_request_put(rqst);
return -EAGAIN;
}
ssh_request_put(rqst);
return 0;
}
static bool ssh_rtl_tx_schedule(struct ssh_rtl *rtl)
{
if (atomic_read(&rtl->pending.count) >= SSH_RTL_MAX_PENDING)
return false;
if (ssh_rtl_queue_empty(rtl))
return false;
return schedule_work(&rtl->tx.work);
}
static void ssh_rtl_tx_work_fn(struct work_struct *work)
{
struct ssh_rtl *rtl = to_ssh_rtl(work, tx.work);
unsigned int iterations = SSH_RTL_TX_BATCH;
int status;
/*
* Try to be nice and not block/live-lock the workqueue: Run a maximum
* of 10 tries, then re-submit if necessary. This should not be
* necessary for normal execution, but guarantee it anyway.
*/
do {
status = ssh_rtl_tx_try_process_one(rtl);
if (status == -ENOENT || status == -EBUSY)
return; /* No more requests to process. */
if (status == -ESHUTDOWN) {
/*
* Packet system shutting down. No new packets can be
* transmitted. Return silently, the party initiating
* the shutdown should handle the rest.
*/
return;
}
WARN_ON(status != 0 && status != -EAGAIN);
} while (--iterations);
/* Out of tries, reschedule. */
ssh_rtl_tx_schedule(rtl);
}
/**
* ssh_rtl_submit() - Submit a request to the transport layer.
* @rtl: The request transport layer.
* @rqst: The request to submit.
*
* Submits a request to the transport layer. A single request may not be
* submitted multiple times without reinitializing it.
*
* Return: Returns zero on success, %-EINVAL if the request type is invalid or
* the request has been canceled prior to submission, %-EALREADY if the
* request has already been submitted, or %-ESHUTDOWN in case the request
* transport layer has been shut down.
*/
int ssh_rtl_submit(struct ssh_rtl *rtl, struct ssh_request *rqst)
{
trace_ssam_request_submit(rqst);
/*
* Ensure that requests expecting a response are sequenced. If this
* invariant ever changes, see the comment in ssh_rtl_complete() on what
* is required to be changed in the code.
*/
if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &rqst->state))
if (!test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &rqst->packet.state))
return -EINVAL;
spin_lock(&rtl->queue.lock);
/*
* Try to set ptl and check if this request has already been submitted.
*
* Must be inside lock as we might run into a lost update problem
* otherwise: If this were outside of the lock, cancellation in
* ssh_rtl_cancel_nonpending() may run after we've set the ptl
* reference but before we enter the lock. In that case, we'd detect
* that the request is being added to the queue and would try to remove
* it from there, but removal might fail because it hasn't actually been
* added yet. By putting this cmpxchg in the critical section, we
* ensure that the queuing detection only triggers when we are already
* in the critical section and the remove process will wait until the
* push operation has been completed (via lock) due to that. Only then,
* we can safely try to remove it.
*/
if (cmpxchg(&rqst->packet.ptl, NULL, &rtl->ptl)) {
spin_unlock(&rtl->queue.lock);
return -EALREADY;
}
/*
* Ensure that we set ptl reference before we continue modifying state.
* This is required for non-pending cancellation. This barrier is paired
* with the one in ssh_rtl_cancel_nonpending().
*
* By setting the ptl reference before we test for "locked", we can
* check if the "locked" test may have already run. See comments in
* ssh_rtl_cancel_nonpending() for more detail.
*/
smp_mb__after_atomic();
if (test_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state)) {
spin_unlock(&rtl->queue.lock);
return -ESHUTDOWN;
}
if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state)) {
spin_unlock(&rtl->queue.lock);
return -EINVAL;
}
set_bit(SSH_REQUEST_SF_QUEUED_BIT, &rqst->state);
list_add_tail(&ssh_request_get(rqst)->node, &rtl->queue.head);
spin_unlock(&rtl->queue.lock);
ssh_rtl_tx_schedule(rtl);
return 0;
}
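/*
 * Illustrative sketch (not part of the driver): a hypothetical caller with
 * an initialized request @rqst and a running layer @rtl would submit as
 * follows, handling the error codes documented above:
 *
 *	status = ssh_rtl_submit(rtl, rqst);
 *	if (status)
 *		return status;
 *
 * Note that the layer takes its own reference for the queue via
 * ssh_request_get(), so the caller's reference stays valid and must still
 * be dropped via ssh_request_put() once the caller is done with the
 * request, as ssh_rtl_flush() below does after submitting.
 */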
static void ssh_rtl_timeout_reaper_mod(struct ssh_rtl *rtl, ktime_t now,
ktime_t expires)
{
unsigned long delta = msecs_to_jiffies(ktime_ms_delta(expires, now));
ktime_t aexp = ktime_add(expires, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION);
spin_lock(&rtl->rtx_timeout.lock);
/* Re-adjust / schedule reaper only if it is above resolution delta. */
if (ktime_before(aexp, rtl->rtx_timeout.expires)) {
rtl->rtx_timeout.expires = expires;
mod_delayed_work(system_wq, &rtl->rtx_timeout.reaper, delta);
}
spin_unlock(&rtl->rtx_timeout.lock);
}
static void ssh_rtl_timeout_start(struct ssh_request *rqst)
{
struct ssh_rtl *rtl = ssh_request_rtl(rqst);
ktime_t timestamp = ktime_get_coarse_boottime();
ktime_t timeout = rtl->rtx_timeout.timeout;
if (test_bit(SSH_REQUEST_SF_LOCKED_BIT, &rqst->state))
return;
/*
* Note: The timestamp gets set only once. This happens on the packet
* callback. All other access to it is read-only.
*/
WRITE_ONCE(rqst->timestamp, timestamp);
/*
* Ensure timestamp is set before starting the reaper. Paired with the
* implicit barrier following the check on ssh_request_get_expiration()
* in ssh_rtl_timeout_reap().
*/
smp_mb__after_atomic();
ssh_rtl_timeout_reaper_mod(rtl, timestamp, timestamp + timeout);
}
static void ssh_rtl_complete(struct ssh_rtl *rtl,
const struct ssh_command *command,
const struct ssam_span *command_data)
{
struct ssh_request *r = NULL;
struct ssh_request *p, *n;
u16 rqid = get_unaligned_le16(&command->rqid);
trace_ssam_rx_response_received(command, command_data->len);
/*
* Get request from pending based on request ID and mark it as response
* received and locked.
*/
spin_lock(&rtl->pending.lock);
list_for_each_entry_safe(p, n, &rtl->pending.head, node) {
/* We generally expect requests to be processed in order. */
if (unlikely(ssh_request_get_rqid(p) != rqid))
continue;
/* Simulate response timeout. */
if (ssh_rtl_should_drop_response()) {
spin_unlock(&rtl->pending.lock);
trace_ssam_ei_rx_drop_response(p);
rtl_info(rtl, "request error injection: dropping response for request %p\n",
&p->packet);
return;
}
/*
* Mark as "response received" and "locked" as we're going to
* complete it.
*/
set_bit(SSH_REQUEST_SF_LOCKED_BIT, &p->state);
set_bit(SSH_REQUEST_SF_RSPRCVD_BIT, &p->state);
/* Ensure state never gets zero. */
smp_mb__before_atomic();
clear_bit(SSH_REQUEST_SF_PENDING_BIT, &p->state);
atomic_dec(&rtl->pending.count);
list_del(&p->node);
r = p;
break;
}
spin_unlock(&rtl->pending.lock);
if (!r) {
rtl_warn(rtl, "rtl: dropping unexpected command message (rqid = %#06x)\n",
rqid);
return;
}
/* If the request hasn't been completed yet, we will do this now. */
if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state)) {
ssh_request_put(r);
ssh_rtl_tx_schedule(rtl);
return;
}
/*
* Make sure the request has been transmitted. In case of a sequenced
* request, we are guaranteed that the completion callback will run on
* the receiver thread directly when the ACK for the packet has been
* received. Similarly, this function is guaranteed to run on the
* receiver thread. Thus we are guaranteed that if the packet has been
* successfully transmitted and received an ACK, the transmitted flag
* has been set and is visible here.
*
* We are currently not handling unsequenced packets here, as those
* should never expect a response, as ensured in ssh_rtl_submit(). If this
* ever changes, one would have to test for
*
* (r->state & (transmitting | transmitted))
*
* on unsequenced packets to determine if they could have been
* transmitted. There are no synchronization guarantees as in the
* sequenced case, since, in this case, the callback function will not
* run on the same thread. Thus an exact determination is impossible.
*/
if (!test_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state)) {
rtl_err(rtl, "rtl: received response before ACK for request (rqid = %#06x)\n",
rqid);
/*
* NB: Timeout has already been canceled, request already been
* removed from pending and marked as locked and completed. As
* we receive a "false" response, the packet might still be
* queued though.
*/
ssh_rtl_queue_remove(r);
ssh_rtl_complete_with_status(r, -EREMOTEIO);
ssh_request_put(r);
ssh_rtl_tx_schedule(rtl);
return;
}
/*
* NB: Timeout has already been canceled, request already been
* removed from pending and marked as locked and completed. The request
* can also not be queued any more, as it has been marked as
* transmitting and later transmitted. Thus no need to remove it from
* anywhere.
*/
ssh_rtl_complete_with_rsp(r, command, command_data);
ssh_request_put(r);
ssh_rtl_tx_schedule(rtl);
}
static bool ssh_rtl_cancel_nonpending(struct ssh_request *r)
{
struct ssh_rtl *rtl;
unsigned long flags, fixed;
bool remove;
/*
* Handle unsubmitted request: Try to mark the packet as locked,
* expecting the state to be zero (i.e. unsubmitted). Note that, if
* setting the state worked, we might still be adding the packet to the
* queue in a currently executing submit call. In that case, however,
* ptl reference must have been set previously, as locked is checked
* after setting ptl. Furthermore, when the ptl reference is set, the
* submission process is guaranteed to have entered the critical
* section. Thus only if we successfully locked this request and ptl is
* NULL, we have successfully removed the request, i.e. we are
* guaranteed that, due to the "locked" check in ssh_rtl_submit(), the
* packet will never be added. Otherwise, we need to try and grab it
* from the queue, where we are now guaranteed that the packet is, or
* has been, queued due to the critical section.
*
* Note that if the cmpxchg() fails, we are guaranteed that ptl has
* been set and is non-NULL, as states can only be nonzero after this
* has been set. Also note that we need to fetch the static (type)
* flags to ensure that they don't cause the cmpxchg() to fail.
*/
fixed = READ_ONCE(r->state) & SSH_REQUEST_FLAGS_TY_MASK;
flags = cmpxchg(&r->state, fixed, fixed | BIT(SSH_REQUEST_SF_LOCKED_BIT));
/*
* Force correct ordering with regards to state and ptl reference access
* to safeguard cancellation against concurrent submission (a
* lost-update problem). First try to exchange state, then also check
* ptl if that worked. This barrier is paired with the
* one in ssh_rtl_submit().
*/
smp_mb__after_atomic();
if (flags == fixed && !READ_ONCE(r->packet.ptl)) {
if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
return true;
ssh_rtl_complete_with_status(r, -ECANCELED);
return true;
}
rtl = ssh_request_rtl(r);
spin_lock(&rtl->queue.lock);
/*
* Note: 1) Requests cannot be re-submitted. 2) If a request is
* queued, it cannot be "transmitting"/"pending" yet. Thus, if we
* successfully remove the request here, we have removed all its
* occurrences in the system.
*/
remove = test_and_clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state);
if (!remove) {
spin_unlock(&rtl->queue.lock);
return false;
}
set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
list_del(&r->node);
spin_unlock(&rtl->queue.lock);
ssh_request_put(r); /* Drop reference obtained from queue. */
if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
return true;
ssh_rtl_complete_with_status(r, -ECANCELED);
return true;
}
static bool ssh_rtl_cancel_pending(struct ssh_request *r)
{
/* If the request is already locked, it's going to be removed shortly. */
if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state))
return true;
/*
* Now that we have locked the packet, we have guaranteed that it can't
* be added to the system any more. If ptl is NULL, the locked
* check in ssh_rtl_submit() has not been run and any submission,
* currently in progress or called later, won't add the packet. Thus we
* can directly complete it.
*
* The implicit memory barrier of test_and_set_bit() should be enough
* to guarantee the correct order (first lock, then check ptl). This is
* paired with the barrier in ssh_rtl_submit().
*/
if (!READ_ONCE(r->packet.ptl)) {
if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
return true;
ssh_rtl_complete_with_status(r, -ECANCELED);
return true;
}
/*
* Try to cancel the packet. If the packet has not been completed yet,
* this will subsequently (and synchronously) call the completion
* callback of the packet, which will complete the request.
*/
ssh_ptl_cancel(&r->packet);
/*
* If the packet has been completed with success, i.e. has not been
* canceled by the above call, the request may not have been completed
* yet (may be waiting for a response). Check if we need to do this
* here.
*/
if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
return true;
ssh_rtl_queue_remove(r);
ssh_rtl_pending_remove(r);
ssh_rtl_complete_with_status(r, -ECANCELED);
return true;
}
/**
* ssh_rtl_cancel() - Cancel request.
* @rqst: The request to cancel.
* @pending: Whether to also cancel pending requests.
*
* Cancels the given request. If @pending is %false, this will not cancel
* pending requests, i.e. requests that have already been submitted to the
* packet layer but not been completed yet. If @pending is %true, this will
* cancel the given request regardless of the state it is in.
*
* If the request has been canceled by calling this function, both completion
* and release callbacks of the request will be executed in a reasonable
* time-frame. This may happen during execution of this function, however,
* there is no guarantee for this. For example, a request currently
* transmitting will be canceled/completed only after transmission has
* completed, and the respective callbacks will be executed on the transmitter
* thread, which may happen during, but also some time after execution of the
* cancel function.
*
* Return: Returns %true if the given request has been canceled or completed,
* either by this function or prior to calling this function, %false
* otherwise. If @pending is %true, this function will always return %true.
*/
bool ssh_rtl_cancel(struct ssh_request *rqst, bool pending)
{
struct ssh_rtl *rtl;
bool canceled;
if (test_and_set_bit(SSH_REQUEST_SF_CANCELED_BIT, &rqst->state))
return true;
trace_ssam_request_cancel(rqst);
if (pending)
canceled = ssh_rtl_cancel_pending(rqst);
else
canceled = ssh_rtl_cancel_nonpending(rqst);
/* Note: rtl may be NULL if request has not been submitted yet. */
rtl = ssh_request_rtl(rqst);
if (canceled && rtl)
ssh_rtl_tx_schedule(rtl);
return canceled;
}
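/*
 * Illustrative sketch (not part of the driver): mirroring what
 * ssh_rtl_flush() below does on timeout, a hypothetical caller that wants
 * to give up on a request unconditionally would combine cancellation with
 * a wait on a completion signaled from the request's release() callback:
 *
 *	ssh_rtl_cancel(rqst, true);
 *	wait_for_completion(&done);
 *
 * Here, @done is hypothetical and corresponds to the completion used by
 * ssh_rtl_flush_request_release().
 */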
static void ssh_rtl_packet_callback(struct ssh_packet *p, int status)
{
struct ssh_request *r = to_ssh_request(p);
if (unlikely(status)) {
set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
return;
/*
* The packet may get canceled even though it has not been
* submitted yet. The request may still be queued. Check the
* queue and remove it if necessary. As the timeout would have
* been started in this function on success, there's no need
* to cancel it here.
*/
ssh_rtl_queue_remove(r);
ssh_rtl_pending_remove(r);
ssh_rtl_complete_with_status(r, status);
ssh_rtl_tx_schedule(ssh_request_rtl(r));
return;
}
/* Update state: Mark as transmitted and clear transmitting. */
set_bit(SSH_REQUEST_SF_TRANSMITTED_BIT, &r->state);
/* Ensure state never gets zero. */
smp_mb__before_atomic();
clear_bit(SSH_REQUEST_SF_TRANSMITTING_BIT, &r->state);
/* If we expect a response, we just need to start the timeout. */
if (test_bit(SSH_REQUEST_TY_HAS_RESPONSE_BIT, &r->state)) {
/*
* Note: This is the only place where the timestamp gets set,
* all other access to it is read-only.
*/
ssh_rtl_timeout_start(r);
return;
}
/*
* If we don't expect a response, lock, remove, and complete the
* request. Note that, at this point, the request is guaranteed to have
* left the queue and no timeout has been started. Thus we only need to
* remove it from pending. If the request has already been completed (it
* may have been canceled), return.
*/
set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
if (test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
return;
ssh_rtl_pending_remove(r);
ssh_rtl_complete_with_status(r, 0);
ssh_rtl_tx_schedule(ssh_request_rtl(r));
}
static ktime_t ssh_request_get_expiration(struct ssh_request *r, ktime_t timeout)
{
ktime_t timestamp = READ_ONCE(r->timestamp);
if (timestamp != KTIME_MAX)
return ktime_add(timestamp, timeout);
else
return KTIME_MAX;
}
static void ssh_rtl_timeout_reap(struct work_struct *work)
{
struct ssh_rtl *rtl = to_ssh_rtl(work, rtx_timeout.reaper.work);
struct ssh_request *r, *n;
LIST_HEAD(claimed);
ktime_t now = ktime_get_coarse_boottime();
ktime_t timeout = rtl->rtx_timeout.timeout;
ktime_t next = KTIME_MAX;
trace_ssam_rtl_timeout_reap(atomic_read(&rtl->pending.count));
/*
* Mark reaper as "not pending". This is done before checking any
* requests to avoid lost-update type problems.
*/
spin_lock(&rtl->rtx_timeout.lock);
rtl->rtx_timeout.expires = KTIME_MAX;
spin_unlock(&rtl->rtx_timeout.lock);
spin_lock(&rtl->pending.lock);
list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
ktime_t expires = ssh_request_get_expiration(r, timeout);
/*
* Check if the timeout hasn't expired yet. Find out next
* expiration date to be handled after this run.
*/
if (ktime_after(expires, now)) {
next = ktime_before(expires, next) ? expires : next;
continue;
}
/* Avoid further transitions if locked. */
if (test_and_set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state))
continue;
/*
* We have now marked the packet as locked. Thus it cannot be
* added to the pending or queued lists again after we've
* removed it here. We can therefore re-use the node of this
* packet temporarily.
*/
clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state);
atomic_dec(&rtl->pending.count);
list_move_tail(&r->node, &claimed);
}
spin_unlock(&rtl->pending.lock);
/* Cancel and complete the request. */
list_for_each_entry_safe(r, n, &claimed, node) {
trace_ssam_request_timeout(r);
/*
* At this point we've removed the packet from pending. This
* means that we've obtained the last (only) reference of the
* system to it. Thus we can just complete it.
*/
if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
ssh_rtl_complete_with_status(r, -ETIMEDOUT);
/*
* Drop the reference we've obtained by removing it from the
* pending set.
*/
list_del(&r->node);
ssh_request_put(r);
}
/* Ensure that the reaper doesn't run again immediately. */
next = max(next, ktime_add(now, SSH_RTL_REQUEST_TIMEOUT_RESOLUTION));
if (next != KTIME_MAX)
ssh_rtl_timeout_reaper_mod(rtl, now, next);
ssh_rtl_tx_schedule(rtl);
}
static void ssh_rtl_rx_event(struct ssh_rtl *rtl, const struct ssh_command *cmd,
const struct ssam_span *data)
{
trace_ssam_rx_event_received(cmd, data->len);
rtl_dbg(rtl, "rtl: handling event (rqid: %#06x)\n",
get_unaligned_le16(&cmd->rqid));
rtl->ops.handle_event(rtl, cmd, data);
}
static void ssh_rtl_rx_command(struct ssh_ptl *p, const struct ssam_span *data)
{
struct ssh_rtl *rtl = to_ssh_rtl(p, ptl);
struct device *dev = &p->serdev->dev;
struct ssh_command *command;
struct ssam_span command_data;
if (sshp_parse_command(dev, data, &command, &command_data))
return;
/*
* Check if the message was intended for us. If not, drop it.
*
* Note: We will need to change this to handle debug messages. On newer
* generation devices, these seem to be sent to SSAM_SSH_TID_DEBUG. We
* as host can still receive them as they can be forwarded via an
* override option on SAM, but doing so does not change the target ID
* to SSAM_SSH_TID_HOST.
*/
if (command->tid != SSAM_SSH_TID_HOST) {
rtl_warn(rtl, "rtl: dropping message not intended for us (tid = %#04x)\n",
command->tid);
return;
}
if (ssh_rqid_is_event(get_unaligned_le16(&command->rqid)))
ssh_rtl_rx_event(rtl, command, &command_data);
else
ssh_rtl_complete(rtl, command, &command_data);
}
static void ssh_rtl_rx_data(struct ssh_ptl *p, const struct ssam_span *data)
{
if (!data->len) {
ptl_err(p, "rtl: rx: no data frame payload\n");
return;
}
switch (data->ptr[0]) {
case SSH_PLD_TYPE_CMD:
ssh_rtl_rx_command(p, data);
break;
default:
ptl_err(p, "rtl: rx: unknown frame payload type (type: %#04x)\n",
data->ptr[0]);
break;
}
}
static void ssh_rtl_packet_release(struct ssh_packet *p)
{
struct ssh_request *rqst;
rqst = to_ssh_request(p);
rqst->ops->release(rqst);
}
static const struct ssh_packet_ops ssh_rtl_packet_ops = {
.complete = ssh_rtl_packet_callback,
.release = ssh_rtl_packet_release,
};
/**
* ssh_request_init() - Initialize SSH request.
* @rqst: The request to initialize.
* @flags: Request flags, determining the type of the request.
* @ops: Request operations.
*
* Initializes the given SSH request and underlying packet. Sets the message
* buffer pointer to %NULL and the message buffer length to zero. This buffer
* has to be set separately via ssh_request_set_data() before submission and
* must contain a valid SSH request message.
*
* Return: Returns zero on success or %-EINVAL if the given flags are invalid.
*/
int ssh_request_init(struct ssh_request *rqst, enum ssam_request_flags flags,
const struct ssh_request_ops *ops)
{
unsigned long type = BIT(SSH_PACKET_TY_BLOCKING_BIT);
/* Unsequenced requests cannot have a response. */
if (flags & SSAM_REQUEST_UNSEQUENCED && flags & SSAM_REQUEST_HAS_RESPONSE)
return -EINVAL;
if (!(flags & SSAM_REQUEST_UNSEQUENCED))
type |= BIT(SSH_PACKET_TY_SEQUENCED_BIT);
ssh_packet_init(&rqst->packet, type, SSH_PACKET_PRIORITY(DATA, 0),
&ssh_rtl_packet_ops);
INIT_LIST_HEAD(&rqst->node);
rqst->state = 0;
if (flags & SSAM_REQUEST_HAS_RESPONSE)
rqst->state |= BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT);
rqst->timestamp = KTIME_MAX;
rqst->ops = ops;
return 0;
}
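/*
 * Illustrative sketch (not part of the driver): initializing a sequenced
 * request that expects a response. Only the combination of
 * SSAM_REQUEST_UNSEQUENCED and SSAM_REQUEST_HAS_RESPONSE is rejected;
 * @my_request_ops is a hypothetical table providing the complete() and
 * release() callbacks:
 *
 *	status = ssh_request_init(rqst, SSAM_REQUEST_HAS_RESPONSE,
 *				  &my_request_ops);
 *	if (status)
 *		return status;
 *
 * The message buffer must then be set via ssh_request_set_data() before
 * the request may be submitted.
 */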
/**
* ssh_rtl_init() - Initialize request transport layer.
* @rtl: The request transport layer to initialize.
* @serdev: The underlying serial device, i.e. the lower-level transport.
* @ops: Request transport layer operations.
*
* Initializes the given request transport layer and associated packet
* transport layer. Transmitter and receiver threads must be started
* separately via ssh_rtl_start(), after the request-layer has been
* initialized and the lower-level serial device layer has been set up.
*
* Return: Returns zero on success and a nonzero error code on failure.
*/
int ssh_rtl_init(struct ssh_rtl *rtl, struct serdev_device *serdev,
const struct ssh_rtl_ops *ops)
{
struct ssh_ptl_ops ptl_ops;
int status;
ptl_ops.data_received = ssh_rtl_rx_data;
status = ssh_ptl_init(&rtl->ptl, serdev, &ptl_ops);
if (status)
return status;
spin_lock_init(&rtl->queue.lock);
INIT_LIST_HEAD(&rtl->queue.head);
spin_lock_init(&rtl->pending.lock);
INIT_LIST_HEAD(&rtl->pending.head);
atomic_set_release(&rtl->pending.count, 0);
INIT_WORK(&rtl->tx.work, ssh_rtl_tx_work_fn);
spin_lock_init(&rtl->rtx_timeout.lock);
rtl->rtx_timeout.timeout = SSH_RTL_REQUEST_TIMEOUT;
rtl->rtx_timeout.expires = KTIME_MAX;
INIT_DELAYED_WORK(&rtl->rtx_timeout.reaper, ssh_rtl_timeout_reap);
rtl->ops = *ops;
return 0;
}
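/*
 * Illustrative sketch (not part of the driver): the expected bring-up
 * sequence, following the ordering requirements documented above; @rtl,
 * @serdev, and @rtl_ops are hypothetical here:
 *
 *	status = ssh_rtl_init(rtl, serdev, &rtl_ops);
 *	if (status)
 *		return status;
 *
 *	status = ssh_rtl_start(rtl);
 *	if (status)
 *		ssh_rtl_destroy(rtl);
 *
 * Teardown reverses this: ssh_rtl_shutdown() first, then
 * ssh_rtl_destroy().
 */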
/**
* ssh_rtl_destroy() - Deinitialize request transport layer.
* @rtl: The request transport layer to deinitialize.
*
* Deinitializes the given request transport layer and frees resources
* associated with it. If receiver and/or transmitter threads have been
* started, the layer must first be shut down via ssh_rtl_shutdown() before
* this function can be called.
*/
void ssh_rtl_destroy(struct ssh_rtl *rtl)
{
ssh_ptl_destroy(&rtl->ptl);
}
/**
* ssh_rtl_start() - Start request transmitter and receiver.
* @rtl: The request transport layer.
*
* Return: Returns zero on success, a negative error code on failure.
*/
int ssh_rtl_start(struct ssh_rtl *rtl)
{
int status;
status = ssh_ptl_tx_start(&rtl->ptl);
if (status)
return status;
ssh_rtl_tx_schedule(rtl);
status = ssh_ptl_rx_start(&rtl->ptl);
if (status) {
ssh_rtl_flush(rtl, msecs_to_jiffies(5000));
ssh_ptl_tx_stop(&rtl->ptl);
return status;
}
return 0;
}
struct ssh_flush_request {
struct ssh_request base;
struct completion completion;
int status;
};
static void ssh_rtl_flush_request_complete(struct ssh_request *r,
const struct ssh_command *cmd,
const struct ssam_span *data,
int status)
{
struct ssh_flush_request *rqst;
rqst = container_of(r, struct ssh_flush_request, base);
rqst->status = status;
}
static void ssh_rtl_flush_request_release(struct ssh_request *r)
{
struct ssh_flush_request *rqst;
rqst = container_of(r, struct ssh_flush_request, base);
complete_all(&rqst->completion);
}
static const struct ssh_request_ops ssh_rtl_flush_request_ops = {
.complete = ssh_rtl_flush_request_complete,
.release = ssh_rtl_flush_request_release,
};
/**
* ssh_rtl_flush() - Flush the request transport layer.
* @rtl: request transport layer
* @timeout: timeout for the flush operation in jiffies
*
* Queue a special flush request and wait for its completion. This request
* will be completed after all other currently queued and pending requests
* have been completed. Instead of a normal data packet, this request submits
* a special flush packet, meaning that upon completion, the underlying
* packet transport layer has been flushed as well.
*
* Flushing the request layer guarantees that all previously submitted
* requests have been fully completed before this call returns. Additionally,
* flushing blocks execution of all later submitted requests until the flush
* has been completed.
*
* If the caller ensures that no new requests are submitted after a call to
* this function, the request transport layer is guaranteed to have no
* remaining requests when this call returns. The same guarantee does not hold
* for the packet layer, on which control packets may still be queued after
* this call.
*
* Return: Returns zero on success, %-ETIMEDOUT if the flush timed out and has
* been canceled as a result of the timeout, or %-ESHUTDOWN if the packet
* and/or request transport layer has been shut down before this call. May
* also return %-EINTR if the underlying packet transmission has been
* interrupted.
*/
int ssh_rtl_flush(struct ssh_rtl *rtl, unsigned long timeout)
{
const unsigned int init_flags = SSAM_REQUEST_UNSEQUENCED;
struct ssh_flush_request rqst;
int status;
ssh_request_init(&rqst.base, init_flags, &ssh_rtl_flush_request_ops);
rqst.base.packet.state |= BIT(SSH_PACKET_TY_FLUSH_BIT);
rqst.base.packet.priority = SSH_PACKET_PRIORITY(FLUSH, 0);
rqst.base.state |= BIT(SSH_REQUEST_TY_FLUSH_BIT);
init_completion(&rqst.completion);
status = ssh_rtl_submit(rtl, &rqst.base);
if (status)
return status;
ssh_request_put(&rqst.base);
if (!wait_for_completion_timeout(&rqst.completion, timeout)) {
ssh_rtl_cancel(&rqst.base, true);
wait_for_completion(&rqst.completion);
}
WARN_ON(rqst.status != 0 && rqst.status != -ECANCELED &&
rqst.status != -ESHUTDOWN && rqst.status != -EINTR);
return rqst.status == -ECANCELED ? -ETIMEDOUT : rqst.status;
}
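/*
 * Illustrative sketch (not part of the driver): a hypothetical caller
 * draining the layer before a state transition, using the same five
 * second timeout as ssh_rtl_start() above:
 *
 *	status = ssh_rtl_flush(rtl, msecs_to_jiffies(5000));
 *	if (status == -ETIMEDOUT)
 *		rtl_err(rtl, "rtl: flush timed out\n");
 */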
/**
* ssh_rtl_shutdown() - Shut down request transport layer.
* @rtl: The request transport layer.
*
* Shuts down the request transport layer, removing and canceling all queued
* and pending requests. Requests canceled by this operation will be completed
* with %-ESHUTDOWN as status. Receiver and transmitter threads will be
* stopped, and the lower-level packet layer will be shut down.
*
* As a result of this function, the transport layer will be marked as shut
* down. Submission of requests after the transport layer has been shut down
* will fail with %-ESHUTDOWN.
*/
void ssh_rtl_shutdown(struct ssh_rtl *rtl)
{
struct ssh_request *r, *n;
LIST_HEAD(claimed);
int pending;
set_bit(SSH_RTL_SF_SHUTDOWN_BIT, &rtl->state);
/*
* Ensure that the layer gets marked as shut-down before actually
* stopping it. In combination with the check in ssh_rtl_submit(),
* this guarantees that no new requests can be added and all already
* queued requests are properly canceled.
*/
smp_mb__after_atomic();
/* Remove requests from queue. */
spin_lock(&rtl->queue.lock);
list_for_each_entry_safe(r, n, &rtl->queue.head, node) {
set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
/* Ensure state never gets zero. */
smp_mb__before_atomic();
clear_bit(SSH_REQUEST_SF_QUEUED_BIT, &r->state);
list_move_tail(&r->node, &claimed);
}
spin_unlock(&rtl->queue.lock);
/*
* We have now guaranteed that the queue is empty and no more new
* requests can be submitted (i.e. it will stay empty). This means that
* calling ssh_rtl_tx_schedule() will not schedule tx.work any more. So
* we can simply call cancel_work_sync() on tx.work here and when that
* returns, we've locked it down. This also means that after this call,
* we don't submit any more packets to the underlying packet layer, so
* we can also shut that down.
*/
cancel_work_sync(&rtl->tx.work);
ssh_ptl_shutdown(&rtl->ptl);
cancel_delayed_work_sync(&rtl->rtx_timeout.reaper);
/*
* Shutting down the packet layer should also have canceled all
* requests. Thus the pending set should be empty. Attempt to handle
* this gracefully anyways, even though this should be dead code.
*/
pending = atomic_read(&rtl->pending.count);
if (WARN_ON(pending)) {
spin_lock(&rtl->pending.lock);
list_for_each_entry_safe(r, n, &rtl->pending.head, node) {
set_bit(SSH_REQUEST_SF_LOCKED_BIT, &r->state);
/* Ensure state never gets zero. */
smp_mb__before_atomic();
clear_bit(SSH_REQUEST_SF_PENDING_BIT, &r->state);
list_move_tail(&r->node, &claimed);
}
spin_unlock(&rtl->pending.lock);
}
/* Finally, cancel and complete the requests we claimed before. */
list_for_each_entry_safe(r, n, &claimed, node) {
/*
* We need test_and_set() because we still might compete with
* cancellation.
*/
if (!test_and_set_bit(SSH_REQUEST_SF_COMPLETED_BIT, &r->state))
ssh_rtl_complete_with_status(r, -ESHUTDOWN);
/*
* Drop the reference we've obtained by removing it from the
* lists.
*/
list_del(&r->node);
ssh_request_put(r);
}
}
| linux-master | drivers/platform/surface/aggregator/ssh_request_layer.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* SSH packet transport layer.
*
* Copyright (C) 2019-2022 Maximilian Luz <[email protected]>
*/
#include <asm/unaligned.h>
#include <linux/atomic.h>
#include <linux/error-injection.h>
#include <linux/jiffies.h>
#include <linux/kfifo.h>
#include <linux/kref.h>
#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/serdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/surface_aggregator/serial_hub.h>
#include "ssh_msgb.h"
#include "ssh_packet_layer.h"
#include "ssh_parser.h"
#include "trace.h"
/*
* To simplify reasoning about the code below, we define a few concepts. The
* system below is similar to a state-machine for packets, however, there are
* too many states to explicitly write them down. To (somewhat) manage the
* states and packets we rely on flags, reference counting, and some simple
* concepts. State transitions are triggered by actions.
*
* >> Actions <<
*
* - submit
* - transmission start (process next item in queue)
* - transmission finished (guaranteed to never be parallel to transmission
* start)
* - ACK received
* - NAK received (this is equivalent to issuing re-submit for all pending
* packets)
* - timeout (this is equivalent to re-issuing a submit or canceling)
* - cancel (non-pending and pending)
*
* >> Data Structures, Packet Ownership, General Overview <<
*
* The code below employs two main data structures: The packet queue,
* containing all packets scheduled for transmission, and the set of pending
* packets, containing all packets awaiting an ACK.
*
* Shared ownership of a packet is controlled via reference counting. Inside
* the transport system are a total of five packet owners:
*
* - the packet queue,
* - the pending set,
* - the transmitter thread,
* - the receiver thread (via ACKing), and
* - the timeout work item.
*
* Normal operation is as follows: The initial reference of the packet is
* obtained by submitting the packet and queuing it. The transmitter thread
* takes packets from the queue. By doing this, it does not increment the
* refcount but takes over the reference (removing it from the queue). If the
* packet is sequenced (i.e. needs to be ACKed by the client), the transmitter
* thread sets up the timeout and adds the packet to the pending set before
* starting to transmit it. As the timeout is handled by a reaper task, no
* additional reference for it is needed. After the transmit is done, the
* reference held
* by the transmitter thread is dropped. If the packet is unsequenced (i.e.
* does not need an ACK), the packet is completed by the transmitter thread
* before dropping that reference.
*
* On receipt of an ACK, the receiver thread removes and obtains the
* reference to the packet from the pending set. The receiver thread will then
* complete the packet and drop its reference.
*
* On receipt of a NAK, the receiver thread re-submits all currently pending
* packets.
*
* Packet timeouts are detected by the timeout reaper. This is a task,
* scheduled depending on the earliest packet timeout expiration date, that
* checks all currently pending packets for expired timeouts. If the
* timeout of a packet has expired, it is re-submitted and the number of tries
* of this packet is incremented. If this number reaches its limit, the packet
* will be completed with a failure.
*
* On transmission failure (such as repeated packet timeouts), the completion
* callback is immediately run on the thread on which the error was detected.
*
* To ensure that a packet eventually leaves the system it is marked as
* "locked" directly before it is going to be completed or when it is
* canceled. Marking a packet as "locked" has the effect that passing and
* creating new references of the packet is disallowed. This means that the
* packet cannot be added to the queue, the pending set, and the timeout, or
* be picked up by the transmitter thread or receiver thread. To remove a
* packet from the system it has to be marked as locked and subsequently all
* references from the data structures (queue, pending) have to be removed.
* References held by threads will eventually be dropped automatically as
* their execution progresses.
*
* Note that the packet completion callback is, in case of success and for a
* sequenced packet, guaranteed to run on the receiver thread, thus providing
* a way to reliably identify responses to the packet. The packet completion
* callback is only run once and it does not indicate that the packet has
* fully left the system (for this, one should rely on the release method,
* triggered when the reference count of the packet reaches zero). In case of
* re-submission (and with somewhat unlikely timing), it may be possible that
* the packet is being re-transmitted while the completion callback runs.
* Completion will occur both on success and internal error, as well as when
* the packet is canceled.
*
* >> Flags <<
*
* Flags are used to indicate the state and progression of a packet. Some flags
* have stricter guarantees than others:
*
* - locked
* Indicates if the packet is locked. If the packet is locked, passing and/or
* creating additional references to the packet is forbidden. The packet thus
* may not be added to or removed from either the queue or the pending set. Note
* that the packet state flags may still change (e.g. it may be marked as
* ACKed, transmitted, ...).
*
* - completed
* Indicates if the packet completion callback has been executed or is about
* to be executed. This flag is used to ensure that the packet completion
* callback is only run once.
*
* - queued
* Indicates if a packet is present in the submission queue or not. This flag
* must only be modified with the queue lock held, and must be coherent with
* the presence of the packet in the queue.
*
* - pending
* Indicates if a packet is present in the set of pending packets or not.
* This flag must only be modified with the pending lock held, and must be
* coherent with the presence of the packet in the pending set.
*
* - transmitting
* Indicates if the packet is currently transmitting. In case of
* re-transmissions, it is only safe to wait on the "transmitted" completion
* after this flag has been set. The completion will be set both in success
* and error case.
*
* - transmitted
* Indicates if the packet has been transmitted. This flag is not cleared by
* the system, thus it indicates the first transmission only.
*
* - acked
* Indicates if the packet has been acknowledged by the client. There are no
* other guarantees given. For example, the packet may still be canceled
* and/or the completion may be triggered with an error even though this bit is
* set. Rely on the status provided to the completion callback instead.
*
* - canceled
* Indicates if the packet has been canceled from the outside. There are no
* other guarantees given. Specifically, the packet may be completed by
* another part of the system before the cancellation attempts to complete it.
*
* >> General Notes <<
*
* - To avoid deadlocks, if both queue and pending locks are required, the
* pending lock must be acquired before the queue lock.
*
* - The packet priority must be accessed only while holding the queue lock.
*
* - The packet timestamp must be accessed only while holding the pending
* lock.
*/
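/*
 * Illustrative sketch (not part of the packet layer API): the canonical
 * removal sequence that follows from the rules above. A packet is first
 * locked, then stripped of its structural references, then completed;
 * this is what ssh_ptl_remove_and_complete() below implements:
 *
 *	set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state);
 *	if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state)) {
 *		ssh_ptl_queue_remove(p);
 *		ssh_ptl_pending_remove(p);
 *		__ssh_ptl_complete(p, status);
 *	}
 */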
/*
* SSH_PTL_MAX_PACKET_TRIES - Maximum transmission attempts for packet.
*
* Maximum number of transmission attempts per sequenced packet in case of
* time-outs. Must be smaller than 16. If the packet times out after this
* amount of tries, the packet will be completed with %-ETIMEDOUT as status
* code.
*/
#define SSH_PTL_MAX_PACKET_TRIES 3
/*
* SSH_PTL_TX_TIMEOUT - Packet transmission timeout.
*
* Timeout in jiffies for packet transmission via the underlying serial
* device. If transmitting the packet takes longer than this timeout, the
* packet will be completed with %-ETIMEDOUT. It will not be re-submitted.
*/
#define SSH_PTL_TX_TIMEOUT HZ
/*
* SSH_PTL_PACKET_TIMEOUT - Packet response timeout.
*
* Timeout as ktime_t delta for ACKs. If we have not received an ACK in this
* time-frame after starting transmission, the packet will be re-submitted.
*/
#define SSH_PTL_PACKET_TIMEOUT ms_to_ktime(1000)
/*
* SSH_PTL_PACKET_TIMEOUT_RESOLUTION - Packet timeout granularity.
*
* Time-resolution for timeouts. Should be larger than one jiffy to avoid
* direct re-scheduling of the reaper work_struct.
*/
#define SSH_PTL_PACKET_TIMEOUT_RESOLUTION ms_to_ktime(max(2000 / HZ, 50))
/*
* SSH_PTL_MAX_PENDING - Maximum number of pending packets.
*
* Maximum number of sequenced packets concurrently waiting for an ACK.
* Packets marked as blocking will not be transmitted while this limit is
* reached.
*/
#define SSH_PTL_MAX_PENDING 1
/*
* SSH_PTL_RX_BUF_LEN - Evaluation-buffer size in bytes.
*/
#define SSH_PTL_RX_BUF_LEN 4096
/*
* SSH_PTL_RX_FIFO_LEN - Fifo input-buffer size in bytes.
*/
#define SSH_PTL_RX_FIFO_LEN 4096
#ifdef CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION
/**
* ssh_ptl_should_drop_ack_packet() - Error injection hook to drop ACK packets.
*
* Useful to test detection and handling of automated re-transmits by the EC.
* Specifically of packets that the EC considers not-ACKed but the driver
* already considers ACKed (due to dropped ACK). In this case, the EC
* re-transmits the packet-to-be-ACKed and the driver should detect it as
* duplicate/already handled. Note that the driver should still send an ACK
* for the re-transmitted packet.
*/
static noinline bool ssh_ptl_should_drop_ack_packet(void)
{
return false;
}
ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_ack_packet, TRUE);
/**
* ssh_ptl_should_drop_nak_packet() - Error injection hook to drop NAK packets.
*
* Useful to test/force automated (timeout-based) re-transmit by the EC.
* Specifically, packets that have not reached the driver completely/with valid
* checksums. Only useful in combination with receipt of (injected) bad data.
*/
static noinline bool ssh_ptl_should_drop_nak_packet(void)
{
return false;
}
ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_nak_packet, TRUE);
/**
* ssh_ptl_should_drop_dsq_packet() - Error injection hook to drop sequenced
* data packet.
*
* Useful to test re-transmit timeout of the driver. If the data packet has not
* been ACKed after a certain time, the driver should re-transmit the packet up
* to the limited number of times defined in SSH_PTL_MAX_PACKET_TRIES.
*/
static noinline bool ssh_ptl_should_drop_dsq_packet(void)
{
return false;
}
ALLOW_ERROR_INJECTION(ssh_ptl_should_drop_dsq_packet, TRUE);
/**
* ssh_ptl_should_fail_write() - Error injection hook to make
* serdev_device_write() fail.
*
* Hook to simulate errors in serdev_device_write when transmitting packets.
*/
static noinline int ssh_ptl_should_fail_write(void)
{
return 0;
}
ALLOW_ERROR_INJECTION(ssh_ptl_should_fail_write, ERRNO);
/**
* ssh_ptl_should_corrupt_tx_data() - Error injection hook to simulate invalid
* data being sent to the EC.
*
* Hook to simulate corrupt/invalid data being sent from host (driver) to EC.
* Causes the packet data to be actively corrupted by overwriting it with
* pre-defined values, such that it becomes invalid, causing the EC to respond
* with a NAK packet. Useful to test handling of NAK packets received by the
* driver.
*/
static noinline bool ssh_ptl_should_corrupt_tx_data(void)
{
return false;
}
ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_tx_data, TRUE);
/**
* ssh_ptl_should_corrupt_rx_syn() - Error injection hook to simulate invalid
* data being sent by the EC.
*
* Hook to simulate invalid SYN bytes, i.e. an invalid start of messages and
* test handling thereof in the driver.
*/
static noinline bool ssh_ptl_should_corrupt_rx_syn(void)
{
return false;
}
ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_rx_syn, TRUE);
/**
* ssh_ptl_should_corrupt_rx_data() - Error injection hook to simulate invalid
* data being sent by the EC.
*
* Hook to simulate invalid data/checksum of the message frame and test handling
* thereof in the driver.
*/
static noinline bool ssh_ptl_should_corrupt_rx_data(void)
{
return false;
}
ALLOW_ERROR_INJECTION(ssh_ptl_should_corrupt_rx_data, TRUE);
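/*
 * Usage note (an assumption based on the generic function error injection
 * framework, not on anything specific to this driver): with
 * CONFIG_FUNCTION_ERROR_INJECTION and CONFIG_FAIL_FUNCTION enabled, hooks
 * like the ones above can typically be armed at runtime via debugfs, e.g.:
 *
 *	echo ssh_ptl_should_drop_dsq_packet > /sys/kernel/debug/fail_function/inject
 *	echo 100 > /sys/kernel/debug/fail_function/probability
 *	echo -1 > /sys/kernel/debug/fail_function/times
 *
 * The exact attributes (probability, times, per-function retval) are
 * described in Documentation/fault-injection/.
 */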
static bool __ssh_ptl_should_drop_ack_packet(struct ssh_packet *packet)
{
if (likely(!ssh_ptl_should_drop_ack_packet()))
return false;
trace_ssam_ei_tx_drop_ack_packet(packet);
ptl_info(packet->ptl, "packet error injection: dropping ACK packet %p\n",
packet);
return true;
}
static bool __ssh_ptl_should_drop_nak_packet(struct ssh_packet *packet)
{
if (likely(!ssh_ptl_should_drop_nak_packet()))
return false;
trace_ssam_ei_tx_drop_nak_packet(packet);
ptl_info(packet->ptl, "packet error injection: dropping NAK packet %p\n",
packet);
return true;
}
static bool __ssh_ptl_should_drop_dsq_packet(struct ssh_packet *packet)
{
if (likely(!ssh_ptl_should_drop_dsq_packet()))
return false;
trace_ssam_ei_tx_drop_dsq_packet(packet);
ptl_info(packet->ptl,
"packet error injection: dropping sequenced data packet %p\n",
packet);
return true;
}
static bool ssh_ptl_should_drop_packet(struct ssh_packet *packet)
{
/* Ignore packets that don't carry any data (i.e. flush). */
if (!packet->data.ptr || !packet->data.len)
return false;
switch (packet->data.ptr[SSH_MSGOFFSET_FRAME(type)]) {
case SSH_FRAME_TYPE_ACK:
return __ssh_ptl_should_drop_ack_packet(packet);
case SSH_FRAME_TYPE_NAK:
return __ssh_ptl_should_drop_nak_packet(packet);
case SSH_FRAME_TYPE_DATA_SEQ:
return __ssh_ptl_should_drop_dsq_packet(packet);
default:
return false;
}
}
static int ssh_ptl_write_buf(struct ssh_ptl *ptl, struct ssh_packet *packet,
const unsigned char *buf, size_t count)
{
int status;
status = ssh_ptl_should_fail_write();
if (unlikely(status)) {
trace_ssam_ei_tx_fail_write(packet, status);
ptl_info(packet->ptl,
"packet error injection: simulating transmit error %d, packet %p\n",
status, packet);
return status;
}
return serdev_device_write_buf(ptl->serdev, buf, count);
}
static void ssh_ptl_tx_inject_invalid_data(struct ssh_packet *packet)
{
/* Ignore packets that don't carry any data (i.e. flush). */
if (!packet->data.ptr || !packet->data.len)
return;
/* Only allow sequenced data packets to be modified. */
if (packet->data.ptr[SSH_MSGOFFSET_FRAME(type)] != SSH_FRAME_TYPE_DATA_SEQ)
return;
if (likely(!ssh_ptl_should_corrupt_tx_data()))
return;
trace_ssam_ei_tx_corrupt_data(packet);
ptl_info(packet->ptl,
"packet error injection: simulating invalid transmit data on packet %p\n",
packet);
/*
* NB: The value 0xb3 has been chosen more or less randomly so that it
* doesn't have any (major) overlap with the SYN bytes (aa 55) and is
* non-trivial (i.e. non-zero, non-0xff).
*/
memset(packet->data.ptr, 0xb3, packet->data.len);
}
static void ssh_ptl_rx_inject_invalid_syn(struct ssh_ptl *ptl,
struct ssam_span *data)
{
struct ssam_span frame;
/* Check if there actually is something to corrupt. */
if (!sshp_find_syn(data, &frame))
return;
if (likely(!ssh_ptl_should_corrupt_rx_syn()))
return;
trace_ssam_ei_rx_corrupt_syn(data->len);
data->ptr[1] = 0xb3; /* Set second byte of SYN to "random" value. */
}
static void ssh_ptl_rx_inject_invalid_data(struct ssh_ptl *ptl,
struct ssam_span *frame)
{
size_t payload_len, message_len;
struct ssh_frame *sshf;
/* Ignore incomplete messages; they will get handled once complete. */
if (frame->len < SSH_MESSAGE_LENGTH(0))
return;
/* Ignore incomplete messages, part 2. */
payload_len = get_unaligned_le16(&frame->ptr[SSH_MSGOFFSET_FRAME(len)]);
message_len = SSH_MESSAGE_LENGTH(payload_len);
if (frame->len < message_len)
return;
if (likely(!ssh_ptl_should_corrupt_rx_data()))
return;
sshf = (struct ssh_frame *)&frame->ptr[SSH_MSGOFFSET_FRAME(type)];
trace_ssam_ei_rx_corrupt_data(sshf);
/*
* Flip bits in first byte of payload checksum. This is basically
* equivalent to a payload/frame data error without us having to worry
* about (the, arguably pretty small, probability of) accidental
* checksum collisions.
*/
frame->ptr[frame->len - 2] = ~frame->ptr[frame->len - 2];
}
#else /* CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION */
static inline bool ssh_ptl_should_drop_packet(struct ssh_packet *packet)
{
return false;
}
static inline int ssh_ptl_write_buf(struct ssh_ptl *ptl,
struct ssh_packet *packet,
const unsigned char *buf,
size_t count)
{
return serdev_device_write_buf(ptl->serdev, buf, count);
}
static inline void ssh_ptl_tx_inject_invalid_data(struct ssh_packet *packet)
{
}
static inline void ssh_ptl_rx_inject_invalid_syn(struct ssh_ptl *ptl,
struct ssam_span *data)
{
}
static inline void ssh_ptl_rx_inject_invalid_data(struct ssh_ptl *ptl,
struct ssam_span *frame)
{
}
#endif /* CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION */
static void __ssh_ptl_packet_release(struct kref *kref)
{
struct ssh_packet *p = container_of(kref, struct ssh_packet, refcnt);
trace_ssam_packet_release(p);
ptl_dbg_cond(p->ptl, "ptl: releasing packet %p\n", p);
p->ops->release(p);
}
/**
* ssh_packet_get() - Increment reference count of packet.
* @packet: The packet to increment the reference count of.
*
* Increments the reference count of the given packet. See ssh_packet_put()
* for the counter-part of this function.
*
* Return: Returns the packet provided as input.
*/
struct ssh_packet *ssh_packet_get(struct ssh_packet *packet)
{
if (packet)
kref_get(&packet->refcnt);
return packet;
}
EXPORT_SYMBOL_GPL(ssh_packet_get);
/**
* ssh_packet_put() - Decrement reference count of packet.
* @packet: The packet to decrement the reference count of.
*
* If the reference count reaches zero, the ``release`` callback specified in
* the packet's &struct ssh_packet_ops, i.e. ``packet->ops->release``, will be
* called.
*
* See ssh_packet_get() for the counter-part of this function.
*/
void ssh_packet_put(struct ssh_packet *packet)
{
if (packet)
kref_put(&packet->refcnt, __ssh_ptl_packet_release);
}
EXPORT_SYMBOL_GPL(ssh_packet_put);
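/*
 * Illustrative sketch (not part of the driver): the get/put pairing used
 * throughout this file whenever a packet is handed to another owner, for
 * example when adding it to and later removing it from the queue:
 *
 *	list_add_tail(&ssh_packet_get(p)->queue_node, head);
 *
 *	list_del(&p->queue_node);
 *	ssh_packet_put(p);
 *
 * The put drops the reference created for the queue by the get in the add
 * path; see __ssh_ptl_queue_push() and ssh_ptl_queue_remove() below.
 */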
static u8 ssh_packet_get_seq(struct ssh_packet *packet)
{
return packet->data.ptr[SSH_MSGOFFSET_FRAME(seq)];
}
/**
* ssh_packet_init() - Initialize SSH packet.
* @packet: The packet to initialize.
* @type: Type-flags of the packet.
* @priority: Priority of the packet. See SSH_PACKET_PRIORITY() for details.
* @ops: Packet operations.
*
* Initializes the given SSH packet. Sets the transmission buffer pointer to
* %NULL and the transmission buffer length to zero. For data-type packets,
* this buffer has to be set separately via ssh_packet_set_data() before
* submission, and must contain a valid SSH message, i.e. frame with optional
* payload of any type.
*/
void ssh_packet_init(struct ssh_packet *packet, unsigned long type,
u8 priority, const struct ssh_packet_ops *ops)
{
kref_init(&packet->refcnt);
packet->ptl = NULL;
INIT_LIST_HEAD(&packet->queue_node);
INIT_LIST_HEAD(&packet->pending_node);
packet->state = type & SSH_PACKET_FLAGS_TY_MASK;
packet->priority = priority;
packet->timestamp = KTIME_MAX;
packet->data.ptr = NULL;
packet->data.len = 0;
packet->ops = ops;
}
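/*
 * Illustrative sketch (not part of the driver): this is how the request
 * layer initializes its data packets, i.e. as blocking, sequenced
 * data-type packets at the default data priority (cf. ssh_request_init());
 * @ops, @buf, and @len are hypothetical here:
 *
 *	unsigned long type = BIT(SSH_PACKET_TY_BLOCKING_BIT) |
 *			     BIT(SSH_PACKET_TY_SEQUENCED_BIT);
 *
 *	ssh_packet_init(packet, type, SSH_PACKET_PRIORITY(DATA, 0), ops);
 *	ssh_packet_set_data(packet, buf, len);
 */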
static struct kmem_cache *ssh_ctrl_packet_cache;
/**
* ssh_ctrl_packet_cache_init() - Initialize the control packet cache.
*/
int ssh_ctrl_packet_cache_init(void)
{
const unsigned int size = sizeof(struct ssh_packet) + SSH_MSG_LEN_CTRL;
const unsigned int align = __alignof__(struct ssh_packet);
struct kmem_cache *cache;
cache = kmem_cache_create("ssam_ctrl_packet", size, align, 0, NULL);
if (!cache)
return -ENOMEM;
ssh_ctrl_packet_cache = cache;
return 0;
}
/**
* ssh_ctrl_packet_cache_destroy() - Deinitialize the control packet cache.
*/
void ssh_ctrl_packet_cache_destroy(void)
{
kmem_cache_destroy(ssh_ctrl_packet_cache);
ssh_ctrl_packet_cache = NULL;
}
/**
* ssh_ctrl_packet_alloc() - Allocate packet from control packet cache.
* @packet: Where the pointer to the newly allocated packet should be stored.
* @buffer: The buffer corresponding to this packet.
* @flags: Flags used for allocation.
*
* Allocates a packet and corresponding transport buffer from the control
* packet cache. Sets the packet's buffer reference to the allocated buffer.
* The packet must be freed via ssh_ctrl_packet_free(), which will also free
* the corresponding buffer. The corresponding buffer must not be freed
* separately. Intended to be used with %ssh_ptl_ctrl_packet_ops as packet
* operations.
*
* Return: Returns zero on success, %-ENOMEM if the allocation failed.
*/
static int ssh_ctrl_packet_alloc(struct ssh_packet **packet,
struct ssam_span *buffer, gfp_t flags)
{
*packet = kmem_cache_alloc(ssh_ctrl_packet_cache, flags);
if (!*packet)
return -ENOMEM;
buffer->ptr = (u8 *)(*packet + 1);
buffer->len = SSH_MSG_LEN_CTRL;
trace_ssam_ctrl_packet_alloc(*packet, buffer->len);
return 0;
}
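/*
 * Illustrative sketch (not part of the driver): allocation and release of
 * a control packet. The transport buffer lives directly behind the packet
 * object, so dropping the last packet reference frees both via
 * ssh_ctrl_packet_free(). The ACK priority base is an assumption here; it
 * is defined alongside DATA and FLUSH in the serial hub header.
 *
 *	struct ssh_packet *p;
 *	struct ssam_span buf;
 *
 *	if (ssh_ctrl_packet_alloc(&p, &buf, GFP_KERNEL))
 *		return;
 *	ssh_packet_init(p, 0, SSH_PACKET_PRIORITY(ACK, 0),
 *			&ssh_ptl_ctrl_packet_ops);
 *	ssh_packet_put(p);
 */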
/**
* ssh_ctrl_packet_free() - Free packet allocated from control packet cache.
* @p: The packet to free.
*/
static void ssh_ctrl_packet_free(struct ssh_packet *p)
{
trace_ssam_ctrl_packet_free(p);
kmem_cache_free(ssh_ctrl_packet_cache, p);
}
static const struct ssh_packet_ops ssh_ptl_ctrl_packet_ops = {
.complete = NULL,
.release = ssh_ctrl_packet_free,
};
static void ssh_ptl_timeout_reaper_mod(struct ssh_ptl *ptl, ktime_t now,
ktime_t expires)
{
unsigned long delta = msecs_to_jiffies(ktime_ms_delta(expires, now));
ktime_t aexp = ktime_add(expires, SSH_PTL_PACKET_TIMEOUT_RESOLUTION);
spin_lock(&ptl->rtx_timeout.lock);
/* Re-adjust / schedule reaper only if it is above resolution delta. */
if (ktime_before(aexp, ptl->rtx_timeout.expires)) {
ptl->rtx_timeout.expires = expires;
mod_delayed_work(system_wq, &ptl->rtx_timeout.reaper, delta);
}
spin_unlock(&ptl->rtx_timeout.lock);
}
/* Must be called with queue lock held. */
static void ssh_packet_next_try(struct ssh_packet *p)
{
u8 base = ssh_packet_priority_get_base(p->priority);
u8 try = ssh_packet_priority_get_try(p->priority);
lockdep_assert_held(&p->ptl->queue.lock);
/*
* Ensure that we write the priority in one go via WRITE_ONCE() so we
* can access it via READ_ONCE() for tracing. Note that other access
* is guarded by the queue lock, so no need to use READ_ONCE() there.
*/
WRITE_ONCE(p->priority, __SSH_PACKET_PRIORITY(base, try + 1));
}
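/*
 * Worked example (assuming the encoding behind __SSH_PACKET_PRIORITY()
 * places the try counter in the bits below the base priority): a data
 * packet starts out at SSH_PACKET_PRIORITY(DATA, 0). After one call to
 * ssh_packet_next_try() its priority is __SSH_PACKET_PRIORITY(DATA, 1),
 * which compares greater than SSH_PACKET_PRIORITY(DATA, 0). This is
 * exactly the condition __ssh_ptl_queue_find_entrypoint() below uses to
 * treat it as a re-submitted packet and search from the front of the
 * queue.
 */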
/* Must be called with queue lock held. */
static struct list_head *__ssh_ptl_queue_find_entrypoint(struct ssh_packet *p)
{
struct list_head *head;
struct ssh_packet *q;
lockdep_assert_held(&p->ptl->queue.lock);
/*
* We generally assume that there are fewer control (ACK/NAK) packets
* and re-submitted data packets than there are normal data packets (at
* least in situations in which many packets are queued; if there
* aren't many packets queued, the decision on how to iterate should be
* basically irrelevant; the number of control/data packets is more or
* less limited via the maximum number of pending packets). Thus, when
* inserting a control or re-submitted data packet (determined by
* its priority), we search from front to back. Normal data packets
* are usually queued directly at the tail of the queue, so for those
* we search from back to front.
*/
if (p->priority > SSH_PACKET_PRIORITY(DATA, 0)) {
list_for_each(head, &p->ptl->queue.head) {
q = list_entry(head, struct ssh_packet, queue_node);
if (q->priority < p->priority)
break;
}
} else {
list_for_each_prev(head, &p->ptl->queue.head) {
q = list_entry(head, struct ssh_packet, queue_node);
if (q->priority >= p->priority) {
head = head->next;
break;
}
}
}
return head;
}
/* Must be called with queue lock held. */
static int __ssh_ptl_queue_push(struct ssh_packet *packet)
{
struct ssh_ptl *ptl = packet->ptl;
struct list_head *head;
lockdep_assert_held(&ptl->queue.lock);
if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
return -ESHUTDOWN;
/* Avoid further transitions when canceling/completing. */
if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state))
return -EINVAL;
/* If this packet has already been queued, do not add it. */
if (test_and_set_bit(SSH_PACKET_SF_QUEUED_BIT, &packet->state))
return -EALREADY;
head = __ssh_ptl_queue_find_entrypoint(packet);
list_add_tail(&ssh_packet_get(packet)->queue_node, head);
return 0;
}
static int ssh_ptl_queue_push(struct ssh_packet *packet)
{
int status;
spin_lock(&packet->ptl->queue.lock);
status = __ssh_ptl_queue_push(packet);
spin_unlock(&packet->ptl->queue.lock);
return status;
}
static void ssh_ptl_queue_remove(struct ssh_packet *packet)
{
struct ssh_ptl *ptl = packet->ptl;
spin_lock(&ptl->queue.lock);
if (!test_and_clear_bit(SSH_PACKET_SF_QUEUED_BIT, &packet->state)) {
spin_unlock(&ptl->queue.lock);
return;
}
list_del(&packet->queue_node);
spin_unlock(&ptl->queue.lock);
ssh_packet_put(packet);
}
static void ssh_ptl_pending_push(struct ssh_packet *p)
{
struct ssh_ptl *ptl = p->ptl;
const ktime_t timestamp = ktime_get_coarse_boottime();
const ktime_t timeout = ptl->rtx_timeout.timeout;
/*
* Note: We can get the time for the timestamp before acquiring the
* lock as this is the only place we're setting it and this function
* is called only from the transmitter thread. Thus it is not possible
* to overwrite the timestamp with an outdated value below.
*/
spin_lock(&ptl->pending.lock);
/* If we are canceling/completing this packet, do not add it. */
if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state)) {
spin_unlock(&ptl->pending.lock);
return;
}
/*
* On re-submission, the packet has already been added to the pending
* set. We still need to update the timestamp as the packet timeout is
* reset for each (re-)submission.
*/
p->timestamp = timestamp;
/* In case it is already pending (e.g. re-submission), do not add it. */
if (!test_and_set_bit(SSH_PACKET_SF_PENDING_BIT, &p->state)) {
atomic_inc(&ptl->pending.count);
list_add_tail(&ssh_packet_get(p)->pending_node, &ptl->pending.head);
}
spin_unlock(&ptl->pending.lock);
/* Arm/update timeout reaper. */
ssh_ptl_timeout_reaper_mod(ptl, timestamp, timestamp + timeout);
}
static void ssh_ptl_pending_remove(struct ssh_packet *packet)
{
struct ssh_ptl *ptl = packet->ptl;
spin_lock(&ptl->pending.lock);
if (!test_and_clear_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state)) {
spin_unlock(&ptl->pending.lock);
return;
}
list_del(&packet->pending_node);
atomic_dec(&ptl->pending.count);
spin_unlock(&ptl->pending.lock);
ssh_packet_put(packet);
}
/* Warning: Does not check/set "completed" bit. */
static void __ssh_ptl_complete(struct ssh_packet *p, int status)
{
struct ssh_ptl *ptl = READ_ONCE(p->ptl);
trace_ssam_packet_complete(p, status);
ptl_dbg_cond(ptl, "ptl: completing packet %p (status: %d)\n", p, status);
if (p->ops->complete)
p->ops->complete(p, status);
}
static void ssh_ptl_remove_and_complete(struct ssh_packet *p, int status)
{
/*
* A call to this function should in general be preceded by
* set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state) to avoid re-adding the
* packet to the structures it's going to be removed from.
*
* The set_bit call does not need explicit memory barriers as the
* implicit barrier of the test_and_set_bit() call below ensures that the
* flag is visible before we actually attempt to remove the packet.
*/
if (test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
return;
ssh_ptl_queue_remove(p);
ssh_ptl_pending_remove(p);
__ssh_ptl_complete(p, status);
}
static bool ssh_ptl_tx_can_process(struct ssh_packet *packet)
{
struct ssh_ptl *ptl = packet->ptl;
if (test_bit(SSH_PACKET_TY_FLUSH_BIT, &packet->state))
return !atomic_read(&ptl->pending.count);
/* We can always process non-blocking packets. */
if (!test_bit(SSH_PACKET_TY_BLOCKING_BIT, &packet->state))
return true;
/* If we are already waiting for this packet, send it again. */
if (test_bit(SSH_PACKET_SF_PENDING_BIT, &packet->state))
return true;
/* Otherwise: Check if we have the capacity to send. */
return atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING;
}
static struct ssh_packet *ssh_ptl_tx_pop(struct ssh_ptl *ptl)
{
struct ssh_packet *packet = ERR_PTR(-ENOENT);
struct ssh_packet *p, *n;
spin_lock(&ptl->queue.lock);
list_for_each_entry_safe(p, n, &ptl->queue.head, queue_node) {
/*
* If we are canceling or completing this packet, ignore it.
* It's going to be removed from this queue shortly.
*/
if (test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
continue;
/*
* Packets should be ordered non-blocking/to-be-resent first.
* If we cannot process this packet, assume that we can't
* process any following packet either and abort.
*/
if (!ssh_ptl_tx_can_process(p)) {
packet = ERR_PTR(-EBUSY);
break;
}
/*
* We are allowed to change the state now. Remove it from the
* queue and mark it as being transmitted.
*/
list_del(&p->queue_node);
set_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &p->state);
/* Ensure that state never gets zero. */
smp_mb__before_atomic();
clear_bit(SSH_PACKET_SF_QUEUED_BIT, &p->state);
/*
* Update number of tries. This directly influences the
* priority in case the packet is re-submitted (e.g. via
* timeout/NAK). Note that all reads and writes to the
* priority after the first submission are guarded by the
* queue lock.
*/
ssh_packet_next_try(p);
packet = p;
break;
}
spin_unlock(&ptl->queue.lock);
return packet;
}
static struct ssh_packet *ssh_ptl_tx_next(struct ssh_ptl *ptl)
{
struct ssh_packet *p;
p = ssh_ptl_tx_pop(ptl);
if (IS_ERR(p))
return p;
if (test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &p->state)) {
ptl_dbg(ptl, "ptl: transmitting sequenced packet %p\n", p);
ssh_ptl_pending_push(p);
} else {
ptl_dbg(ptl, "ptl: transmitting non-sequenced packet %p\n", p);
}
return p;
}
static void ssh_ptl_tx_compl_success(struct ssh_packet *packet)
{
struct ssh_ptl *ptl = packet->ptl;
ptl_dbg(ptl, "ptl: successfully transmitted packet %p\n", packet);
/* Transition state to "transmitted". */
set_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &packet->state);
/* Ensure that state never gets zero. */
smp_mb__before_atomic();
clear_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &packet->state);
/* If the packet is unsequenced, we're done: Lock and complete. */
if (!test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &packet->state)) {
set_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state);
ssh_ptl_remove_and_complete(packet, 0);
}
/*
* Notify that a packet transmission has finished. In general we're only
* waiting for one packet (if any), so wake_up_all should be fine.
*/
wake_up_all(&ptl->tx.packet_wq);
}
static void ssh_ptl_tx_compl_error(struct ssh_packet *packet, int status)
{
/* Transmission failure: Lock the packet and try to complete it. */
set_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state);
/* Ensure that state never gets zero. */
smp_mb__before_atomic();
clear_bit(SSH_PACKET_SF_TRANSMITTING_BIT, &packet->state);
ptl_err(packet->ptl, "ptl: transmission error: %d\n", status);
ptl_dbg(packet->ptl, "ptl: failed to transmit packet: %p\n", packet);
ssh_ptl_remove_and_complete(packet, status);
/*
* Notify that a packet transmission has finished. In general we're only
* waiting for one packet (if any), so wake_up_all should be fine.
*/
wake_up_all(&packet->ptl->tx.packet_wq);
}
static long ssh_ptl_tx_wait_packet(struct ssh_ptl *ptl)
{
int status;
status = wait_for_completion_interruptible(&ptl->tx.thread_cplt_pkt);
reinit_completion(&ptl->tx.thread_cplt_pkt);
/*
* Ensure completion is cleared before continuing to avoid lost update
* problems.
*/
smp_mb__after_atomic();
return status;
}
static long ssh_ptl_tx_wait_transfer(struct ssh_ptl *ptl, long timeout)
{
long status;
status = wait_for_completion_interruptible_timeout(&ptl->tx.thread_cplt_tx,
timeout);
reinit_completion(&ptl->tx.thread_cplt_tx);
/*
* Ensure completion is cleared before continuing to avoid lost update
* problems.
*/
smp_mb__after_atomic();
return status;
}
static int ssh_ptl_tx_packet(struct ssh_ptl *ptl, struct ssh_packet *packet)
{
long timeout = SSH_PTL_TX_TIMEOUT;
size_t offset = 0;
/* Note: Flush-packets don't have any data. */
if (unlikely(!packet->data.ptr))
return 0;
/* Error injection: drop packet to simulate transmission problem. */
if (ssh_ptl_should_drop_packet(packet))
return 0;
/* Error injection: simulate invalid packet data. */
ssh_ptl_tx_inject_invalid_data(packet);
ptl_dbg(ptl, "tx: sending data (length: %zu)\n", packet->data.len);
print_hex_dump_debug("tx: ", DUMP_PREFIX_OFFSET, 16, 1,
packet->data.ptr, packet->data.len, false);
do {
ssize_t status, len;
u8 *buf;
buf = packet->data.ptr + offset;
len = packet->data.len - offset;
status = ssh_ptl_write_buf(ptl, packet, buf, len);
if (status < 0)
return status;
if (status == len)
return 0;
offset += status;
timeout = ssh_ptl_tx_wait_transfer(ptl, timeout);
if (kthread_should_stop() || !atomic_read(&ptl->tx.running))
return -ESHUTDOWN;
if (timeout < 0)
return -EINTR;
if (timeout == 0)
return -ETIMEDOUT;
} while (true);
}
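/*
 * Transmitter thread main function: Repeatedly pops the next transmissible
 * packet off the queue, transfers it via ssh_ptl_tx_packet(), and completes
 * it with the resulting status. When no packet can be processed, the thread
 * sleeps until woken via ssh_ptl_tx_wakeup_packet().
 */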
static int ssh_ptl_tx_threadfn(void *data)
{
struct ssh_ptl *ptl = data;
while (!kthread_should_stop() && atomic_read(&ptl->tx.running)) {
struct ssh_packet *packet;
int status;
/* Try to get the next packet. */
packet = ssh_ptl_tx_next(ptl);
/* If no packet can be processed, we are done. */
if (IS_ERR(packet)) {
ssh_ptl_tx_wait_packet(ptl);
continue;
}
/* Transfer and complete packet. */
status = ssh_ptl_tx_packet(ptl, packet);
if (status)
ssh_ptl_tx_compl_error(packet, status);
else
ssh_ptl_tx_compl_success(packet);
ssh_packet_put(packet);
}
return 0;
}
/**
* ssh_ptl_tx_wakeup_packet() - Wake up packet transmitter thread for new
* packet.
* @ptl: The packet transport layer.
*
* Wakes up the packet transmitter thread, notifying it that a new packet has
* arrived and is ready for transfer. If the packet transport layer has been
* shut down, calls to this function will be ignored.
*/
static void ssh_ptl_tx_wakeup_packet(struct ssh_ptl *ptl)
{
if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
return;
complete(&ptl->tx.thread_cplt_pkt);
}
/**
* ssh_ptl_tx_start() - Start packet transmitter thread.
* @ptl: The packet transport layer.
*
* Return: Returns zero on success, a negative error code on failure.
*/
int ssh_ptl_tx_start(struct ssh_ptl *ptl)
{
atomic_set_release(&ptl->tx.running, 1);
ptl->tx.thread = kthread_run(ssh_ptl_tx_threadfn, ptl, "ssam_serial_hub-tx");
if (IS_ERR(ptl->tx.thread))
return PTR_ERR(ptl->tx.thread);
return 0;
}
/**
* ssh_ptl_tx_stop() - Stop packet transmitter thread.
* @ptl: The packet transport layer.
*
* Return: Returns zero on success, a negative error code on failure.
*/
int ssh_ptl_tx_stop(struct ssh_ptl *ptl)
{
int status = 0;
if (!IS_ERR_OR_NULL(ptl->tx.thread)) {
/* Tell thread to stop. */
atomic_set_release(&ptl->tx.running, 0);
/*
* Wake up thread in case it is paused. Do not use wakeup
* helpers as this may be called when the shutdown bit has
* already been set.
*/
complete(&ptl->tx.thread_cplt_pkt);
complete(&ptl->tx.thread_cplt_tx);
/* Finally, wait for thread to stop. */
status = kthread_stop(ptl->tx.thread);
ptl->tx.thread = NULL;
}
return status;
}
static struct ssh_packet *ssh_ptl_ack_pop(struct ssh_ptl *ptl, u8 seq_id)
{
struct ssh_packet *packet = ERR_PTR(-ENOENT);
struct ssh_packet *p, *n;
spin_lock(&ptl->pending.lock);
list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
/*
* We generally expect packets to be in order, so first packet
* to be added to pending is first to be sent, is first to be
* ACKed.
*/
if (unlikely(ssh_packet_get_seq(p) != seq_id))
continue;
/*
		 * In case we receive an ACK while handling a transmission
		 * error completion, ignore the packet: it will be removed
		 * shortly.
*/
if (unlikely(test_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))) {
packet = ERR_PTR(-EPERM);
break;
}
/*
* Mark the packet as ACKed and remove it from pending by
* removing its node and decrementing the pending counter.
*/
set_bit(SSH_PACKET_SF_ACKED_BIT, &p->state);
/* Ensure that state never gets zero. */
smp_mb__before_atomic();
clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
atomic_dec(&ptl->pending.count);
list_del(&p->pending_node);
packet = p;
break;
}
spin_unlock(&ptl->pending.lock);
return packet;
}
static void ssh_ptl_wait_until_transmitted(struct ssh_packet *packet)
{
wait_event(packet->ptl->tx.packet_wq,
test_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &packet->state) ||
test_bit(SSH_PACKET_SF_LOCKED_BIT, &packet->state));
}
static void ssh_ptl_acknowledge(struct ssh_ptl *ptl, u8 seq)
{
struct ssh_packet *p;
p = ssh_ptl_ack_pop(ptl, seq);
if (IS_ERR(p)) {
if (PTR_ERR(p) == -ENOENT) {
/*
* The packet has not been found in the set of pending
* packets.
*/
ptl_warn(ptl, "ptl: received ACK for non-pending packet\n");
} else {
/*
* The packet is pending, but we are not allowed to take
* it because it has been locked.
*/
WARN_ON(PTR_ERR(p) != -EPERM);
}
return;
}
ptl_dbg(ptl, "ptl: received ACK for packet %p\n", p);
/*
* It is possible that the packet has been transmitted, but the state
* has not been updated from "transmitting" to "transmitted" yet.
	 * In that case, we need to wait for this transition to occur in order
	 * to distinguish between success and failure.
*
* On transmission failure, the packet will be locked after this call.
* On success, the transmitted bit will be set.
*/
ssh_ptl_wait_until_transmitted(p);
/*
* The packet will already be locked in case of a transmission error or
* cancellation. Let the transmitter or cancellation issuer complete the
* packet.
*/
if (unlikely(test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))) {
if (unlikely(!test_bit(SSH_PACKET_SF_TRANSMITTED_BIT, &p->state)))
ptl_err(ptl, "ptl: received ACK before packet had been fully transmitted\n");
ssh_packet_put(p);
return;
}
ssh_ptl_remove_and_complete(p, 0);
ssh_packet_put(p);
if (atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING)
ssh_ptl_tx_wakeup_packet(ptl);
}
/**
* ssh_ptl_submit() - Submit a packet to the transport layer.
* @ptl: The packet transport layer to submit the packet to.
* @p: The packet to submit.
*
* Submits a new packet to the transport layer, queuing it to be sent. This
* function should not be used for re-submission.
*
* Return: Returns zero on success, %-EINVAL if a packet field is invalid or
* the packet has been canceled prior to submission, %-EALREADY if the packet
* has already been submitted, or %-ESHUTDOWN if the packet transport layer
* has been shut down.
*/
int ssh_ptl_submit(struct ssh_ptl *ptl, struct ssh_packet *p)
{
struct ssh_ptl *ptl_old;
int status;
trace_ssam_packet_submit(p);
/* Validate packet fields. */
if (test_bit(SSH_PACKET_TY_FLUSH_BIT, &p->state)) {
if (p->data.ptr || test_bit(SSH_PACKET_TY_SEQUENCED_BIT, &p->state))
return -EINVAL;
} else if (!p->data.ptr) {
return -EINVAL;
}
/*
* The ptl reference only gets set on or before the first submission.
* After the first submission, it has to be read-only.
*
* Note that ptl may already be set from upper-layer request
* submission, thus we cannot expect it to be NULL.
*/
ptl_old = READ_ONCE(p->ptl);
if (!ptl_old)
WRITE_ONCE(p->ptl, ptl);
else if (WARN_ON(ptl_old != ptl))
return -EALREADY; /* Submitted on different PTL. */
status = ssh_ptl_queue_push(p);
if (status)
return status;
if (!test_bit(SSH_PACKET_TY_BLOCKING_BIT, &p->state) ||
(atomic_read(&ptl->pending.count) < SSH_PTL_MAX_PENDING))
ssh_ptl_tx_wakeup_packet(ptl);
return 0;
}
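/*
 * Illustrative sketch (guarded out, not compiled): typical caller-side
 * lifecycle around ssh_ptl_submit(). The caller keeps its own reference
 * across submission and may cancel at any time; the function name below is
 * hypothetical.
 */
#if 0
static int example_submit_and_maybe_cancel(struct ssh_ptl *ptl,
					   struct ssh_packet *p)
{
	int status;

	status = ssh_ptl_submit(ptl, p);	/* Queues p, wakes TX thread. */
	if (status)
		return status;			/* -EINVAL/-EALREADY/-ESHUTDOWN. */

	/* ... later, e.g. on a caller-side timeout: ... */
	ssh_ptl_cancel(p);	/* Completion still runs exactly once. */
	return 0;
}
#endif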
/*
* __ssh_ptl_resubmit() - Re-submit a packet to the transport layer.
* @packet: The packet to re-submit.
*
* Re-submits the given packet: Checks if it can be re-submitted and queues it
* if it can, resetting the packet timestamp in the process. Must be called
* with the pending lock held.
*
* Return: Returns %-ECANCELED if the packet has exceeded its number of tries,
* %-EINVAL if the packet has been locked, %-EALREADY if the packet is already
* on the queue, and %-ESHUTDOWN if the transmission layer has been shut down.
*/
static int __ssh_ptl_resubmit(struct ssh_packet *packet)
{
int status;
u8 try;
lockdep_assert_held(&packet->ptl->pending.lock);
trace_ssam_packet_resubmit(packet);
spin_lock(&packet->ptl->queue.lock);
/* Check if the packet is out of tries. */
try = ssh_packet_priority_get_try(packet->priority);
if (try >= SSH_PTL_MAX_PACKET_TRIES) {
spin_unlock(&packet->ptl->queue.lock);
return -ECANCELED;
}
status = __ssh_ptl_queue_push(packet);
if (status) {
/*
* An error here indicates that the packet has either already
* been queued, been locked, or the transport layer is being
* shut down. In all cases: Ignore the error.
*/
spin_unlock(&packet->ptl->queue.lock);
return status;
}
packet->timestamp = KTIME_MAX;
spin_unlock(&packet->ptl->queue.lock);
return 0;
}
static void ssh_ptl_resubmit_pending(struct ssh_ptl *ptl)
{
struct ssh_packet *p;
bool resub = false;
/*
* Note: We deliberately do not remove/attempt to cancel and complete
	 * packets that are out of tries in this function. The packet will be
	 * eventually canceled and completed by the timeout. Removing the packet
	 * here could lead to overly eager cancellation if the packet has not
	 * been re-transmitted yet but the tries-counter was already updated (i.e.
* ssh_ptl_tx_next() removed the packet from the queue and updated the
* counter, but re-transmission for the last try has not actually
* started yet).
*/
spin_lock(&ptl->pending.lock);
/* Re-queue all pending packets. */
list_for_each_entry(p, &ptl->pending.head, pending_node) {
/*
* Re-submission fails if the packet is out of tries, has been
* locked, is already queued, or the layer is being shut down.
* No need to re-schedule tx-thread in those cases.
*/
if (!__ssh_ptl_resubmit(p))
resub = true;
}
spin_unlock(&ptl->pending.lock);
if (resub)
ssh_ptl_tx_wakeup_packet(ptl);
}
/**
* ssh_ptl_cancel() - Cancel a packet.
* @p: The packet to cancel.
*
* Cancels a packet. There are no guarantees on when completion and release
* callbacks will be called. This may occur during execution of this function
* or may occur at any point later.
*
* Note that it is not guaranteed that the packet will actually be canceled if
* the packet is concurrently completed by another process. The only guarantee
* of this function is that the packet will be completed (with success,
* failure, or cancellation) and released from the transport layer in a
* reasonable time-frame.
*
* May be called before the packet has been submitted, in which case any later
* packet submission fails.
*/
void ssh_ptl_cancel(struct ssh_packet *p)
{
if (test_and_set_bit(SSH_PACKET_SF_CANCELED_BIT, &p->state))
return;
trace_ssam_packet_cancel(p);
/*
* Lock packet and commit with memory barrier. If this packet has
* already been locked, it's going to be removed and completed by
* another party, which should have precedence.
*/
if (test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
return;
/*
* By marking the packet as locked and employing the implicit memory
* barrier of test_and_set_bit, we have guaranteed that, at this point,
* the packet cannot be added to the queue any more.
*
* In case the packet has never been submitted, packet->ptl is NULL. If
* the packet is currently being submitted, packet->ptl may be NULL or
	 * non-NULL. By marking the packet as locked above and committing with
* the memory barrier, we have guaranteed that, if packet->ptl is NULL,
* the packet will never be added to the queue. If packet->ptl is
* non-NULL, we don't have any guarantees.
*/
if (READ_ONCE(p->ptl)) {
ssh_ptl_remove_and_complete(p, -ECANCELED);
if (atomic_read(&p->ptl->pending.count) < SSH_PTL_MAX_PENDING)
ssh_ptl_tx_wakeup_packet(p->ptl);
} else if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state)) {
__ssh_ptl_complete(p, -ECANCELED);
}
}
/* Must be called with pending lock held */
static ktime_t ssh_packet_get_expiration(struct ssh_packet *p, ktime_t timeout)
{
lockdep_assert_held(&p->ptl->pending.lock);
if (p->timestamp != KTIME_MAX)
return ktime_add(p->timestamp, timeout);
else
return KTIME_MAX;
}
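/*
 * Timeout reaper work function: Scans the pending set for packets whose
 * ACK timeout has expired, re-submits those that still have tries left,
 * and cancels/completes (with -ETIMEDOUT) those that do not. Finally,
 * re-arms itself for the next pending expiration date.
 */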
static void ssh_ptl_timeout_reap(struct work_struct *work)
{
struct ssh_ptl *ptl = to_ssh_ptl(work, rtx_timeout.reaper.work);
struct ssh_packet *p, *n;
LIST_HEAD(claimed);
ktime_t now = ktime_get_coarse_boottime();
ktime_t timeout = ptl->rtx_timeout.timeout;
ktime_t next = KTIME_MAX;
bool resub = false;
int status;
trace_ssam_ptl_timeout_reap(atomic_read(&ptl->pending.count));
/*
* Mark reaper as "not pending". This is done before checking any
* packets to avoid lost-update type problems.
*/
spin_lock(&ptl->rtx_timeout.lock);
ptl->rtx_timeout.expires = KTIME_MAX;
spin_unlock(&ptl->rtx_timeout.lock);
spin_lock(&ptl->pending.lock);
list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
ktime_t expires = ssh_packet_get_expiration(p, timeout);
/*
* Check if the timeout hasn't expired yet. Find out next
* expiration date to be handled after this run.
*/
if (ktime_after(expires, now)) {
next = ktime_before(expires, next) ? expires : next;
continue;
}
trace_ssam_packet_timeout(p);
status = __ssh_ptl_resubmit(p);
/*
* Re-submission fails if the packet is out of tries, has been
* locked, is already queued, or the layer is being shut down.
* No need to re-schedule tx-thread in those cases.
*/
if (!status)
resub = true;
/* Go to next packet if this packet is not out of tries. */
if (status != -ECANCELED)
continue;
/* No more tries left: Cancel the packet. */
/*
* If someone else has locked the packet already, don't use it
* and let the other party complete it.
*/
if (test_and_set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state))
continue;
/*
* We have now marked the packet as locked. Thus it cannot be
* added to the pending list again after we've removed it here.
* We can therefore re-use the pending_node of this packet
* temporarily.
*/
clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
atomic_dec(&ptl->pending.count);
list_move_tail(&p->pending_node, &claimed);
}
spin_unlock(&ptl->pending.lock);
/* Cancel and complete the packet. */
list_for_each_entry_safe(p, n, &claimed, pending_node) {
if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state)) {
ssh_ptl_queue_remove(p);
__ssh_ptl_complete(p, -ETIMEDOUT);
}
/*
* Drop the reference we've obtained by removing it from
* the pending set.
*/
list_del(&p->pending_node);
ssh_packet_put(p);
}
/* Ensure that reaper doesn't run again immediately. */
next = max(next, ktime_add(now, SSH_PTL_PACKET_TIMEOUT_RESOLUTION));
if (next != KTIME_MAX)
ssh_ptl_timeout_reaper_mod(ptl, now, next);
if (resub)
ssh_ptl_tx_wakeup_packet(ptl);
}
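/*
 * Check whether a received data frame is a retransmission of one we have
 * already seen. The blocked-SEQ list acts as a small ring buffer of the
 * most recently seen sequence IDs; its length bounds how far back a
 * retransmission can be detected.
 */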
static bool ssh_ptl_rx_retransmit_check(struct ssh_ptl *ptl, const struct ssh_frame *frame)
{
int i;
/*
* Ignore unsequenced packets. On some devices (notably Surface Pro 9),
* unsequenced events will always be sent with SEQ=0x00. Attempting to
* detect retransmission would thus just block all events.
*
* While sequence numbers would also allow detection of retransmitted
* packets in unsequenced communication, they have only ever been used
* to cover edge-cases in sequenced transmission. In particular, the
* only instance of packets being retransmitted (that we are aware of)
* is due to an ACK timeout. As this does not happen in unsequenced
* communication, skip the retransmission check for those packets
* entirely.
*/
if (frame->type == SSH_FRAME_TYPE_DATA_NSQ)
return false;
/*
* Check if SEQ has been seen recently (i.e. packet was
* re-transmitted and we should ignore it).
*/
for (i = 0; i < ARRAY_SIZE(ptl->rx.blocked.seqs); i++) {
if (likely(ptl->rx.blocked.seqs[i] != frame->seq))
continue;
ptl_dbg(ptl, "ptl: ignoring repeated data packet\n");
return true;
}
/* Update list of blocked sequence IDs. */
ptl->rx.blocked.seqs[ptl->rx.blocked.offset] = frame->seq;
ptl->rx.blocked.offset = (ptl->rx.blocked.offset + 1)
% ARRAY_SIZE(ptl->rx.blocked.seqs);
return false;
}
static void ssh_ptl_rx_dataframe(struct ssh_ptl *ptl,
const struct ssh_frame *frame,
const struct ssam_span *payload)
{
if (ssh_ptl_rx_retransmit_check(ptl, frame))
return;
ptl->ops.data_received(ptl, payload);
}
static void ssh_ptl_send_ack(struct ssh_ptl *ptl, u8 seq)
{
struct ssh_packet *packet;
struct ssam_span buf;
struct msgbuf msgb;
int status;
status = ssh_ctrl_packet_alloc(&packet, &buf, GFP_KERNEL);
if (status) {
ptl_err(ptl, "ptl: failed to allocate ACK packet\n");
return;
}
ssh_packet_init(packet, 0, SSH_PACKET_PRIORITY(ACK, 0),
&ssh_ptl_ctrl_packet_ops);
msgb_init(&msgb, buf.ptr, buf.len);
msgb_push_ack(&msgb, seq);
ssh_packet_set_data(packet, msgb.begin, msgb_bytes_used(&msgb));
ssh_ptl_submit(ptl, packet);
ssh_packet_put(packet);
}
static void ssh_ptl_send_nak(struct ssh_ptl *ptl)
{
struct ssh_packet *packet;
struct ssam_span buf;
struct msgbuf msgb;
int status;
status = ssh_ctrl_packet_alloc(&packet, &buf, GFP_KERNEL);
if (status) {
ptl_err(ptl, "ptl: failed to allocate NAK packet\n");
return;
}
ssh_packet_init(packet, 0, SSH_PACKET_PRIORITY(NAK, 0),
&ssh_ptl_ctrl_packet_ops);
msgb_init(&msgb, buf.ptr, buf.len);
msgb_push_nak(&msgb);
ssh_packet_set_data(packet, msgb.begin, msgb_bytes_used(&msgb));
ssh_ptl_submit(ptl, packet);
ssh_packet_put(packet);
}
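/*
 * Rough on-wire layout assumed by the parser below (summarized from the
 * protocol documentation, Documentation/driver-api/surface_aggregator/ssh.rst;
 * refer to it for the authoritative description):
 *
 *   SYN (2 bytes) | frame header (type, length, SEQ) | CRC | payload | CRC
 *
 * This is why an invalid frame skips sizeof(u16) bytes (the SYN) before
 * re-searching, and a valid message advances by SSH_MESSAGE_LENGTH().
 */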
static size_t ssh_ptl_rx_eval(struct ssh_ptl *ptl, struct ssam_span *source)
{
struct ssh_frame *frame;
struct ssam_span payload;
struct ssam_span aligned;
bool syn_found;
int status;
/* Error injection: Modify data to simulate corrupt SYN bytes. */
ssh_ptl_rx_inject_invalid_syn(ptl, source);
/* Find SYN. */
syn_found = sshp_find_syn(source, &aligned);
if (unlikely(aligned.ptr != source->ptr)) {
/*
* We expect aligned.ptr == source->ptr. If this is not the
* case, then aligned.ptr > source->ptr and we've encountered
* some unexpected data where we'd expect the start of a new
* message (i.e. the SYN sequence).
*
* This can happen when a CRC check for the previous message
* failed and we start actively searching for the next one
* (via the call to sshp_find_syn() above), or the first bytes
* of a message got dropped or corrupted.
*
* In any case, we issue a warning, send a NAK to the EC to
* request re-transmission of any data we haven't acknowledged
* yet, and finally, skip everything up to the next SYN
* sequence.
*/
ptl_warn(ptl, "rx: parser: invalid start of frame, skipping\n");
/*
* Notes:
* - This might send multiple NAKs in case the communication
* starts with an invalid SYN and is broken down into multiple
* pieces. This should generally be handled fine, we just
* might receive duplicate data in this case, which is
* detected when handling data frames.
* - This path will also be executed on invalid CRCs: When an
* invalid CRC is encountered, the code below will skip data
* until directly after the SYN. This causes the search for
* the next SYN, which is generally not placed directly after
* the last one.
*
* Open question: Should we send this in case of invalid
* payload CRCs if the frame-type is non-sequential (current
* implementation) or should we drop that frame without
* telling the EC?
*/
ssh_ptl_send_nak(ptl);
}
if (unlikely(!syn_found))
return aligned.ptr - source->ptr;
/* Error injection: Modify data to simulate corruption. */
ssh_ptl_rx_inject_invalid_data(ptl, &aligned);
/* Parse and validate frame. */
status = sshp_parse_frame(&ptl->serdev->dev, &aligned, &frame, &payload,
SSH_PTL_RX_BUF_LEN);
if (status) /* Invalid frame: skip to next SYN. */
return aligned.ptr - source->ptr + sizeof(u16);
if (!frame) /* Not enough data. */
return aligned.ptr - source->ptr;
trace_ssam_rx_frame_received(frame);
switch (frame->type) {
case SSH_FRAME_TYPE_ACK:
ssh_ptl_acknowledge(ptl, frame->seq);
break;
case SSH_FRAME_TYPE_NAK:
ssh_ptl_resubmit_pending(ptl);
break;
case SSH_FRAME_TYPE_DATA_SEQ:
ssh_ptl_send_ack(ptl, frame->seq);
fallthrough;
case SSH_FRAME_TYPE_DATA_NSQ:
ssh_ptl_rx_dataframe(ptl, frame, &payload);
break;
default:
ptl_warn(ptl, "ptl: received frame with unknown type %#04x\n",
frame->type);
break;
}
return aligned.ptr - source->ptr + SSH_MESSAGE_LENGTH(payload.len);
}
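/*
 * Receiver thread main function: Waits for data on the receive fifo, copies
 * it into the evaluation buffer, and repeatedly runs ssh_ptl_rx_eval() until
 * all available data has been consumed or more bytes are required to
 * complete a message.
 */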
static int ssh_ptl_rx_threadfn(void *data)
{
struct ssh_ptl *ptl = data;
while (true) {
struct ssam_span span;
size_t offs = 0;
size_t n;
wait_event_interruptible(ptl->rx.wq,
!kfifo_is_empty(&ptl->rx.fifo) ||
kthread_should_stop());
if (kthread_should_stop())
break;
/* Copy from fifo to evaluation buffer. */
n = sshp_buf_read_from_fifo(&ptl->rx.buf, &ptl->rx.fifo);
ptl_dbg(ptl, "rx: received data (size: %zu)\n", n);
print_hex_dump_debug("rx: ", DUMP_PREFIX_OFFSET, 16, 1,
ptl->rx.buf.ptr + ptl->rx.buf.len - n,
n, false);
/* Parse until we need more bytes or buffer is empty. */
while (offs < ptl->rx.buf.len) {
sshp_buf_span_from(&ptl->rx.buf, offs, &span);
n = ssh_ptl_rx_eval(ptl, &span);
if (n == 0)
break; /* Need more bytes. */
offs += n;
}
/* Throw away the evaluated parts. */
sshp_buf_drop(&ptl->rx.buf, offs);
}
return 0;
}
static void ssh_ptl_rx_wakeup(struct ssh_ptl *ptl)
{
wake_up(&ptl->rx.wq);
}
/**
* ssh_ptl_rx_start() - Start packet transport layer receiver thread.
* @ptl: The packet transport layer.
*
* Return: Returns zero on success, a negative error code on failure.
*/
int ssh_ptl_rx_start(struct ssh_ptl *ptl)
{
if (ptl->rx.thread)
return 0;
ptl->rx.thread = kthread_run(ssh_ptl_rx_threadfn, ptl,
"ssam_serial_hub-rx");
if (IS_ERR(ptl->rx.thread))
return PTR_ERR(ptl->rx.thread);
return 0;
}
/**
* ssh_ptl_rx_stop() - Stop packet transport layer receiver thread.
* @ptl: The packet transport layer.
*
* Return: Returns zero on success, a negative error code on failure.
*/
int ssh_ptl_rx_stop(struct ssh_ptl *ptl)
{
int status = 0;
if (ptl->rx.thread) {
status = kthread_stop(ptl->rx.thread);
ptl->rx.thread = NULL;
}
return status;
}
/**
* ssh_ptl_rx_rcvbuf() - Push data from lower-layer transport to the packet
* layer.
* @ptl: The packet transport layer.
* @buf: Pointer to the data to push to the layer.
* @n: Size of the data to push to the layer, in bytes.
*
* Pushes data from a lower-layer transport to the receiver fifo buffer of the
* packet layer and notifies the receiver thread. Calls to this function are
* ignored once the packet layer has been shut down.
*
* Return: Returns the number of bytes transferred (positive or zero) on
* success. Returns %-ESHUTDOWN if the packet layer has been shut down.
*/
int ssh_ptl_rx_rcvbuf(struct ssh_ptl *ptl, const u8 *buf, size_t n)
{
int used;
if (test_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state))
return -ESHUTDOWN;
used = kfifo_in(&ptl->rx.fifo, buf, n);
if (used)
ssh_ptl_rx_wakeup(ptl);
return used;
}
/**
* ssh_ptl_shutdown() - Shut down the packet transport layer.
* @ptl: The packet transport layer.
*
* Shuts down the packet transport layer, removing and canceling all queued
* and pending packets. Packets canceled by this operation will be completed
* with %-ESHUTDOWN as status. Receiver and transmitter threads will be
* stopped.
*
* As a result of this function, the transport layer will be marked as shut
* down. Submission of packets after the transport layer has been shut down
* will fail with %-ESHUTDOWN.
*/
void ssh_ptl_shutdown(struct ssh_ptl *ptl)
{
LIST_HEAD(complete_q);
LIST_HEAD(complete_p);
struct ssh_packet *p, *n;
int status;
/* Ensure that no new packets (including ACK/NAK) can be submitted. */
set_bit(SSH_PTL_SF_SHUTDOWN_BIT, &ptl->state);
/*
* Ensure that the layer gets marked as shut-down before actually
* stopping it. In combination with the check in ssh_ptl_queue_push(),
* this guarantees that no new packets can be added and all already
* queued packets are properly canceled. In combination with the check
* in ssh_ptl_rx_rcvbuf(), this guarantees that received data is
* properly cut off.
*/
smp_mb__after_atomic();
status = ssh_ptl_rx_stop(ptl);
if (status)
ptl_err(ptl, "ptl: failed to stop receiver thread\n");
status = ssh_ptl_tx_stop(ptl);
if (status)
ptl_err(ptl, "ptl: failed to stop transmitter thread\n");
cancel_delayed_work_sync(&ptl->rtx_timeout.reaper);
/*
* At this point, all threads have been stopped. This means that the
* only references to packets from inside the system are in the queue
* and pending set.
*
* Note: We still need locks here because someone could still be
* canceling packets.
*
* Note 2: We can re-use queue_node (or pending_node) if we mark the
	 * packet as locked and then remove it from the queue (or pending set
	 * respectively). Marking the packet as locked avoids re-queuing
	 * (which should already be prevented by having stopped the threads...)
* and not setting QUEUED_BIT (or PENDING_BIT) prevents removal from a
* new list via other threads (e.g. cancellation).
*
* Note 3: There may be overlap between complete_p and complete_q.
* This is handled via test_and_set_bit() on the "completed" flag
* (also handles cancellation).
*/
/* Mark queued packets as locked and move them to complete_q. */
spin_lock(&ptl->queue.lock);
list_for_each_entry_safe(p, n, &ptl->queue.head, queue_node) {
set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state);
/* Ensure that state does not get zero. */
smp_mb__before_atomic();
clear_bit(SSH_PACKET_SF_QUEUED_BIT, &p->state);
list_move_tail(&p->queue_node, &complete_q);
}
spin_unlock(&ptl->queue.lock);
/* Mark pending packets as locked and move them to complete_p. */
spin_lock(&ptl->pending.lock);
list_for_each_entry_safe(p, n, &ptl->pending.head, pending_node) {
set_bit(SSH_PACKET_SF_LOCKED_BIT, &p->state);
/* Ensure that state does not get zero. */
smp_mb__before_atomic();
clear_bit(SSH_PACKET_SF_PENDING_BIT, &p->state);
		list_move_tail(&p->pending_node, &complete_p);
}
atomic_set(&ptl->pending.count, 0);
spin_unlock(&ptl->pending.lock);
/* Complete and drop packets on complete_q. */
list_for_each_entry(p, &complete_q, queue_node) {
if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
__ssh_ptl_complete(p, -ESHUTDOWN);
ssh_packet_put(p);
}
/* Complete and drop packets on complete_p. */
list_for_each_entry(p, &complete_p, pending_node) {
if (!test_and_set_bit(SSH_PACKET_SF_COMPLETED_BIT, &p->state))
__ssh_ptl_complete(p, -ESHUTDOWN);
ssh_packet_put(p);
}
/*
* At this point we have guaranteed that the system doesn't reference
* any packets any more.
*/
}
/**
* ssh_ptl_init() - Initialize packet transport layer.
* @ptl: The packet transport layer to initialize.
* @serdev: The underlying serial device, i.e. the lower-level transport.
* @ops: Packet layer operations.
*
* Initializes the given packet transport layer. Transmitter and receiver
* threads must be started separately via ssh_ptl_tx_start() and
* ssh_ptl_rx_start(), after the packet-layer has been initialized and the
* lower-level transport layer has been set up.
*
* Return: Returns zero on success and a nonzero error code on failure.
*/
int ssh_ptl_init(struct ssh_ptl *ptl, struct serdev_device *serdev,
struct ssh_ptl_ops *ops)
{
int i, status;
ptl->serdev = serdev;
ptl->state = 0;
spin_lock_init(&ptl->queue.lock);
INIT_LIST_HEAD(&ptl->queue.head);
spin_lock_init(&ptl->pending.lock);
INIT_LIST_HEAD(&ptl->pending.head);
atomic_set_release(&ptl->pending.count, 0);
ptl->tx.thread = NULL;
atomic_set(&ptl->tx.running, 0);
init_completion(&ptl->tx.thread_cplt_pkt);
init_completion(&ptl->tx.thread_cplt_tx);
init_waitqueue_head(&ptl->tx.packet_wq);
ptl->rx.thread = NULL;
init_waitqueue_head(&ptl->rx.wq);
spin_lock_init(&ptl->rtx_timeout.lock);
ptl->rtx_timeout.timeout = SSH_PTL_PACKET_TIMEOUT;
ptl->rtx_timeout.expires = KTIME_MAX;
INIT_DELAYED_WORK(&ptl->rtx_timeout.reaper, ssh_ptl_timeout_reap);
ptl->ops = *ops;
/* Initialize list of recent/blocked SEQs with invalid sequence IDs. */
for (i = 0; i < ARRAY_SIZE(ptl->rx.blocked.seqs); i++)
ptl->rx.blocked.seqs[i] = U16_MAX;
ptl->rx.blocked.offset = 0;
status = kfifo_alloc(&ptl->rx.fifo, SSH_PTL_RX_FIFO_LEN, GFP_KERNEL);
if (status)
return status;
status = sshp_buf_alloc(&ptl->rx.buf, SSH_PTL_RX_BUF_LEN, GFP_KERNEL);
if (status)
kfifo_free(&ptl->rx.fifo);
return status;
}
/**
* ssh_ptl_destroy() - Deinitialize packet transport layer.
* @ptl: The packet transport layer to deinitialize.
*
* Deinitializes the given packet transport layer and frees resources
* associated with it. If receiver and/or transmitter threads have been
* started, the layer must first be shut down via ssh_ptl_shutdown() before
* this function can be called.
*/
void ssh_ptl_destroy(struct ssh_ptl *ptl)
{
kfifo_free(&ptl->rx.fifo);
sshp_buf_free(&ptl->rx.buf);
}
| linux-master | drivers/platform/surface/aggregator/ssh_packet_layer.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Surface Serial Hub (SSH) driver for communication with the Surface/System
* Aggregator Module (SSAM/SAM).
*
* Provides access to a SAM-over-SSH connected EC via a controller device.
* Handles communication via requests as well as enabling, disabling, and
* relaying of events.
*
* Copyright (C) 2019-2022 Maximilian Luz <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/serdev.h>
#include <linux/sysfs.h>
#include <linux/surface_aggregator/controller.h>
#include <linux/surface_aggregator/device.h>
#include "bus.h"
#include "controller.h"
#define CREATE_TRACE_POINTS
#include "trace.h"
/* -- Static controller reference. ------------------------------------------ */
/*
* Main controller reference. The corresponding lock must be held while
* accessing (reading/writing) the reference.
*/
static struct ssam_controller *__ssam_controller;
static DEFINE_SPINLOCK(__ssam_controller_lock);
/**
* ssam_get_controller() - Get reference to SSAM controller.
*
* Returns a reference to the SSAM controller of the system or %NULL if there
* is none, it hasn't been set up yet, or it has already been unregistered.
* This function automatically increments the reference count of the
* controller, thus the calling party must ensure that ssam_controller_put()
* is called when it doesn't need the controller any more.
*/
struct ssam_controller *ssam_get_controller(void)
{
struct ssam_controller *ctrl;
spin_lock(&__ssam_controller_lock);
ctrl = __ssam_controller;
if (!ctrl)
goto out;
if (WARN_ON(!kref_get_unless_zero(&ctrl->kref)))
ctrl = NULL;
out:
spin_unlock(&__ssam_controller_lock);
return ctrl;
}
EXPORT_SYMBOL_GPL(ssam_get_controller);
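/*
 * Illustrative sketch (guarded out, not compiled): the get/use/put pattern
 * expected of ssam_get_controller() callers. The function name is
 * hypothetical.
 */
#if 0
static void example_use_controller(void)
{
	struct ssam_controller *ctrl = ssam_get_controller();

	if (!ctrl)
		return;	/* No controller present or already unregistered. */

	/* ... submit requests, register notifiers, ... */

	ssam_controller_put(ctrl);	/* Balance the reference taken above. */
}
#endif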
/**
* ssam_try_set_controller() - Try to set the main controller reference.
* @ctrl: The controller to which the reference should point.
*
* Set the main controller reference to the given pointer if the reference
* hasn't been set already.
*
* Return: Returns zero on success or %-EEXIST if the reference has already
* been set.
*/
static int ssam_try_set_controller(struct ssam_controller *ctrl)
{
int status = 0;
spin_lock(&__ssam_controller_lock);
if (!__ssam_controller)
__ssam_controller = ctrl;
else
status = -EEXIST;
spin_unlock(&__ssam_controller_lock);
return status;
}
/**
* ssam_clear_controller() - Remove/clear the main controller reference.
*
* Clears the main controller reference, i.e. sets it to %NULL. This function
* should be called before the controller is shut down.
*/
static void ssam_clear_controller(void)
{
spin_lock(&__ssam_controller_lock);
__ssam_controller = NULL;
spin_unlock(&__ssam_controller_lock);
}
/**
* ssam_client_link() - Link an arbitrary client device to the controller.
* @c: The controller to link to.
* @client: The client device.
*
* Link an arbitrary client device to the controller by creating a device link
* between it as consumer and the controller device as provider. This function
* can be used for non-SSAM devices (or SSAM devices not registered as child
* under the controller) to guarantee that the controller is valid for as long
* as the driver of the client device is bound, and that proper suspend and
* resume ordering is guaranteed.
*
* The device link does not have to be destructed manually. It is removed
* automatically once the driver of the client device unbinds.
*
* Return: Returns zero on success, %-ENODEV if the controller is not ready or
* going to be removed soon, or %-ENOMEM if the device link could not be
* created for other reasons.
*/
int ssam_client_link(struct ssam_controller *c, struct device *client)
{
const u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER;
struct device_link *link;
struct device *ctrldev;
ssam_controller_statelock(c);
if (c->state != SSAM_CONTROLLER_STARTED) {
ssam_controller_stateunlock(c);
return -ENODEV;
}
ctrldev = ssam_controller_device(c);
if (!ctrldev) {
ssam_controller_stateunlock(c);
return -ENODEV;
}
link = device_link_add(client, ctrldev, flags);
if (!link) {
ssam_controller_stateunlock(c);
return -ENOMEM;
}
/*
* Return -ENODEV if supplier driver is on its way to be removed. In
* this case, the controller won't be around for much longer and the
* device link is not going to save us any more, as unbinding is
* already in progress.
*/
if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND) {
ssam_controller_stateunlock(c);
return -ENODEV;
}
ssam_controller_stateunlock(c);
return 0;
}
EXPORT_SYMBOL_GPL(ssam_client_link);
/**
* ssam_client_bind() - Bind an arbitrary client device to the controller.
* @client: The client device.
*
* Link an arbitrary client device to the controller by creating a device link
* between it as consumer and the main controller device as provider. This
* function can be used for non-SSAM devices to guarantee that the controller
* returned by this function is valid for as long as the driver of the client
* device is bound, and that proper suspend and resume ordering is guaranteed.
*
* This function does essentially the same as ssam_client_link(), except that
* it first fetches the main controller reference, then creates the link, and
* finally returns this reference. Note that this function does not increment
* the reference counter of the controller, as, due to the link, the
* controller lifetime is assured as long as the driver of the client device
* is bound.
*
* It is not valid to use the controller reference obtained by this method
* outside of the driver bound to the client device at the time of calling
* this function, without first incrementing the reference count of the
* controller via ssam_controller_get(). Even after doing this, care must be
* taken that requests are only submitted and notifiers are only
* (un-)registered when the controller is active and not suspended. In other
* words: The device link only lives as long as the client driver is bound and
* any guarantees enforced by this link (e.g. active controller state) can
* only be relied upon as long as this link exists and may need to be enforced
* in other ways afterwards.
*
* The created device link does not have to be destructed manually. It is
* removed automatically once the driver of the client device unbinds.
*
* Return: Returns the controller on success, an error pointer with %-ENODEV
* if the controller is not present, not ready or going to be removed soon, or
* %-ENOMEM if the device link could not be created for other reasons.
*/
struct ssam_controller *ssam_client_bind(struct device *client)
{
struct ssam_controller *c;
int status;
c = ssam_get_controller();
if (!c)
return ERR_PTR(-ENODEV);
status = ssam_client_link(c, client);
/*
* Note that we can drop our controller reference in both success and
* failure cases: On success, we have bound the controller lifetime
	 * inherently to the client driver lifetime, i.e. the controller is
* now guaranteed to outlive the client driver. On failure, we're not
* going to use the controller any more.
*/
ssam_controller_put(c);
return status >= 0 ? c : ERR_PTR(status);
}
EXPORT_SYMBOL_GPL(ssam_client_bind);
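/*
 * Illustrative sketch (guarded out, not compiled): binding a non-SSAM client
 * driver to the controller from its probe() callback. Driver specifics are
 * hypothetical; deferring probe on -ENODEV mirrors common client usage.
 */
#if 0
static int example_client_probe(struct platform_device *pdev)
{
	struct ssam_controller *ctrl;

	ctrl = ssam_client_bind(&pdev->dev);
	if (IS_ERR(ctrl))
		return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);

	/*
	 * The device link keeps ctrl valid for as long as this driver is
	 * bound; no explicit put is required here.
	 */
	platform_set_drvdata(pdev, ctrl);
	return 0;
}
#endif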
/* -- Glue layer (serdev_device -> ssam_controller). ------------------------ */
static int ssam_receive_buf(struct serdev_device *dev, const unsigned char *buf,
size_t n)
{
struct ssam_controller *ctrl;
ctrl = serdev_device_get_drvdata(dev);
return ssam_controller_receive_buf(ctrl, buf, n);
}
static void ssam_write_wakeup(struct serdev_device *dev)
{
ssam_controller_write_wakeup(serdev_device_get_drvdata(dev));
}
static const struct serdev_device_ops ssam_serdev_ops = {
.receive_buf = ssam_receive_buf,
.write_wakeup = ssam_write_wakeup,
};
/* -- SysFS and misc. ------------------------------------------------------- */
static int ssam_log_firmware_version(struct ssam_controller *ctrl)
{
u32 version, a, b, c;
int status;
status = ssam_get_firmware_version(ctrl, &version);
if (status)
return status;
a = (version >> 24) & 0xff;
b = ((version >> 8) & 0xffff);
c = version & 0xff;
ssam_info(ctrl, "SAM firmware version: %u.%u.%u\n", a, b, c);
return 0;
}
static ssize_t firmware_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ssam_controller *ctrl = dev_get_drvdata(dev);
u32 version, a, b, c;
int status;
status = ssam_get_firmware_version(ctrl, &version);
if (status < 0)
return status;
a = (version >> 24) & 0xff;
b = ((version >> 8) & 0xffff);
c = version & 0xff;
return sysfs_emit(buf, "%u.%u.%u\n", a, b, c);
}
static DEVICE_ATTR_RO(firmware_version);
static struct attribute *ssam_sam_attrs[] = {
&dev_attr_firmware_version.attr,
NULL
};
static const struct attribute_group ssam_sam_group = {
.name = "sam",
.attrs = ssam_sam_attrs,
};
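/*
 * The group above is registered on the controller's serdev device in
 * ssam_serial_hub_probe(), exposing the SAM firmware version to userspace
 * as the "sam/firmware_version" attribute under that device's sysfs
 * directory.
 */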
/* -- ACPI based device setup. ---------------------------------------------- */
static acpi_status ssam_serdev_setup_via_acpi_crs(struct acpi_resource *rsc,
void *ctx)
{
struct serdev_device *serdev = ctx;
struct acpi_resource_uart_serialbus *uart;
bool flow_control;
int status = 0;
if (!serdev_acpi_get_uart_resource(rsc, &uart))
return AE_OK;
/* Set up serdev device. */
serdev_device_set_baudrate(serdev, uart->default_baud_rate);
/* serdev currently only supports RTSCTS flow control. */
if (uart->flow_control & (~((u8)ACPI_UART_FLOW_CONTROL_HW))) {
dev_warn(&serdev->dev, "setup: unsupported flow control (value: %#04x)\n",
uart->flow_control);
}
/* Set RTSCTS flow control. */
flow_control = uart->flow_control & ACPI_UART_FLOW_CONTROL_HW;
serdev_device_set_flow_control(serdev, flow_control);
	/* serdev currently only supports NONE/EVEN/ODD parity. */
switch (uart->parity) {
case ACPI_UART_PARITY_NONE:
status = serdev_device_set_parity(serdev, SERDEV_PARITY_NONE);
break;
case ACPI_UART_PARITY_EVEN:
status = serdev_device_set_parity(serdev, SERDEV_PARITY_EVEN);
break;
case ACPI_UART_PARITY_ODD:
status = serdev_device_set_parity(serdev, SERDEV_PARITY_ODD);
break;
default:
dev_warn(&serdev->dev, "setup: unsupported parity (value: %#04x)\n",
uart->parity);
break;
}
if (status) {
dev_err(&serdev->dev, "setup: failed to set parity (value: %#04x, error: %d)\n",
uart->parity, status);
return AE_ERROR;
}
/* We've found the resource and are done. */
return AE_CTRL_TERMINATE;
}
static acpi_status ssam_serdev_setup_via_acpi(acpi_handle handle,
struct serdev_device *serdev)
{
return acpi_walk_resources(handle, METHOD_NAME__CRS,
ssam_serdev_setup_via_acpi_crs, serdev);
}
/* -- Power management. ----------------------------------------------------- */
static void ssam_serial_hub_shutdown(struct device *dev)
{
struct ssam_controller *c = dev_get_drvdata(dev);
int status;
/*
* Try to disable notifiers, signal display-off and D0-exit, ignore any
* errors.
*
* Note: It has not been established yet if this is actually
* necessary/useful for shutdown.
*/
status = ssam_notifier_disable_registered(c);
if (status) {
ssam_err(c, "pm: failed to disable notifiers for shutdown: %d\n",
status);
}
status = ssam_ctrl_notif_display_off(c);
if (status)
ssam_err(c, "pm: display-off notification failed: %d\n", status);
status = ssam_ctrl_notif_d0_exit(c);
if (status)
ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
}
#ifdef CONFIG_PM_SLEEP
static int ssam_serial_hub_pm_prepare(struct device *dev)
{
struct ssam_controller *c = dev_get_drvdata(dev);
int status;
/*
	 * Try to signal display-off. This will quiesce events.
*
* Note: Signaling display-off/display-on should normally be done from
* some sort of display state notifier. As that is not available,
* signal it here.
*/
status = ssam_ctrl_notif_display_off(c);
if (status)
ssam_err(c, "pm: display-off notification failed: %d\n", status);
return status;
}
static void ssam_serial_hub_pm_complete(struct device *dev)
{
struct ssam_controller *c = dev_get_drvdata(dev);
int status;
/*
* Try to signal display-on. This will restore events.
*
* Note: Signaling display-off/display-on should normally be done from
* some sort of display state notifier. As that is not available,
* signal it here.
*/
status = ssam_ctrl_notif_display_on(c);
if (status)
ssam_err(c, "pm: display-on notification failed: %d\n", status);
}
static int ssam_serial_hub_pm_suspend(struct device *dev)
{
struct ssam_controller *c = dev_get_drvdata(dev);
int status;
/*
* Try to signal D0-exit, enable IRQ wakeup if specified. Abort on
* error.
*/
status = ssam_ctrl_notif_d0_exit(c);
if (status) {
ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
goto err_notif;
}
status = ssam_irq_arm_for_wakeup(c);
if (status)
goto err_irq;
WARN_ON(ssam_controller_suspend(c));
return 0;
err_irq:
ssam_ctrl_notif_d0_entry(c);
err_notif:
ssam_ctrl_notif_display_on(c);
return status;
}
static int ssam_serial_hub_pm_resume(struct device *dev)
{
struct ssam_controller *c = dev_get_drvdata(dev);
int status;
WARN_ON(ssam_controller_resume(c));
/*
* Try to disable IRQ wakeup (if specified) and signal D0-entry. In
* case of errors, log them and try to restore normal operation state
* as far as possible.
*
* Note: Signaling display-off/display-on should normally be done from
* some sort of display state notifier. As that is not available,
* signal it here.
*/
ssam_irq_disarm_wakeup(c);
status = ssam_ctrl_notif_d0_entry(c);
if (status)
ssam_err(c, "pm: D0-entry notification failed: %d\n", status);
return 0;
}
static int ssam_serial_hub_pm_freeze(struct device *dev)
{
struct ssam_controller *c = dev_get_drvdata(dev);
int status;
/*
* During hibernation image creation, we only have to ensure that the
* EC doesn't send us any events. This is done via the display-off
* and D0-exit notifications. Note that this sets up the wakeup IRQ
* on the EC side, however, we have disabled it by default on our side
* and won't enable it here.
*
* See ssam_serial_hub_poweroff() for more details on the hibernation
* process.
*/
status = ssam_ctrl_notif_d0_exit(c);
if (status) {
ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
ssam_ctrl_notif_display_on(c);
return status;
}
WARN_ON(ssam_controller_suspend(c));
return 0;
}
static int ssam_serial_hub_pm_thaw(struct device *dev)
{
struct ssam_controller *c = dev_get_drvdata(dev);
int status;
WARN_ON(ssam_controller_resume(c));
status = ssam_ctrl_notif_d0_entry(c);
if (status)
ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
return status;
}
static int ssam_serial_hub_pm_poweroff(struct device *dev)
{
struct ssam_controller *c = dev_get_drvdata(dev);
int status;
/*
* When entering hibernation and powering off the system, the EC, at
* least on some models, may disable events. Without us taking care of
* that, this leads to events not being enabled/restored when the
	 * system resumes from hibernation, resulting in SAM-HID subsystem devices
* (i.e. keyboard, touchpad) not working, AC-plug/AC-unplug events being
* gone, etc.
*
* To avoid these issues, we disable all registered events here (this is
* likely not actually required) and restore them during the drivers PM
* restore callback.
*
* Wakeup from the EC interrupt is not supported during hibernation,
* so don't arm the IRQ here.
*/
status = ssam_notifier_disable_registered(c);
if (status) {
ssam_err(c, "pm: failed to disable notifiers for hibernation: %d\n",
status);
return status;
}
status = ssam_ctrl_notif_d0_exit(c);
if (status) {
ssam_err(c, "pm: D0-exit notification failed: %d\n", status);
ssam_notifier_restore_registered(c);
return status;
}
WARN_ON(ssam_controller_suspend(c));
return 0;
}
static int ssam_serial_hub_pm_restore(struct device *dev)
{
struct ssam_controller *c = dev_get_drvdata(dev);
int status;
/*
* Ignore but log errors, try to restore state as much as possible in
* case of failures. See ssam_serial_hub_poweroff() for more details on
* the hibernation process.
*/
WARN_ON(ssam_controller_resume(c));
status = ssam_ctrl_notif_d0_entry(c);
if (status)
ssam_err(c, "pm: D0-entry notification failed: %d\n", status);
ssam_notifier_restore_registered(c);
return 0;
}
static const struct dev_pm_ops ssam_serial_hub_pm_ops = {
.prepare = ssam_serial_hub_pm_prepare,
.complete = ssam_serial_hub_pm_complete,
.suspend = ssam_serial_hub_pm_suspend,
.resume = ssam_serial_hub_pm_resume,
.freeze = ssam_serial_hub_pm_freeze,
.thaw = ssam_serial_hub_pm_thaw,
.poweroff = ssam_serial_hub_pm_poweroff,
.restore = ssam_serial_hub_pm_restore,
};
#else /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops ssam_serial_hub_pm_ops = { };
#endif /* CONFIG_PM_SLEEP */
/* -- Device/driver setup. -------------------------------------------------- */
static const struct acpi_gpio_params gpio_ssam_wakeup_int = { 0, 0, false };
static const struct acpi_gpio_params gpio_ssam_wakeup = { 1, 0, false };
static const struct acpi_gpio_mapping ssam_acpi_gpios[] = {
{ "ssam_wakeup-int-gpio", &gpio_ssam_wakeup_int, 1 },
{ "ssam_wakeup-gpio", &gpio_ssam_wakeup, 1 },
{ },
};
static int ssam_serial_hub_probe(struct serdev_device *serdev)
{
struct acpi_device *ssh = ACPI_COMPANION(&serdev->dev);
struct ssam_controller *ctrl;
acpi_status astatus;
int status;
if (gpiod_count(&serdev->dev, NULL) < 0)
return -ENODEV;
status = devm_acpi_dev_add_driver_gpios(&serdev->dev, ssam_acpi_gpios);
if (status)
return status;
/* Allocate controller. */
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
return -ENOMEM;
/* Initialize controller. */
status = ssam_controller_init(ctrl, serdev);
if (status)
goto err_ctrl_init;
ssam_controller_lock(ctrl);
/* Set up serdev device. */
serdev_device_set_drvdata(serdev, ctrl);
serdev_device_set_client_ops(serdev, &ssam_serdev_ops);
status = serdev_device_open(serdev);
if (status)
goto err_devopen;
astatus = ssam_serdev_setup_via_acpi(ssh->handle, serdev);
if (ACPI_FAILURE(astatus)) {
status = -ENXIO;
goto err_devinit;
}
/* Start controller. */
status = ssam_controller_start(ctrl);
if (status)
goto err_devinit;
ssam_controller_unlock(ctrl);
/*
* Initial SAM requests: Log version and notify default/init power
* states.
*/
status = ssam_log_firmware_version(ctrl);
if (status)
goto err_initrq;
status = ssam_ctrl_notif_d0_entry(ctrl);
if (status)
goto err_initrq;
status = ssam_ctrl_notif_display_on(ctrl);
if (status)
goto err_initrq;
status = sysfs_create_group(&serdev->dev.kobj, &ssam_sam_group);
if (status)
goto err_initrq;
/* Set up IRQ. */
status = ssam_irq_setup(ctrl);
if (status)
goto err_irq;
/* Finally, set main controller reference. */
status = ssam_try_set_controller(ctrl);
if (WARN_ON(status)) /* Currently, we're the only provider. */
goto err_mainref;
/*
* TODO: The EC can wake up the system via the associated GPIO interrupt
* in multiple situations. One of which is the remaining battery
* capacity falling below a certain threshold. Normally, we should
* use the device_init_wakeup function, however, the EC also seems
* to have other reasons for waking up the system and it seems
* that Windows has additional checks whether the system should be
* resumed. In short, this causes some spurious unwanted wake-ups.
* For now let's thus default power/wakeup to false.
*/
device_set_wakeup_capable(&serdev->dev, true);
acpi_dev_clear_dependencies(ssh);
return 0;
err_mainref:
ssam_irq_free(ctrl);
err_irq:
sysfs_remove_group(&serdev->dev.kobj, &ssam_sam_group);
err_initrq:
ssam_controller_lock(ctrl);
ssam_controller_shutdown(ctrl);
err_devinit:
serdev_device_close(serdev);
err_devopen:
ssam_controller_destroy(ctrl);
ssam_controller_unlock(ctrl);
err_ctrl_init:
kfree(ctrl);
return status;
}
static void ssam_serial_hub_remove(struct serdev_device *serdev)
{
struct ssam_controller *ctrl = serdev_device_get_drvdata(serdev);
int status;
/* Clear static reference so that no one else can get a new one. */
ssam_clear_controller();
/* Disable and free IRQ. */
ssam_irq_free(ctrl);
sysfs_remove_group(&serdev->dev.kobj, &ssam_sam_group);
ssam_controller_lock(ctrl);
/* Remove all client devices. */
ssam_remove_clients(&serdev->dev);
/* Act as if suspending to silence events. */
status = ssam_ctrl_notif_display_off(ctrl);
if (status) {
dev_err(&serdev->dev, "display-off notification failed: %d\n",
status);
}
status = ssam_ctrl_notif_d0_exit(ctrl);
if (status) {
dev_err(&serdev->dev, "D0-exit notification failed: %d\n",
status);
}
/* Shut down controller and remove serdev device reference from it. */
ssam_controller_shutdown(ctrl);
/* Shut down actual transport. */
serdev_device_wait_until_sent(serdev, 0);
serdev_device_close(serdev);
/* Drop our controller reference. */
ssam_controller_unlock(ctrl);
ssam_controller_put(ctrl);
device_set_wakeup_capable(&serdev->dev, false);
}
static const struct acpi_device_id ssam_serial_hub_match[] = {
{ "MSHW0084", 0 },
{ },
};
MODULE_DEVICE_TABLE(acpi, ssam_serial_hub_match);
static struct serdev_device_driver ssam_serial_hub = {
.probe = ssam_serial_hub_probe,
.remove = ssam_serial_hub_remove,
.driver = {
.name = "surface_serial_hub",
.acpi_match_table = ssam_serial_hub_match,
.pm = &ssam_serial_hub_pm_ops,
.shutdown = ssam_serial_hub_shutdown,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
/* -- Module setup. --------------------------------------------------------- */
static int __init ssam_core_init(void)
{
int status;
status = ssam_bus_register();
if (status)
goto err_bus;
status = ssh_ctrl_packet_cache_init();
if (status)
goto err_cpkg;
status = ssam_event_item_cache_init();
if (status)
goto err_evitem;
status = serdev_device_driver_register(&ssam_serial_hub);
if (status)
goto err_register;
return 0;
err_register:
ssam_event_item_cache_destroy();
err_evitem:
ssh_ctrl_packet_cache_destroy();
err_cpkg:
ssam_bus_unregister();
err_bus:
return status;
}
subsys_initcall(ssam_core_init);
static void __exit ssam_core_exit(void)
{
serdev_device_driver_unregister(&ssam_serial_hub);
ssam_event_item_cache_destroy();
ssh_ctrl_packet_cache_destroy();
ssam_bus_unregister();
}
module_exit(ssam_core_exit);
MODULE_AUTHOR("Maximilian Luz <[email protected]>");
MODULE_DESCRIPTION("Subsystem and Surface Serial Hub driver for Surface System Aggregator Module");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/surface/aggregator/core.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Surface System Aggregator Module bus and device integration.
*
* Copyright (C) 2019-2022 Maximilian Luz <[email protected]>
*/
#include <linux/device.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/surface_aggregator/controller.h>
#include <linux/surface_aggregator/device.h>
#include "bus.h"
#include "controller.h"
/* -- Device and bus functions. --------------------------------------------- */
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct ssam_device *sdev = to_ssam_device(dev);
return sysfs_emit(buf, "ssam:d%02Xc%02Xt%02Xi%02Xf%02X\n",
sdev->uid.domain, sdev->uid.category, sdev->uid.target,
sdev->uid.instance, sdev->uid.function);
}
static DEVICE_ATTR_RO(modalias);
static struct attribute *ssam_device_attrs[] = {
&dev_attr_modalias.attr,
NULL,
};
ATTRIBUTE_GROUPS(ssam_device);
static int ssam_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct ssam_device *sdev = to_ssam_device(dev);
return add_uevent_var(env, "MODALIAS=ssam:d%02Xc%02Xt%02Xi%02Xf%02X",
sdev->uid.domain, sdev->uid.category,
sdev->uid.target, sdev->uid.instance,
sdev->uid.function);
}
static void ssam_device_release(struct device *dev)
{
struct ssam_device *sdev = to_ssam_device(dev);
ssam_controller_put(sdev->ctrl);
fwnode_handle_put(sdev->dev.fwnode);
kfree(sdev);
}
const struct device_type ssam_device_type = {
.name = "surface_aggregator_device",
.groups = ssam_device_groups,
.uevent = ssam_device_uevent,
.release = ssam_device_release,
};
EXPORT_SYMBOL_GPL(ssam_device_type);
/**
* ssam_device_alloc() - Allocate and initialize a SSAM client device.
* @ctrl: The controller under which the device should be added.
* @uid: The UID of the device to be added.
*
* Allocates and initializes a new client device. The parent of the device
* will be set to the controller device and the name will be set based on the
* UID. Note that the device still has to be added via ssam_device_add().
* Refer to that function for more details.
*
* Return: Returns the newly allocated and initialized SSAM client device, or
* %NULL if it could not be allocated.
*/
struct ssam_device *ssam_device_alloc(struct ssam_controller *ctrl,
struct ssam_device_uid uid)
{
struct ssam_device *sdev;
sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
if (!sdev)
return NULL;
device_initialize(&sdev->dev);
sdev->dev.bus = &ssam_bus_type;
sdev->dev.type = &ssam_device_type;
sdev->dev.parent = ssam_controller_device(ctrl);
sdev->ctrl = ssam_controller_get(ctrl);
sdev->uid = uid;
dev_set_name(&sdev->dev, "%02x:%02x:%02x:%02x:%02x",
sdev->uid.domain, sdev->uid.category, sdev->uid.target,
sdev->uid.instance, sdev->uid.function);
return sdev;
}
EXPORT_SYMBOL_GPL(ssam_device_alloc);
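/*
 * Illustrative sketch (guarded out, not compiled): allocating and
 * registering a client device. UID values are placeholders, and
 * ssam_device_put() (a put_device() wrapper from the public header) is
 * assumed for the error path.
 */
#if 0
static int example_register_client(struct ssam_controller *ctrl)
{
	struct ssam_device_uid uid = {
		.domain = 0x01, .category = 0x15,
		.target = 0x01, .instance = 0x00, .function = 0x00,
	};
	struct ssam_device *sdev;
	int status;

	sdev = ssam_device_alloc(ctrl, uid);
	if (!sdev)
		return -ENOMEM;

	status = ssam_device_add(sdev);
	if (status)
		ssam_device_put(sdev);	/* Drops the initialized device ref. */

	return status;
}
#endif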
/**
* ssam_device_add() - Add a SSAM client device.
* @sdev: The SSAM client device to be added.
*
* Added client devices must be guaranteed to always have a valid and active
* controller. Thus, this function will fail with %-ENODEV if the controller
* of the device has not been initialized yet, has been suspended, or has been
* shut down.
*
* The caller of this function should ensure that the corresponding call to
* ssam_device_remove() is issued before the controller is shut down. If the
* added device is a direct child of the controller device (default), it will
* be automatically removed when the controller is shut down.
*
* By default, the controller device will become the parent of the newly
* created client device. The parent may be changed before ssam_device_add is
* called, but care must be taken that a) the correct suspend/resume ordering
* is guaranteed and b) the client device does not outlive the controller,
* i.e. that the device is removed before the controller is being shut down.
* In case these guarantees have to be manually enforced, please refer to the
* ssam_client_link() and ssam_client_bind() functions, which are intended to
* set up device-links for this purpose.
*
* Return: Returns zero on success, a negative error code on failure.
*/
int ssam_device_add(struct ssam_device *sdev)
{
int status;
/*
* Ensure that we can only add new devices to a controller if it has
* been started and is not going away soon. This works in combination
* with ssam_controller_remove_clients to ensure driver presence for the
* controller device, i.e. it ensures that the controller (sdev->ctrl)
* is always valid and can be used for requests as long as the client
* device we add here is registered as child under it. This essentially
* guarantees that the client driver can always expect the preconditions
* for functions like ssam_request_do_sync() (controller has to be
* started and is not suspended) to hold and thus does not have to check
* for them.
*
* Note that for this to work, the controller has to be a parent device.
* If it is not a direct parent, care has to be taken that the device is
* removed via ssam_device_remove(), as device_unregister does not
* remove child devices recursively.
*/
ssam_controller_statelock(sdev->ctrl);
if (sdev->ctrl->state != SSAM_CONTROLLER_STARTED) {
ssam_controller_stateunlock(sdev->ctrl);
return -ENODEV;
}
status = device_add(&sdev->dev);
ssam_controller_stateunlock(sdev->ctrl);
return status;
}
EXPORT_SYMBOL_GPL(ssam_device_add);
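/*
 * Illustrative usage sketch (not part of the original driver, kept inert via
 * "#if 0" so it does not affect the build): allocate a client device for a
 * hypothetical UID, add it, and drop the reference taken by
 * ssam_device_alloc() if adding fails - the same pattern used by
 * ssam_add_client_device() below.
 */
#if 0
static int example_add_client(struct ssam_controller *ctrl)
{
	struct ssam_device_uid uid = {
		.domain = 0x01,		/* hypothetical values */
		.category = 0x15,
		.target = 0x01,
		.instance = 0x00,
		.function = 0x00,
	};
	struct ssam_device *sdev;
	int status;

	sdev = ssam_device_alloc(ctrl, uid);
	if (!sdev)
		return -ENOMEM;

	status = ssam_device_add(sdev);
	if (status)
		ssam_device_put(sdev);	/* also releases the controller reference */

	return status;
}
#endif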
/**
* ssam_device_remove() - Remove a SSAM client device.
* @sdev: The device to remove.
*
* Removes and unregisters the provided SSAM client device.
*/
void ssam_device_remove(struct ssam_device *sdev)
{
device_unregister(&sdev->dev);
}
EXPORT_SYMBOL_GPL(ssam_device_remove);
/**
* ssam_device_id_compatible() - Check if a device ID matches a UID.
* @id: The device ID as potential match.
* @uid: The device UID matching against.
*
* Check if the given ID is a match for the given UID, i.e. if a device with
* the provided UID is compatible to the given ID following the match rules
* described in its &ssam_device_id.match_flags member.
*
* Return: Returns %true if the given UID is compatible to the match rule
* described by the given ID, %false otherwise.
*/
static bool ssam_device_id_compatible(const struct ssam_device_id *id,
struct ssam_device_uid uid)
{
if (id->domain != uid.domain || id->category != uid.category)
return false;
if ((id->match_flags & SSAM_MATCH_TARGET) && id->target != uid.target)
return false;
if ((id->match_flags & SSAM_MATCH_INSTANCE) && id->instance != uid.instance)
return false;
if ((id->match_flags & SSAM_MATCH_FUNCTION) && id->function != uid.function)
return false;
return true;
}
/**
* ssam_device_id_is_null() - Check if a device ID is null.
* @id: The device ID to check.
*
* Check if a given device ID is null, i.e. all zeros. Used to check for the
* end of ``MODULE_DEVICE_TABLE(ssam, ...)`` or similar lists.
*
* Return: Returns %true if the given ID represents a null ID, %false
* otherwise.
*/
static bool ssam_device_id_is_null(const struct ssam_device_id *id)
{
return id->match_flags == 0 &&
id->domain == 0 &&
id->category == 0 &&
id->target == 0 &&
id->instance == 0 &&
id->function == 0 &&
id->driver_data == 0;
}
/**
* ssam_device_id_match() - Find the matching ID table entry for the given UID.
* @table: The table to search in.
* @uid: The UID to matched against the individual table entries.
*
* Find the first match for the provided device UID in the provided ID table
* and return it. Returns %NULL if no match could be found.
*/
const struct ssam_device_id *ssam_device_id_match(const struct ssam_device_id *table,
const struct ssam_device_uid uid)
{
const struct ssam_device_id *id;
for (id = table; !ssam_device_id_is_null(id); ++id)
if (ssam_device_id_compatible(id, uid))
return id;
return NULL;
}
EXPORT_SYMBOL_GPL(ssam_device_id_match);
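/*
 * Illustrative sketch (inert via "#if 0", values hypothetical): a minimal ID
 * table. Domain and category are always compared; SSAM_MATCH_TARGET requests
 * that the target be compared as well, and the all-zero entry terminates the
 * table as detected by ssam_device_id_is_null().
 */
#if 0
static const struct ssam_device_id example_match_table[] = {
	{
		.match_flags = SSAM_MATCH_TARGET,
		.domain = 0x01,
		.category = 0x15,
		.target = 0x01,
	},
	{ /* sentinel */ },
};

static bool example_is_supported(struct ssam_device_uid uid)
{
	return ssam_device_id_match(example_match_table, uid) != NULL;
}
#endif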
/**
* ssam_device_get_match() - Find and return the ID matching the device in the
* ID table of the bound driver.
* @dev: The device for which to get the matching ID table entry.
*
 * Find the first match for the UID of the device in the ID table of the
* currently bound driver and return it. Returns %NULL if the device does not
* have a driver bound to it, the driver does not have match_table (i.e. it is
* %NULL), or there is no match in the driver's match_table.
*
* This function essentially calls ssam_device_id_match() with the ID table of
* the bound device driver and the UID of the device.
*
* Return: Returns the first match for the UID of the device in the device
* driver's match table, or %NULL if no such match could be found.
*/
const struct ssam_device_id *ssam_device_get_match(const struct ssam_device *dev)
{
const struct ssam_device_driver *sdrv;
sdrv = to_ssam_device_driver(dev->dev.driver);
if (!sdrv)
return NULL;
if (!sdrv->match_table)
return NULL;
return ssam_device_id_match(sdrv->match_table, dev->uid);
}
EXPORT_SYMBOL_GPL(ssam_device_get_match);
/**
* ssam_device_get_match_data() - Find the ID matching the device in the
* ID table of the bound driver and return its ``driver_data`` member.
* @dev: The device for which to get the match data.
*
 * Find the first match for the UID of the device in the ID table of the
* corresponding driver and return its driver_data. Returns %NULL if the
* device does not have a driver bound to it, the driver does not have
* match_table (i.e. it is %NULL), there is no match in the driver's
* match_table, or the match does not have any driver_data.
*
* This function essentially calls ssam_device_get_match() and, if any match
* could be found, returns its ``struct ssam_device_id.driver_data`` member.
*
* Return: Returns the driver data associated with the first match for the UID
* of the device in the device driver's match table, or %NULL if no such match
* could be found.
*/
const void *ssam_device_get_match_data(const struct ssam_device *dev)
{
const struct ssam_device_id *id;
id = ssam_device_get_match(dev);
if (!id)
return NULL;
return (const void *)id->driver_data;
}
EXPORT_SYMBOL_GPL(ssam_device_get_match_data);
static int ssam_bus_match(struct device *dev, struct device_driver *drv)
{
struct ssam_device_driver *sdrv = to_ssam_device_driver(drv);
struct ssam_device *sdev = to_ssam_device(dev);
if (!is_ssam_device(dev))
return 0;
return !!ssam_device_id_match(sdrv->match_table, sdev->uid);
}
static int ssam_bus_probe(struct device *dev)
{
return to_ssam_device_driver(dev->driver)
->probe(to_ssam_device(dev));
}
static void ssam_bus_remove(struct device *dev)
{
struct ssam_device_driver *sdrv = to_ssam_device_driver(dev->driver);
if (sdrv->remove)
sdrv->remove(to_ssam_device(dev));
}
struct bus_type ssam_bus_type = {
.name = "surface_aggregator",
.match = ssam_bus_match,
.probe = ssam_bus_probe,
.remove = ssam_bus_remove,
};
EXPORT_SYMBOL_GPL(ssam_bus_type);
/**
* __ssam_device_driver_register() - Register a SSAM client device driver.
* @sdrv: The driver to register.
* @owner: The module owning the provided driver.
*
* Please refer to the ssam_device_driver_register() macro for the normal way
* to register a driver from inside its owning module.
*/
int __ssam_device_driver_register(struct ssam_device_driver *sdrv,
struct module *owner)
{
sdrv->driver.owner = owner;
sdrv->driver.bus = &ssam_bus_type;
/* force drivers to async probe so I/O is possible in probe */
sdrv->driver.probe_type = PROBE_PREFER_ASYNCHRONOUS;
return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__ssam_device_driver_register);
/**
* ssam_device_driver_unregister - Unregister a SSAM device driver.
* @sdrv: The driver to unregister.
*/
void ssam_device_driver_unregister(struct ssam_device_driver *sdrv)
{
driver_unregister(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(ssam_device_driver_unregister);
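/*
 * Illustrative registration sketch (inert via "#if 0"): a minimal client
 * driver using the ssam_device_driver_register() macro mentioned above. The
 * probe callback, driver name, and match table are hypothetical.
 */
#if 0
static const struct ssam_device_id example_id_table[] = {
	{ .match_flags = 0, .domain = 0x01, .category = 0x15 },
	{ /* sentinel */ },
};

static int example_probe(struct ssam_device *sdev)
{
	return 0;	/* hypothetical probe */
}

static struct ssam_device_driver example_driver = {
	.probe = example_probe,
	.match_table = example_id_table,
	.driver = {
		.name = "example_ssam_client",
	},
};

static int __init example_init(void)
{
	return ssam_device_driver_register(&example_driver);
}
#endif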
/* -- Bus registration. ----------------------------------------------------- */
/**
* ssam_bus_register() - Register and set-up the SSAM client device bus.
*/
int ssam_bus_register(void)
{
return bus_register(&ssam_bus_type);
}
/**
* ssam_bus_unregister() - Unregister the SSAM client device bus.
*/
void ssam_bus_unregister(void)
{
return bus_unregister(&ssam_bus_type);
}
/* -- Helpers for controller and hub devices. ------------------------------- */
static int ssam_device_uid_from_string(const char *str, struct ssam_device_uid *uid)
{
u8 d, tc, tid, iid, fn;
int n;
n = sscanf(str, "%hhx:%hhx:%hhx:%hhx:%hhx", &d, &tc, &tid, &iid, &fn);
if (n != 5)
return -EINVAL;
uid->domain = d;
uid->category = tc;
uid->target = tid;
uid->instance = iid;
uid->function = fn;
return 0;
}
static int ssam_get_uid_for_node(struct fwnode_handle *node, struct ssam_device_uid *uid)
{
const char *str = fwnode_get_name(node);
/*
* To simplify definitions of firmware nodes, we set the device name
* based on the UID of the device, prefixed with "ssam:".
*/
if (strncmp(str, "ssam:", strlen("ssam:")) != 0)
return -ENODEV;
str += strlen("ssam:");
return ssam_device_uid_from_string(str, uid);
}
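/*
 * Example (hypothetical values): a firmware node named "ssam:01:15:01:00:00"
 * passes the prefix check above and parses to the UID { .domain = 0x01,
 * .category = 0x15, .target = 0x01, .instance = 0x00, .function = 0x00 }.
 */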
static int ssam_add_client_device(struct device *parent, struct ssam_controller *ctrl,
struct fwnode_handle *node)
{
struct ssam_device_uid uid;
struct ssam_device *sdev;
int status;
status = ssam_get_uid_for_node(node, &uid);
if (status)
return status;
sdev = ssam_device_alloc(ctrl, uid);
if (!sdev)
return -ENOMEM;
sdev->dev.parent = parent;
sdev->dev.fwnode = fwnode_handle_get(node);
status = ssam_device_add(sdev);
if (status)
ssam_device_put(sdev);
return status;
}
/**
* __ssam_register_clients() - Register client devices defined under the
* given firmware node as children of the given device.
* @parent: The parent device under which clients should be registered.
* @ctrl: The controller with which client should be registered.
* @node: The firmware node holding definitions of the devices to be added.
*
* Register all clients that have been defined as children of the given root
* firmware node as children of the given parent device. The respective child
* firmware nodes will be associated with the correspondingly created child
* devices.
*
* The given controller will be used to instantiate the new devices. See
* ssam_device_add() for details.
*
* Note that, generally, the use of either ssam_device_register_clients() or
* ssam_register_clients() should be preferred as they directly use the
* firmware node and/or controller associated with the given device. This
* function is only intended for use when different device specifications (e.g.
* ACPI and firmware nodes) need to be combined (as is done in the platform hub
* of the device registry).
*
* Return: Returns zero on success, nonzero on failure.
*/
int __ssam_register_clients(struct device *parent, struct ssam_controller *ctrl,
struct fwnode_handle *node)
{
struct fwnode_handle *child;
int status;
fwnode_for_each_child_node(node, child) {
/*
* Try to add the device specified in the firmware node. If
* this fails with -ENODEV, the node does not specify any SSAM
* device, so ignore it and continue with the next one.
*/
status = ssam_add_client_device(parent, ctrl, child);
if (status && status != -ENODEV) {
fwnode_handle_put(child);
goto err;
}
}
return 0;
err:
ssam_remove_clients(parent);
return status;
}
EXPORT_SYMBOL_GPL(__ssam_register_clients);
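/*
 * Illustrative sketch (inert via "#if 0"): registering all clients defined
 * under the parent device's own firmware node, which is effectively what the
 * more common wrappers mentioned above do.
 */
#if 0
static int example_register(struct device *parent, struct ssam_controller *ctrl)
{
	return __ssam_register_clients(parent, ctrl, dev_fwnode(parent));
}
#endif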
static int ssam_remove_device(struct device *dev, void *_data)
{
struct ssam_device *sdev = to_ssam_device(dev);
if (is_ssam_device(dev))
ssam_device_remove(sdev);
return 0;
}
/**
* ssam_remove_clients() - Remove SSAM client devices registered as direct
* children under the given parent device.
* @dev: The (parent) device to remove all direct clients for.
*
* Remove all SSAM client devices registered as direct children under the given
* device. Note that this only accounts for direct children of the device.
* Refer to ssam_device_add()/ssam_device_remove() for more details.
*/
void ssam_remove_clients(struct device *dev)
{
device_for_each_child_reverse(dev, NULL, ssam_remove_device);
}
EXPORT_SYMBOL_GPL(ssam_remove_clients);
| linux-master | drivers/platform/surface/aggregator/bus.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* SSH message parser.
*
* Copyright (C) 2019-2022 Maximilian Luz <[email protected]>
*/
#include <asm/unaligned.h>
#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/surface_aggregator/serial_hub.h>
#include "ssh_parser.h"
/**
* sshp_validate_crc() - Validate a CRC in raw message data.
* @src: The span of data over which the CRC should be computed.
* @crc: The pointer to the expected u16 CRC value.
*
* Computes the CRC of the provided data span (@src), compares it to the CRC
* stored at the given address (@crc), and returns the result of this
* comparison, i.e. %true if equal. This function is intended to run on raw
* input/message data.
*
* Return: Returns %true if the computed CRC matches the stored CRC, %false
* otherwise.
*/
static bool sshp_validate_crc(const struct ssam_span *src, const u8 *crc)
{
u16 actual = ssh_crc(src->ptr, src->len);
u16 expected = get_unaligned_le16(crc);
return actual == expected;
}
/**
* sshp_starts_with_syn() - Check if the given data starts with SSH SYN bytes.
* @src: The data span to check the start of.
*/
static bool sshp_starts_with_syn(const struct ssam_span *src)
{
return src->len >= 2 && get_unaligned_le16(src->ptr) == SSH_MSG_SYN;
}
/**
* sshp_find_syn() - Find SSH SYN bytes in the given data span.
* @src: The data span to search in.
* @rem: The span (output) indicating the remaining data, starting with SSH
* SYN bytes, if found.
*
* Search for SSH SYN bytes in the given source span. If found, set the @rem
* span to the remaining data, starting with the first SYN bytes and capped by
* the source span length, and return %true. This function does not copy any
* data, but rather only sets pointers to the respective start addresses and
* length values.
*
* If no SSH SYN bytes could be found, set the @rem span to the zero-length
* span at the end of the source span and return %false.
*
* If partial SSH SYN bytes could be found at the end of the source span, set
* the @rem span to cover these partial SYN bytes, capped by the end of the
* source span, and return %false. This function should then be re-run once
* more data is available.
*
* Return: Returns %true if a complete SSH SYN sequence could be found,
* %false otherwise.
*/
bool sshp_find_syn(const struct ssam_span *src, struct ssam_span *rem)
{
size_t i;
for (i = 0; i < src->len - 1; i++) {
if (likely(get_unaligned_le16(src->ptr + i) == SSH_MSG_SYN)) {
rem->ptr = src->ptr + i;
rem->len = src->len - i;
return true;
}
}
if (unlikely(src->ptr[src->len - 1] == (SSH_MSG_SYN & 0xff))) {
rem->ptr = src->ptr + src->len - 1;
rem->len = 1;
return false;
}
rem->ptr = src->ptr + src->len;
rem->len = 0;
return false;
}
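/*
 * Illustrative caller sketch (inert via "#if 0", names hypothetical): the
 * three possible outcomes of sshp_find_syn() and how a receive path would
 * typically react to them.
 */
#if 0
static void example_resync(struct ssam_span *buf)
{
	struct ssam_span rem;

	if (sshp_find_syn(buf, &rem)) {
		/* Complete SYN found: continue parsing at rem.ptr. */
	} else if (rem.len) {
		/* Partial SYN at the buffer end: wait for more data, re-run. */
	} else {
		/* No SYN bytes at all: the whole buffer can be discarded. */
	}
}
#endif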
/**
* sshp_parse_frame() - Parse SSH frame.
* @dev: The device used for logging.
* @source: The source to parse from.
* @frame: The parsed frame (output).
* @payload: The parsed payload (output).
* @maxlen: The maximum supported message length.
*
* Parses and validates a SSH frame, including its payload, from the given
* source. Sets the provided @frame pointer to the start of the frame and
* writes the limits of the frame payload to the provided @payload span
* pointer.
*
* This function does not copy any data, but rather only validates the message
* data and sets pointers (and length values) to indicate the respective parts.
*
* If no complete SSH frame could be found, the frame pointer will be set to
* the %NULL pointer and the payload span will be set to the null span (start
* pointer %NULL, size zero).
*
* Return: Returns zero on success or if the frame is incomplete, %-ENOMSG if
* the start of the message is invalid, %-EBADMSG if any (frame-header or
* payload) CRC is invalid, or %-EMSGSIZE if the SSH message is bigger than
* the maximum message length specified in the @maxlen parameter.
*/
int sshp_parse_frame(const struct device *dev, const struct ssam_span *source,
struct ssh_frame **frame, struct ssam_span *payload,
size_t maxlen)
{
struct ssam_span sf;
struct ssam_span sp;
/* Initialize output. */
*frame = NULL;
payload->ptr = NULL;
payload->len = 0;
if (!sshp_starts_with_syn(source)) {
dev_warn(dev, "rx: parser: invalid start of frame\n");
return -ENOMSG;
}
/* Check for minimum packet length. */
if (unlikely(source->len < SSH_MESSAGE_LENGTH(0))) {
dev_dbg(dev, "rx: parser: not enough data for frame\n");
return 0;
}
/* Pin down frame. */
sf.ptr = source->ptr + sizeof(u16);
sf.len = sizeof(struct ssh_frame);
/* Validate frame CRC. */
if (unlikely(!sshp_validate_crc(&sf, sf.ptr + sf.len))) {
dev_warn(dev, "rx: parser: invalid frame CRC\n");
return -EBADMSG;
}
/* Ensure packet does not exceed maximum length. */
sp.len = get_unaligned_le16(&((struct ssh_frame *)sf.ptr)->len);
if (unlikely(SSH_MESSAGE_LENGTH(sp.len) > maxlen)) {
dev_warn(dev, "rx: parser: frame too large: %llu bytes\n",
SSH_MESSAGE_LENGTH(sp.len));
return -EMSGSIZE;
}
/* Pin down payload. */
sp.ptr = sf.ptr + sf.len + sizeof(u16);
/* Check for frame + payload length. */
if (source->len < SSH_MESSAGE_LENGTH(sp.len)) {
dev_dbg(dev, "rx: parser: not enough data for payload\n");
return 0;
}
/* Validate payload CRC. */
if (unlikely(!sshp_validate_crc(&sp, sp.ptr + sp.len))) {
dev_warn(dev, "rx: parser: invalid payload CRC\n");
return -EBADMSG;
}
*frame = (struct ssh_frame *)sf.ptr;
*payload = sp;
dev_dbg(dev, "rx: parser: valid frame found (type: %#04x, len: %u)\n",
(*frame)->type, (*frame)->len);
return 0;
}
/**
* sshp_parse_command() - Parse SSH command frame payload.
* @dev: The device used for logging.
* @source: The source to parse from.
* @command: The parsed command (output).
* @command_data: The parsed command data/payload (output).
*
* Parses and validates a SSH command frame payload. Sets the @command pointer
* to the command header and the @command_data span to the command data (i.e.
* payload of the command). This will result in a zero-length span if the
* command does not have any associated data/payload. This function does not
* check the frame-payload-type field, which should be checked by the caller
* before calling this function.
*
* The @source parameter should be the complete frame payload, e.g. returned
* by the sshp_parse_frame() command.
*
* This function does not copy any data, but rather only validates the frame
* payload data and sets pointers (and length values) to indicate the
* respective parts.
*
* Return: Returns zero on success or %-ENOMSG if @source does not represent a
* valid command-type frame payload, i.e. is too short.
*/
int sshp_parse_command(const struct device *dev, const struct ssam_span *source,
struct ssh_command **command,
struct ssam_span *command_data)
{
/* Check for minimum length. */
if (unlikely(source->len < sizeof(struct ssh_command))) {
*command = NULL;
command_data->ptr = NULL;
command_data->len = 0;
dev_err(dev, "rx: parser: command payload is too short\n");
return -ENOMSG;
}
*command = (struct ssh_command *)source->ptr;
command_data->ptr = source->ptr + sizeof(struct ssh_command);
command_data->len = source->len - sizeof(struct ssh_command);
dev_dbg(dev, "rx: parser: valid command found (tc: %#04x, cid: %#04x)\n",
(*command)->tc, (*command)->cid);
return 0;
}
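/*
 * Illustrative sketch (inert via "#if 0") chaining the two parsers above. The
 * maximum message length and the data-frame type check are hypothetical and
 * would normally come from the transport layer.
 */
#if 0
static int example_parse_message(const struct device *dev,
				 const struct ssam_span *src)
{
	struct ssam_span payload, command_data;
	struct ssh_command *command;
	struct ssh_frame *frame;
	int status;

	status = sshp_parse_frame(dev, src, &frame, &payload, 4096);
	if (status)
		return status;		/* invalid SYN, CRC, or size */
	if (!frame)
		return 0;		/* incomplete, wait for more data */

	if (frame->type != SSH_FRAME_TYPE_DATA_SEQ)
		return 0;		/* no command payload to parse */

	return sshp_parse_command(dev, &payload, &command, &command_data);
}
#endif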
| linux-master | drivers/platform/surface/aggregator/ssh_parser.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Mellanox register access driver
*
* Copyright (C) 2018 Mellanox Technologies
* Copyright (C) 2018 Vadim Pasternak <[email protected]>
*/
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/module.h>
#include <linux/platform_data/mlxreg.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
/* Attribute parameters. */
#define MLXREG_IO_ATT_SIZE 10
#define MLXREG_IO_ATT_NUM 96
/**
* struct mlxreg_io_priv_data - driver's private data:
*
* @pdev: platform device;
* @pdata: platform data;
* @hwmon: hwmon device;
* @mlxreg_io_attr: sysfs attributes array;
* @mlxreg_io_dev_attr: sysfs sensor device attribute array;
* @group: sysfs attribute group;
* @groups: list of sysfs attribute group for hwmon registration;
* @regsize: size of a register value;
* @io_lock: user access locking;
*/
struct mlxreg_io_priv_data {
struct platform_device *pdev;
struct mlxreg_core_platform_data *pdata;
struct device *hwmon;
struct attribute *mlxreg_io_attr[MLXREG_IO_ATT_NUM + 1];
struct sensor_device_attribute mlxreg_io_dev_attr[MLXREG_IO_ATT_NUM];
struct attribute_group group;
const struct attribute_group *groups[2];
int regsize;
struct mutex io_lock; /* Protects user access. */
};
static int
mlxreg_io_get_reg(void *regmap, struct mlxreg_core_data *data, u32 in_val,
bool rw_flag, int regsize, u32 *regval)
{
int i, val, ret;
ret = regmap_read(regmap, data->reg, regval);
if (ret)
goto access_error;
	/*
	 * There are four kinds of attributes: single bit, full register's
	 * bits, bit sequence, and bits spread over a few registers. For the
	 * first kind, the field mask indicates which bits are not related and
	 * the field bit is set to zero. For the second kind, the field mask is
	 * set to zero and the field bit is set with all bits one. No special
	 * handling is needed for such attributes - the value is passed as is.
	 * For the third kind, the field mask indicates which bits are related
	 * and the field bit is set to the number (from 1 to 32) of the first
	 * bit in the bit sequence. For the fourth kind, the number of
	 * registers which should be read to get an attribute is specified
	 * through the 'data->regnum' field.
	 */
if (!data->bit) {
/* Single bit. */
if (rw_flag) {
/* For show: expose effective bit value as 0 or 1. */
*regval = !!(*regval & ~data->mask);
} else {
/* For store: set effective bit value. */
*regval &= data->mask;
if (in_val)
*regval |= ~data->mask;
}
} else if (data->mask) {
/* Bit sequence. */
if (rw_flag) {
/* For show: mask and shift right. */
*regval = ror32(*regval & data->mask, (data->bit - 1));
} else {
/* For store: shift to the position and mask. */
in_val = rol32(in_val, data->bit - 1) & data->mask;
/* Clear relevant bits and set them to new value. */
*regval = (*regval & ~data->mask) | in_val;
}
} else {
/*
		 * Some attributes could occupy a few registers in case the
		 * regmap bit size is 8 or 16. Compose such attributes from
		 * 'regnum' registers. Such attributes contain read-only data.
*/
for (i = 1; i < data->regnum; i++) {
ret = regmap_read(regmap, data->reg + i, &val);
if (ret)
goto access_error;
*regval |= rol32(val, regsize * i * 8);
}
}
access_error:
return ret;
}
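/*
 * Worked example for the "bit sequence" case above (hypothetical values):
 * with mask 0x70 and bit 5, a raw register value of 0x50 is shown as
 * ror32(0x50 & 0x70, 4) = 0x05; storing 0x05 rotates it back via
 * rol32(0x05, 4) & 0x70 = 0x50, i.e. into bits 6:4 of the register.
 */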
static ssize_t
mlxreg_io_attr_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct mlxreg_io_priv_data *priv = dev_get_drvdata(dev);
int index = to_sensor_dev_attr(attr)->index;
struct mlxreg_core_data *data = priv->pdata->data + index;
u32 regval = 0;
int ret;
mutex_lock(&priv->io_lock);
ret = mlxreg_io_get_reg(priv->pdata->regmap, data, 0, true,
priv->regsize, ®val);
if (ret)
goto access_error;
mutex_unlock(&priv->io_lock);
return sprintf(buf, "%u\n", regval);
access_error:
mutex_unlock(&priv->io_lock);
return ret;
}
static ssize_t
mlxreg_io_attr_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len)
{
struct mlxreg_io_priv_data *priv = dev_get_drvdata(dev);
int index = to_sensor_dev_attr(attr)->index;
struct mlxreg_core_data *data = priv->pdata->data + index;
u32 input_val, regval;
int ret;
if (len > MLXREG_IO_ATT_SIZE)
return -EINVAL;
/* Convert buffer to input value. */
ret = kstrtou32(buf, 0, &input_val);
if (ret)
return ret;
mutex_lock(&priv->io_lock);
ret = mlxreg_io_get_reg(priv->pdata->regmap, data, input_val, false,
priv->regsize, ®val);
if (ret)
goto access_error;
ret = regmap_write(priv->pdata->regmap, data->reg, regval);
if (ret)
goto access_error;
mutex_unlock(&priv->io_lock);
return len;
access_error:
mutex_unlock(&priv->io_lock);
dev_err(&priv->pdev->dev, "Bus access error\n");
return ret;
}
static struct device_attribute mlxreg_io_devattr_rw = {
.show = mlxreg_io_attr_show,
.store = mlxreg_io_attr_store,
};
static int mlxreg_io_attr_init(struct mlxreg_io_priv_data *priv)
{
int i;
priv->group.attrs = devm_kcalloc(&priv->pdev->dev,
priv->pdata->counter,
sizeof(struct attribute *),
GFP_KERNEL);
if (!priv->group.attrs)
return -ENOMEM;
for (i = 0; i < priv->pdata->counter; i++) {
priv->mlxreg_io_attr[i] =
&priv->mlxreg_io_dev_attr[i].dev_attr.attr;
memcpy(&priv->mlxreg_io_dev_attr[i].dev_attr,
&mlxreg_io_devattr_rw, sizeof(struct device_attribute));
/* Set attribute name as a label. */
priv->mlxreg_io_attr[i]->name =
devm_kasprintf(&priv->pdev->dev, GFP_KERNEL,
priv->pdata->data[i].label);
if (!priv->mlxreg_io_attr[i]->name) {
dev_err(&priv->pdev->dev, "Memory allocation failed for sysfs attribute %d.\n",
i + 1);
return -ENOMEM;
}
priv->mlxreg_io_dev_attr[i].dev_attr.attr.mode =
priv->pdata->data[i].mode;
priv->mlxreg_io_dev_attr[i].dev_attr.attr.name =
priv->mlxreg_io_attr[i]->name;
priv->mlxreg_io_dev_attr[i].index = i;
sysfs_attr_init(&priv->mlxreg_io_dev_attr[i].dev_attr.attr);
}
priv->group.attrs = priv->mlxreg_io_attr;
priv->groups[0] = &priv->group;
priv->groups[1] = NULL;
return 0;
}
static int mlxreg_io_probe(struct platform_device *pdev)
{
struct mlxreg_io_priv_data *priv;
int err;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->pdata = dev_get_platdata(&pdev->dev);
if (!priv->pdata) {
dev_err(&pdev->dev, "Failed to get platform data.\n");
return -EINVAL;
}
priv->pdev = pdev;
priv->regsize = regmap_get_val_bytes(priv->pdata->regmap);
if (priv->regsize < 0)
return priv->regsize;
err = mlxreg_io_attr_init(priv);
if (err) {
dev_err(&priv->pdev->dev, "Failed to allocate attributes: %d\n",
err);
return err;
}
priv->hwmon = devm_hwmon_device_register_with_groups(&pdev->dev,
"mlxreg_io",
priv,
priv->groups);
if (IS_ERR(priv->hwmon)) {
dev_err(&pdev->dev, "Failed to register hwmon device %ld\n",
PTR_ERR(priv->hwmon));
return PTR_ERR(priv->hwmon);
}
mutex_init(&priv->io_lock);
dev_set_drvdata(&pdev->dev, priv);
return 0;
}
static int mlxreg_io_remove(struct platform_device *pdev)
{
struct mlxreg_io_priv_data *priv = dev_get_drvdata(&pdev->dev);
mutex_destroy(&priv->io_lock);
return 0;
}
static struct platform_driver mlxreg_io_driver = {
.driver = {
.name = "mlxreg-io",
},
.probe = mlxreg_io_probe,
.remove = mlxreg_io_remove,
};
module_platform_driver(mlxreg_io_driver);
MODULE_AUTHOR("Vadim Pasternak <[email protected]>");
MODULE_DESCRIPTION("Mellanox regmap I/O access driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mlxreg-io");
| linux-master | drivers/platform/mellanox/mlxreg-io.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Nvidia line card driver
*
* Copyright (C) 2020 Nvidia Technologies Ltd.
*/
#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/platform_data/mlxcpld.h>
#include <linux/platform_data/mlxreg.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
/* I2C bus IO offsets */
#define MLXREG_LC_REG_CPLD1_VER_OFFSET 0x2500
#define MLXREG_LC_REG_FPGA1_VER_OFFSET 0x2501
#define MLXREG_LC_REG_CPLD1_PN_OFFSET 0x2504
#define MLXREG_LC_REG_FPGA1_PN_OFFSET 0x2506
#define MLXREG_LC_REG_RESET_CAUSE_OFFSET 0x251d
#define MLXREG_LC_REG_LED1_OFFSET 0x2520
#define MLXREG_LC_REG_GP0_OFFSET 0x252e
#define MLXREG_LC_REG_FIELD_UPGRADE 0x2534
#define MLXREG_LC_CHANNEL_I2C_REG 0x25dc
#define MLXREG_LC_REG_CPLD1_MVER_OFFSET 0x25de
#define MLXREG_LC_REG_FPGA1_MVER_OFFSET 0x25df
#define MLXREG_LC_REG_MAX_POWER_OFFSET 0x25f1
#define MLXREG_LC_REG_CONFIG_OFFSET 0x25fb
#define MLXREG_LC_REG_MAX 0x3fff
/**
* enum mlxreg_lc_type - line cards types
*
* @MLXREG_LC_SN4800_C16: 100GbE line card with 16 QSFP28 ports;
*/
enum mlxreg_lc_type {
MLXREG_LC_SN4800_C16 = 0x0000,
};
/**
* enum mlxreg_lc_state - line cards state
*
* @MLXREG_LC_INITIALIZED: line card is initialized;
* @MLXREG_LC_POWERED: line card is powered;
* @MLXREG_LC_SYNCED: line card is synchronized between hardware and firmware;
*/
enum mlxreg_lc_state {
MLXREG_LC_INITIALIZED = BIT(0),
MLXREG_LC_POWERED = BIT(1),
MLXREG_LC_SYNCED = BIT(2),
};
#define MLXREG_LC_CONFIGURED (MLXREG_LC_INITIALIZED | MLXREG_LC_POWERED | MLXREG_LC_SYNCED)
/* mlxreg_lc - device private data
* @dev: platform device;
* @lock: line card lock;
* @par_regmap: parent device regmap handle;
 * @data: platform core data;
 * @io_data: register access platform data;
 * @led_data: LED platform data;
 * @mux_data: MUX platform data;
 * @led: LED device;
 * @io_regs: register access device;
 * @mux_brdinfo: mux configuration;
 * @mux: mux devices;
 * @aux_devs: I2C devices fed by auxiliary power;
 * @aux_devs_num: number of I2C devices fed by auxiliary power;
 * @main_devs: I2C devices fed by main power;
 * @main_devs_num: number of I2C devices fed by main power;
* @state: line card state;
*/
struct mlxreg_lc {
struct device *dev;
struct mutex lock; /* line card access lock */
void *par_regmap;
struct mlxreg_core_data *data;
struct mlxreg_core_platform_data *io_data;
struct mlxreg_core_platform_data *led_data;
struct mlxcpld_mux_plat_data *mux_data;
struct platform_device *led;
struct platform_device *io_regs;
struct i2c_board_info *mux_brdinfo;
struct platform_device *mux;
struct mlxreg_hotplug_device *aux_devs;
int aux_devs_num;
struct mlxreg_hotplug_device *main_devs;
int main_devs_num;
enum mlxreg_lc_state state;
};
static bool mlxreg_lc_writeable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case MLXREG_LC_REG_LED1_OFFSET:
case MLXREG_LC_REG_GP0_OFFSET:
case MLXREG_LC_REG_FIELD_UPGRADE:
case MLXREG_LC_CHANNEL_I2C_REG:
return true;
}
return false;
}
static bool mlxreg_lc_readable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case MLXREG_LC_REG_CPLD1_VER_OFFSET:
case MLXREG_LC_REG_FPGA1_VER_OFFSET:
case MLXREG_LC_REG_CPLD1_PN_OFFSET:
case MLXREG_LC_REG_FPGA1_PN_OFFSET:
case MLXREG_LC_REG_RESET_CAUSE_OFFSET:
case MLXREG_LC_REG_LED1_OFFSET:
case MLXREG_LC_REG_GP0_OFFSET:
case MLXREG_LC_REG_FIELD_UPGRADE:
case MLXREG_LC_CHANNEL_I2C_REG:
case MLXREG_LC_REG_CPLD1_MVER_OFFSET:
case MLXREG_LC_REG_FPGA1_MVER_OFFSET:
case MLXREG_LC_REG_MAX_POWER_OFFSET:
case MLXREG_LC_REG_CONFIG_OFFSET:
return true;
}
return false;
}
static bool mlxreg_lc_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case MLXREG_LC_REG_CPLD1_VER_OFFSET:
case MLXREG_LC_REG_FPGA1_VER_OFFSET:
case MLXREG_LC_REG_CPLD1_PN_OFFSET:
case MLXREG_LC_REG_FPGA1_PN_OFFSET:
case MLXREG_LC_REG_RESET_CAUSE_OFFSET:
case MLXREG_LC_REG_LED1_OFFSET:
case MLXREG_LC_REG_GP0_OFFSET:
case MLXREG_LC_REG_FIELD_UPGRADE:
case MLXREG_LC_CHANNEL_I2C_REG:
case MLXREG_LC_REG_CPLD1_MVER_OFFSET:
case MLXREG_LC_REG_FPGA1_MVER_OFFSET:
case MLXREG_LC_REG_MAX_POWER_OFFSET:
case MLXREG_LC_REG_CONFIG_OFFSET:
return true;
}
return false;
}
static const struct reg_default mlxreg_lc_regmap_default[] = {
{ MLXREG_LC_CHANNEL_I2C_REG, 0x00 },
};
/* Configuration for the register map of a device with 2 bytes address space. */
static const struct regmap_config mlxreg_lc_regmap_conf = {
.reg_bits = 16,
.val_bits = 8,
.max_register = MLXREG_LC_REG_MAX,
.cache_type = REGCACHE_FLAT,
.writeable_reg = mlxreg_lc_writeable_reg,
.readable_reg = mlxreg_lc_readable_reg,
.volatile_reg = mlxreg_lc_volatile_reg,
.reg_defaults = mlxreg_lc_regmap_default,
.num_reg_defaults = ARRAY_SIZE(mlxreg_lc_regmap_default),
};
/* Default channels vector.
 * It contains only the channels which are physically connected to devices;
 * empty channels are skipped.
*/
static int mlxreg_lc_chan[] = {
0x04, 0x05, 0x06, 0x07, 0x08, 0x10, 0x20, 0x21, 0x22, 0x23, 0x40, 0x41,
0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d,
0x4e, 0x4f
};
/* Default mux configuration. */
static struct mlxcpld_mux_plat_data mlxreg_lc_mux_data[] = {
{
.chan_ids = mlxreg_lc_chan,
.num_adaps = ARRAY_SIZE(mlxreg_lc_chan),
.sel_reg_addr = MLXREG_LC_CHANNEL_I2C_REG,
.reg_size = 2,
},
};
/* Default mux board info. */
static struct i2c_board_info mlxreg_lc_mux_brdinfo = {
I2C_BOARD_INFO("i2c-mux-mlxcpld", 0x32),
};
/* Line card default auxiliary power static devices. */
static struct i2c_board_info mlxreg_lc_aux_pwr_devices[] = {
{
I2C_BOARD_INFO("24c32", 0x51),
},
{
I2C_BOARD_INFO("24c32", 0x51),
},
};
/* Line card default auxiliary power board info. */
static struct mlxreg_hotplug_device mlxreg_lc_aux_pwr_brdinfo[] = {
{
.brdinfo = &mlxreg_lc_aux_pwr_devices[0],
.nr = 3,
},
{
.brdinfo = &mlxreg_lc_aux_pwr_devices[1],
.nr = 4,
},
};
/* Line card default main power static devices. */
static struct i2c_board_info mlxreg_lc_main_pwr_devices[] = {
{
I2C_BOARD_INFO("mp2975", 0x62),
},
{
I2C_BOARD_INFO("mp2975", 0x64),
},
{
I2C_BOARD_INFO("max11603", 0x6d),
},
{
I2C_BOARD_INFO("lm25066", 0x15),
},
};
/* Line card default main power board info. */
static struct mlxreg_hotplug_device mlxreg_lc_main_pwr_brdinfo[] = {
{
.brdinfo = &mlxreg_lc_main_pwr_devices[0],
.nr = 0,
},
{
.brdinfo = &mlxreg_lc_main_pwr_devices[1],
.nr = 0,
},
{
.brdinfo = &mlxreg_lc_main_pwr_devices[2],
.nr = 1,
},
{
.brdinfo = &mlxreg_lc_main_pwr_devices[3],
.nr = 2,
},
};
/* LED default data. */
static struct mlxreg_core_data mlxreg_lc_led_data[] = {
{
.label = "status:green",
.reg = MLXREG_LC_REG_LED1_OFFSET,
.mask = GENMASK(7, 4),
},
{
.label = "status:orange",
.reg = MLXREG_LC_REG_LED1_OFFSET,
.mask = GENMASK(7, 4),
},
};
static struct mlxreg_core_platform_data mlxreg_lc_led = {
.identity = "pci",
.data = mlxreg_lc_led_data,
.counter = ARRAY_SIZE(mlxreg_lc_led_data),
};
/* Default register access data. */
static struct mlxreg_core_data mlxreg_lc_io_data[] = {
{
.label = "cpld1_version",
.reg = MLXREG_LC_REG_CPLD1_VER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "fpga1_version",
.reg = MLXREG_LC_REG_FPGA1_VER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "cpld1_pn",
.reg = MLXREG_LC_REG_CPLD1_PN_OFFSET,
.bit = GENMASK(15, 0),
.mode = 0444,
.regnum = 2,
},
{
.label = "fpga1_pn",
.reg = MLXREG_LC_REG_FPGA1_PN_OFFSET,
.bit = GENMASK(15, 0),
.mode = 0444,
.regnum = 2,
},
{
.label = "cpld1_version_min",
.reg = MLXREG_LC_REG_CPLD1_MVER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "fpga1_version_min",
.reg = MLXREG_LC_REG_FPGA1_MVER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "reset_fpga_not_done",
.reg = MLXREG_LC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(1),
.mode = 0444,
},
{
.label = "reset_aux_pwr_or_ref",
.reg = MLXREG_LC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(2),
.mode = 0444,
},
{
.label = "reset_dc_dc_pwr_fail",
.reg = MLXREG_LC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
.mode = 0444,
},
{
.label = "reset_from_chassis",
.reg = MLXREG_LC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(4),
.mode = 0444,
},
{
.label = "reset_pwr_off_from_chassis",
.reg = MLXREG_LC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(5),
.mode = 0444,
},
{
.label = "reset_line_card",
.reg = MLXREG_LC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
.mode = 0444,
},
{
.label = "reset_line_card_pwr_en",
.reg = MLXREG_LC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(7),
.mode = 0444,
},
{
.label = "cpld_upgrade_en",
.reg = MLXREG_LC_REG_FIELD_UPGRADE,
.mask = GENMASK(7, 0) & ~BIT(0),
.mode = 0644,
.secured = 1,
},
{
.label = "fpga_upgrade_en",
.reg = MLXREG_LC_REG_FIELD_UPGRADE,
.mask = GENMASK(7, 0) & ~BIT(1),
.mode = 0644,
.secured = 1,
},
{
.label = "qsfp_pwr_en",
.reg = MLXREG_LC_REG_GP0_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
.mode = 0644,
},
{
.label = "vpd_wp",
.reg = MLXREG_LC_REG_GP0_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
.mode = 0644,
.secured = 1,
},
{
.label = "agb_spi_burn_en",
.reg = MLXREG_LC_REG_GP0_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(5),
.mode = 0644,
.secured = 1,
},
{
.label = "fpga_spi_burn_en",
.reg = MLXREG_LC_REG_GP0_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
.mode = 0644,
.secured = 1,
},
{
.label = "max_power",
.reg = MLXREG_LC_REG_MAX_POWER_OFFSET,
.bit = GENMASK(15, 0),
.mode = 0444,
.regnum = 2,
},
{
.label = "config",
.reg = MLXREG_LC_REG_CONFIG_OFFSET,
.bit = GENMASK(15, 0),
.mode = 0444,
.regnum = 2,
},
};
static struct mlxreg_core_platform_data mlxreg_lc_regs_io = {
.data = mlxreg_lc_io_data,
.counter = ARRAY_SIZE(mlxreg_lc_io_data),
};
static int
mlxreg_lc_create_static_devices(struct mlxreg_lc *mlxreg_lc, struct mlxreg_hotplug_device *devs,
int size)
{
struct mlxreg_hotplug_device *dev = devs;
int i, ret;
	/* Create static I2C devices fed by auxiliary or main power. */
for (i = 0; i < size; i++, dev++) {
dev->client = i2c_new_client_device(dev->adapter, dev->brdinfo);
if (IS_ERR(dev->client)) {
dev_err(mlxreg_lc->dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
dev->brdinfo->type, dev->nr, dev->brdinfo->addr);
dev->adapter = NULL;
ret = PTR_ERR(dev->client);
goto fail_create_static_devices;
}
}
return 0;
fail_create_static_devices:
while (--i >= 0) {
dev = devs + i;
i2c_unregister_device(dev->client);
dev->client = NULL;
}
return ret;
}
static void
mlxreg_lc_destroy_static_devices(struct mlxreg_lc *mlxreg_lc, struct mlxreg_hotplug_device *devs,
int size)
{
struct mlxreg_hotplug_device *dev = devs;
int i;
	/* Destroy static I2C devices fed by auxiliary or main power. */
for (i = 0; i < size; i++, dev++) {
if (dev->client) {
i2c_unregister_device(dev->client);
dev->client = NULL;
}
}
}
static int mlxreg_lc_power_on_off(struct mlxreg_lc *mlxreg_lc, u8 action)
{
u32 regval;
int err;
err = regmap_read(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_pwr, ®val);
if (err)
goto regmap_read_fail;
if (action)
regval |= BIT(mlxreg_lc->data->slot - 1);
else
regval &= ~BIT(mlxreg_lc->data->slot - 1);
err = regmap_write(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_pwr, regval);
regmap_read_fail:
return err;
}
static int mlxreg_lc_enable_disable(struct mlxreg_lc *mlxreg_lc, bool action)
{
u32 regval;
int err;
/*
	 * Hardware holds the line card in the disabled state after powering on. Holding the line
	 * card in the disabled state protects access to the line card components, like FPGA and
	 * gearboxes. The line card should be enabled in order to bring it into the operational
	 * state, and can be disabled to move it into the non-operational state. Enabling a line
	 * card which has already been enabled has no effect, and neither does disabling a line
	 * card which is already disabled.
*/
err = regmap_read(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_ena, ®val);
if (err)
goto regmap_read_fail;
if (action)
regval |= BIT(mlxreg_lc->data->slot - 1);
else
regval &= ~BIT(mlxreg_lc->data->slot - 1);
err = regmap_write(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_ena, regval);
regmap_read_fail:
return err;
}
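/*
 * Example: for a line card in slot 2, both helpers above toggle BIT(1) of the
 * respective parent register, leaving the bits of the other slots intact.
 */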
static int
mlxreg_lc_sn4800_c16_config_init(struct mlxreg_lc *mlxreg_lc, void *regmap,
struct mlxreg_core_data *data)
{
struct device *dev = &data->hpdev.client->dev;
/* Set line card configuration according to the type. */
mlxreg_lc->mux_data = mlxreg_lc_mux_data;
mlxreg_lc->io_data = &mlxreg_lc_regs_io;
mlxreg_lc->led_data = &mlxreg_lc_led;
mlxreg_lc->mux_brdinfo = &mlxreg_lc_mux_brdinfo;
mlxreg_lc->aux_devs = devm_kmemdup(dev, mlxreg_lc_aux_pwr_brdinfo,
sizeof(mlxreg_lc_aux_pwr_brdinfo), GFP_KERNEL);
if (!mlxreg_lc->aux_devs)
return -ENOMEM;
mlxreg_lc->aux_devs_num = ARRAY_SIZE(mlxreg_lc_aux_pwr_brdinfo);
mlxreg_lc->main_devs = devm_kmemdup(dev, mlxreg_lc_main_pwr_brdinfo,
sizeof(mlxreg_lc_main_pwr_brdinfo), GFP_KERNEL);
if (!mlxreg_lc->main_devs)
return -ENOMEM;
mlxreg_lc->main_devs_num = ARRAY_SIZE(mlxreg_lc_main_pwr_brdinfo);
return 0;
}
static void
mlxreg_lc_state_update(struct mlxreg_lc *mlxreg_lc, enum mlxreg_lc_state state, u8 action)
{
if (action)
mlxreg_lc->state |= state;
else
mlxreg_lc->state &= ~state;
}
static void
mlxreg_lc_state_update_locked(struct mlxreg_lc *mlxreg_lc, enum mlxreg_lc_state state, u8 action)
{
mutex_lock(&mlxreg_lc->lock);
if (action)
mlxreg_lc->state |= state;
else
mlxreg_lc->state &= ~state;
mutex_unlock(&mlxreg_lc->lock);
}
/*
 * Callback to be called from the mlxreg-hotplug driver to notify the line card
 * about a received event.
*/
static int mlxreg_lc_event_handler(void *handle, enum mlxreg_hotplug_kind kind, u8 action)
{
struct mlxreg_lc *mlxreg_lc = handle;
int err = 0;
dev_info(mlxreg_lc->dev, "linecard#%d state %d event kind %d action %d\n",
mlxreg_lc->data->slot, mlxreg_lc->state, kind, action);
mutex_lock(&mlxreg_lc->lock);
if (!(mlxreg_lc->state & MLXREG_LC_INITIALIZED))
		goto mlxreg_lc_non_initialized_exit;
switch (kind) {
case MLXREG_HOTPLUG_LC_SYNCED:
/*
* Synchronization event - hardware and firmware are synchronized. Power on/off
* line card - to allow/disallow main power source.
*/
mlxreg_lc_state_update(mlxreg_lc, MLXREG_LC_SYNCED, action);
/* Power line card if it is not powered yet. */
if (!(mlxreg_lc->state & MLXREG_LC_POWERED) && action) {
err = mlxreg_lc_power_on_off(mlxreg_lc, 1);
if (err)
goto mlxreg_lc_power_on_off_fail;
}
/* In case line card is configured - enable it. */
if (mlxreg_lc->state & MLXREG_LC_CONFIGURED && action)
err = mlxreg_lc_enable_disable(mlxreg_lc, 1);
break;
case MLXREG_HOTPLUG_LC_POWERED:
/* Power event - attach or de-attach line card device feeding by the main power. */
if (action) {
/* Do not create devices, if line card is already powered. */
if (mlxreg_lc->state & MLXREG_LC_POWERED) {
/* In case line card is configured - enable it. */
if (mlxreg_lc->state & MLXREG_LC_CONFIGURED)
err = mlxreg_lc_enable_disable(mlxreg_lc, 1);
goto mlxreg_lc_enable_disable_exit;
}
err = mlxreg_lc_create_static_devices(mlxreg_lc, mlxreg_lc->main_devs,
mlxreg_lc->main_devs_num);
if (err)
goto mlxreg_lc_create_static_devices_fail;
/* In case line card is already in ready state - enable it. */
if (mlxreg_lc->state & MLXREG_LC_CONFIGURED)
err = mlxreg_lc_enable_disable(mlxreg_lc, 1);
} else {
mlxreg_lc_destroy_static_devices(mlxreg_lc, mlxreg_lc->main_devs,
mlxreg_lc->main_devs_num);
}
mlxreg_lc_state_update(mlxreg_lc, MLXREG_LC_POWERED, action);
break;
case MLXREG_HOTPLUG_LC_READY:
/*
* Ready event – enable line card by releasing it from reset or disable it by put
* to reset state.
*/
err = mlxreg_lc_enable_disable(mlxreg_lc, !!action);
break;
case MLXREG_HOTPLUG_LC_THERMAL:
/* Thermal shutdown event – power off line card. */
if (action)
err = mlxreg_lc_power_on_off(mlxreg_lc, 0);
break;
default:
break;
}
mlxreg_lc_enable_disable_exit:
mlxreg_lc_power_on_off_fail:
mlxreg_lc_create_static_devices_fail:
mlxreg_lc_non_initialized_exit:
mutex_unlock(&mlxreg_lc->lock);
return err;
}
/*
 * Callback to be called from the i2c-mux-mlxcpld driver to indicate that all
 * adapter devices have been created.
*/
static int mlxreg_lc_completion_notify(void *handle, struct i2c_adapter *parent,
struct i2c_adapter *adapters[])
{
struct mlxreg_hotplug_device *main_dev, *aux_dev;
struct mlxreg_lc *mlxreg_lc = handle;
u32 regval;
int i, err;
	/* Update I2C devices fed by auxiliary power. */
aux_dev = mlxreg_lc->aux_devs;
for (i = 0; i < mlxreg_lc->aux_devs_num; i++, aux_dev++) {
aux_dev->adapter = adapters[aux_dev->nr];
aux_dev->nr = adapters[aux_dev->nr]->nr;
}
err = mlxreg_lc_create_static_devices(mlxreg_lc, mlxreg_lc->aux_devs,
mlxreg_lc->aux_devs_num);
if (err)
return err;
	/* Update I2C devices fed by main power. */
main_dev = mlxreg_lc->main_devs;
for (i = 0; i < mlxreg_lc->main_devs_num; i++, main_dev++) {
main_dev->adapter = adapters[main_dev->nr];
main_dev->nr = adapters[main_dev->nr]->nr;
}
/* Verify if line card is powered. */
err = regmap_read(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_pwr, ®val);
if (err)
goto mlxreg_lc_regmap_read_power_fail;
if (regval & mlxreg_lc->data->mask) {
err = mlxreg_lc_create_static_devices(mlxreg_lc, mlxreg_lc->main_devs,
mlxreg_lc->main_devs_num);
if (err)
goto mlxreg_lc_create_static_devices_failed;
mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_POWERED, 1);
}
/* Verify if line card is synchronized. */
err = regmap_read(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_sync, ®val);
if (err)
goto mlxreg_lc_regmap_read_sync_fail;
	/* Set the synchronized state and power on the line card if necessary. */
	if (regval & mlxreg_lc->data->mask) {
		mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_SYNCED, 1);
if (mlxreg_lc->state & ~MLXREG_LC_POWERED) {
err = mlxreg_lc_power_on_off(mlxreg_lc, 1);
if (err)
goto mlxreg_lc_regmap_power_on_off_fail;
}
}
mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_INITIALIZED, 1);
return 0;
mlxreg_lc_regmap_power_on_off_fail:
mlxreg_lc_regmap_read_sync_fail:
if (mlxreg_lc->state & MLXREG_LC_POWERED)
mlxreg_lc_destroy_static_devices(mlxreg_lc, mlxreg_lc->main_devs,
mlxreg_lc->main_devs_num);
mlxreg_lc_create_static_devices_failed:
mlxreg_lc_destroy_static_devices(mlxreg_lc, mlxreg_lc->aux_devs, mlxreg_lc->aux_devs_num);
mlxreg_lc_regmap_read_power_fail:
return err;
}
static int
mlxreg_lc_config_init(struct mlxreg_lc *mlxreg_lc, void *regmap,
struct mlxreg_core_data *data)
{
struct device *dev = &data->hpdev.client->dev;
int lsb, err;
u32 regval;
/* Validate line card type. */
err = regmap_read(regmap, MLXREG_LC_REG_CONFIG_OFFSET, &lsb);
	err = (!err) ? regmap_read(regmap, MLXREG_LC_REG_CONFIG_OFFSET + 1, &regval) : err;
if (err)
return err;
regval = (regval & GENMASK(7, 0)) << 8 | (lsb & GENMASK(7, 0));
switch (regval) {
case MLXREG_LC_SN4800_C16:
err = mlxreg_lc_sn4800_c16_config_init(mlxreg_lc, regmap, data);
if (err) {
dev_err(dev, "Failed to config client %s at bus %d at addr 0x%02x\n",
data->hpdev.brdinfo->type, data->hpdev.nr,
data->hpdev.brdinfo->addr);
return err;
}
break;
default:
return -ENODEV;
}
/* Create mux infrastructure. */
mlxreg_lc->mux_data->handle = mlxreg_lc;
mlxreg_lc->mux_data->completion_notify = mlxreg_lc_completion_notify;
mlxreg_lc->mux_brdinfo->platform_data = mlxreg_lc->mux_data;
mlxreg_lc->mux = platform_device_register_resndata(dev, "i2c-mux-mlxcpld", data->hpdev.nr,
NULL, 0, mlxreg_lc->mux_data,
sizeof(*mlxreg_lc->mux_data));
if (IS_ERR(mlxreg_lc->mux)) {
dev_err(dev, "Failed to create mux infra for client %s at bus %d at addr 0x%02x\n",
data->hpdev.brdinfo->type, data->hpdev.nr, data->hpdev.brdinfo->addr);
return PTR_ERR(mlxreg_lc->mux);
}
/* Register IO access driver. */
if (mlxreg_lc->io_data) {
mlxreg_lc->io_data->regmap = regmap;
mlxreg_lc->io_regs =
platform_device_register_resndata(dev, "mlxreg-io", data->hpdev.nr, NULL, 0,
mlxreg_lc->io_data, sizeof(*mlxreg_lc->io_data));
if (IS_ERR(mlxreg_lc->io_regs)) {
dev_err(dev, "Failed to create regio for client %s at bus %d at addr 0x%02x\n",
data->hpdev.brdinfo->type, data->hpdev.nr,
data->hpdev.brdinfo->addr);
err = PTR_ERR(mlxreg_lc->io_regs);
goto fail_register_io;
}
}
/* Register LED driver. */
if (mlxreg_lc->led_data) {
mlxreg_lc->led_data->regmap = regmap;
mlxreg_lc->led =
platform_device_register_resndata(dev, "leds-mlxreg", data->hpdev.nr, NULL, 0,
mlxreg_lc->led_data,
sizeof(*mlxreg_lc->led_data));
if (IS_ERR(mlxreg_lc->led)) {
dev_err(dev, "Failed to create LED objects for client %s at bus %d at addr 0x%02x\n",
data->hpdev.brdinfo->type, data->hpdev.nr,
data->hpdev.brdinfo->addr);
err = PTR_ERR(mlxreg_lc->led);
goto fail_register_led;
}
}
return 0;
fail_register_led:
if (mlxreg_lc->io_regs)
platform_device_unregister(mlxreg_lc->io_regs);
fail_register_io:
if (mlxreg_lc->mux)
platform_device_unregister(mlxreg_lc->mux);
return err;
}
static void mlxreg_lc_config_exit(struct mlxreg_lc *mlxreg_lc)
{
/* Unregister LED driver. */
if (mlxreg_lc->led)
platform_device_unregister(mlxreg_lc->led);
/* Unregister IO access driver. */
if (mlxreg_lc->io_regs)
platform_device_unregister(mlxreg_lc->io_regs);
/* Remove mux infrastructure. */
if (mlxreg_lc->mux)
platform_device_unregister(mlxreg_lc->mux);
}
static int mlxreg_lc_probe(struct platform_device *pdev)
{
struct mlxreg_core_hotplug_platform_data *par_pdata;
struct mlxreg_core_data *data;
struct mlxreg_lc *mlxreg_lc;
void *regmap;
int i, err;
data = dev_get_platdata(&pdev->dev);
if (!data)
return -EINVAL;
mlxreg_lc = devm_kzalloc(&pdev->dev, sizeof(*mlxreg_lc), GFP_KERNEL);
if (!mlxreg_lc)
return -ENOMEM;
mutex_init(&mlxreg_lc->lock);
/* Set event notification callback. */
data->notifier->user_handler = mlxreg_lc_event_handler;
data->notifier->handle = mlxreg_lc;
data->hpdev.adapter = i2c_get_adapter(data->hpdev.nr);
if (!data->hpdev.adapter) {
dev_err(&pdev->dev, "Failed to get adapter for bus %d\n",
data->hpdev.nr);
err = -EFAULT;
goto i2c_get_adapter_fail;
}
	/* Create device at the top of the line card I2C tree. */
data->hpdev.client = i2c_new_client_device(data->hpdev.adapter,
data->hpdev.brdinfo);
if (IS_ERR(data->hpdev.client)) {
dev_err(&pdev->dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
data->hpdev.brdinfo->type, data->hpdev.nr, data->hpdev.brdinfo->addr);
err = PTR_ERR(data->hpdev.client);
goto i2c_new_device_fail;
}
regmap = devm_regmap_init_i2c(data->hpdev.client,
&mlxreg_lc_regmap_conf);
if (IS_ERR(regmap)) {
dev_err(&pdev->dev, "Failed to create regmap for client %s at bus %d at addr 0x%02x\n",
data->hpdev.brdinfo->type, data->hpdev.nr, data->hpdev.brdinfo->addr);
err = PTR_ERR(regmap);
goto devm_regmap_init_i2c_fail;
}
/* Set default registers. */
for (i = 0; i < mlxreg_lc_regmap_conf.num_reg_defaults; i++) {
err = regmap_write(regmap, mlxreg_lc_regmap_default[i].reg,
mlxreg_lc_regmap_default[i].def);
if (err) {
dev_err(&pdev->dev, "Failed to set default regmap %d for client %s at bus %d at addr 0x%02x\n",
i, data->hpdev.brdinfo->type, data->hpdev.nr,
data->hpdev.brdinfo->addr);
goto regmap_write_fail;
}
}
/* Sync registers with hardware. */
regcache_mark_dirty(regmap);
err = regcache_sync(regmap);
if (err) {
dev_err(&pdev->dev, "Failed to sync regmap for client %s at bus %d at addr 0x%02x\n",
data->hpdev.brdinfo->type, data->hpdev.nr, data->hpdev.brdinfo->addr);
goto regcache_sync_fail;
}
par_pdata = data->hpdev.brdinfo->platform_data;
mlxreg_lc->par_regmap = par_pdata->regmap;
mlxreg_lc->data = data;
mlxreg_lc->dev = &pdev->dev;
platform_set_drvdata(pdev, mlxreg_lc);
/* Configure line card. */
err = mlxreg_lc_config_init(mlxreg_lc, regmap, data);
if (err)
goto mlxreg_lc_config_init_fail;
return 0;
mlxreg_lc_config_init_fail:
regcache_sync_fail:
regmap_write_fail:
devm_regmap_init_i2c_fail:
i2c_unregister_device(data->hpdev.client);
data->hpdev.client = NULL;
i2c_new_device_fail:
i2c_put_adapter(data->hpdev.adapter);
data->hpdev.adapter = NULL;
i2c_get_adapter_fail:
/* Clear event notification callback and handle. */
if (data->notifier) {
data->notifier->user_handler = NULL;
data->notifier->handle = NULL;
}
return err;
}
static int mlxreg_lc_remove(struct platform_device *pdev)
{
struct mlxreg_core_data *data = dev_get_platdata(&pdev->dev);
struct mlxreg_lc *mlxreg_lc = platform_get_drvdata(pdev);
mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_INITIALIZED, 0);
/*
	 * Probing and removal are invoked by hotplug events raised upon line card insertion and
	 * removal. If the probing procedure fails, all data is cleared. However, a hotplug event
	 * will still be raised on line card removal and activate the removal procedure. In this
	 * case there is nothing to remove.
*/
if (!data->notifier || !data->notifier->handle)
return 0;
/* Clear event notification callback and handle. */
data->notifier->user_handler = NULL;
data->notifier->handle = NULL;
	/* Destroy static I2C devices fed by main power. */
mlxreg_lc_destroy_static_devices(mlxreg_lc, mlxreg_lc->main_devs,
mlxreg_lc->main_devs_num);
	/* Destroy static I2C devices fed by auxiliary power. */
mlxreg_lc_destroy_static_devices(mlxreg_lc, mlxreg_lc->aux_devs, mlxreg_lc->aux_devs_num);
/* Unregister underlying drivers. */
mlxreg_lc_config_exit(mlxreg_lc);
if (data->hpdev.client) {
i2c_unregister_device(data->hpdev.client);
data->hpdev.client = NULL;
i2c_put_adapter(data->hpdev.adapter);
data->hpdev.adapter = NULL;
}
return 0;
}
static struct platform_driver mlxreg_lc_driver = {
.probe = mlxreg_lc_probe,
.remove = mlxreg_lc_remove,
.driver = {
.name = "mlxreg-lc",
},
};
module_platform_driver(mlxreg_lc_driver);
MODULE_AUTHOR("Vadim Pasternak <[email protected]>");
MODULE_DESCRIPTION("Nvidia line card platform driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("platform:mlxreg-lc");
| linux-master | drivers/platform/mellanox/mlxreg-lc.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Nvidia sn2201 driver
*
* Copyright (C) 2022 Nvidia Technologies Ltd.
*/
#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/platform_data/mlxcpld.h>
#include <linux/platform_data/mlxreg.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
/* SN2201 CPLD register offset. */
#define NVSW_SN2201_CPLD_LPC_I2C_BASE_ADRR 0x2000
#define NVSW_SN2201_CPLD_LPC_IO_RANGE 0x100
#define NVSW_SN2201_HW_VER_ID_OFFSET 0x00
#define NVSW_SN2201_BOARD_ID_OFFSET 0x01
#define NVSW_SN2201_CPLD_VER_OFFSET 0x02
#define NVSW_SN2201_CPLD_MVER_OFFSET 0x03
#define NVSW_SN2201_CPLD_ID_OFFSET 0x04
#define NVSW_SN2201_CPLD_PN_OFFSET 0x05
#define NVSW_SN2201_CPLD_PN1_OFFSET 0x06
#define NVSW_SN2201_PSU_CTRL_OFFSET 0x0a
#define NVSW_SN2201_QSFP28_STATUS_OFFSET 0x0b
#define NVSW_SN2201_QSFP28_INT_STATUS_OFFSET 0x0c
#define NVSW_SN2201_QSFP28_LP_STATUS_OFFSET 0x0d
#define NVSW_SN2201_QSFP28_RST_STATUS_OFFSET 0x0e
#define NVSW_SN2201_SYS_STATUS_OFFSET 0x0f
#define NVSW_SN2201_FRONT_SYS_LED_CTRL_OFFSET 0x10
#define NVSW_SN2201_FRONT_PSU_LED_CTRL_OFFSET 0x12
#define NVSW_SN2201_FRONT_UID_LED_CTRL_OFFSET 0x13
#define NVSW_SN2201_QSFP28_LED_TEST_STATUS_OFFSET 0x14
#define NVSW_SN2201_SYS_RST_STATUS_OFFSET 0x15
#define NVSW_SN2201_SYS_INT_STATUS_OFFSET 0x21
#define NVSW_SN2201_SYS_INT_MASK_OFFSET 0x22
#define NVSW_SN2201_ASIC_STATUS_OFFSET 0x24
#define NVSW_SN2201_ASIC_EVENT_OFFSET 0x25
#define NVSW_SN2201_ASIC_MAKS_OFFSET 0x26
#define NVSW_SN2201_THML_STATUS_OFFSET 0x27
#define NVSW_SN2201_THML_EVENT_OFFSET 0x28
#define NVSW_SN2201_THML_MASK_OFFSET 0x29
#define NVSW_SN2201_PS_ALT_STATUS_OFFSET 0x2a
#define NVSW_SN2201_PS_ALT_EVENT_OFFSET 0x2b
#define NVSW_SN2201_PS_ALT_MASK_OFFSET 0x2c
#define NVSW_SN2201_PS_PRSNT_STATUS_OFFSET 0x30
#define NVSW_SN2201_PS_PRSNT_EVENT_OFFSET 0x31
#define NVSW_SN2201_PS_PRSNT_MASK_OFFSET 0x32
#define NVSW_SN2201_PS_DC_OK_STATUS_OFFSET 0x33
#define NVSW_SN2201_PS_DC_OK_EVENT_OFFSET 0x34
#define NVSW_SN2201_PS_DC_OK_MASK_OFFSET 0x35
#define NVSW_SN2201_RST_CAUSE1_OFFSET 0x36
#define NVSW_SN2201_RST_CAUSE2_OFFSET 0x37
#define NVSW_SN2201_RST_SW_CTRL_OFFSET 0x38
#define NVSW_SN2201_FAN_PRSNT_STATUS_OFFSET 0x3a
#define NVSW_SN2201_FAN_PRSNT_EVENT_OFFSET 0x3b
#define NVSW_SN2201_FAN_PRSNT_MASK_OFFSET 0x3c
#define NVSW_SN2201_WD_TMR_OFFSET_LSB 0x40
#define NVSW_SN2201_WD_TMR_OFFSET_MSB 0x41
#define NVSW_SN2201_WD_ACT_OFFSET 0x42
#define NVSW_SN2201_FAN_LED1_CTRL_OFFSET 0x50
#define NVSW_SN2201_FAN_LED2_CTRL_OFFSET 0x51
#define NVSW_SN2201_REG_MAX 0x52
/* Number of physical I2C busses. */
#define NVSW_SN2201_PHY_I2C_BUS_NUM 2
/* Number of main mux channels. */
#define NVSW_SN2201_MAIN_MUX_CHNL_NUM 8
#define NVSW_SN2201_MAIN_NR 0
#define NVSW_SN2201_MAIN_MUX_NR 1
#define NVSW_SN2201_MAIN_MUX_DEFER_NR (NVSW_SN2201_PHY_I2C_BUS_NUM + \
NVSW_SN2201_MAIN_MUX_CHNL_NUM - 1)
#define NVSW_SN2201_MAIN_MUX_CH0_NR NVSW_SN2201_PHY_I2C_BUS_NUM
#define NVSW_SN2201_MAIN_MUX_CH1_NR (NVSW_SN2201_MAIN_MUX_CH0_NR + 1)
#define NVSW_SN2201_MAIN_MUX_CH2_NR (NVSW_SN2201_MAIN_MUX_CH0_NR + 2)
#define NVSW_SN2201_MAIN_MUX_CH3_NR (NVSW_SN2201_MAIN_MUX_CH0_NR + 3)
#define NVSW_SN2201_MAIN_MUX_CH5_NR (NVSW_SN2201_MAIN_MUX_CH0_NR + 5)
#define NVSW_SN2201_MAIN_MUX_CH6_NR (NVSW_SN2201_MAIN_MUX_CH0_NR + 6)
#define NVSW_SN2201_MAIN_MUX_CH7_NR (NVSW_SN2201_MAIN_MUX_CH0_NR + 7)
#define NVSW_SN2201_2ND_MUX_CH0_NR (NVSW_SN2201_MAIN_MUX_CH7_NR + 1)
#define NVSW_SN2201_2ND_MUX_CH1_NR (NVSW_SN2201_MAIN_MUX_CH7_NR + 2)
#define NVSW_SN2201_2ND_MUX_CH2_NR (NVSW_SN2201_MAIN_MUX_CH7_NR + 3)
#define NVSW_SN2201_2ND_MUX_CH3_NR (NVSW_SN2201_MAIN_MUX_CH7_NR + 4)
#define NVSW_SN2201_CPLD_NR NVSW_SN2201_MAIN_MUX_CH0_NR
#define NVSW_SN2201_NR_NONE -1
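/*
 * Example of the resulting adapter numbering: with 2 physical busses, the
 * main mux channels occupy adapters 2..9 and the second mux channels
 * adapters 10..13; the CPLD sits behind adapter 2, and adapter 9 (the last
 * main mux channel) must exist before deferred device creation proceeds.
 */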
/* Masks for aggregation, PSU presence and power, ASIC events
* in CPLD related registers.
*/
#define NVSW_SN2201_CPLD_AGGR_ASIC_MASK_DEF 0xe0
#define NVSW_SN2201_CPLD_AGGR_PSU_MASK_DEF 0x04
#define NVSW_SN2201_CPLD_AGGR_PWR_MASK_DEF 0x02
#define NVSW_SN2201_CPLD_AGGR_FAN_MASK_DEF 0x10
#define NVSW_SN2201_CPLD_AGGR_MASK_DEF \
(NVSW_SN2201_CPLD_AGGR_ASIC_MASK_DEF \
| NVSW_SN2201_CPLD_AGGR_PSU_MASK_DEF \
| NVSW_SN2201_CPLD_AGGR_PWR_MASK_DEF \
| NVSW_SN2201_CPLD_AGGR_FAN_MASK_DEF)
#define NVSW_SN2201_CPLD_ASIC_MASK GENMASK(3, 1)
#define NVSW_SN2201_CPLD_PSU_MASK GENMASK(1, 0)
#define NVSW_SN2201_CPLD_PWR_MASK GENMASK(1, 0)
#define NVSW_SN2201_CPLD_FAN_MASK GENMASK(3, 0)
#define NVSW_SN2201_CPLD_SYSIRQ 26
#define NVSW_SN2201_LPC_SYSIRQ 28
#define NVSW_SN2201_CPLD_I2CADDR 0x41
#define NVSW_SN2201_WD_DFLT_TIMEOUT 600
/* nvsw_sn2201 - device private data
* @dev: platform device;
* @io_data: register access platform data;
 * @led_data: LED platform data;
 * @wd_data: watchdog platform data;
 * @hotplug_data: hotplug platform data;
 * @i2c_data: I2C controller platform data;
 * @led: LED device;
 * @wd: watchdog device;
 * @io_regs: register access device;
 * @pdev_hotplug: hotplug device;
 * @pdev_i2c: I2C controller device;
 * @sn2201_devs: I2C devices for sn2201 devices;
 * @sn2201_devs_num: number of I2C devices for sn2201 device;
 * @main_mux_devs: I2C devices for main mux;
 * @main_mux_devs_num: number of I2C devices for main mux;
 * @cpld_devs: I2C devices for cpld;
 * @cpld_devs_num: number of I2C devices for cpld;
 * @main_mux_deferred_nr: I2C adapter number that must exist prior to deferred device creation;
*/
struct nvsw_sn2201 {
struct device *dev;
struct mlxreg_core_platform_data *io_data;
struct mlxreg_core_platform_data *led_data;
struct mlxreg_core_platform_data *wd_data;
struct mlxreg_core_hotplug_platform_data *hotplug_data;
struct mlxreg_core_hotplug_platform_data *i2c_data;
struct platform_device *led;
struct platform_device *wd;
struct platform_device *io_regs;
struct platform_device *pdev_hotplug;
struct platform_device *pdev_i2c;
struct mlxreg_hotplug_device *sn2201_devs;
int sn2201_devs_num;
struct mlxreg_hotplug_device *main_mux_devs;
int main_mux_devs_num;
struct mlxreg_hotplug_device *cpld_devs;
int cpld_devs_num;
int main_mux_deferred_nr;
};
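/*
 * Regmap access predicates below tell the regmap core which CPLD
 * registers are writeable, readable and volatile (not cacheable).
 */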
static bool nvsw_sn2201_writeable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case NVSW_SN2201_PSU_CTRL_OFFSET:
case NVSW_SN2201_QSFP28_LP_STATUS_OFFSET:
case NVSW_SN2201_QSFP28_RST_STATUS_OFFSET:
case NVSW_SN2201_FRONT_SYS_LED_CTRL_OFFSET:
case NVSW_SN2201_FRONT_PSU_LED_CTRL_OFFSET:
case NVSW_SN2201_FRONT_UID_LED_CTRL_OFFSET:
case NVSW_SN2201_QSFP28_LED_TEST_STATUS_OFFSET:
case NVSW_SN2201_SYS_RST_STATUS_OFFSET:
case NVSW_SN2201_SYS_INT_MASK_OFFSET:
case NVSW_SN2201_ASIC_EVENT_OFFSET:
	case NVSW_SN2201_ASIC_MASK_OFFSET:
case NVSW_SN2201_THML_EVENT_OFFSET:
case NVSW_SN2201_THML_MASK_OFFSET:
case NVSW_SN2201_PS_ALT_EVENT_OFFSET:
case NVSW_SN2201_PS_ALT_MASK_OFFSET:
case NVSW_SN2201_PS_PRSNT_EVENT_OFFSET:
case NVSW_SN2201_PS_PRSNT_MASK_OFFSET:
case NVSW_SN2201_PS_DC_OK_EVENT_OFFSET:
case NVSW_SN2201_PS_DC_OK_MASK_OFFSET:
case NVSW_SN2201_RST_SW_CTRL_OFFSET:
case NVSW_SN2201_FAN_PRSNT_EVENT_OFFSET:
case NVSW_SN2201_FAN_PRSNT_MASK_OFFSET:
case NVSW_SN2201_WD_TMR_OFFSET_LSB:
case NVSW_SN2201_WD_TMR_OFFSET_MSB:
case NVSW_SN2201_WD_ACT_OFFSET:
case NVSW_SN2201_FAN_LED1_CTRL_OFFSET:
case NVSW_SN2201_FAN_LED2_CTRL_OFFSET:
return true;
}
return false;
}
static bool nvsw_sn2201_readable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case NVSW_SN2201_HW_VER_ID_OFFSET:
case NVSW_SN2201_BOARD_ID_OFFSET:
case NVSW_SN2201_CPLD_VER_OFFSET:
case NVSW_SN2201_CPLD_MVER_OFFSET:
case NVSW_SN2201_CPLD_ID_OFFSET:
case NVSW_SN2201_CPLD_PN_OFFSET:
case NVSW_SN2201_CPLD_PN1_OFFSET:
case NVSW_SN2201_PSU_CTRL_OFFSET:
case NVSW_SN2201_QSFP28_STATUS_OFFSET:
case NVSW_SN2201_QSFP28_INT_STATUS_OFFSET:
case NVSW_SN2201_QSFP28_LP_STATUS_OFFSET:
case NVSW_SN2201_QSFP28_RST_STATUS_OFFSET:
case NVSW_SN2201_SYS_STATUS_OFFSET:
case NVSW_SN2201_FRONT_SYS_LED_CTRL_OFFSET:
case NVSW_SN2201_FRONT_PSU_LED_CTRL_OFFSET:
case NVSW_SN2201_FRONT_UID_LED_CTRL_OFFSET:
case NVSW_SN2201_QSFP28_LED_TEST_STATUS_OFFSET:
case NVSW_SN2201_SYS_RST_STATUS_OFFSET:
case NVSW_SN2201_RST_CAUSE1_OFFSET:
case NVSW_SN2201_RST_CAUSE2_OFFSET:
case NVSW_SN2201_SYS_INT_STATUS_OFFSET:
case NVSW_SN2201_SYS_INT_MASK_OFFSET:
case NVSW_SN2201_ASIC_STATUS_OFFSET:
case NVSW_SN2201_ASIC_EVENT_OFFSET:
	case NVSW_SN2201_ASIC_MASK_OFFSET:
case NVSW_SN2201_THML_STATUS_OFFSET:
case NVSW_SN2201_THML_EVENT_OFFSET:
case NVSW_SN2201_THML_MASK_OFFSET:
case NVSW_SN2201_PS_ALT_STATUS_OFFSET:
case NVSW_SN2201_PS_ALT_EVENT_OFFSET:
case NVSW_SN2201_PS_ALT_MASK_OFFSET:
case NVSW_SN2201_PS_PRSNT_STATUS_OFFSET:
case NVSW_SN2201_PS_PRSNT_EVENT_OFFSET:
case NVSW_SN2201_PS_PRSNT_MASK_OFFSET:
case NVSW_SN2201_PS_DC_OK_STATUS_OFFSET:
case NVSW_SN2201_PS_DC_OK_EVENT_OFFSET:
case NVSW_SN2201_PS_DC_OK_MASK_OFFSET:
case NVSW_SN2201_RST_SW_CTRL_OFFSET:
case NVSW_SN2201_FAN_PRSNT_STATUS_OFFSET:
case NVSW_SN2201_FAN_PRSNT_EVENT_OFFSET:
case NVSW_SN2201_FAN_PRSNT_MASK_OFFSET:
case NVSW_SN2201_WD_TMR_OFFSET_LSB:
case NVSW_SN2201_WD_TMR_OFFSET_MSB:
case NVSW_SN2201_WD_ACT_OFFSET:
case NVSW_SN2201_FAN_LED1_CTRL_OFFSET:
case NVSW_SN2201_FAN_LED2_CTRL_OFFSET:
return true;
}
return false;
}
static bool nvsw_sn2201_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case NVSW_SN2201_HW_VER_ID_OFFSET:
case NVSW_SN2201_BOARD_ID_OFFSET:
case NVSW_SN2201_CPLD_VER_OFFSET:
case NVSW_SN2201_CPLD_MVER_OFFSET:
case NVSW_SN2201_CPLD_ID_OFFSET:
case NVSW_SN2201_CPLD_PN_OFFSET:
case NVSW_SN2201_CPLD_PN1_OFFSET:
case NVSW_SN2201_PSU_CTRL_OFFSET:
case NVSW_SN2201_QSFP28_STATUS_OFFSET:
case NVSW_SN2201_QSFP28_INT_STATUS_OFFSET:
case NVSW_SN2201_QSFP28_LP_STATUS_OFFSET:
case NVSW_SN2201_QSFP28_RST_STATUS_OFFSET:
case NVSW_SN2201_SYS_STATUS_OFFSET:
case NVSW_SN2201_FRONT_SYS_LED_CTRL_OFFSET:
case NVSW_SN2201_FRONT_PSU_LED_CTRL_OFFSET:
case NVSW_SN2201_FRONT_UID_LED_CTRL_OFFSET:
case NVSW_SN2201_QSFP28_LED_TEST_STATUS_OFFSET:
case NVSW_SN2201_SYS_RST_STATUS_OFFSET:
case NVSW_SN2201_RST_CAUSE1_OFFSET:
case NVSW_SN2201_RST_CAUSE2_OFFSET:
case NVSW_SN2201_SYS_INT_STATUS_OFFSET:
case NVSW_SN2201_SYS_INT_MASK_OFFSET:
case NVSW_SN2201_ASIC_STATUS_OFFSET:
case NVSW_SN2201_ASIC_EVENT_OFFSET:
	case NVSW_SN2201_ASIC_MASK_OFFSET:
case NVSW_SN2201_THML_STATUS_OFFSET:
case NVSW_SN2201_THML_EVENT_OFFSET:
case NVSW_SN2201_THML_MASK_OFFSET:
case NVSW_SN2201_PS_ALT_STATUS_OFFSET:
case NVSW_SN2201_PS_ALT_EVENT_OFFSET:
case NVSW_SN2201_PS_ALT_MASK_OFFSET:
case NVSW_SN2201_PS_PRSNT_STATUS_OFFSET:
case NVSW_SN2201_PS_PRSNT_EVENT_OFFSET:
case NVSW_SN2201_PS_PRSNT_MASK_OFFSET:
case NVSW_SN2201_PS_DC_OK_STATUS_OFFSET:
case NVSW_SN2201_PS_DC_OK_EVENT_OFFSET:
case NVSW_SN2201_PS_DC_OK_MASK_OFFSET:
case NVSW_SN2201_RST_SW_CTRL_OFFSET:
case NVSW_SN2201_FAN_PRSNT_STATUS_OFFSET:
case NVSW_SN2201_FAN_PRSNT_EVENT_OFFSET:
case NVSW_SN2201_FAN_PRSNT_MASK_OFFSET:
case NVSW_SN2201_WD_TMR_OFFSET_LSB:
case NVSW_SN2201_WD_TMR_OFFSET_MSB:
case NVSW_SN2201_FAN_LED1_CTRL_OFFSET:
case NVSW_SN2201_FAN_LED2_CTRL_OFFSET:
return true;
}
return false;
}
static const struct reg_default nvsw_sn2201_regmap_default[] = {
{ NVSW_SN2201_QSFP28_LED_TEST_STATUS_OFFSET, 0x00 },
{ NVSW_SN2201_WD_ACT_OFFSET, 0x00 },
};
/* Configuration for the register map of a device with a 1-byte address space. */
static const struct regmap_config nvsw_sn2201_regmap_conf = {
.reg_bits = 8,
.val_bits = 8,
.max_register = NVSW_SN2201_REG_MAX,
.cache_type = REGCACHE_FLAT,
.writeable_reg = nvsw_sn2201_writeable_reg,
.readable_reg = nvsw_sn2201_readable_reg,
.volatile_reg = nvsw_sn2201_volatile_reg,
.reg_defaults = nvsw_sn2201_regmap_default,
.num_reg_defaults = ARRAY_SIZE(nvsw_sn2201_regmap_default),
};
/* Regions for LPC I2C controller and LPC base register space. */
static const struct resource nvsw_sn2201_lpc_io_resources[] = {
[0] = DEFINE_RES_NAMED(NVSW_SN2201_CPLD_LPC_I2C_BASE_ADRR,
NVSW_SN2201_CPLD_LPC_IO_RANGE,
"mlxplat_cpld_lpc_i2c_ctrl", IORESOURCE_IO),
};
static struct resource nvsw_sn2201_cpld_res[] = {
[0] = DEFINE_RES_IRQ_NAMED(NVSW_SN2201_CPLD_SYSIRQ, "mlxreg-hotplug"),
};
static struct resource nvsw_sn2201_lpc_res[] = {
[0] = DEFINE_RES_IRQ_NAMED(NVSW_SN2201_LPC_SYSIRQ, "i2c-mlxcpld"),
};
/* SN2201 I2C platform data. */
static struct mlxreg_core_hotplug_platform_data nvsw_sn2201_i2c_data = {
.irq = NVSW_SN2201_CPLD_SYSIRQ,
};
/* SN2201 CPLD device. */
static struct i2c_board_info nvsw_sn2201_cpld_devices[] = {
{
I2C_BOARD_INFO("nvsw-sn2201", 0x41),
},
};
/* SN2201 CPLD board info. */
static struct mlxreg_hotplug_device nvsw_sn2201_cpld_brdinfo[] = {
{
.brdinfo = &nvsw_sn2201_cpld_devices[0],
.nr = NVSW_SN2201_CPLD_NR,
},
};
/* SN2201 main mux device. */
static struct i2c_board_info nvsw_sn2201_main_mux_devices[] = {
{
I2C_BOARD_INFO("pca9548", 0x70),
},
};
/* SN2201 main mux board info. */
static struct mlxreg_hotplug_device nvsw_sn2201_main_mux_brdinfo[] = {
{
.brdinfo = &nvsw_sn2201_main_mux_devices[0],
.nr = NVSW_SN2201_MAIN_MUX_NR,
},
};
/* SN2201 power devices. */
static struct i2c_board_info nvsw_sn2201_pwr_devices[] = {
{
I2C_BOARD_INFO("pmbus", 0x58),
},
{
I2C_BOARD_INFO("pmbus", 0x58),
},
};
/* SN2201 fan devices. */
static struct i2c_board_info nvsw_sn2201_fan_devices[] = {
{
I2C_BOARD_INFO("24c02", 0x50),
},
{
I2C_BOARD_INFO("24c02", 0x51),
},
{
I2C_BOARD_INFO("24c02", 0x52),
},
{
I2C_BOARD_INFO("24c02", 0x53),
},
};
/* SN2201 hotplug default data. */
static struct mlxreg_core_data nvsw_sn2201_psu_items_data[] = {
{
.label = "psu1",
.reg = NVSW_SN2201_PS_PRSNT_STATUS_OFFSET,
.mask = BIT(0),
.hpdev.nr = NVSW_SN2201_NR_NONE,
},
{
.label = "psu2",
.reg = NVSW_SN2201_PS_PRSNT_STATUS_OFFSET,
.mask = BIT(1),
.hpdev.nr = NVSW_SN2201_NR_NONE,
},
};
static struct mlxreg_core_data nvsw_sn2201_pwr_items_data[] = {
{
.label = "pwr1",
.reg = NVSW_SN2201_PS_DC_OK_STATUS_OFFSET,
.mask = BIT(0),
.hpdev.brdinfo = &nvsw_sn2201_pwr_devices[0],
.hpdev.nr = NVSW_SN2201_MAIN_MUX_CH1_NR,
},
{
.label = "pwr2",
.reg = NVSW_SN2201_PS_DC_OK_STATUS_OFFSET,
.mask = BIT(1),
.hpdev.brdinfo = &nvsw_sn2201_pwr_devices[1],
.hpdev.nr = NVSW_SN2201_MAIN_MUX_CH2_NR,
},
};
static struct mlxreg_core_data nvsw_sn2201_fan_items_data[] = {
{
.label = "fan1",
.reg = NVSW_SN2201_FAN_PRSNT_STATUS_OFFSET,
.mask = BIT(0),
.hpdev.brdinfo = &nvsw_sn2201_fan_devices[0],
.hpdev.nr = NVSW_SN2201_2ND_MUX_CH0_NR,
},
{
.label = "fan2",
.reg = NVSW_SN2201_FAN_PRSNT_STATUS_OFFSET,
.mask = BIT(1),
.hpdev.brdinfo = &nvsw_sn2201_fan_devices[1],
.hpdev.nr = NVSW_SN2201_2ND_MUX_CH1_NR,
},
{
.label = "fan3",
.reg = NVSW_SN2201_FAN_PRSNT_STATUS_OFFSET,
.mask = BIT(2),
.hpdev.brdinfo = &nvsw_sn2201_fan_devices[2],
.hpdev.nr = NVSW_SN2201_2ND_MUX_CH2_NR,
},
{
.label = "fan4",
.reg = NVSW_SN2201_FAN_PRSNT_STATUS_OFFSET,
.mask = BIT(3),
.hpdev.brdinfo = &nvsw_sn2201_fan_devices[3],
.hpdev.nr = NVSW_SN2201_2ND_MUX_CH3_NR,
},
};
static struct mlxreg_core_data nvsw_sn2201_sys_items_data[] = {
{
.label = "nic_smb_alert",
.reg = NVSW_SN2201_ASIC_STATUS_OFFSET,
.mask = BIT(1),
.hpdev.nr = NVSW_SN2201_NR_NONE,
},
{
.label = "cpu_sd",
.reg = NVSW_SN2201_ASIC_STATUS_OFFSET,
.mask = BIT(2),
.hpdev.nr = NVSW_SN2201_NR_NONE,
},
{
.label = "mac_health",
.reg = NVSW_SN2201_ASIC_STATUS_OFFSET,
.mask = BIT(3),
.hpdev.nr = NVSW_SN2201_NR_NONE,
},
};
static struct mlxreg_core_item nvsw_sn2201_items[] = {
{
.data = nvsw_sn2201_psu_items_data,
.aggr_mask = NVSW_SN2201_CPLD_AGGR_PSU_MASK_DEF,
.reg = NVSW_SN2201_PS_PRSNT_STATUS_OFFSET,
.mask = NVSW_SN2201_CPLD_PSU_MASK,
.count = ARRAY_SIZE(nvsw_sn2201_psu_items_data),
.inversed = 1,
.health = false,
},
{
.data = nvsw_sn2201_pwr_items_data,
.aggr_mask = NVSW_SN2201_CPLD_AGGR_PWR_MASK_DEF,
.reg = NVSW_SN2201_PS_DC_OK_STATUS_OFFSET,
.mask = NVSW_SN2201_CPLD_PWR_MASK,
.count = ARRAY_SIZE(nvsw_sn2201_pwr_items_data),
.inversed = 0,
.health = false,
},
{
.data = nvsw_sn2201_fan_items_data,
.aggr_mask = NVSW_SN2201_CPLD_AGGR_FAN_MASK_DEF,
.reg = NVSW_SN2201_FAN_PRSNT_STATUS_OFFSET,
.mask = NVSW_SN2201_CPLD_FAN_MASK,
.count = ARRAY_SIZE(nvsw_sn2201_fan_items_data),
.inversed = 1,
.health = false,
},
{
.data = nvsw_sn2201_sys_items_data,
.aggr_mask = NVSW_SN2201_CPLD_AGGR_ASIC_MASK_DEF,
.reg = NVSW_SN2201_ASIC_STATUS_OFFSET,
.mask = NVSW_SN2201_CPLD_ASIC_MASK,
.count = ARRAY_SIZE(nvsw_sn2201_sys_items_data),
.inversed = 1,
.health = false,
},
};
static
struct mlxreg_core_hotplug_platform_data nvsw_sn2201_hotplug = {
.items = nvsw_sn2201_items,
.counter = ARRAY_SIZE(nvsw_sn2201_items),
.cell = NVSW_SN2201_SYS_INT_STATUS_OFFSET,
.mask = NVSW_SN2201_CPLD_AGGR_MASK_DEF,
};
/* SN2201 static devices. */
static struct i2c_board_info nvsw_sn2201_static_devices[] = {
{
I2C_BOARD_INFO("24c02", 0x57),
},
{
I2C_BOARD_INFO("lm75", 0x4b),
},
{
I2C_BOARD_INFO("24c64", 0x56),
},
{
I2C_BOARD_INFO("ads1015", 0x49),
},
{
I2C_BOARD_INFO("pca9546", 0x71),
},
{
I2C_BOARD_INFO("emc2305", 0x4d),
},
{
I2C_BOARD_INFO("lm75", 0x49),
},
{
I2C_BOARD_INFO("pca9555", 0x27),
},
{
I2C_BOARD_INFO("powr1014", 0x37),
},
{
I2C_BOARD_INFO("lm75", 0x4f),
},
{
I2C_BOARD_INFO("pmbus", 0x40),
},
};
/* SN2201 default static board info. */
static struct mlxreg_hotplug_device nvsw_sn2201_static_brdinfo[] = {
{
.brdinfo = &nvsw_sn2201_static_devices[0],
.nr = NVSW_SN2201_MAIN_NR,
},
{
.brdinfo = &nvsw_sn2201_static_devices[1],
.nr = NVSW_SN2201_MAIN_MUX_CH0_NR,
},
{
.brdinfo = &nvsw_sn2201_static_devices[2],
.nr = NVSW_SN2201_MAIN_MUX_CH0_NR,
},
{
.brdinfo = &nvsw_sn2201_static_devices[3],
.nr = NVSW_SN2201_MAIN_MUX_CH0_NR,
},
{
.brdinfo = &nvsw_sn2201_static_devices[4],
.nr = NVSW_SN2201_MAIN_MUX_CH3_NR,
},
{
.brdinfo = &nvsw_sn2201_static_devices[5],
.nr = NVSW_SN2201_MAIN_MUX_CH5_NR,
},
{
.brdinfo = &nvsw_sn2201_static_devices[6],
.nr = NVSW_SN2201_MAIN_MUX_CH5_NR,
},
{
.brdinfo = &nvsw_sn2201_static_devices[7],
.nr = NVSW_SN2201_MAIN_MUX_CH5_NR,
},
{
.brdinfo = &nvsw_sn2201_static_devices[8],
.nr = NVSW_SN2201_MAIN_MUX_CH6_NR,
},
{
.brdinfo = &nvsw_sn2201_static_devices[9],
.nr = NVSW_SN2201_MAIN_MUX_CH6_NR,
},
{
.brdinfo = &nvsw_sn2201_static_devices[10],
.nr = NVSW_SN2201_MAIN_MUX_CH7_NR,
},
};
/* LED default data. */
static struct mlxreg_core_data nvsw_sn2201_led_data[] = {
{
.label = "status:green",
.reg = NVSW_SN2201_FRONT_SYS_LED_CTRL_OFFSET,
.mask = GENMASK(7, 4),
},
{
.label = "status:orange",
.reg = NVSW_SN2201_FRONT_SYS_LED_CTRL_OFFSET,
.mask = GENMASK(7, 4),
},
{
.label = "psu:green",
.reg = NVSW_SN2201_FRONT_PSU_LED_CTRL_OFFSET,
.mask = GENMASK(7, 4),
},
{
.label = "psu:orange",
.reg = NVSW_SN2201_FRONT_PSU_LED_CTRL_OFFSET,
.mask = GENMASK(7, 4),
},
{
.label = "uid:blue",
.reg = NVSW_SN2201_FRONT_UID_LED_CTRL_OFFSET,
.mask = GENMASK(7, 4),
},
{
.label = "fan1:green",
.reg = NVSW_SN2201_FAN_LED1_CTRL_OFFSET,
.mask = GENMASK(7, 4),
},
{
.label = "fan1:orange",
.reg = NVSW_SN2201_FAN_LED1_CTRL_OFFSET,
.mask = GENMASK(7, 4),
},
{
.label = "fan2:green",
.reg = NVSW_SN2201_FAN_LED1_CTRL_OFFSET,
.mask = GENMASK(3, 0),
},
{
.label = "fan2:orange",
.reg = NVSW_SN2201_FAN_LED1_CTRL_OFFSET,
.mask = GENMASK(3, 0),
},
{
.label = "fan3:green",
.reg = NVSW_SN2201_FAN_LED2_CTRL_OFFSET,
.mask = GENMASK(7, 4),
},
{
.label = "fan3:orange",
.reg = NVSW_SN2201_FAN_LED2_CTRL_OFFSET,
.mask = GENMASK(7, 4),
},
{
.label = "fan4:green",
.reg = NVSW_SN2201_FAN_LED2_CTRL_OFFSET,
.mask = GENMASK(3, 0),
},
{
.label = "fan4:orange",
.reg = NVSW_SN2201_FAN_LED2_CTRL_OFFSET,
.mask = GENMASK(3, 0),
},
};
static struct mlxreg_core_platform_data nvsw_sn2201_led = {
.data = nvsw_sn2201_led_data,
.counter = ARRAY_SIZE(nvsw_sn2201_led_data),
};
/* Default register access data. */
static struct mlxreg_core_data nvsw_sn2201_io_data[] = {
{
.label = "cpld1_version",
.reg = NVSW_SN2201_CPLD_VER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "cpld1_version_min",
.reg = NVSW_SN2201_CPLD_MVER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "cpld1_pn",
.reg = NVSW_SN2201_CPLD_PN_OFFSET,
.bit = GENMASK(15, 0),
.mode = 0444,
.regnum = 2,
},
{
.label = "psu1_on",
.reg = NVSW_SN2201_PSU_CTRL_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
.mode = 0644,
},
{
.label = "psu2_on",
.reg = NVSW_SN2201_PSU_CTRL_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(1),
.mode = 0644,
},
{
.label = "pwr_cycle",
.reg = NVSW_SN2201_PSU_CTRL_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(2),
.mode = 0644,
},
{
.label = "asic_health",
.reg = NVSW_SN2201_SYS_STATUS_OFFSET,
.mask = GENMASK(4, 3),
.bit = 4,
.mode = 0444,
},
{
.label = "qsfp_pwr_good",
.reg = NVSW_SN2201_SYS_STATUS_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
.mode = 0444,
},
{
.label = "phy_reset",
.reg = NVSW_SN2201_SYS_RST_STATUS_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
.mode = 0644,
},
{
.label = "mac_reset",
.reg = NVSW_SN2201_SYS_RST_STATUS_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(2),
.mode = 0644,
},
{
.label = "pwr_down",
.reg = NVSW_SN2201_RST_SW_CTRL_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
.mode = 0644,
},
{
.label = "reset_long_pb",
.reg = NVSW_SN2201_RST_CAUSE1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
.mode = 0444,
},
{
.label = "reset_short_pb",
.reg = NVSW_SN2201_RST_CAUSE1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(1),
.mode = 0444,
},
{
.label = "reset_aux_pwr_or_fu",
.reg = NVSW_SN2201_RST_CAUSE1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(2),
.mode = 0444,
},
{
.label = "reset_swb_dc_dc_pwr_fail",
.reg = NVSW_SN2201_RST_CAUSE1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
.mode = 0444,
},
{
.label = "reset_sw_reset",
.reg = NVSW_SN2201_RST_CAUSE1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(4),
.mode = 0444,
},
{
.label = "reset_fw_reset",
.reg = NVSW_SN2201_RST_CAUSE1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(5),
.mode = 0444,
},
{
.label = "reset_swb_wd",
.reg = NVSW_SN2201_RST_CAUSE1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
.mode = 0444,
},
{
.label = "reset_asic_thermal",
.reg = NVSW_SN2201_RST_CAUSE1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(7),
.mode = 0444,
},
{
.label = "reset_system",
.reg = NVSW_SN2201_RST_CAUSE2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(1),
.mode = 0444,
},
{
.label = "reset_sw_pwr_off",
.reg = NVSW_SN2201_RST_CAUSE2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(2),
.mode = 0444,
},
{
.label = "reset_cpu_pwr_fail_thermal",
.reg = NVSW_SN2201_RST_CAUSE2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(4),
.mode = 0444,
},
{
.label = "reset_reload_bios",
.reg = NVSW_SN2201_RST_CAUSE2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(5),
.mode = 0444,
},
{
.label = "reset_ac_pwr_fail",
.reg = NVSW_SN2201_RST_CAUSE2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
.mode = 0444,
},
{
.label = "psu1",
.reg = NVSW_SN2201_PS_PRSNT_STATUS_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
.mode = 0444,
},
{
.label = "psu2",
.reg = NVSW_SN2201_PS_PRSNT_STATUS_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(1),
.mode = 0444,
},
};
static struct mlxreg_core_platform_data nvsw_sn2201_regs_io = {
.data = nvsw_sn2201_io_data,
.counter = ARRAY_SIZE(nvsw_sn2201_io_data),
};
/* Default watchdog data. */
static struct mlxreg_core_data nvsw_sn2201_wd_data[] = {
{
.label = "action",
.reg = NVSW_SN2201_WD_ACT_OFFSET,
.mask = GENMASK(7, 1),
.bit = 0,
},
{
.label = "timeout",
.reg = NVSW_SN2201_WD_TMR_OFFSET_LSB,
.mask = 0,
.health_cntr = NVSW_SN2201_WD_DFLT_TIMEOUT,
},
{
.label = "timeleft",
.reg = NVSW_SN2201_WD_TMR_OFFSET_LSB,
.mask = 0,
},
{
.label = "ping",
.reg = NVSW_SN2201_WD_ACT_OFFSET,
.mask = GENMASK(7, 1),
.bit = 0,
},
{
.label = "reset",
.reg = NVSW_SN2201_RST_CAUSE1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
.bit = 6,
},
};
static struct mlxreg_core_platform_data nvsw_sn2201_wd = {
.data = nvsw_sn2201_wd_data,
.counter = ARRAY_SIZE(nvsw_sn2201_wd_data),
.version = MLX_WDT_TYPE3,
.identity = "mlx-wdt-main",
};
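/*
 * Instantiate an I2C client for each entry of the given board-info array;
 * on failure, unregister the clients created so far.
 */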
static int
nvsw_sn2201_create_static_devices(struct nvsw_sn2201 *nvsw_sn2201,
struct mlxreg_hotplug_device *devs,
int size)
{
struct mlxreg_hotplug_device *dev = devs;
int ret;
int i;
/* Create I2C static devices. */
for (i = 0; i < size; i++, dev++) {
dev->client = i2c_new_client_device(dev->adapter, dev->brdinfo);
if (IS_ERR(dev->client)) {
dev_err(nvsw_sn2201->dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
dev->brdinfo->type,
dev->nr, dev->brdinfo->addr);
dev->adapter = NULL;
ret = PTR_ERR(dev->client);
goto fail_create_static_devices;
}
}
return 0;
fail_create_static_devices:
while (--i >= 0) {
dev = devs + i;
i2c_unregister_device(dev->client);
dev->client = NULL;
dev->adapter = NULL;
}
return ret;
}
static void nvsw_sn2201_destroy_static_devices(struct nvsw_sn2201 *nvsw_sn2201,
struct mlxreg_hotplug_device *devs, int size)
{
struct mlxreg_hotplug_device *dev = devs;
int i;
	/* Destroy the static I2C devices. */
for (i = 0; i < size; i++, dev++) {
if (dev->client) {
i2c_unregister_device(dev->client);
dev->client = NULL;
i2c_put_adapter(dev->adapter);
dev->adapter = NULL;
}
}
}
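/*
 * Post-init step: verify that the deferred mux channel adapter exists,
 * resolve the adapters for all static devices and then instantiate them.
 */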
static int nvsw_sn2201_config_post_init(struct nvsw_sn2201 *nvsw_sn2201)
{
struct mlxreg_hotplug_device *sn2201_dev;
struct i2c_adapter *adap;
struct device *dev;
int i, err;
dev = nvsw_sn2201->dev;
adap = i2c_get_adapter(nvsw_sn2201->main_mux_deferred_nr);
if (!adap) {
dev_err(dev, "Failed to get adapter for bus %d\n",
nvsw_sn2201->main_mux_deferred_nr);
return -ENODEV;
}
i2c_put_adapter(adap);
/* Update board info. */
sn2201_dev = nvsw_sn2201->sn2201_devs;
for (i = 0; i < nvsw_sn2201->sn2201_devs_num; i++, sn2201_dev++) {
sn2201_dev->adapter = i2c_get_adapter(sn2201_dev->nr);
if (!sn2201_dev->adapter)
return -ENODEV;
i2c_put_adapter(sn2201_dev->adapter);
}
err = nvsw_sn2201_create_static_devices(nvsw_sn2201, nvsw_sn2201->sn2201_devs,
nvsw_sn2201->sn2201_devs_num);
if (err)
dev_err(dev, "Failed to create static devices\n");
return err;
}
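/*
 * Register the underlying IO access, LED, watchdog and hotplug platform
 * drivers on top of the shared CPLD regmap, then run the post-init step.
 */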
static int nvsw_sn2201_config_init(struct nvsw_sn2201 *nvsw_sn2201, void *regmap)
{
struct device *dev = nvsw_sn2201->dev;
int err;
nvsw_sn2201->io_data = &nvsw_sn2201_regs_io;
nvsw_sn2201->led_data = &nvsw_sn2201_led;
nvsw_sn2201->wd_data = &nvsw_sn2201_wd;
nvsw_sn2201->hotplug_data = &nvsw_sn2201_hotplug;
/* Register IO access driver. */
if (nvsw_sn2201->io_data) {
nvsw_sn2201->io_data->regmap = regmap;
nvsw_sn2201->io_regs =
platform_device_register_resndata(dev, "mlxreg-io", PLATFORM_DEVID_NONE, NULL, 0,
nvsw_sn2201->io_data,
sizeof(*nvsw_sn2201->io_data));
if (IS_ERR(nvsw_sn2201->io_regs)) {
err = PTR_ERR(nvsw_sn2201->io_regs);
goto fail_register_io;
}
}
/* Register LED driver. */
if (nvsw_sn2201->led_data) {
nvsw_sn2201->led_data->regmap = regmap;
nvsw_sn2201->led =
platform_device_register_resndata(dev, "leds-mlxreg", PLATFORM_DEVID_NONE, NULL, 0,
nvsw_sn2201->led_data,
sizeof(*nvsw_sn2201->led_data));
if (IS_ERR(nvsw_sn2201->led)) {
err = PTR_ERR(nvsw_sn2201->led);
goto fail_register_led;
}
}
/* Register WD driver. */
if (nvsw_sn2201->wd_data) {
nvsw_sn2201->wd_data->regmap = regmap;
nvsw_sn2201->wd =
platform_device_register_resndata(dev, "mlx-wdt", PLATFORM_DEVID_NONE, NULL, 0,
nvsw_sn2201->wd_data,
sizeof(*nvsw_sn2201->wd_data));
if (IS_ERR(nvsw_sn2201->wd)) {
err = PTR_ERR(nvsw_sn2201->wd);
goto fail_register_wd;
}
}
/* Register hotplug driver. */
if (nvsw_sn2201->hotplug_data) {
nvsw_sn2201->hotplug_data->regmap = regmap;
nvsw_sn2201->pdev_hotplug =
platform_device_register_resndata(dev, "mlxreg-hotplug", PLATFORM_DEVID_NONE,
nvsw_sn2201_cpld_res,
ARRAY_SIZE(nvsw_sn2201_cpld_res),
nvsw_sn2201->hotplug_data,
sizeof(*nvsw_sn2201->hotplug_data));
if (IS_ERR(nvsw_sn2201->pdev_hotplug)) {
err = PTR_ERR(nvsw_sn2201->pdev_hotplug);
goto fail_register_hotplug;
}
}
return nvsw_sn2201_config_post_init(nvsw_sn2201);
fail_register_hotplug:
if (nvsw_sn2201->wd)
platform_device_unregister(nvsw_sn2201->wd);
fail_register_wd:
if (nvsw_sn2201->led)
platform_device_unregister(nvsw_sn2201->led);
fail_register_led:
if (nvsw_sn2201->io_regs)
platform_device_unregister(nvsw_sn2201->io_regs);
fail_register_io:
return err;
}
static void nvsw_sn2201_config_exit(struct nvsw_sn2201 *nvsw_sn2201)
{
/* Unregister hotplug driver. */
if (nvsw_sn2201->pdev_hotplug)
platform_device_unregister(nvsw_sn2201->pdev_hotplug);
/* Unregister WD driver. */
if (nvsw_sn2201->wd)
platform_device_unregister(nvsw_sn2201->wd);
/* Unregister LED driver. */
if (nvsw_sn2201->led)
platform_device_unregister(nvsw_sn2201->led);
/* Unregister IO access driver. */
if (nvsw_sn2201->io_regs)
platform_device_unregister(nvsw_sn2201->io_regs);
}
/*
* Initialization is divided into two parts:
* - I2C main bus init.
* - Mux creation and attaching devices to the mux,
* which assumes that the main bus is already created.
 * This separation requires synchronization between the two parts, which
 * is provided by the completion notify callback.
*/
static int nvsw_sn2201_i2c_completion_notify(void *handle, int id)
{
struct nvsw_sn2201 *nvsw_sn2201 = handle;
void *regmap;
int i, err;
/* Create main mux. */
nvsw_sn2201->main_mux_devs->adapter = i2c_get_adapter(nvsw_sn2201->main_mux_devs->nr);
if (!nvsw_sn2201->main_mux_devs->adapter) {
err = -ENODEV;
dev_err(nvsw_sn2201->dev, "Failed to get adapter for bus %d\n",
nvsw_sn2201->cpld_devs->nr);
goto i2c_get_adapter_main_fail;
}
nvsw_sn2201->main_mux_devs_num = ARRAY_SIZE(nvsw_sn2201_main_mux_brdinfo);
err = nvsw_sn2201_create_static_devices(nvsw_sn2201, nvsw_sn2201->main_mux_devs,
nvsw_sn2201->main_mux_devs_num);
if (err) {
dev_err(nvsw_sn2201->dev, "Failed to create main mux devices\n");
goto nvsw_sn2201_create_static_devices_fail;
}
nvsw_sn2201->cpld_devs->adapter = i2c_get_adapter(nvsw_sn2201->cpld_devs->nr);
if (!nvsw_sn2201->cpld_devs->adapter) {
err = -ENODEV;
dev_err(nvsw_sn2201->dev, "Failed to get adapter for bus %d\n",
nvsw_sn2201->cpld_devs->nr);
goto i2c_get_adapter_fail;
}
/* Create CPLD device. */
nvsw_sn2201->cpld_devs->client = i2c_new_dummy_device(nvsw_sn2201->cpld_devs->adapter,
NVSW_SN2201_CPLD_I2CADDR);
if (IS_ERR(nvsw_sn2201->cpld_devs->client)) {
err = PTR_ERR(nvsw_sn2201->cpld_devs->client);
dev_err(nvsw_sn2201->dev, "Failed to create %s cpld device at bus %d at addr 0x%02x\n",
nvsw_sn2201->cpld_devs->brdinfo->type, nvsw_sn2201->cpld_devs->nr,
nvsw_sn2201->cpld_devs->brdinfo->addr);
goto i2c_new_dummy_fail;
}
regmap = devm_regmap_init_i2c(nvsw_sn2201->cpld_devs->client, &nvsw_sn2201_regmap_conf);
if (IS_ERR(regmap)) {
err = PTR_ERR(regmap);
dev_err(nvsw_sn2201->dev, "Failed to initialise managed register map\n");
goto devm_regmap_init_i2c_fail;
}
/* Set default registers. */
for (i = 0; i < nvsw_sn2201_regmap_conf.num_reg_defaults; i++) {
err = regmap_write(regmap, nvsw_sn2201_regmap_default[i].reg,
nvsw_sn2201_regmap_default[i].def);
if (err) {
dev_err(nvsw_sn2201->dev, "Failed to set register at offset 0x%02x to default value: 0x%02x\n",
nvsw_sn2201_regmap_default[i].reg,
nvsw_sn2201_regmap_default[i].def);
goto regmap_write_fail;
}
}
/* Sync registers with hardware. */
regcache_mark_dirty(regmap);
err = regcache_sync(regmap);
if (err) {
dev_err(nvsw_sn2201->dev, "Failed to Sync registers with hardware\n");
goto regcache_sync_fail;
}
/* Configure SN2201 board. */
err = nvsw_sn2201_config_init(nvsw_sn2201, regmap);
if (err) {
dev_err(nvsw_sn2201->dev, "Failed to configure board\n");
goto nvsw_sn2201_config_init_fail;
}
return 0;
nvsw_sn2201_config_init_fail:
nvsw_sn2201_config_exit(nvsw_sn2201);
regcache_sync_fail:
regmap_write_fail:
devm_regmap_init_i2c_fail:
i2c_new_dummy_fail:
i2c_put_adapter(nvsw_sn2201->cpld_devs->adapter);
nvsw_sn2201->cpld_devs->adapter = NULL;
i2c_get_adapter_fail:
/* Destroy SN2201 static I2C devices. */
nvsw_sn2201_destroy_static_devices(nvsw_sn2201, nvsw_sn2201->sn2201_devs,
nvsw_sn2201->sn2201_devs_num);
/* Destroy main mux device. */
nvsw_sn2201_destroy_static_devices(nvsw_sn2201, nvsw_sn2201->main_mux_devs,
nvsw_sn2201->main_mux_devs_num);
nvsw_sn2201_create_static_devices_fail:
i2c_put_adapter(nvsw_sn2201->main_mux_devs->adapter);
i2c_get_adapter_main_fail:
return err;
}
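/*
 * Pre-init step: register the LPC I2C controller; the rest of the setup
 * runs from the controller's completion_notify() callback once the main
 * bus is up.
 */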
static int nvsw_sn2201_config_pre_init(struct nvsw_sn2201 *nvsw_sn2201)
{
nvsw_sn2201->i2c_data = &nvsw_sn2201_i2c_data;
/* Register I2C controller. */
nvsw_sn2201->i2c_data->handle = nvsw_sn2201;
nvsw_sn2201->i2c_data->completion_notify = nvsw_sn2201_i2c_completion_notify;
nvsw_sn2201->pdev_i2c = platform_device_register_resndata(nvsw_sn2201->dev, "i2c_mlxcpld",
NVSW_SN2201_MAIN_MUX_NR,
nvsw_sn2201_lpc_res,
ARRAY_SIZE(nvsw_sn2201_lpc_res),
nvsw_sn2201->i2c_data,
sizeof(*nvsw_sn2201->i2c_data));
if (IS_ERR(nvsw_sn2201->pdev_i2c))
return PTR_ERR(nvsw_sn2201->pdev_i2c);
return 0;
}
static int nvsw_sn2201_probe(struct platform_device *pdev)
{
struct nvsw_sn2201 *nvsw_sn2201;
nvsw_sn2201 = devm_kzalloc(&pdev->dev, sizeof(*nvsw_sn2201), GFP_KERNEL);
if (!nvsw_sn2201)
return -ENOMEM;
nvsw_sn2201->dev = &pdev->dev;
platform_set_drvdata(pdev, nvsw_sn2201);
platform_device_add_resources(pdev, nvsw_sn2201_lpc_io_resources,
ARRAY_SIZE(nvsw_sn2201_lpc_io_resources));
nvsw_sn2201->main_mux_deferred_nr = NVSW_SN2201_MAIN_MUX_DEFER_NR;
nvsw_sn2201->main_mux_devs = nvsw_sn2201_main_mux_brdinfo;
nvsw_sn2201->cpld_devs = nvsw_sn2201_cpld_brdinfo;
nvsw_sn2201->sn2201_devs = nvsw_sn2201_static_brdinfo;
nvsw_sn2201->sn2201_devs_num = ARRAY_SIZE(nvsw_sn2201_static_brdinfo);
return nvsw_sn2201_config_pre_init(nvsw_sn2201);
}
static int nvsw_sn2201_remove(struct platform_device *pdev)
{
struct nvsw_sn2201 *nvsw_sn2201 = platform_get_drvdata(pdev);
/* Unregister underlying drivers. */
nvsw_sn2201_config_exit(nvsw_sn2201);
/* Destroy SN2201 static I2C devices. */
nvsw_sn2201_destroy_static_devices(nvsw_sn2201,
nvsw_sn2201->sn2201_devs,
nvsw_sn2201->sn2201_devs_num);
i2c_put_adapter(nvsw_sn2201->cpld_devs->adapter);
nvsw_sn2201->cpld_devs->adapter = NULL;
/* Destroy main mux device. */
nvsw_sn2201_destroy_static_devices(nvsw_sn2201,
nvsw_sn2201->main_mux_devs,
nvsw_sn2201->main_mux_devs_num);
/* Unregister I2C controller. */
if (nvsw_sn2201->pdev_i2c)
platform_device_unregister(nvsw_sn2201->pdev_i2c);
return 0;
}
static const struct acpi_device_id nvsw_sn2201_acpi_ids[] = {
{"NVSN2201", 0},
{}
};
MODULE_DEVICE_TABLE(acpi, nvsw_sn2201_acpi_ids);
static struct platform_driver nvsw_sn2201_driver = {
.probe = nvsw_sn2201_probe,
.remove = nvsw_sn2201_remove,
.driver = {
.name = "nvsw-sn2201",
.acpi_match_table = nvsw_sn2201_acpi_ids,
},
};
module_platform_driver(nvsw_sn2201_driver);
MODULE_AUTHOR("Nvidia");
MODULE_DESCRIPTION("Nvidia sn2201 platform driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("platform:nvsw-sn2201");
| linux-master | drivers/platform/mellanox/nvsw-sn2201.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Mellanox boot control driver
*
* This driver provides a sysfs interface for systems management
* software to manage reset-time actions.
*
* Copyright (C) 2019 Mellanox Technologies
*/
#include <linux/acpi.h>
#include <linux/arm-smccc.h>
#include <linux/delay.h>
#include <linux/if_ether.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include "mlxbf-bootctl.h"
#define MLXBF_BOOTCTL_SB_SECURE_MASK 0x03
#define MLXBF_BOOTCTL_SB_TEST_MASK 0x0c
#define MLXBF_SB_KEY_NUM 4
/* UUID used to probe ATF service. */
static const char *mlxbf_bootctl_svc_uuid_str =
"89c036b4-e7d7-11e6-8797-001aca00bfc4";
struct mlxbf_bootctl_name {
u32 value;
const char *name;
};
static struct mlxbf_bootctl_name boot_names[] = {
{ MLXBF_BOOTCTL_EXTERNAL, "external" },
{ MLXBF_BOOTCTL_EMMC, "emmc" },
{ MLNX_BOOTCTL_SWAP_EMMC, "swap_emmc" },
{ MLXBF_BOOTCTL_EMMC_LEGACY, "emmc_legacy" },
{ MLXBF_BOOTCTL_NONE, "none" },
};
static const char * const mlxbf_bootctl_lifecycle_states[] = {
[0] = "Production",
[1] = "GA Secured",
[2] = "GA Non-Secured",
[3] = "RMA",
};
/* Log header format. */
#define MLXBF_RSH_LOG_TYPE_MASK GENMASK_ULL(59, 56)
#define MLXBF_RSH_LOG_LEN_MASK GENMASK_ULL(54, 48)
#define MLXBF_RSH_LOG_LEVEL_MASK GENMASK_ULL(7, 0)
/* Log module ID and type (only MSG type in Linux driver for now). */
#define MLXBF_RSH_LOG_TYPE_MSG 0x04ULL
/* Log ctl/data register offset. */
#define MLXBF_RSH_SCRATCH_BUF_CTL_OFF 0
#define MLXBF_RSH_SCRATCH_BUF_DATA_OFF 0x10
/* Log message levels. */
enum {
MLXBF_RSH_LOG_INFO,
MLXBF_RSH_LOG_WARN,
MLXBF_RSH_LOG_ERR,
MLXBF_RSH_LOG_ASSERT
};
/* Mapped pointers for the RSH_BOOT_FIFO_DATA and RSH_BOOT_FIFO_COUNT registers. */
static void __iomem *mlxbf_rsh_boot_data;
static void __iomem *mlxbf_rsh_boot_cnt;
/* Mapped pointers for the rsh log semaphore/ctrl/data registers. */
static void __iomem *mlxbf_rsh_semaphore;
static void __iomem *mlxbf_rsh_scratch_buf_ctl;
static void __iomem *mlxbf_rsh_scratch_buf_data;
/* Rsh log levels. */
static const char * const mlxbf_rsh_log_level[] = {
"INFO", "WARN", "ERR", "ASSERT"};
static DEFINE_MUTEX(icm_ops_lock);
static DEFINE_MUTEX(os_up_lock);
static DEFINE_MUTEX(mfg_ops_lock);
/*
* Objects are stored within the MFG partition per type.
* Type 0 is not supported.
*/
enum {
MLNX_MFG_TYPE_OOB_MAC = 1,
MLNX_MFG_TYPE_OPN_0,
MLNX_MFG_TYPE_OPN_1,
MLNX_MFG_TYPE_OPN_2,
MLNX_MFG_TYPE_SKU_0,
MLNX_MFG_TYPE_SKU_1,
MLNX_MFG_TYPE_SKU_2,
MLNX_MFG_TYPE_MODL_0,
MLNX_MFG_TYPE_MODL_1,
MLNX_MFG_TYPE_MODL_2,
MLNX_MFG_TYPE_SN_0,
MLNX_MFG_TYPE_SN_1,
MLNX_MFG_TYPE_SN_2,
MLNX_MFG_TYPE_UUID_0,
MLNX_MFG_TYPE_UUID_1,
MLNX_MFG_TYPE_UUID_2,
MLNX_MFG_TYPE_UUID_3,
MLNX_MFG_TYPE_UUID_4,
MLNX_MFG_TYPE_REV,
};
#define MLNX_MFG_OPN_VAL_LEN 24
#define MLNX_MFG_SKU_VAL_LEN 24
#define MLNX_MFG_MODL_VAL_LEN 24
#define MLNX_MFG_SN_VAL_LEN 24
#define MLNX_MFG_UUID_VAL_LEN 40
#define MLNX_MFG_REV_VAL_LEN 8
#define MLNX_MFG_VAL_QWORD_CNT(type) \
(MLNX_MFG_##type##_VAL_LEN / sizeof(u64))
/*
* The MAC address consists of 6 bytes (2 digits each) separated by ':'.
* The expected format is: "XX:XX:XX:XX:XX:XX"
*/
#define MLNX_MFG_OOB_MAC_FORMAT_LEN \
((ETH_ALEN * 2) + (ETH_ALEN - 1))
/* ARM SMC call, which is atomic, so no locking is needed. */
static int mlxbf_bootctl_smc(unsigned int smc_op, int smc_arg)
{
struct arm_smccc_res res;
arm_smccc_smc(smc_op, smc_arg, 0, 0, 0, 0, 0, 0, &res);
return res.a0;
}
/* Return the action in integer or an error code. */
static int mlxbf_bootctl_reset_action_to_val(const char *action)
{
int i;
for (i = 0; i < ARRAY_SIZE(boot_names); i++)
if (sysfs_streq(boot_names[i].name, action))
return boot_names[i].value;
return -EINVAL;
}
/* Return the action in string. */
static const char *mlxbf_bootctl_action_to_string(int action)
{
int i;
for (i = 0; i < ARRAY_SIZE(boot_names); i++)
if (boot_names[i].value == action)
return boot_names[i].name;
return "invalid action";
}
static ssize_t post_reset_wdog_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
ret = mlxbf_bootctl_smc(MLXBF_BOOTCTL_GET_POST_RESET_WDOG, 0);
if (ret < 0)
return ret;
return sprintf(buf, "%d\n", ret);
}
static ssize_t post_reset_wdog_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned long value;
int ret;
ret = kstrtoul(buf, 10, &value);
if (ret)
return ret;
ret = mlxbf_bootctl_smc(MLXBF_BOOTCTL_SET_POST_RESET_WDOG, value);
if (ret < 0)
return ret;
return count;
}
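/* Common helpers translating between reset-action names and the SMC interface. */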
static ssize_t mlxbf_bootctl_show(int smc_op, char *buf)
{
int action;
action = mlxbf_bootctl_smc(smc_op, 0);
if (action < 0)
return action;
return sprintf(buf, "%s\n", mlxbf_bootctl_action_to_string(action));
}
static int mlxbf_bootctl_store(int smc_op, const char *buf, size_t count)
{
int ret, action;
action = mlxbf_bootctl_reset_action_to_val(buf);
if (action < 0)
return action;
ret = mlxbf_bootctl_smc(smc_op, action);
if (ret < 0)
return ret;
return count;
}
static ssize_t reset_action_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return mlxbf_bootctl_show(MLXBF_BOOTCTL_GET_RESET_ACTION, buf);
}
static ssize_t reset_action_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
return mlxbf_bootctl_store(MLXBF_BOOTCTL_SET_RESET_ACTION, buf, count);
}
static ssize_t second_reset_action_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return mlxbf_bootctl_show(MLXBF_BOOTCTL_GET_SECOND_RESET_ACTION, buf);
}
static ssize_t second_reset_action_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
return mlxbf_bootctl_store(MLXBF_BOOTCTL_SET_SECOND_RESET_ACTION, buf,
count);
}
static ssize_t lifecycle_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int lc_state;
lc_state = mlxbf_bootctl_smc(MLXBF_BOOTCTL_GET_TBB_FUSE_STATUS,
MLXBF_BOOTCTL_FUSE_STATUS_LIFECYCLE);
if (lc_state < 0)
return lc_state;
lc_state &=
MLXBF_BOOTCTL_SB_TEST_MASK | MLXBF_BOOTCTL_SB_SECURE_MASK;
/*
* If the test bits are set, we specify that the current state may be
* due to using the test bits.
*/
if (lc_state & MLXBF_BOOTCTL_SB_TEST_MASK) {
lc_state &= MLXBF_BOOTCTL_SB_SECURE_MASK;
return sprintf(buf, "%s(test)\n",
mlxbf_bootctl_lifecycle_states[lc_state]);
}
return sprintf(buf, "%s\n", mlxbf_bootctl_lifecycle_states[lc_state]);
}
static ssize_t secure_boot_fuse_state_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
int burnt, valid, key, key_state, buf_len = 0, upper_key_used = 0;
const char *status;
key_state = mlxbf_bootctl_smc(MLXBF_BOOTCTL_GET_TBB_FUSE_STATUS,
MLXBF_BOOTCTL_FUSE_STATUS_KEYS);
if (key_state < 0)
return key_state;
/*
* key_state contains the bits for 4 Key versions, loaded from eFuses
* after a hard reset. Lower 4 bits are a thermometer code indicating
* key programming has started for key n (0000 = none, 0001 = version 0,
* 0011 = version 1, 0111 = version 2, 1111 = version 3). Upper 4 bits
* are a thermometer code indicating key programming has completed for
* key n (same encodings as the start bits). This allows for detection
* of an interruption in the programming process which has left the key
* partially programmed (and thus invalid). The process is to burn the
* eFuse for the new key start bit, burn the key eFuses, then burn the
* eFuse for the new key complete bit.
*
* For example 0000_0000: no key valid, 0001_0001: key version 0 valid,
	 * 0011_0011: key version 1 valid, 0011_0111: key version 2 started
* programming but did not complete, etc. The most recent key for which
* both start and complete bit is set is loaded. On soft reset, this
* register is not modified.
*/
for (key = MLXBF_SB_KEY_NUM - 1; key >= 0; key--) {
burnt = key_state & BIT(key);
valid = key_state & BIT(key + MLXBF_SB_KEY_NUM);
if (burnt && valid)
upper_key_used = 1;
if (upper_key_used) {
if (burnt)
status = valid ? "Used" : "Wasted";
else
status = valid ? "Invalid" : "Skipped";
} else {
if (burnt)
status = valid ? "InUse" : "Incomplete";
else
status = valid ? "Invalid" : "Free";
}
buf_len += sprintf(buf + buf_len, "%d:%s ", key, status);
}
buf_len += sprintf(buf + buf_len, "\n");
return buf_len;
}
static ssize_t fw_reset_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned long key;
int err;
err = kstrtoul(buf, 16, &key);
if (err)
return err;
if (mlxbf_bootctl_smc(MLXBF_BOOTCTL_FW_RESET, key) < 0)
return -EINVAL;
return count;
}
/* Size (in 8-byte words) of the log buffer. */
#define RSH_SCRATCH_BUF_CTL_IDX_MASK 0x7f
/* 100ms timeout */
#define RSH_SCRATCH_BUF_POLL_TIMEOUT 100000
static int mlxbf_rsh_log_sem_lock(void)
{
unsigned long reg;
return readq_poll_timeout(mlxbf_rsh_semaphore, reg, !reg, 0,
RSH_SCRATCH_BUF_POLL_TIMEOUT);
}
static void mlxbf_rsh_log_sem_unlock(void)
{
writeq(0, mlxbf_rsh_semaphore);
}
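/*
 * Forward a user-supplied message to the rshim scratch-buffer log: an
 * optional severity prefix ("INFO"/"WARN"/"ERR"/"ASSERT") is parsed off,
 * then a header word and the message payload are written as 8-byte words
 * while holding the rshim semaphore.
 */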
static ssize_t rsh_log_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int rc, idx, num, len, level = MLXBF_RSH_LOG_INFO;
size_t size = count;
u64 data;
if (!size)
return -EINVAL;
if (!mlxbf_rsh_semaphore || !mlxbf_rsh_scratch_buf_ctl)
return -EOPNOTSUPP;
/* Ignore line break at the end. */
if (buf[size - 1] == '\n')
size--;
/* Check the message prefix. */
for (idx = 0; idx < ARRAY_SIZE(mlxbf_rsh_log_level); idx++) {
len = strlen(mlxbf_rsh_log_level[idx]);
if (len + 1 < size &&
!strncmp(buf, mlxbf_rsh_log_level[idx], len)) {
buf += len;
size -= len;
level = idx;
break;
}
}
/* Ignore leading spaces. */
while (size > 0 && buf[0] == ' ') {
size--;
buf++;
}
/* Take the semaphore. */
rc = mlxbf_rsh_log_sem_lock();
if (rc)
return rc;
/* Calculate how many words are available. */
idx = readq(mlxbf_rsh_scratch_buf_ctl);
num = min((int)DIV_ROUND_UP(size, sizeof(u64)),
RSH_SCRATCH_BUF_CTL_IDX_MASK - idx - 1);
if (num <= 0)
goto done;
	/* Write the header word. */
data = FIELD_PREP(MLXBF_RSH_LOG_TYPE_MASK, MLXBF_RSH_LOG_TYPE_MSG);
data |= FIELD_PREP(MLXBF_RSH_LOG_LEN_MASK, num);
data |= FIELD_PREP(MLXBF_RSH_LOG_LEVEL_MASK, level);
writeq(data, mlxbf_rsh_scratch_buf_data);
/* Write message. */
for (idx = 0; idx < num && size > 0; idx++) {
if (size < sizeof(u64)) {
data = 0;
memcpy(&data, buf, size);
size = 0;
} else {
memcpy(&data, buf, sizeof(u64));
size -= sizeof(u64);
buf += sizeof(u64);
}
writeq(data, mlxbf_rsh_scratch_buf_data);
}
done:
/* Release the semaphore. */
mlxbf_rsh_log_sem_unlock();
/* Ignore the rest if no more space. */
return count;
}
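/*
 * sysfs accessors for the "large ICM" size, queried from and set in the
 * firmware via SMC calls.
 */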
static ssize_t large_icm_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct arm_smccc_res res;
mutex_lock(&icm_ops_lock);
arm_smccc_smc(MLNX_HANDLE_GET_ICM_INFO, 0, 0, 0, 0,
0, 0, 0, &res);
mutex_unlock(&icm_ops_lock);
if (res.a0)
return -EPERM;
return snprintf(buf, PAGE_SIZE, "0x%lx", res.a1);
}
static ssize_t large_icm_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct arm_smccc_res res;
unsigned long icm_data;
int err;
err = kstrtoul(buf, MLXBF_LARGE_ICMC_MAX_STRING_SIZE, &icm_data);
if (err)
return err;
if ((icm_data != 0 && icm_data < MLXBF_LARGE_ICMC_SIZE_MIN) ||
icm_data > MLXBF_LARGE_ICMC_SIZE_MAX || icm_data % MLXBF_LARGE_ICMC_GRANULARITY)
return -EPERM;
mutex_lock(&icm_ops_lock);
arm_smccc_smc(MLNX_HANDLE_SET_ICM_INFO, icm_data, 0, 0, 0, 0, 0, 0, &res);
mutex_unlock(&icm_ops_lock);
return res.a0 ? -EPERM : count;
}
static ssize_t os_up_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct arm_smccc_res res;
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
if (val != 1)
return -EINVAL;
mutex_lock(&os_up_lock);
arm_smccc_smc(MLNX_HANDLE_OS_UP, 0, 0, 0, 0, 0, 0, 0, &res);
mutex_unlock(&os_up_lock);
return count;
}
static ssize_t oob_mac_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct arm_smccc_res res;
u8 *mac_byte_ptr;
mutex_lock(&mfg_ops_lock);
arm_smccc_smc(MLXBF_BOOTCTL_GET_MFG_INFO, MLNX_MFG_TYPE_OOB_MAC, 0, 0, 0,
0, 0, 0, &res);
mutex_unlock(&mfg_ops_lock);
if (res.a0)
return -EPERM;
mac_byte_ptr = (u8 *)&res.a1;
return sysfs_format_mac(buf, mac_byte_ptr, ETH_ALEN);
}
static ssize_t oob_mac_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned int byte[MLNX_MFG_OOB_MAC_FORMAT_LEN] = { 0 };
struct arm_smccc_res res;
int byte_idx, len;
u64 mac_addr = 0;
u8 *mac_byte_ptr;
if ((count - 1) != MLNX_MFG_OOB_MAC_FORMAT_LEN)
return -EINVAL;
len = sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x",
&byte[0], &byte[1], &byte[2],
&byte[3], &byte[4], &byte[5]);
if (len != ETH_ALEN)
return -EINVAL;
mac_byte_ptr = (u8 *)&mac_addr;
for (byte_idx = 0; byte_idx < ETH_ALEN; byte_idx++)
mac_byte_ptr[byte_idx] = (u8)byte[byte_idx];
mutex_lock(&mfg_ops_lock);
arm_smccc_smc(MLXBF_BOOTCTL_SET_MFG_INFO, MLNX_MFG_TYPE_OOB_MAC,
ETH_ALEN, mac_addr, 0, 0, 0, 0, &res);
mutex_unlock(&mfg_ops_lock);
return res.a0 ? -EPERM : count;
}
static ssize_t opn_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
u64 opn_data[MLNX_MFG_VAL_QWORD_CNT(OPN) + 1] = { 0 };
struct arm_smccc_res res;
int word;
mutex_lock(&mfg_ops_lock);
for (word = 0; word < MLNX_MFG_VAL_QWORD_CNT(OPN); word++) {
arm_smccc_smc(MLXBF_BOOTCTL_GET_MFG_INFO,
MLNX_MFG_TYPE_OPN_0 + word,
0, 0, 0, 0, 0, 0, &res);
if (res.a0) {
mutex_unlock(&mfg_ops_lock);
return -EPERM;
}
opn_data[word] = res.a1;
}
mutex_unlock(&mfg_ops_lock);
return snprintf(buf, PAGE_SIZE, "%s", (char *)opn_data);
}
static ssize_t opn_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
u64 opn[MLNX_MFG_VAL_QWORD_CNT(OPN)] = { 0 };
struct arm_smccc_res res;
int word;
if (count > MLNX_MFG_OPN_VAL_LEN)
return -EINVAL;
memcpy(opn, buf, count);
mutex_lock(&mfg_ops_lock);
for (word = 0; word < MLNX_MFG_VAL_QWORD_CNT(OPN); word++) {
arm_smccc_smc(MLXBF_BOOTCTL_SET_MFG_INFO,
MLNX_MFG_TYPE_OPN_0 + word,
sizeof(u64), opn[word], 0, 0, 0, 0, &res);
if (res.a0) {
mutex_unlock(&mfg_ops_lock);
return -EPERM;
}
}
mutex_unlock(&mfg_ops_lock);
return count;
}
static ssize_t sku_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
u64 sku_data[MLNX_MFG_VAL_QWORD_CNT(SKU) + 1] = { 0 };
struct arm_smccc_res res;
int word;
mutex_lock(&mfg_ops_lock);
for (word = 0; word < MLNX_MFG_VAL_QWORD_CNT(SKU); word++) {
arm_smccc_smc(MLXBF_BOOTCTL_GET_MFG_INFO,
MLNX_MFG_TYPE_SKU_0 + word,
0, 0, 0, 0, 0, 0, &res);
if (res.a0) {
mutex_unlock(&mfg_ops_lock);
return -EPERM;
}
sku_data[word] = res.a1;
}
mutex_unlock(&mfg_ops_lock);
return snprintf(buf, PAGE_SIZE, "%s", (char *)sku_data);
}
static ssize_t sku_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
u64 sku[MLNX_MFG_VAL_QWORD_CNT(SKU)] = { 0 };
struct arm_smccc_res res;
int word;
if (count > MLNX_MFG_SKU_VAL_LEN)
return -EINVAL;
memcpy(sku, buf, count);
mutex_lock(&mfg_ops_lock);
for (word = 0; word < MLNX_MFG_VAL_QWORD_CNT(SKU); word++) {
arm_smccc_smc(MLXBF_BOOTCTL_SET_MFG_INFO,
MLNX_MFG_TYPE_SKU_0 + word,
sizeof(u64), sku[word], 0, 0, 0, 0, &res);
if (res.a0) {
mutex_unlock(&mfg_ops_lock);
return -EPERM;
}
}
mutex_unlock(&mfg_ops_lock);
return count;
}
static ssize_t modl_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
u64 modl_data[MLNX_MFG_VAL_QWORD_CNT(MODL) + 1] = { 0 };
struct arm_smccc_res res;
int word;
mutex_lock(&mfg_ops_lock);
for (word = 0; word < MLNX_MFG_VAL_QWORD_CNT(MODL); word++) {
arm_smccc_smc(MLXBF_BOOTCTL_GET_MFG_INFO,
MLNX_MFG_TYPE_MODL_0 + word,
0, 0, 0, 0, 0, 0, &res);
if (res.a0) {
mutex_unlock(&mfg_ops_lock);
return -EPERM;
}
modl_data[word] = res.a1;
}
mutex_unlock(&mfg_ops_lock);
return snprintf(buf, PAGE_SIZE, "%s", (char *)modl_data);
}
static ssize_t modl_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
u64 modl[MLNX_MFG_VAL_QWORD_CNT(MODL)] = { 0 };
struct arm_smccc_res res;
int word;
if (count > MLNX_MFG_MODL_VAL_LEN)
return -EINVAL;
memcpy(modl, buf, count);
mutex_lock(&mfg_ops_lock);
for (word = 0; word < MLNX_MFG_VAL_QWORD_CNT(MODL); word++) {
arm_smccc_smc(MLXBF_BOOTCTL_SET_MFG_INFO,
MLNX_MFG_TYPE_MODL_0 + word,
sizeof(u64), modl[word], 0, 0, 0, 0, &res);
if (res.a0) {
mutex_unlock(&mfg_ops_lock);
return -EPERM;
}
}
mutex_unlock(&mfg_ops_lock);
return count;
}
static ssize_t sn_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
u64 sn_data[MLNX_MFG_VAL_QWORD_CNT(SN) + 1] = { 0 };
struct arm_smccc_res res;
int word;
mutex_lock(&mfg_ops_lock);
for (word = 0; word < MLNX_MFG_VAL_QWORD_CNT(SN); word++) {
arm_smccc_smc(MLXBF_BOOTCTL_GET_MFG_INFO,
MLNX_MFG_TYPE_SN_0 + word,
0, 0, 0, 0, 0, 0, &res);
if (res.a0) {
mutex_unlock(&mfg_ops_lock);
return -EPERM;
}
sn_data[word] = res.a1;
}
mutex_unlock(&mfg_ops_lock);
return snprintf(buf, PAGE_SIZE, "%s", (char *)sn_data);
}
static ssize_t sn_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
u64 sn[MLNX_MFG_VAL_QWORD_CNT(SN)] = { 0 };
struct arm_smccc_res res;
int word;
if (count > MLNX_MFG_SN_VAL_LEN)
return -EINVAL;
memcpy(sn, buf, count);
mutex_lock(&mfg_ops_lock);
for (word = 0; word < MLNX_MFG_VAL_QWORD_CNT(SN); word++) {
arm_smccc_smc(MLXBF_BOOTCTL_SET_MFG_INFO,
MLNX_MFG_TYPE_SN_0 + word,
sizeof(u64), sn[word], 0, 0, 0, 0, &res);
if (res.a0) {
mutex_unlock(&mfg_ops_lock);
return -EPERM;
}
}
mutex_unlock(&mfg_ops_lock);
return count;
}
static ssize_t uuid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
u64 uuid_data[MLNX_MFG_VAL_QWORD_CNT(UUID) + 1] = { 0 };
struct arm_smccc_res res;
int word;
mutex_lock(&mfg_ops_lock);
for (word = 0; word < MLNX_MFG_VAL_QWORD_CNT(UUID); word++) {
arm_smccc_smc(MLXBF_BOOTCTL_GET_MFG_INFO,
MLNX_MFG_TYPE_UUID_0 + word,
0, 0, 0, 0, 0, 0, &res);
if (res.a0) {
mutex_unlock(&mfg_ops_lock);
return -EPERM;
}
uuid_data[word] = res.a1;
}
mutex_unlock(&mfg_ops_lock);
return snprintf(buf, PAGE_SIZE, "%s", (char *)uuid_data);
}
static ssize_t uuid_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
u64 uuid[MLNX_MFG_VAL_QWORD_CNT(UUID)] = { 0 };
struct arm_smccc_res res;
int word;
if (count > MLNX_MFG_UUID_VAL_LEN)
return -EINVAL;
memcpy(uuid, buf, count);
mutex_lock(&mfg_ops_lock);
for (word = 0; word < MLNX_MFG_VAL_QWORD_CNT(UUID); word++) {
arm_smccc_smc(MLXBF_BOOTCTL_SET_MFG_INFO,
MLNX_MFG_TYPE_UUID_0 + word,
sizeof(u64), uuid[word], 0, 0, 0, 0, &res);
if (res.a0) {
mutex_unlock(&mfg_ops_lock);
return -EPERM;
}
}
mutex_unlock(&mfg_ops_lock);
return count;
}
static ssize_t rev_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
u64 rev_data[MLNX_MFG_VAL_QWORD_CNT(REV) + 1] = { 0 };
struct arm_smccc_res res;
int word;
mutex_lock(&mfg_ops_lock);
for (word = 0; word < MLNX_MFG_VAL_QWORD_CNT(REV); word++) {
arm_smccc_smc(MLXBF_BOOTCTL_GET_MFG_INFO,
MLNX_MFG_TYPE_REV + word,
0, 0, 0, 0, 0, 0, &res);
if (res.a0) {
mutex_unlock(&mfg_ops_lock);
return -EPERM;
}
rev_data[word] = res.a1;
}
mutex_unlock(&mfg_ops_lock);
return snprintf(buf, PAGE_SIZE, "%s", (char *)rev_data);
}
static ssize_t rev_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
u64 rev[MLNX_MFG_VAL_QWORD_CNT(REV)] = { 0 };
struct arm_smccc_res res;
int word;
if (count > MLNX_MFG_REV_VAL_LEN)
return -EINVAL;
memcpy(rev, buf, count);
mutex_lock(&mfg_ops_lock);
for (word = 0; word < MLNX_MFG_VAL_QWORD_CNT(REV); word++) {
arm_smccc_smc(MLXBF_BOOTCTL_SET_MFG_INFO,
MLNX_MFG_TYPE_REV + word,
sizeof(u64), rev[word], 0, 0, 0, 0, &res);
if (res.a0) {
mutex_unlock(&mfg_ops_lock);
return -EPERM;
}
}
mutex_unlock(&mfg_ops_lock);
return count;
}
static ssize_t mfg_lock_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct arm_smccc_res res;
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
if (val != 1)
return -EINVAL;
mutex_lock(&mfg_ops_lock);
arm_smccc_smc(MLXBF_BOOTCTL_LOCK_MFG_INFO, 0, 0, 0, 0, 0, 0, 0, &res);
mutex_unlock(&mfg_ops_lock);
return count;
}
static DEVICE_ATTR_RW(post_reset_wdog);
static DEVICE_ATTR_RW(reset_action);
static DEVICE_ATTR_RW(second_reset_action);
static DEVICE_ATTR_RO(lifecycle_state);
static DEVICE_ATTR_RO(secure_boot_fuse_state);
static DEVICE_ATTR_WO(fw_reset);
static DEVICE_ATTR_WO(rsh_log);
static DEVICE_ATTR_RW(large_icm);
static DEVICE_ATTR_WO(os_up);
static DEVICE_ATTR_RW(oob_mac);
static DEVICE_ATTR_RW(opn);
static DEVICE_ATTR_RW(sku);
static DEVICE_ATTR_RW(modl);
static DEVICE_ATTR_RW(sn);
static DEVICE_ATTR_RW(uuid);
static DEVICE_ATTR_RW(rev);
static DEVICE_ATTR_WO(mfg_lock);
static struct attribute *mlxbf_bootctl_attrs[] = {
&dev_attr_post_reset_wdog.attr,
&dev_attr_reset_action.attr,
&dev_attr_second_reset_action.attr,
&dev_attr_lifecycle_state.attr,
&dev_attr_secure_boot_fuse_state.attr,
&dev_attr_fw_reset.attr,
&dev_attr_rsh_log.attr,
&dev_attr_large_icm.attr,
&dev_attr_os_up.attr,
&dev_attr_oob_mac.attr,
&dev_attr_opn.attr,
&dev_attr_sku.attr,
&dev_attr_modl.attr,
&dev_attr_sn.attr,
&dev_attr_uuid.attr,
&dev_attr_rev.attr,
&dev_attr_mfg_lock.attr,
NULL
};
ATTRIBUTE_GROUPS(mlxbf_bootctl);
static const struct acpi_device_id mlxbf_bootctl_acpi_ids[] = {
{"MLNXBF04", 0},
{}
};
MODULE_DEVICE_TABLE(acpi, mlxbf_bootctl_acpi_ids);
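/*
 * Stream 8-byte words out of the boot FIFO; reading gives up once no new
 * data has arrived within 500ms.
 */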
static ssize_t mlxbf_bootctl_bootfifo_read(struct file *filp,
struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos,
size_t count)
{
unsigned long timeout = msecs_to_jiffies(500);
unsigned long expire = jiffies + timeout;
u64 data, cnt = 0;
char *p = buf;
while (count >= sizeof(data)) {
/* Give up reading if no more data within 500ms. */
if (!cnt) {
cnt = readq(mlxbf_rsh_boot_cnt);
if (!cnt) {
if (time_after(jiffies, expire))
break;
usleep_range(10, 50);
continue;
}
}
data = readq(mlxbf_rsh_boot_data);
memcpy(p, &data, sizeof(data));
count -= sizeof(data);
p += sizeof(data);
cnt--;
expire = jiffies + timeout;
}
return p - buf;
}
static struct bin_attribute mlxbf_bootctl_bootfifo_sysfs_attr = {
.attr = { .name = "bootfifo", .mode = 0400 },
.read = mlxbf_bootctl_bootfifo_read,
};
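/*
 * The SMC UID call returns the service GUID packed into res->a0..a3;
 * repack it into a guid_t and compare it with the expected UUID.
 */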
static bool mlxbf_bootctl_guid_match(const guid_t *guid,
const struct arm_smccc_res *res)
{
guid_t id = GUID_INIT(res->a0, res->a1, res->a1 >> 16,
res->a2, res->a2 >> 8, res->a2 >> 16,
res->a2 >> 24, res->a3, res->a3 >> 8,
res->a3 >> 16, res->a3 >> 24);
return guid_equal(guid, &id);
}
static int mlxbf_bootctl_probe(struct platform_device *pdev)
{
struct arm_smccc_res res = { 0 };
void __iomem *reg;
guid_t guid;
int ret;
/* Map the resource of the bootfifo data register. */
mlxbf_rsh_boot_data = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mlxbf_rsh_boot_data))
return PTR_ERR(mlxbf_rsh_boot_data);
/* Map the resource of the bootfifo counter register. */
mlxbf_rsh_boot_cnt = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(mlxbf_rsh_boot_cnt))
return PTR_ERR(mlxbf_rsh_boot_cnt);
/* Map the resource of the rshim semaphore register. */
mlxbf_rsh_semaphore = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(mlxbf_rsh_semaphore))
return PTR_ERR(mlxbf_rsh_semaphore);
/* Map the resource of the scratch buffer (log) registers. */
reg = devm_platform_ioremap_resource(pdev, 3);
if (IS_ERR(reg))
return PTR_ERR(reg);
mlxbf_rsh_scratch_buf_ctl = reg + MLXBF_RSH_SCRATCH_BUF_CTL_OFF;
mlxbf_rsh_scratch_buf_data = reg + MLXBF_RSH_SCRATCH_BUF_DATA_OFF;
/* Ensure we have the UUID we expect for this service. */
arm_smccc_smc(MLXBF_BOOTCTL_SIP_SVC_UID, 0, 0, 0, 0, 0, 0, 0, &res);
guid_parse(mlxbf_bootctl_svc_uuid_str, &guid);
if (!mlxbf_bootctl_guid_match(&guid, &res))
return -ENODEV;
/*
* When watchdog is used, it sets boot mode to MLXBF_BOOTCTL_SWAP_EMMC
* in case of boot failures. However it doesn't clear the state if there
* is no failure. Restore the default boot mode here to avoid any
* unnecessary boot partition swapping.
*/
ret = mlxbf_bootctl_smc(MLXBF_BOOTCTL_SET_RESET_ACTION,
MLXBF_BOOTCTL_EMMC);
if (ret < 0)
dev_warn(&pdev->dev, "Unable to reset the EMMC boot mode\n");
ret = sysfs_create_bin_file(&pdev->dev.kobj,
&mlxbf_bootctl_bootfifo_sysfs_attr);
if (ret)
pr_err("Unable to create bootfifo sysfs file, error %d\n", ret);
return ret;
}
static int mlxbf_bootctl_remove(struct platform_device *pdev)
{
sysfs_remove_bin_file(&pdev->dev.kobj,
&mlxbf_bootctl_bootfifo_sysfs_attr);
return 0;
}
static struct platform_driver mlxbf_bootctl_driver = {
.probe = mlxbf_bootctl_probe,
.remove = mlxbf_bootctl_remove,
.driver = {
.name = "mlxbf-bootctl",
.dev_groups = mlxbf_bootctl_groups,
.acpi_match_table = mlxbf_bootctl_acpi_ids,
}
};
module_platform_driver(mlxbf_bootctl_driver);
MODULE_DESCRIPTION("Mellanox boot control driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Mellanox Technologies");
| linux-master | drivers/platform/mellanox/mlxbf-bootctl.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Mellanox hotplug driver
*
* Copyright (C) 2016-2020 Mellanox Technologies
*/
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_data/mlxreg.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/string_helpers.h>
#include <linux/regmap.h>
#include <linux/workqueue.h>
/* Offset of event and mask registers from status register. */
#define MLXREG_HOTPLUG_EVENT_OFF 1
#define MLXREG_HOTPLUG_MASK_OFF 2
#define MLXREG_HOTPLUG_AGGR_MASK_OFF 1
/* ASIC good health mask. */
#define MLXREG_HOTPLUG_GOOD_HEALTH_MASK 0x02
#define MLXREG_HOTPLUG_ATTRS_MAX 128
#define MLXREG_HOTPLUG_NOT_ASSERT 3
/**
* struct mlxreg_hotplug_priv_data - platform private data:
* @irq: platform device interrupt number;
* @dev: basic device;
* @pdev: platform device;
* @plat: platform data;
* @regmap: register map handle;
* @dwork_irq: delayed work template;
* @lock: spin lock;
* @hwmon: hwmon device;
* @mlxreg_hotplug_attr: sysfs attributes array;
* @mlxreg_hotplug_dev_attr: sysfs sensor device attribute array;
* @group: sysfs attribute group;
* @groups: list of sysfs attribute group for hwmon registration;
* @cell: location of top aggregation interrupt register;
* @mask: top aggregation interrupt common mask;
* @aggr_cache: last value of aggregation register status;
 * @after_probe: flag indicating probing completion;
* @not_asserted: number of entries in workqueue with no signal assertion;
*/
struct mlxreg_hotplug_priv_data {
int irq;
struct device *dev;
struct platform_device *pdev;
struct mlxreg_hotplug_platform_data *plat;
struct regmap *regmap;
struct delayed_work dwork_irq;
spinlock_t lock; /* sync with interrupt */
struct device *hwmon;
struct attribute *mlxreg_hotplug_attr[MLXREG_HOTPLUG_ATTRS_MAX + 1];
struct sensor_device_attribute_2
mlxreg_hotplug_dev_attr[MLXREG_HOTPLUG_ATTRS_MAX];
struct attribute_group group;
const struct attribute_group *groups[2];
u32 cell;
u32 mask;
u32 aggr_cache;
bool after_probe;
u8 not_asserted;
};
/* Environment variables array for udev. */
static char *mlxreg_hotplug_udev_envp[] = { NULL, NULL };
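/* Send a udev "change" event of the form "<LABEL>=<0|1>" for the given signal. */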
static int
mlxreg_hotplug_udev_event_send(struct kobject *kobj,
struct mlxreg_core_data *data, bool action)
{
char event_str[MLXREG_CORE_LABEL_MAX_SIZE + 2];
char label[MLXREG_CORE_LABEL_MAX_SIZE] = { 0 };
mlxreg_hotplug_udev_envp[0] = event_str;
string_upper(label, data->label);
snprintf(event_str, MLXREG_CORE_LABEL_MAX_SIZE, "%s=%d", label, !!action);
return kobject_uevent_env(kobj, KOBJ_CHANGE, mlxreg_hotplug_udev_envp);
}
static void
mlxreg_hotplug_pdata_export(void *pdata, void *regmap)
{
struct mlxreg_core_hotplug_platform_data *dev_pdata = pdata;
/* Export regmap to underlying device. */
dev_pdata->regmap = regmap;
}
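/*
 * On signal assertion, notify user space and, depending on the configured
 * action, create either an I2C client or a platform device for the
 * hot-plugged entity.
 */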
static int mlxreg_hotplug_device_create(struct mlxreg_hotplug_priv_data *priv,
struct mlxreg_core_data *data,
enum mlxreg_hotplug_kind kind)
{
struct i2c_board_info *brdinfo = data->hpdev.brdinfo;
struct mlxreg_core_hotplug_platform_data *pdata;
struct i2c_client *client;
/* Notify user by sending hwmon uevent. */
mlxreg_hotplug_udev_event_send(&priv->hwmon->kobj, data, true);
/*
 * Return if the adapter number is negative, which can happen when a
 * hotplug event is not associated with a hotplug device.
 */
if (data->hpdev.nr < 0 && data->hpdev.action != MLXREG_HOTPLUG_DEVICE_NO_ACTION)
return 0;
pdata = dev_get_platdata(&priv->pdev->dev);
switch (data->hpdev.action) {
case MLXREG_HOTPLUG_DEVICE_DEFAULT_ACTION:
data->hpdev.adapter = i2c_get_adapter(data->hpdev.nr +
pdata->shift_nr);
if (!data->hpdev.adapter) {
dev_err(priv->dev, "Failed to get adapter for bus %d\n",
data->hpdev.nr + pdata->shift_nr);
return -EFAULT;
}
/* Export platform data to underlying device. */
if (brdinfo->platform_data)
mlxreg_hotplug_pdata_export(brdinfo->platform_data, pdata->regmap);
client = i2c_new_client_device(data->hpdev.adapter,
brdinfo);
if (IS_ERR(client)) {
dev_err(priv->dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
brdinfo->type, data->hpdev.nr +
pdata->shift_nr, brdinfo->addr);
i2c_put_adapter(data->hpdev.adapter);
data->hpdev.adapter = NULL;
return PTR_ERR(client);
}
data->hpdev.client = client;
break;
case MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION:
/* Export platform data to underlying device. */
if (data->hpdev.brdinfo && data->hpdev.brdinfo->platform_data)
mlxreg_hotplug_pdata_export(data->hpdev.brdinfo->platform_data,
pdata->regmap);
/* Pass parent hotplug device handle to underlying device. */
data->notifier = data->hpdev.notifier;
data->hpdev.pdev = platform_device_register_resndata(&priv->pdev->dev,
brdinfo->type,
data->hpdev.nr,
NULL, 0, data,
sizeof(*data));
if (IS_ERR(data->hpdev.pdev))
return PTR_ERR(data->hpdev.pdev);
break;
default:
break;
}
if (data->hpdev.notifier && data->hpdev.notifier->user_handler)
return data->hpdev.notifier->user_handler(data->hpdev.notifier->handle, kind, 1);
return 0;
}
static void
mlxreg_hotplug_device_destroy(struct mlxreg_hotplug_priv_data *priv,
struct mlxreg_core_data *data,
enum mlxreg_hotplug_kind kind)
{
/* Notify user by sending hwmon uevent. */
mlxreg_hotplug_udev_event_send(&priv->hwmon->kobj, data, false);
if (data->hpdev.notifier && data->hpdev.notifier->user_handler)
data->hpdev.notifier->user_handler(data->hpdev.notifier->handle, kind, 0);
switch (data->hpdev.action) {
case MLXREG_HOTPLUG_DEVICE_DEFAULT_ACTION:
if (data->hpdev.client) {
i2c_unregister_device(data->hpdev.client);
data->hpdev.client = NULL;
}
if (data->hpdev.adapter) {
i2c_put_adapter(data->hpdev.adapter);
data->hpdev.adapter = NULL;
}
break;
case MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION:
if (data->hpdev.pdev)
platform_device_unregister(data->hpdev.pdev);
break;
default:
break;
}
}
static ssize_t mlxreg_hotplug_attr_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct mlxreg_hotplug_priv_data *priv = dev_get_drvdata(dev);
struct mlxreg_core_hotplug_platform_data *pdata;
int index = to_sensor_dev_attr_2(attr)->index;
int nr = to_sensor_dev_attr_2(attr)->nr;
struct mlxreg_core_item *item;
struct mlxreg_core_data *data;
u32 regval;
int ret;
pdata = dev_get_platdata(&priv->pdev->dev);
item = pdata->items + nr;
data = item->data + index;
ret = regmap_read(priv->regmap, data->reg, &regval);
if (ret)
return ret;
if (item->health) {
regval &= data->mask;
} else {
/* Bit = 0 : functional if item->inversed is true. */
if (item->inversed)
regval = !(regval & data->mask);
else
regval = !!(regval & data->mask);
}
return sprintf(buf, "%u\n", regval);
}
#define PRIV_ATTR(i) priv->mlxreg_hotplug_attr[i]
#define PRIV_DEV_ATTR(i) priv->mlxreg_hotplug_dev_attr[i]
static int mlxreg_hotplug_item_label_index_get(u32 mask, u32 bit)
{
int i, j;
for (i = 0, j = -1; i <= bit; i++) {
if (mask & BIT(i))
j++;
}
return j;
}
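/*
 * Worked example: for mask = 0b1101 the set bits 0, 2 and 3 map to
 * data indices 0, 1 and 2, so a call with bit = 3 returns 2. Callers
 * only pass bits which are set in the mask (asserted status bits are
 * pre-masked), so the unset-bit case does not occur in practice.
 */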
static int mlxreg_hotplug_attr_init(struct mlxreg_hotplug_priv_data *priv)
{
struct mlxreg_core_hotplug_platform_data *pdata;
struct mlxreg_core_item *item;
struct mlxreg_core_data *data;
unsigned long mask;
u32 regval;
int id = 0, i, j, k, count, ret;
pdata = dev_get_platdata(&priv->pdev->dev);
item = pdata->items;
/* Go over all kinds of items - psu, pwr, fan. */
for (i = 0; i < pdata->counter; i++, item++) {
if (item->capability) {
/*
* Read group capability register to get actual number
* of interrupt capable components and set group mask
* accordingly.
*/
ret = regmap_read(priv->regmap, item->capability,
&regval);
if (ret)
return ret;
item->mask = GENMASK((regval & item->mask) - 1, 0);
}
data = item->data;
/* Go over all unmasked units within item. */
mask = item->mask;
k = 0;
count = item->ind ? item->ind : item->count;
for_each_set_bit(j, &mask, count) {
if (data->capability) {
/*
* Read capability register and skip non
* relevant attributes.
*/
ret = regmap_read(priv->regmap,
data->capability, &regval);
if (ret)
return ret;
if (!(regval & data->bit)) {
data++;
continue;
}
}
PRIV_ATTR(id) = &PRIV_DEV_ATTR(id).dev_attr.attr;
PRIV_ATTR(id)->name = devm_kasprintf(&priv->pdev->dev,
				     GFP_KERNEL,
				     "%s", data->label);
if (!PRIV_ATTR(id)->name) {
dev_err(priv->dev, "Memory allocation failed for attr %d.\n",
id);
return -ENOMEM;
}
PRIV_DEV_ATTR(id).dev_attr.attr.name =
PRIV_ATTR(id)->name;
PRIV_DEV_ATTR(id).dev_attr.attr.mode = 0444;
PRIV_DEV_ATTR(id).dev_attr.show =
mlxreg_hotplug_attr_show;
PRIV_DEV_ATTR(id).nr = i;
PRIV_DEV_ATTR(id).index = k;
sysfs_attr_init(&PRIV_DEV_ATTR(id).dev_attr.attr);
data++;
id++;
k++;
}
}
/* The attribute array is allocated statically as part of 'priv'. */
priv->group.attrs = priv->mlxreg_hotplug_attr;
priv->groups[0] = &priv->group;
priv->groups[1] = NULL;
return 0;
}
static void
mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv,
struct mlxreg_core_item *item)
{
struct mlxreg_core_data *data;
unsigned long asserted;
u32 regval, bit;
int ret;
/*
 * Validate that the item related to the received signal type exists.
 * This should never happen, except when some piece of hardware is
 * broken. In such a situation just produce an error message and
 * return. The caller must continue to handle the signals from other
 * devices, if any. Note that 'item' must not be dereferenced here,
 * since it is NULL on this path.
 */
if (unlikely(!item)) {
	dev_err(priv->dev, "False signal is received: no item found.\n");
	return;
}
/* Mask event. */
ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF,
0);
if (ret)
goto out;
/* Read status. */
ret = regmap_read(priv->regmap, item->reg, &regval);
if (ret)
goto out;
/* Set asserted bits and save last status. */
regval &= item->mask;
asserted = item->cache ^ regval;
item->cache = regval;
for_each_set_bit(bit, &asserted, 8) {
int pos;
pos = mlxreg_hotplug_item_label_index_get(item->mask, bit);
if (pos < 0)
goto out;
data = item->data + pos;
if (regval & BIT(bit)) {
if (item->inversed)
mlxreg_hotplug_device_destroy(priv, data, item->kind);
else
mlxreg_hotplug_device_create(priv, data, item->kind);
} else {
if (item->inversed)
mlxreg_hotplug_device_create(priv, data, item->kind);
else
mlxreg_hotplug_device_destroy(priv, data, item->kind);
}
}
/* Acknowledge event. */
ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_EVENT_OFF,
0);
if (ret)
goto out;
/* Unmask event. */
ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF,
item->mask);
out:
if (ret)
dev_err(priv->dev, "Failed to complete workqueue.\n");
}
static void
mlxreg_hotplug_health_work_helper(struct mlxreg_hotplug_priv_data *priv,
struct mlxreg_core_item *item)
{
struct mlxreg_core_data *data = item->data;
u32 regval;
int i, ret = 0;
for (i = 0; i < item->count; i++, data++) {
/* Mask event. */
ret = regmap_write(priv->regmap, data->reg +
MLXREG_HOTPLUG_MASK_OFF, 0);
if (ret)
goto out;
/* Read status. */
ret = regmap_read(priv->regmap, data->reg, &regval);
if (ret)
goto out;
regval &= data->mask;
if (item->cache == regval)
goto ack_event;
/*
 * The ASIC health indication is provided through two bits. A
 * value of 0x2 indicates that the ASIC has reached good health,
 * 0x0 indicates bad health or the dormant state, and 0x3
 * indicates the booting state. During ASIC reset it should pass
 * through the following states: dormant -> booting -> good.
 */
if (regval == MLXREG_HOTPLUG_GOOD_HEALTH_MASK) {
if (!data->attached) {
/*
* ASIC is in steady state. Connect associated
* device, if configured.
*/
mlxreg_hotplug_device_create(priv, data, item->kind);
data->attached = true;
}
} else {
if (data->attached) {
/*
* ASIC health is failed after ASIC has been
* in steady state. Disconnect associated
* device, if it has been connected.
*/
mlxreg_hotplug_device_destroy(priv, data, item->kind);
data->attached = false;
data->health_cntr = 0;
}
}
item->cache = regval;
ack_event:
/* Acknowledge event. */
ret = regmap_write(priv->regmap, data->reg +
MLXREG_HOTPLUG_EVENT_OFF, 0);
if (ret)
goto out;
/* Unmask event. */
ret = regmap_write(priv->regmap, data->reg +
MLXREG_HOTPLUG_MASK_OFF, data->mask);
if (ret)
goto out;
}
out:
if (ret)
dev_err(priv->dev, "Failed to complete workqueue.\n");
}
/*
* mlxreg_hotplug_work_handler - performs traversing of device interrupt
* registers according to the below hierarchy schema:
*
* Aggregation registers (status/mask)
* PSU registers: *---*
* *-----------------* | |
* |status/event/mask|-----> | * |
* *-----------------* | |
* Power registers: | |
* *-----------------* | |
* |status/event/mask|-----> | * |
* *-----------------* | |
* FAN registers: | |--> CPU
* *-----------------* | |
* |status/event/mask|-----> | * |
* *-----------------* | |
* ASIC registers: | |
* *-----------------* | |
* |status/event/mask|-----> | * |
* *-----------------* | |
* *---*
*
* When a system change is detected (FAN in/out, PSU in/out, power cable
* attached/detached, ASIC health good/bad), the relevant device is created
* or destroyed.
*/
static void mlxreg_hotplug_work_handler(struct work_struct *work)
{
struct mlxreg_core_hotplug_platform_data *pdata;
struct mlxreg_hotplug_priv_data *priv;
struct mlxreg_core_item *item;
u32 regval, aggr_asserted;
unsigned long flags;
int i, ret;
priv = container_of(work, struct mlxreg_hotplug_priv_data,
dwork_irq.work);
pdata = dev_get_platdata(&priv->pdev->dev);
item = pdata->items;
/* Mask aggregation event. */
ret = regmap_write(priv->regmap, pdata->cell +
MLXREG_HOTPLUG_AGGR_MASK_OFF, 0);
if (ret < 0)
goto out;
/* Read aggregation status. */
ret = regmap_read(priv->regmap, pdata->cell, &regval);
if (ret)
goto out;
regval &= pdata->mask;
aggr_asserted = priv->aggr_cache ^ regval;
priv->aggr_cache = regval;
/*
 * The handler has been invoked, but no assertion was detected at the
 * top aggregation status level. Set aggr_asserted to the mask value to
 * let the handler make an extra pass over all relevant signals and
 * recover any missed signal.
 */
if (priv->not_asserted == MLXREG_HOTPLUG_NOT_ASSERT) {
priv->not_asserted = 0;
aggr_asserted = pdata->mask;
}
if (!aggr_asserted)
goto unmask_event;
/* Handle topology and health configuration changes. */
for (i = 0; i < pdata->counter; i++, item++) {
if (aggr_asserted & item->aggr_mask) {
if (item->health)
mlxreg_hotplug_health_work_helper(priv, item);
else
mlxreg_hotplug_work_helper(priv, item);
}
}
spin_lock_irqsave(&priv->lock, flags);
/*
 * It is possible that some signals have been asserted while the
 * interrupt was masked by mlxreg_hotplug_work_handler; such signals
 * would be missed. To handle them, the delayed work is canceled and
 * the work task is re-scheduled for immediate execution, which allows
 * any missed signals to be handled. Otherwise the work handler just
 * validates that no new signals have been received during masking.
 */
cancel_delayed_work(&priv->dwork_irq);
schedule_delayed_work(&priv->dwork_irq, 0);
spin_unlock_irqrestore(&priv->lock, flags);
return;
unmask_event:
priv->not_asserted++;
/* Unmask aggregation event (no need acknowledge). */
ret = regmap_write(priv->regmap, pdata->cell +
MLXREG_HOTPLUG_AGGR_MASK_OFF, pdata->mask);
out:
if (ret)
dev_err(priv->dev, "Failed to complete workqueue.\n");
}
static int mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data *priv)
{
struct mlxreg_core_hotplug_platform_data *pdata;
struct mlxreg_core_item *item;
struct mlxreg_core_data *data;
u32 regval;
int i, j, ret;
pdata = dev_get_platdata(&priv->pdev->dev);
item = pdata->items;
for (i = 0; i < pdata->counter; i++, item++) {
/* Clear group presence event. */
ret = regmap_write(priv->regmap, item->reg +
MLXREG_HOTPLUG_EVENT_OFF, 0);
if (ret)
goto out;
/*
 * Verify whether the hardware configuration requires disabling
 * the interrupt capability for some of the components.
 */
data = item->data;
for (j = 0; j < item->count; j++, data++) {
/* Verify if the attribute has capability register. */
if (data->capability) {
/* Read capability register. */
ret = regmap_read(priv->regmap,
data->capability, &regval);
if (ret)
goto out;
if (!(regval & data->bit))
item->mask &= ~BIT(j);
}
}
/* Set group initial status as mask and unmask group event. */
if (item->inversed) {
item->cache = item->mask;
ret = regmap_write(priv->regmap, item->reg +
MLXREG_HOTPLUG_MASK_OFF,
item->mask);
if (ret)
goto out;
}
}
/* Keep aggregation initial status as zero and unmask events. */
ret = regmap_write(priv->regmap, pdata->cell +
MLXREG_HOTPLUG_AGGR_MASK_OFF, pdata->mask);
if (ret)
goto out;
/* Keep low aggregation initial status as zero and unmask events. */
if (pdata->cell_low) {
ret = regmap_write(priv->regmap, pdata->cell_low +
MLXREG_HOTPLUG_AGGR_MASK_OFF,
pdata->mask_low);
if (ret)
goto out;
}
/* Invoke the work handler to initialize the hotplug device settings. */
mlxreg_hotplug_work_handler(&priv->dwork_irq.work);
out:
if (ret)
dev_err(priv->dev, "Failed to set interrupts.\n");
enable_irq(priv->irq);
return ret;
}
static void mlxreg_hotplug_unset_irq(struct mlxreg_hotplug_priv_data *priv)
{
struct mlxreg_core_hotplug_platform_data *pdata;
struct mlxreg_core_item *item;
struct mlxreg_core_data *data;
int count, i, j;
pdata = dev_get_platdata(&priv->pdev->dev);
item = pdata->items;
disable_irq(priv->irq);
cancel_delayed_work_sync(&priv->dwork_irq);
/* Mask low aggregation event, if defined. */
if (pdata->cell_low)
regmap_write(priv->regmap, pdata->cell_low +
MLXREG_HOTPLUG_AGGR_MASK_OFF, 0);
/* Mask aggregation event. */
regmap_write(priv->regmap, pdata->cell + MLXREG_HOTPLUG_AGGR_MASK_OFF,
0);
/* Clear topology configurations. */
for (i = 0; i < pdata->counter; i++, item++) {
data = item->data;
/* Mask group presence event. */
regmap_write(priv->regmap, data->reg + MLXREG_HOTPLUG_MASK_OFF,
0);
/* Clear group presence event. */
regmap_write(priv->regmap, data->reg +
MLXREG_HOTPLUG_EVENT_OFF, 0);
/* Remove all the attached devices in group. */
count = item->count;
for (j = 0; j < count; j++, data++)
mlxreg_hotplug_device_destroy(priv, data, item->kind);
}
}
static irqreturn_t mlxreg_hotplug_irq_handler(int irq, void *dev)
{
struct mlxreg_hotplug_priv_data *priv;
priv = dev;
/* Schedule work task for immediate execution. */
schedule_delayed_work(&priv->dwork_irq, 0);
return IRQ_HANDLED;
}
static int mlxreg_hotplug_probe(struct platform_device *pdev)
{
struct mlxreg_core_hotplug_platform_data *pdata;
struct mlxreg_hotplug_priv_data *priv;
struct i2c_adapter *deferred_adap;
int err;
pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "Failed to get platform data.\n");
return -EINVAL;
}
/* Defer probing if the necessary adapter is not configured yet. */
deferred_adap = i2c_get_adapter(pdata->deferred_nr);
if (!deferred_adap)
return -EPROBE_DEFER;
i2c_put_adapter(deferred_adap);
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
if (pdata->irq) {
priv->irq = pdata->irq;
} else {
priv->irq = platform_get_irq(pdev, 0);
if (priv->irq < 0)
return priv->irq;
}
priv->regmap = pdata->regmap;
priv->dev = pdev->dev.parent;
priv->pdev = pdev;
err = devm_request_irq(&pdev->dev, priv->irq,
mlxreg_hotplug_irq_handler, IRQF_TRIGGER_FALLING
| IRQF_SHARED, "mlxreg-hotplug", priv);
if (err) {
dev_err(&pdev->dev, "Failed to request irq: %d\n", err);
return err;
}
disable_irq(priv->irq);
spin_lock_init(&priv->lock);
INIT_DELAYED_WORK(&priv->dwork_irq, mlxreg_hotplug_work_handler);
dev_set_drvdata(&pdev->dev, priv);
err = mlxreg_hotplug_attr_init(priv);
if (err) {
dev_err(&pdev->dev, "Failed to allocate attributes: %d\n",
err);
return err;
}
priv->hwmon = devm_hwmon_device_register_with_groups(&pdev->dev,
"mlxreg_hotplug", priv, priv->groups);
if (IS_ERR(priv->hwmon)) {
dev_err(&pdev->dev, "Failed to register hwmon device %ld\n",
PTR_ERR(priv->hwmon));
return PTR_ERR(priv->hwmon);
}
/* Perform initial interrupts setup. */
mlxreg_hotplug_set_irq(priv);
priv->after_probe = true;
return 0;
}
static int mlxreg_hotplug_remove(struct platform_device *pdev)
{
struct mlxreg_hotplug_priv_data *priv = dev_get_drvdata(&pdev->dev);
/* Clean interrupts setup. */
mlxreg_hotplug_unset_irq(priv);
devm_free_irq(&pdev->dev, priv->irq, priv);
return 0;
}
static struct platform_driver mlxreg_hotplug_driver = {
.driver = {
.name = "mlxreg-hotplug",
},
.probe = mlxreg_hotplug_probe,
.remove = mlxreg_hotplug_remove,
};
module_platform_driver(mlxreg_hotplug_driver);
MODULE_AUTHOR("Vadim Pasternak <[email protected]>");
MODULE_DESCRIPTION("Mellanox regmap hotplug platform driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("platform:mlxreg-hotplug");
| linux-master | drivers/platform/mellanox/mlxreg-hotplug.c |
// SPDX-License-Identifier: GPL-2.0-only OR Linux-OpenIB
/*
* Mellanox BlueField Performance Monitoring Counters driver
*
* This driver provides a sysfs interface for monitoring
* performance statistics on the BlueField SoC.
*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/acpi.h>
#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/errno.h>
#include <linux/hwmon.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include <uapi/linux/psci.h>
#define MLXBF_PMC_WRITE_REG_32 0x82000009
#define MLXBF_PMC_READ_REG_32 0x8200000A
#define MLXBF_PMC_WRITE_REG_64 0x8200000B
#define MLXBF_PMC_READ_REG_64 0x8200000C
#define MLXBF_PMC_SIP_SVC_UID 0x8200ff01
#define MLXBF_PMC_SIP_SVC_VERSION 0x8200ff03
#define MLXBF_PMC_SVC_REQ_MAJOR 0
#define MLXBF_PMC_SVC_MIN_MINOR 3
#define MLXBF_PMC_SMCCC_ACCESS_VIOLATION -4
#define MLXBF_PMC_EVENT_SET_BF1 0
#define MLXBF_PMC_EVENT_SET_BF2 1
#define MLXBF_PMC_EVENT_INFO_LEN 100
#define MLXBF_PMC_MAX_BLOCKS 30
#define MLXBF_PMC_MAX_ATTRS 30
#define MLXBF_PMC_INFO_SZ 4
#define MLXBF_PMC_REG_SIZE 8
#define MLXBF_PMC_L3C_REG_SIZE 4
#define MLXBF_PMC_TYPE_COUNTER 1
#define MLXBF_PMC_TYPE_REGISTER 0
#define MLXBF_PMC_PERFCTL 0
#define MLXBF_PMC_PERFEVT 1
#define MLXBF_PMC_PERFACC0 4
#define MLXBF_PMC_PERFMON_CONFIG_WR_R_B BIT(0)
#define MLXBF_PMC_PERFMON_CONFIG_STROBE BIT(1)
#define MLXBF_PMC_PERFMON_CONFIG_ADDR GENMASK_ULL(4, 2)
#define MLXBF_PMC_PERFMON_CONFIG_WDATA GENMASK_ULL(60, 5)
#define MLXBF_PMC_PERFCTL_FM0 GENMASK_ULL(18, 16)
#define MLXBF_PMC_PERFCTL_MS0 GENMASK_ULL(21, 20)
#define MLXBF_PMC_PERFCTL_ACCM0 GENMASK_ULL(26, 24)
#define MLXBF_PMC_PERFCTL_AD0 BIT(27)
#define MLXBF_PMC_PERFCTL_ETRIG0 GENMASK_ULL(29, 28)
#define MLXBF_PMC_PERFCTL_EB0 BIT(30)
#define MLXBF_PMC_PERFCTL_EN0 BIT(31)
#define MLXBF_PMC_PERFEVT_EVTSEL GENMASK_ULL(31, 24)
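/*
 * Sketch of the indirect access encoding used by the helpers below
 * (variable names are illustrative): to write 'ctl' into the internal
 * PERFCTL register of a counter, one 64-bit word is composed and
 * written to the counter's config register:
 *
 *	u64 cfg;
 *
 *	cfg  = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WDATA, ctl);
 *	cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR, MLXBF_PMC_PERFCTL);
 *	cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
 *	cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 1);
 *
 * Clearing WR_R_B instead turns the same strobe into a read request,
 * after which the result is fetched from the counter's value register.
 */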
#define MLXBF_PMC_L3C_PERF_CNT_CFG 0x0
#define MLXBF_PMC_L3C_PERF_CNT_SEL 0x10
#define MLXBF_PMC_L3C_PERF_CNT_SEL_1 0x14
#define MLXBF_PMC_L3C_PERF_CNT_LOW 0x40
#define MLXBF_PMC_L3C_PERF_CNT_HIGH 0x60
#define MLXBF_PMC_L3C_PERF_CNT_CFG_EN BIT(0)
#define MLXBF_PMC_L3C_PERF_CNT_CFG_RST BIT(1)
#define MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_0 GENMASK(5, 0)
#define MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_1 GENMASK(13, 8)
#define MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_2 GENMASK(21, 16)
#define MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_3 GENMASK(29, 24)
#define MLXBF_PMC_L3C_PERF_CNT_SEL_1_CNT_4 GENMASK(5, 0)
#define MLXBF_PMC_L3C_PERF_CNT_LOW_VAL GENMASK(31, 0)
#define MLXBF_PMC_L3C_PERF_CNT_HIGH_VAL GENMASK(24, 0)
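/*
 * Sketch: L3C counters 0-3 are selected through the 6-bit fields of
 * PERF_CNT_SEL and counter 4 through PERF_CNT_SEL_1. Programming, for
 * instance, counter 2 to event 0x17 ("HITS_BANK0") reduces to a
 * read-modify-write of the select register:
 *
 *	sel &= ~MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_2;
 *	sel |= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_2, 0x17);
 */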
/**
* struct mlxbf_pmc_attribute - Structure to hold attribute and block info
* for each sysfs entry
* @dev_attr: Device attribute struct
* @index: index to identify counter number within a block
* @nr: block number to which the sysfs belongs
*/
struct mlxbf_pmc_attribute {
struct device_attribute dev_attr;
int index;
int nr;
};
/**
* struct mlxbf_pmc_block_info - Structure to hold info for each HW block
*
* @mmio_base: The VA at which the PMC block is mapped
* @blk_size: Size of each mapped region
* @counters: Number of counters in the block
* @type: Type of counters in the block
* @attr_counter: Attributes for "counter" sysfs files
* @attr_event: Attributes for "event" sysfs files
* @attr_event_list: Attributes for "event_list" sysfs files
* @attr_enable: Attributes for "enable" sysfs files
* @block_attr: All attributes needed for the block
* @block_attr_grp: Attribute group for the block
*/
struct mlxbf_pmc_block_info {
void __iomem *mmio_base;
size_t blk_size;
size_t counters;
int type;
struct mlxbf_pmc_attribute *attr_counter;
struct mlxbf_pmc_attribute *attr_event;
struct mlxbf_pmc_attribute attr_event_list;
struct mlxbf_pmc_attribute attr_enable;
struct attribute *block_attr[MLXBF_PMC_MAX_ATTRS];
struct attribute_group block_attr_grp;
};
/**
* struct mlxbf_pmc_context - Structure to hold PMC context info
*
* @pdev: The kernel structure representing the device
* @total_blocks: Total number of blocks
* @tile_count: Number of tiles in the system
* @hwmon_dev: Hwmon device for bfperf
* @block_name: Block name
* @block: Block info
* @groups: Attribute groups from each block
* @svc_sreg_support: Whether SMCs are used to access performance registers
* @sreg_tbl_perf: Secure register access table number
* @event_set: Event set to use
*/
struct mlxbf_pmc_context {
struct platform_device *pdev;
uint32_t total_blocks;
uint32_t tile_count;
struct device *hwmon_dev;
const char *block_name[MLXBF_PMC_MAX_BLOCKS];
struct mlxbf_pmc_block_info block[MLXBF_PMC_MAX_BLOCKS];
const struct attribute_group *groups[MLXBF_PMC_MAX_BLOCKS];
bool svc_sreg_support;
uint32_t sreg_tbl_perf;
unsigned int event_set;
};
/**
* struct mlxbf_pmc_events - Structure to hold supported events for each block
* @evt_num: Event number used to program counters
* @evt_name: Name of the event
*/
struct mlxbf_pmc_events {
int evt_num;
char *evt_name;
};
static const struct mlxbf_pmc_events mlxbf_pmc_pcie_events[] = {
{ 0x0, "IN_P_PKT_CNT" },
{ 0x10, "IN_NP_PKT_CNT" },
{ 0x18, "IN_C_PKT_CNT" },
{ 0x20, "OUT_P_PKT_CNT" },
{ 0x28, "OUT_NP_PKT_CNT" },
{ 0x30, "OUT_C_PKT_CNT" },
{ 0x38, "IN_P_BYTE_CNT" },
{ 0x40, "IN_NP_BYTE_CNT" },
{ 0x48, "IN_C_BYTE_CNT" },
{ 0x50, "OUT_P_BYTE_CNT" },
{ 0x58, "OUT_NP_BYTE_CNT" },
{ 0x60, "OUT_C_BYTE_CNT" },
};
static const struct mlxbf_pmc_events mlxbf_pmc_smgen_events[] = {
{ 0x0, "AW_REQ" },
{ 0x1, "AW_BEATS" },
{ 0x2, "AW_TRANS" },
{ 0x3, "AW_RESP" },
{ 0x4, "AW_STL" },
{ 0x5, "AW_LAT" },
{ 0x6, "AW_REQ_TBU" },
{ 0x8, "AR_REQ" },
{ 0x9, "AR_BEATS" },
{ 0xa, "AR_TRANS" },
{ 0xb, "AR_STL" },
{ 0xc, "AR_LAT" },
{ 0xd, "AR_REQ_TBU" },
{ 0xe, "TBU_MISS" },
{ 0xf, "TX_DAT_AF" },
{ 0x10, "RX_DAT_AF" },
{ 0x11, "RETRYQ_CRED" },
};
static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_1[] = {
{ 0x0, "DISABLE" },
{ 0xa0, "TPIO_DATA_BEAT" },
{ 0xa1, "TDMA_DATA_BEAT" },
{ 0xa2, "MAP_DATA_BEAT" },
{ 0xa3, "TXMSG_DATA_BEAT" },
{ 0xa4, "TPIO_DATA_PACKET" },
{ 0xa5, "TDMA_DATA_PACKET" },
{ 0xa6, "MAP_DATA_PACKET" },
{ 0xa7, "TXMSG_DATA_PACKET" },
{ 0xa8, "TDMA_RT_AF" },
{ 0xa9, "TDMA_PBUF_MAC_AF" },
{ 0xaa, "TRIO_MAP_WRQ_BUF_EMPTY" },
{ 0xab, "TRIO_MAP_CPL_BUF_EMPTY" },
{ 0xac, "TRIO_MAP_RDQ0_BUF_EMPTY" },
{ 0xad, "TRIO_MAP_RDQ1_BUF_EMPTY" },
{ 0xae, "TRIO_MAP_RDQ2_BUF_EMPTY" },
{ 0xaf, "TRIO_MAP_RDQ3_BUF_EMPTY" },
{ 0xb0, "TRIO_MAP_RDQ4_BUF_EMPTY" },
{ 0xb1, "TRIO_MAP_RDQ5_BUF_EMPTY" },
{ 0xb2, "TRIO_MAP_RDQ6_BUF_EMPTY" },
{ 0xb3, "TRIO_MAP_RDQ7_BUF_EMPTY" },
};
static const struct mlxbf_pmc_events mlxbf_pmc_trio_events_2[] = {
{ 0x0, "DISABLE" },
{ 0xa0, "TPIO_DATA_BEAT" },
{ 0xa1, "TDMA_DATA_BEAT" },
{ 0xa2, "MAP_DATA_BEAT" },
{ 0xa3, "TXMSG_DATA_BEAT" },
{ 0xa4, "TPIO_DATA_PACKET" },
{ 0xa5, "TDMA_DATA_PACKET" },
{ 0xa6, "MAP_DATA_PACKET" },
{ 0xa7, "TXMSG_DATA_PACKET" },
{ 0xa8, "TDMA_RT_AF" },
{ 0xa9, "TDMA_PBUF_MAC_AF" },
{ 0xaa, "TRIO_MAP_WRQ_BUF_EMPTY" },
{ 0xab, "TRIO_MAP_CPL_BUF_EMPTY" },
{ 0xac, "TRIO_MAP_RDQ0_BUF_EMPTY" },
{ 0xad, "TRIO_MAP_RDQ1_BUF_EMPTY" },
{ 0xae, "TRIO_MAP_RDQ2_BUF_EMPTY" },
{ 0xaf, "TRIO_MAP_RDQ3_BUF_EMPTY" },
{ 0xb0, "TRIO_MAP_RDQ4_BUF_EMPTY" },
{ 0xb1, "TRIO_MAP_RDQ5_BUF_EMPTY" },
{ 0xb2, "TRIO_MAP_RDQ6_BUF_EMPTY" },
{ 0xb3, "TRIO_MAP_RDQ7_BUF_EMPTY" },
{ 0xb4, "TRIO_RING_TX_FLIT_CH0" },
{ 0xb5, "TRIO_RING_TX_FLIT_CH1" },
{ 0xb6, "TRIO_RING_TX_FLIT_CH2" },
{ 0xb7, "TRIO_RING_TX_FLIT_CH3" },
{ 0xb8, "TRIO_RING_TX_FLIT_CH4" },
{ 0xb9, "TRIO_RING_RX_FLIT_CH0" },
{ 0xba, "TRIO_RING_RX_FLIT_CH1" },
{ 0xbb, "TRIO_RING_RX_FLIT_CH2" },
{ 0xbc, "TRIO_RING_RX_FLIT_CH3" },
};
static const struct mlxbf_pmc_events mlxbf_pmc_ecc_events[] = {
{ 0x0, "DISABLE" },
{ 0x100, "ECC_SINGLE_ERROR_CNT" },
{ 0x104, "ECC_DOUBLE_ERROR_CNT" },
{ 0x114, "SERR_INJ" },
{ 0x118, "DERR_INJ" },
{ 0x124, "ECC_SINGLE_ERROR_0" },
{ 0x164, "ECC_DOUBLE_ERROR_0" },
{ 0x340, "DRAM_ECC_COUNT" },
{ 0x344, "DRAM_ECC_INJECT" },
{ 0x348, "DRAM_ECC_ERROR" },
};
static const struct mlxbf_pmc_events mlxbf_pmc_mss_events[] = {
{ 0x0, "DISABLE" },
{ 0xc0, "RXREQ_MSS" },
{ 0xc1, "RXDAT_MSS" },
{ 0xc2, "TXRSP_MSS" },
{ 0xc3, "TXDAT_MSS" },
};
static const struct mlxbf_pmc_events mlxbf_pmc_hnf_events[] = {
{ 0x0, "DISABLE" },
{ 0x45, "HNF_REQUESTS" },
{ 0x46, "HNF_REJECTS" },
{ 0x47, "ALL_BUSY" },
{ 0x48, "MAF_BUSY" },
{ 0x49, "MAF_REQUESTS" },
{ 0x4a, "RNF_REQUESTS" },
{ 0x4b, "REQUEST_TYPE" },
{ 0x4c, "MEMORY_READS" },
{ 0x4d, "MEMORY_WRITES" },
{ 0x4e, "VICTIM_WRITE" },
{ 0x4f, "POC_FULL" },
{ 0x50, "POC_FAIL" },
{ 0x51, "POC_SUCCESS" },
{ 0x52, "POC_WRITES" },
{ 0x53, "POC_READS" },
{ 0x54, "FORWARD" },
{ 0x55, "RXREQ_HNF" },
{ 0x56, "RXRSP_HNF" },
{ 0x57, "RXDAT_HNF" },
{ 0x58, "TXREQ_HNF" },
{ 0x59, "TXRSP_HNF" },
{ 0x5a, "TXDAT_HNF" },
{ 0x5b, "TXSNP_HNF" },
{ 0x5c, "INDEX_MATCH" },
{ 0x5d, "A72_ACCESS" },
{ 0x5e, "IO_ACCESS" },
{ 0x5f, "TSO_WRITE" },
{ 0x60, "TSO_CONFLICT" },
{ 0x61, "DIR_HIT" },
{ 0x62, "HNF_ACCEPTS" },
{ 0x63, "REQ_BUF_EMPTY" },
{ 0x64, "REQ_BUF_IDLE_MAF" },
{ 0x65, "TSO_NOARB" },
{ 0x66, "TSO_NOARB_CYCLES" },
{ 0x67, "MSS_NO_CREDIT" },
{ 0x68, "TXDAT_NO_LCRD" },
{ 0x69, "TXSNP_NO_LCRD" },
{ 0x6a, "TXRSP_NO_LCRD" },
{ 0x6b, "TXREQ_NO_LCRD" },
{ 0x6c, "TSO_CL_MATCH" },
{ 0x6d, "MEMORY_READS_BYPASS" },
{ 0x6e, "TSO_NOARB_TIMEOUT" },
{ 0x6f, "ALLOCATE" },
{ 0x70, "VICTIM" },
{ 0x71, "A72_WRITE" },
{ 0x72, "A72_READ" },
{ 0x73, "IO_WRITE" },
{ 0x74, "IO_READ" },
{ 0x75, "TSO_REJECT" },
{ 0x80, "TXREQ_RN" },
{ 0x81, "TXRSP_RN" },
{ 0x82, "TXDAT_RN" },
{ 0x83, "RXSNP_RN" },
{ 0x84, "RXRSP_RN" },
{ 0x85, "RXDAT_RN" },
};
static const struct mlxbf_pmc_events mlxbf_pmc_hnfnet_events[] = {
{ 0x0, "DISABLE" },
{ 0x12, "CDN_REQ" },
{ 0x13, "DDN_REQ" },
{ 0x14, "NDN_REQ" },
{ 0x15, "CDN_DIAG_N_OUT_OF_CRED" },
{ 0x16, "CDN_DIAG_S_OUT_OF_CRED" },
{ 0x17, "CDN_DIAG_E_OUT_OF_CRED" },
{ 0x18, "CDN_DIAG_W_OUT_OF_CRED" },
{ 0x19, "CDN_DIAG_C_OUT_OF_CRED" },
{ 0x1a, "CDN_DIAG_N_EGRESS" },
{ 0x1b, "CDN_DIAG_S_EGRESS" },
{ 0x1c, "CDN_DIAG_E_EGRESS" },
{ 0x1d, "CDN_DIAG_W_EGRESS" },
{ 0x1e, "CDN_DIAG_C_EGRESS" },
{ 0x1f, "CDN_DIAG_N_INGRESS" },
{ 0x20, "CDN_DIAG_S_INGRESS" },
{ 0x21, "CDN_DIAG_E_INGRESS" },
{ 0x22, "CDN_DIAG_W_INGRESS" },
{ 0x23, "CDN_DIAG_C_INGRESS" },
{ 0x24, "CDN_DIAG_CORE_SENT" },
{ 0x25, "DDN_DIAG_N_OUT_OF_CRED" },
{ 0x26, "DDN_DIAG_S_OUT_OF_CRED" },
{ 0x27, "DDN_DIAG_E_OUT_OF_CRED" },
{ 0x28, "DDN_DIAG_W_OUT_OF_CRED" },
{ 0x29, "DDN_DIAG_C_OUT_OF_CRED" },
{ 0x2a, "DDN_DIAG_N_EGRESS" },
{ 0x2b, "DDN_DIAG_S_EGRESS" },
{ 0x2c, "DDN_DIAG_E_EGRESS" },
{ 0x2d, "DDN_DIAG_W_EGRESS" },
{ 0x2e, "DDN_DIAG_C_EGRESS" },
{ 0x2f, "DDN_DIAG_N_INGRESS" },
{ 0x30, "DDN_DIAG_S_INGRESS" },
{ 0x31, "DDN_DIAG_E_INGRESS" },
{ 0x32, "DDN_DIAG_W_INGRESS" },
{ 0x33, "DDN_DIAG_C_INGRESS" },
{ 0x34, "DDN_DIAG_CORE_SENT" },
{ 0x35, "NDN_DIAG_N_OUT_OF_CRED" },
{ 0x36, "NDN_DIAG_S_OUT_OF_CRED" },
{ 0x37, "NDN_DIAG_E_OUT_OF_CRED" },
{ 0x38, "NDN_DIAG_W_OUT_OF_CRED" },
{ 0x39, "NDN_DIAG_C_OUT_OF_CRED" },
{ 0x3a, "NDN_DIAG_N_EGRESS" },
{ 0x3b, "NDN_DIAG_S_EGRESS" },
{ 0x3c, "NDN_DIAG_E_EGRESS" },
{ 0x3d, "NDN_DIAG_W_EGRESS" },
{ 0x3e, "NDN_DIAG_C_EGRESS" },
{ 0x3f, "NDN_DIAG_N_INGRESS" },
{ 0x40, "NDN_DIAG_S_INGRESS" },
{ 0x41, "NDN_DIAG_E_INGRESS" },
{ 0x42, "NDN_DIAG_W_INGRESS" },
{ 0x43, "NDN_DIAG_C_INGRESS" },
{ 0x44, "NDN_DIAG_CORE_SENT" },
};
static const struct mlxbf_pmc_events mlxbf_pmc_l3c_events[] = {
{ 0x00, "DISABLE" },
{ 0x01, "CYCLES" },
{ 0x02, "TOTAL_RD_REQ_IN" },
{ 0x03, "TOTAL_WR_REQ_IN" },
{ 0x04, "TOTAL_WR_DBID_ACK" },
{ 0x05, "TOTAL_WR_DATA_IN" },
{ 0x06, "TOTAL_WR_COMP" },
{ 0x07, "TOTAL_RD_DATA_OUT" },
{ 0x08, "TOTAL_CDN_REQ_IN_BANK0" },
{ 0x09, "TOTAL_CDN_REQ_IN_BANK1" },
{ 0x0a, "TOTAL_DDN_REQ_IN_BANK0" },
{ 0x0b, "TOTAL_DDN_REQ_IN_BANK1" },
{ 0x0c, "TOTAL_EMEM_RD_RES_IN_BANK0" },
{ 0x0d, "TOTAL_EMEM_RD_RES_IN_BANK1" },
{ 0x0e, "TOTAL_CACHE_RD_RES_IN_BANK0" },
{ 0x0f, "TOTAL_CACHE_RD_RES_IN_BANK1" },
{ 0x10, "TOTAL_EMEM_RD_REQ_BANK0" },
{ 0x11, "TOTAL_EMEM_RD_REQ_BANK1" },
{ 0x12, "TOTAL_EMEM_WR_REQ_BANK0" },
{ 0x13, "TOTAL_EMEM_WR_REQ_BANK1" },
{ 0x14, "TOTAL_RD_REQ_OUT" },
{ 0x15, "TOTAL_WR_REQ_OUT" },
{ 0x16, "TOTAL_RD_RES_IN" },
{ 0x17, "HITS_BANK0" },
{ 0x18, "HITS_BANK1" },
{ 0x19, "MISSES_BANK0" },
{ 0x1a, "MISSES_BANK1" },
{ 0x1b, "ALLOCATIONS_BANK0" },
{ 0x1c, "ALLOCATIONS_BANK1" },
{ 0x1d, "EVICTIONS_BANK0" },
{ 0x1e, "EVICTIONS_BANK1" },
{ 0x1f, "DBID_REJECT" },
{ 0x20, "WRDB_REJECT_BANK0" },
{ 0x21, "WRDB_REJECT_BANK1" },
{ 0x22, "CMDQ_REJECT_BANK0" },
{ 0x23, "CMDQ_REJECT_BANK1" },
{ 0x24, "COB_REJECT_BANK0" },
{ 0x25, "COB_REJECT_BANK1" },
{ 0x26, "TRB_REJECT_BANK0" },
{ 0x27, "TRB_REJECT_BANK1" },
{ 0x28, "TAG_REJECT_BANK0" },
{ 0x29, "TAG_REJECT_BANK1" },
{ 0x2a, "ANY_REJECT_BANK0" },
{ 0x2b, "ANY_REJECT_BANK1" },
};
static struct mlxbf_pmc_context *pmc;
/* UUID used to probe ATF service. */
static const char *mlxbf_pmc_svc_uuid_str = "89c036b4-e7d7-11e6-8797-001aca00bfc4";
/* Calls an SMC to access a performance register */
static int mlxbf_pmc_secure_read(void __iomem *addr, uint32_t command,
uint64_t *result)
{
struct arm_smccc_res res;
int status, err = 0;
arm_smccc_smc(command, pmc->sreg_tbl_perf, (uintptr_t)addr, 0, 0, 0, 0,
0, &res);
status = res.a0;
switch (status) {
case PSCI_RET_NOT_SUPPORTED:
err = -EINVAL;
break;
case MLXBF_PMC_SMCCC_ACCESS_VIOLATION:
err = -EACCES;
break;
default:
*result = res.a1;
break;
}
return err;
}
/* Read from a performance counter */
static int mlxbf_pmc_read(void __iomem *addr, uint32_t command,
uint64_t *result)
{
if (pmc->svc_sreg_support)
return mlxbf_pmc_secure_read(addr, command, result);
if (command == MLXBF_PMC_READ_REG_32)
*result = readl(addr);
else
*result = readq(addr);
return 0;
}
/* Convenience function for 32-bit reads */
static int mlxbf_pmc_readl(void __iomem *addr, uint32_t *result)
{
uint64_t read_out;
int status;
status = mlxbf_pmc_read(addr, MLXBF_PMC_READ_REG_32, &read_out);
if (status)
return status;
*result = (uint32_t)read_out;
return 0;
}
/* Calls an SMC to access a performance register */
static int mlxbf_pmc_secure_write(void __iomem *addr, uint32_t command,
uint64_t value)
{
struct arm_smccc_res res;
int status, err = 0;
arm_smccc_smc(command, pmc->sreg_tbl_perf, value, (uintptr_t)addr, 0, 0,
0, 0, &res);
status = res.a0;
switch (status) {
case PSCI_RET_NOT_SUPPORTED:
err = -EINVAL;
break;
case MLXBF_PMC_SMCCC_ACCESS_VIOLATION:
err = -EACCES;
break;
}
return err;
}
/* Write to a performance counter */
static int mlxbf_pmc_write(void __iomem *addr, int command, uint64_t value)
{
if (pmc->svc_sreg_support)
return mlxbf_pmc_secure_write(addr, command, value);
if (command == MLXBF_PMC_WRITE_REG_32)
writel(value, addr);
else
writeq(value, addr);
return 0;
}
/* Check if the register offset is within the mapped region for the block */
static bool mlxbf_pmc_valid_range(int blk_num, uint32_t offset)
{
/* 'offset' is unsigned, so no lower-bound check is needed. */
if (!(offset % MLXBF_PMC_REG_SIZE) &&
    (offset + MLXBF_PMC_REG_SIZE <= pmc->block[blk_num].blk_size))
return true; /* inside the mapped PMC space */
return false;
}
/* Get the event list corresponding to a certain block */
static const struct mlxbf_pmc_events *mlxbf_pmc_event_list(const char *blk,
int *size)
{
const struct mlxbf_pmc_events *events;
if (strstr(blk, "tilenet")) {
events = mlxbf_pmc_hnfnet_events;
*size = ARRAY_SIZE(mlxbf_pmc_hnfnet_events);
} else if (strstr(blk, "tile")) {
events = mlxbf_pmc_hnf_events;
*size = ARRAY_SIZE(mlxbf_pmc_hnf_events);
} else if (strstr(blk, "triogen")) {
events = mlxbf_pmc_smgen_events;
*size = ARRAY_SIZE(mlxbf_pmc_smgen_events);
} else if (strstr(blk, "trio")) {
switch (pmc->event_set) {
case MLXBF_PMC_EVENT_SET_BF1:
events = mlxbf_pmc_trio_events_1;
*size = ARRAY_SIZE(mlxbf_pmc_trio_events_1);
break;
case MLXBF_PMC_EVENT_SET_BF2:
events = mlxbf_pmc_trio_events_2;
*size = ARRAY_SIZE(mlxbf_pmc_trio_events_2);
break;
default:
events = NULL;
*size = 0;
break;
}
} else if (strstr(blk, "mss")) {
events = mlxbf_pmc_mss_events;
*size = ARRAY_SIZE(mlxbf_pmc_mss_events);
} else if (strstr(blk, "ecc")) {
events = mlxbf_pmc_ecc_events;
*size = ARRAY_SIZE(mlxbf_pmc_ecc_events);
} else if (strstr(blk, "pcie")) {
events = mlxbf_pmc_pcie_events;
*size = ARRAY_SIZE(mlxbf_pmc_pcie_events);
} else if (strstr(blk, "l3cache")) {
events = mlxbf_pmc_l3c_events;
*size = ARRAY_SIZE(mlxbf_pmc_l3c_events);
} else if (strstr(blk, "gic")) {
events = mlxbf_pmc_smgen_events;
*size = ARRAY_SIZE(mlxbf_pmc_smgen_events);
} else if (strstr(blk, "smmu")) {
events = mlxbf_pmc_smgen_events;
*size = ARRAY_SIZE(mlxbf_pmc_smgen_events);
} else {
events = NULL;
*size = 0;
}
return events;
}
/* Get the event number given the name */
static int mlxbf_pmc_get_event_num(const char *blk, const char *evt)
{
const struct mlxbf_pmc_events *events;
int i, size;
events = mlxbf_pmc_event_list(blk, &size);
if (!events)
return -EINVAL;
for (i = 0; i < size; ++i) {
/* sysfs_streq() tolerates the trailing newline from sysfs writes. */
if (sysfs_streq(evt, events[i].evt_name))
return events[i].evt_num;
}
return -ENODEV;
}
/* Get the event name given the number */
static char *mlxbf_pmc_get_event_name(const char *blk, int evt)
{
const struct mlxbf_pmc_events *events;
int i, size;
events = mlxbf_pmc_event_list(blk, &size);
if (!events)
return NULL;
for (i = 0; i < size; ++i) {
if (evt == events[i].evt_num)
return events[i].evt_name;
}
return NULL;
}
/* Method to enable/disable/reset l3cache counters */
static int mlxbf_pmc_config_l3_counters(int blk_num, bool enable, bool reset)
{
uint32_t perfcnt_cfg = 0;
if (enable)
perfcnt_cfg |= MLXBF_PMC_L3C_PERF_CNT_CFG_EN;
if (reset)
perfcnt_cfg |= MLXBF_PMC_L3C_PERF_CNT_CFG_RST;
return mlxbf_pmc_write(pmc->block[blk_num].mmio_base +
MLXBF_PMC_L3C_PERF_CNT_CFG,
MLXBF_PMC_WRITE_REG_32, perfcnt_cfg);
}
/* Method to handle l3cache counter programming */
static int mlxbf_pmc_program_l3_counter(int blk_num, uint32_t cnt_num,
uint32_t evt)
{
uint32_t perfcnt_sel_1 = 0;
uint32_t perfcnt_sel = 0;
uint32_t *wordaddr;
void __iomem *pmcaddr;
int ret;
/* Disable all counters before programming them */
if (mlxbf_pmc_config_l3_counters(blk_num, false, false))
return -EINVAL;
/* Select appropriate register information */
switch (cnt_num) {
case 0 ... 3:
pmcaddr = pmc->block[blk_num].mmio_base +
MLXBF_PMC_L3C_PERF_CNT_SEL;
wordaddr = &perfcnt_sel;
break;
case 4:
pmcaddr = pmc->block[blk_num].mmio_base +
MLXBF_PMC_L3C_PERF_CNT_SEL_1;
wordaddr = &perfcnt_sel_1;
break;
default:
return -EINVAL;
}
ret = mlxbf_pmc_readl(pmcaddr, wordaddr);
if (ret)
return ret;
switch (cnt_num) {
case 0:
perfcnt_sel &= ~MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_0;
perfcnt_sel |= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_0,
evt);
break;
case 1:
perfcnt_sel &= ~MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_1;
perfcnt_sel |= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_1,
evt);
break;
case 2:
perfcnt_sel &= ~MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_2;
perfcnt_sel |= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_2,
evt);
break;
case 3:
perfcnt_sel &= ~MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_3;
perfcnt_sel |= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_3,
evt);
break;
case 4:
perfcnt_sel_1 &= ~MLXBF_PMC_L3C_PERF_CNT_SEL_1_CNT_4;
perfcnt_sel_1 |= FIELD_PREP(MLXBF_PMC_L3C_PERF_CNT_SEL_1_CNT_4,
evt);
break;
default:
return -EINVAL;
}
return mlxbf_pmc_write(pmcaddr, MLXBF_PMC_WRITE_REG_32, *wordaddr);
}
/* Method to program a counter to monitor an event */
static int mlxbf_pmc_program_counter(int blk_num, uint32_t cnt_num,
uint32_t evt, bool is_l3)
{
uint64_t perfctl, perfevt, perfmon_cfg;
if (cnt_num >= pmc->block[blk_num].counters)
return -ENODEV;
if (is_l3)
return mlxbf_pmc_program_l3_counter(blk_num, cnt_num, evt);
/* Configure the counter */
perfctl = FIELD_PREP(MLXBF_PMC_PERFCTL_EN0, 1);
perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_EB0, 0);
perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_ETRIG0, 1);
perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_AD0, 0);
perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_ACCM0, 0);
perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_MS0, 0);
perfctl |= FIELD_PREP(MLXBF_PMC_PERFCTL_FM0, 0);
perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WDATA, perfctl);
perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
MLXBF_PMC_PERFCTL);
perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 1);
if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base +
cnt_num * MLXBF_PMC_REG_SIZE,
MLXBF_PMC_WRITE_REG_64, perfmon_cfg))
return -EFAULT;
/* Select the event */
perfevt = FIELD_PREP(MLXBF_PMC_PERFEVT_EVTSEL, evt);
perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WDATA, perfevt);
perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
MLXBF_PMC_PERFEVT);
perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 1);
if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base +
cnt_num * MLXBF_PMC_REG_SIZE,
MLXBF_PMC_WRITE_REG_64, perfmon_cfg))
return -EFAULT;
/* Clear the accumulator */
perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
MLXBF_PMC_PERFACC0);
perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 1);
if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base +
cnt_num * MLXBF_PMC_REG_SIZE,
MLXBF_PMC_WRITE_REG_64, perfmon_cfg))
return -EFAULT;
return 0;
}
/* Method to handle l3 counter reads */
static int mlxbf_pmc_read_l3_counter(int blk_num, uint32_t cnt_num,
uint64_t *result)
{
uint32_t perfcnt_low = 0, perfcnt_high = 0;
uint64_t value;
int status = 0;
status = mlxbf_pmc_readl(pmc->block[blk_num].mmio_base +
MLXBF_PMC_L3C_PERF_CNT_LOW +
cnt_num * MLXBF_PMC_L3C_REG_SIZE,
&perfcnt_low);
if (status)
return status;
status = mlxbf_pmc_readl(pmc->block[blk_num].mmio_base +
MLXBF_PMC_L3C_PERF_CNT_HIGH +
cnt_num * MLXBF_PMC_L3C_REG_SIZE,
&perfcnt_high);
if (status)
return status;
value = perfcnt_high;
value = value << 32;
value |= perfcnt_low;
*result = value;
return 0;
}
/* Method to read the counter value */
static int mlxbf_pmc_read_counter(int blk_num, uint32_t cnt_num, bool is_l3,
uint64_t *result)
{
uint32_t perfcfg_offset, perfval_offset;
uint64_t perfmon_cfg;
int status;
if (cnt_num >= pmc->block[blk_num].counters)
return -EINVAL;
if (is_l3)
return mlxbf_pmc_read_l3_counter(blk_num, cnt_num, result);
perfcfg_offset = cnt_num * MLXBF_PMC_REG_SIZE;
perfval_offset = perfcfg_offset +
pmc->block[blk_num].counters * MLXBF_PMC_REG_SIZE;
/* Set counter in "read" mode */
perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
MLXBF_PMC_PERFACC0);
perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 0);
status = mlxbf_pmc_write(pmc->block[blk_num].mmio_base + perfcfg_offset,
MLXBF_PMC_WRITE_REG_64, perfmon_cfg);
if (status)
return status;
/* Get the counter value */
return mlxbf_pmc_read(pmc->block[blk_num].mmio_base + perfval_offset,
MLXBF_PMC_READ_REG_64, result);
}
/* Method to read L3 block event */
static int mlxbf_pmc_read_l3_event(int blk_num, uint32_t cnt_num,
uint64_t *result)
{
uint32_t perfcnt_sel = 0, perfcnt_sel_1 = 0;
uint32_t *wordaddr;
void __iomem *pmcaddr;
uint64_t evt;
/* Select appropriate register information */
switch (cnt_num) {
case 0 ... 3:
pmcaddr = pmc->block[blk_num].mmio_base +
MLXBF_PMC_L3C_PERF_CNT_SEL;
wordaddr = &perfcnt_sel;
break;
case 4:
pmcaddr = pmc->block[blk_num].mmio_base +
MLXBF_PMC_L3C_PERF_CNT_SEL_1;
wordaddr = &perfcnt_sel_1;
break;
default:
return -EINVAL;
}
if (mlxbf_pmc_readl(pmcaddr, wordaddr))
return -EINVAL;
/* Read from appropriate register field for the counter */
switch (cnt_num) {
case 0:
evt = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_0, perfcnt_sel);
break;
case 1:
evt = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_1, perfcnt_sel);
break;
case 2:
evt = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_2, perfcnt_sel);
break;
case 3:
evt = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_CNT_3, perfcnt_sel);
break;
case 4:
evt = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_SEL_1_CNT_4,
perfcnt_sel_1);
break;
default:
return -EINVAL;
}
*result = evt;
return 0;
}
/* Method to find the event currently being monitored by a counter */
static int mlxbf_pmc_read_event(int blk_num, uint32_t cnt_num, bool is_l3,
uint64_t *result)
{
uint32_t perfcfg_offset, perfval_offset;
uint64_t perfmon_cfg, perfevt;
if (cnt_num >= pmc->block[blk_num].counters)
return -EINVAL;
if (is_l3)
return mlxbf_pmc_read_l3_event(blk_num, cnt_num, result);
perfcfg_offset = cnt_num * MLXBF_PMC_REG_SIZE;
perfval_offset = perfcfg_offset +
pmc->block[blk_num].counters * MLXBF_PMC_REG_SIZE;
/* Set counter in "read" mode */
perfmon_cfg = FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_ADDR,
MLXBF_PMC_PERFEVT);
perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_STROBE, 1);
perfmon_cfg |= FIELD_PREP(MLXBF_PMC_PERFMON_CONFIG_WR_R_B, 0);
if (mlxbf_pmc_write(pmc->block[blk_num].mmio_base + perfcfg_offset,
MLXBF_PMC_WRITE_REG_64, perfmon_cfg))
return -EFAULT;
/* Get the event number */
if (mlxbf_pmc_read(pmc->block[blk_num].mmio_base + perfval_offset,
MLXBF_PMC_READ_REG_64, &perfevt))
return -EFAULT;
*result = FIELD_GET(MLXBF_PMC_PERFEVT_EVTSEL, perfevt);
return 0;
}
/* Method to read a register */
static int mlxbf_pmc_read_reg(int blk_num, uint32_t offset, uint64_t *result)
{
uint32_t ecc_out;
if (strstr(pmc->block_name[blk_num], "ecc")) {
if (mlxbf_pmc_readl(pmc->block[blk_num].mmio_base + offset,
&ecc_out))
return -EFAULT;
*result = ecc_out;
return 0;
}
if (mlxbf_pmc_valid_range(blk_num, offset))
return mlxbf_pmc_read(pmc->block[blk_num].mmio_base + offset,
MLXBF_PMC_READ_REG_64, result);
return -EINVAL;
}
/* Method to write to a register */
static int mlxbf_pmc_write_reg(int blk_num, uint32_t offset, uint64_t data)
{
if (strstr(pmc->block_name[blk_num], "ecc")) {
return mlxbf_pmc_write(pmc->block[blk_num].mmio_base + offset,
MLXBF_PMC_WRITE_REG_32, data);
}
if (mlxbf_pmc_valid_range(blk_num, offset))
return mlxbf_pmc_write(pmc->block[blk_num].mmio_base + offset,
MLXBF_PMC_WRITE_REG_64, data);
return -EINVAL;
}
/* Show function for "counter" sysfs files */
static ssize_t mlxbf_pmc_counter_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mlxbf_pmc_attribute *attr_counter = container_of(
attr, struct mlxbf_pmc_attribute, dev_attr);
int blk_num, cnt_num, offset;
bool is_l3 = false;
uint64_t value;
blk_num = attr_counter->nr;
cnt_num = attr_counter->index;
if (strstr(pmc->block_name[blk_num], "l3cache"))
is_l3 = true;
if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_COUNTER) {
if (mlxbf_pmc_read_counter(blk_num, cnt_num, is_l3, &value))
return -EINVAL;
} else if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_REGISTER) {
offset = mlxbf_pmc_get_event_num(pmc->block_name[blk_num],
attr->attr.name);
if (offset < 0)
return -EINVAL;
if (mlxbf_pmc_read_reg(blk_num, offset, &value))
return -EINVAL;
} else {
	return -EINVAL;
}
return sysfs_emit(buf, "0x%llx\n", value);
}
/* Store function for "counter" sysfs files */
static ssize_t mlxbf_pmc_counter_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct mlxbf_pmc_attribute *attr_counter = container_of(
attr, struct mlxbf_pmc_attribute, dev_attr);
int blk_num, cnt_num, offset, err, data;
bool is_l3 = false;
uint64_t evt_num;
blk_num = attr_counter->nr;
cnt_num = attr_counter->index;
err = kstrtoint(buf, 0, &data);
if (err < 0)
return err;
/* Allow non-zero writes only to the ecc regs */
if (!(strstr(pmc->block_name[blk_num], "ecc")) && data)
return -EINVAL;
/* Do not allow writes to the L3C regs */
if (strstr(pmc->block_name[blk_num], "l3cache"))
return -EINVAL;
if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_COUNTER) {
err = mlxbf_pmc_read_event(blk_num, cnt_num, is_l3, &evt_num);
if (err)
return err;
err = mlxbf_pmc_program_counter(blk_num, cnt_num, evt_num,
is_l3);
if (err)
return err;
} else if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_REGISTER) {
offset = mlxbf_pmc_get_event_num(pmc->block_name[blk_num],
attr->attr.name);
if (offset < 0)
return -EINVAL;
err = mlxbf_pmc_write_reg(blk_num, offset, data);
if (err)
return err;
} else {
	return -EINVAL;
}
return count;
}
/* Show function for "event" sysfs files */
static ssize_t mlxbf_pmc_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mlxbf_pmc_attribute *attr_event = container_of(
attr, struct mlxbf_pmc_attribute, dev_attr);
int blk_num, cnt_num, err;
bool is_l3 = false;
uint64_t evt_num;
char *evt_name;
blk_num = attr_event->nr;
cnt_num = attr_event->index;
if (strstr(pmc->block_name[blk_num], "l3cache"))
is_l3 = true;
err = mlxbf_pmc_read_event(blk_num, cnt_num, is_l3, &evt_num);
if (err)
return sysfs_emit(buf, "No event being monitored\n");
evt_name = mlxbf_pmc_get_event_name(pmc->block_name[blk_num], evt_num);
if (!evt_name)
return -EINVAL;
return sysfs_emit(buf, "0x%llx: %s\n", evt_num, evt_name);
}
/* Store function for "event" sysfs files */
static ssize_t mlxbf_pmc_event_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct mlxbf_pmc_attribute *attr_event = container_of(
attr, struct mlxbf_pmc_attribute, dev_attr);
int blk_num, cnt_num, evt_num, err;
bool is_l3 = false;
blk_num = attr_event->nr;
cnt_num = attr_event->index;
if (isalpha(buf[0])) {
evt_num = mlxbf_pmc_get_event_num(pmc->block_name[blk_num],
buf);
if (evt_num < 0)
return -EINVAL;
} else {
err = kstrtoint(buf, 0, &evt_num);
if (err < 0)
return err;
}
if (strstr(pmc->block_name[blk_num], "l3cache"))
is_l3 = true;
err = mlxbf_pmc_program_counter(blk_num, cnt_num, evt_num, is_l3);
if (err)
return err;
return count;
}
/* Show function for "event_list" sysfs files */
static ssize_t mlxbf_pmc_event_list_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct mlxbf_pmc_attribute *attr_event_list = container_of(
attr, struct mlxbf_pmc_attribute, dev_attr);
int blk_num, i, size, len = 0, ret = 0;
const struct mlxbf_pmc_events *events;
char e_info[MLXBF_PMC_EVENT_INFO_LEN];
blk_num = attr_event_list->nr;
events = mlxbf_pmc_event_list(pmc->block_name[blk_num], &size);
if (!events)
return -EINVAL;
for (i = 0, buf[0] = '\0'; i < size; ++i) {
len += snprintf(e_info, sizeof(e_info), "0x%x: %s\n",
events[i].evt_num, events[i].evt_name);
if (len >= PAGE_SIZE)
break;
strcat(buf, e_info);
ret = len;
}
return ret;
}
/* Show function for "enable" sysfs files - only for l3cache */
static ssize_t mlxbf_pmc_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct mlxbf_pmc_attribute *attr_enable = container_of(
attr, struct mlxbf_pmc_attribute, dev_attr);
uint32_t perfcnt_cfg;
int blk_num, value;
blk_num = attr_enable->nr;
if (mlxbf_pmc_readl(pmc->block[blk_num].mmio_base +
MLXBF_PMC_L3C_PERF_CNT_CFG,
&perfcnt_cfg))
return -EINVAL;
value = FIELD_GET(MLXBF_PMC_L3C_PERF_CNT_CFG_EN, perfcnt_cfg);
return sysfs_emit(buf, "%d\n", value);
}
/* Store function for "enable" sysfs files - only for l3cache */
static ssize_t mlxbf_pmc_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct mlxbf_pmc_attribute *attr_enable = container_of(
attr, struct mlxbf_pmc_attribute, dev_attr);
int err, en, blk_num;
blk_num = attr_enable->nr;
err = kstrtoint(buf, 0, &en);
if (err < 0)
return err;
if (!en) {
err = mlxbf_pmc_config_l3_counters(blk_num, false, false);
if (err)
return err;
} else if (en == 1) {
err = mlxbf_pmc_config_l3_counters(blk_num, false, true);
if (err)
return err;
err = mlxbf_pmc_config_l3_counters(blk_num, true, false);
if (err)
return err;
} else {
	return -EINVAL;
}
return count;
}
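/*
 * Typical user-space flow for a counter block, through the hwmon
 * attributes created below (the hwmon index and block directory names
 * are illustrative and depend on the platform):
 *
 *	# cd /sys/class/hwmon/hwmon0/l3cache0
 *	# cat event_list		(discover supported events)
 *	# echo HITS_BANK0 > event0	(program counter 0)
 *	# echo 1 > enable		(reset and start, L3C blocks only)
 *	# cat counter0			(read the accumulated value)
 */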
/* Populate attributes for blocks with counters to monitor performance */
static int mlxbf_pmc_init_perftype_counter(struct device *dev, int blk_num)
{
struct mlxbf_pmc_attribute *attr;
int i = 0, j = 0;
/* "event_list" sysfs to list events supported by the block */
attr = &pmc->block[blk_num].attr_event_list;
attr->dev_attr.attr.mode = 0444;
attr->dev_attr.show = mlxbf_pmc_event_list_show;
attr->nr = blk_num;
attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL, "event_list");
pmc->block[blk_num].block_attr[i] = &attr->dev_attr.attr;
attr = NULL;
/* "enable" sysfs to start/stop the counters. Only in L3C blocks */
if (strstr(pmc->block_name[blk_num], "l3cache")) {
attr = &pmc->block[blk_num].attr_enable;
attr->dev_attr.attr.mode = 0644;
attr->dev_attr.show = mlxbf_pmc_enable_show;
attr->dev_attr.store = mlxbf_pmc_enable_store;
attr->nr = blk_num;
attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
"enable");
pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
attr = NULL;
}
pmc->block[blk_num].attr_counter = devm_kcalloc(
dev, pmc->block[blk_num].counters,
sizeof(struct mlxbf_pmc_attribute), GFP_KERNEL);
if (!pmc->block[blk_num].attr_counter)
return -ENOMEM;
pmc->block[blk_num].attr_event = devm_kcalloc(
dev, pmc->block[blk_num].counters,
sizeof(struct mlxbf_pmc_attribute), GFP_KERNEL);
if (!pmc->block[blk_num].attr_event)
return -ENOMEM;
/* "eventX" and "counterX" sysfs to program and read counter values */
for (j = 0; j < pmc->block[blk_num].counters; ++j) {
attr = &pmc->block[blk_num].attr_counter[j];
attr->dev_attr.attr.mode = 0644;
attr->dev_attr.show = mlxbf_pmc_counter_show;
attr->dev_attr.store = mlxbf_pmc_counter_store;
attr->index = j;
attr->nr = blk_num;
attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
"counter%d", j);
pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
attr = NULL;
attr = &pmc->block[blk_num].attr_event[j];
attr->dev_attr.attr.mode = 0644;
attr->dev_attr.show = mlxbf_pmc_event_show;
attr->dev_attr.store = mlxbf_pmc_event_store;
attr->index = j;
attr->nr = blk_num;
attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
"event%d", j);
pmc->block[blk_num].block_attr[++i] = &attr->dev_attr.attr;
attr = NULL;
}
return 0;
}
/* Populate attributes for blocks with registers to monitor performance */
static int mlxbf_pmc_init_perftype_reg(struct device *dev, int blk_num)
{
struct mlxbf_pmc_attribute *attr;
const struct mlxbf_pmc_events *events;
int i = 0, j = 0;
events = mlxbf_pmc_event_list(pmc->block_name[blk_num], &j);
if (!events)
return -EINVAL;
pmc->block[blk_num].attr_event = devm_kcalloc(
dev, j, sizeof(struct mlxbf_pmc_attribute), GFP_KERNEL);
if (!pmc->block[blk_num].attr_event)
return -ENOMEM;
while (j > 0) {
--j;
attr = &pmc->block[blk_num].attr_event[j];
attr->dev_attr.attr.mode = 0644;
attr->dev_attr.show = mlxbf_pmc_counter_show;
attr->dev_attr.store = mlxbf_pmc_counter_store;
attr->nr = blk_num;
attr->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
					  "%s", events[j].evt_name);
pmc->block[blk_num].block_attr[i] = &attr->dev_attr.attr;
attr = NULL;
i++;
}
return 0;
}
/* Helper to create the bfperf sysfs sub-directories and files */
static int mlxbf_pmc_create_groups(struct device *dev, int blk_num)
{
int err;
/* Populate attributes based on counter type */
if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_COUNTER)
err = mlxbf_pmc_init_perftype_counter(dev, blk_num);
else if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_REGISTER)
err = mlxbf_pmc_init_perftype_reg(dev, blk_num);
else
err = -EINVAL;
if (err)
return err;
/* Add a new attribute_group for the block */
pmc->block[blk_num].block_attr_grp.attrs = pmc->block[blk_num].block_attr;
pmc->block[blk_num].block_attr_grp.name = devm_kasprintf(
	dev, GFP_KERNEL, "%s", pmc->block_name[blk_num]);
pmc->groups[blk_num] = &pmc->block[blk_num].block_attr_grp;
return 0;
}
static bool mlxbf_pmc_guid_match(const guid_t *guid,
const struct arm_smccc_res *res)
{
guid_t id = GUID_INIT(res->a0, res->a1, res->a1 >> 16, res->a2,
res->a2 >> 8, res->a2 >> 16, res->a2 >> 24,
res->a3, res->a3 >> 8, res->a3 >> 16,
res->a3 >> 24);
return guid_equal(guid, &id);
}
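/*
 * Sketch of the mapping above: the SIP service UID call returns the
 * UUID packed into res->a0..a3. res->a0 supplies the 32-bit time_low
 * word, the two halves of res->a1 the 16-bit time_mid and time_hi
 * words, and the individual bytes of res->a2 and res->a3 are unpacked
 * little-endian into the remaining eight UUID bytes before comparison.
 */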
/* Helper to map the performance counters from the various blocks */
static int mlxbf_pmc_map_counters(struct device *dev)
{
uint64_t info[MLXBF_PMC_INFO_SZ];
int i, tile_num, ret;
for (i = 0; i < pmc->total_blocks; ++i) {
if (strstr(pmc->block_name[i], "tile")) {
if (sscanf(pmc->block_name[i], "tile%d", &tile_num) != 1)
return -EINVAL;
if (tile_num >= pmc->tile_count)
continue;
}
ret = device_property_read_u64_array(dev, pmc->block_name[i],
info, MLXBF_PMC_INFO_SZ);
if (ret)
return ret;
/*
* Do not remap if the proper SMC calls are supported,
* since the SMC calls expect physical addresses.
*/
if (pmc->svc_sreg_support)
pmc->block[i].mmio_base = (void __iomem *)info[0];
else
pmc->block[i].mmio_base =
devm_ioremap(dev, info[0], info[1]);
pmc->block[i].blk_size = info[1];
pmc->block[i].counters = info[2];
pmc->block[i].type = info[3];
if (!pmc->block[i].mmio_base)
return -ENOMEM;
ret = mlxbf_pmc_create_groups(dev, i);
if (ret)
return ret;
}
return 0;
}
static int mlxbf_pmc_probe(struct platform_device *pdev)
{
struct acpi_device *acpi_dev = ACPI_COMPANION(&pdev->dev);
const char *hid = acpi_device_hid(acpi_dev);
struct device *dev = &pdev->dev;
struct arm_smccc_res res;
guid_t guid;
int ret;
/* Ensure we have the UUID we expect for this service. */
arm_smccc_smc(MLXBF_PMC_SIP_SVC_UID, 0, 0, 0, 0, 0, 0, 0, &res);
guid_parse(mlxbf_pmc_svc_uuid_str, &guid);
if (!mlxbf_pmc_guid_match(&guid, &res))
return -ENODEV;
pmc = devm_kzalloc(dev, sizeof(struct mlxbf_pmc_context), GFP_KERNEL);
if (!pmc)
return -ENOMEM;
/*
* ACPI indicates whether we use SMCs to access registers or not.
* If sreg_tbl_perf is not present, just assume we're not using SMCs.
*/
ret = device_property_read_u32(dev, "sec_reg_block",
&pmc->sreg_tbl_perf);
if (ret) {
pmc->svc_sreg_support = false;
} else {
/*
* Check service version to see if we actually do support the
* needed SMCs. If we have the calls we need, mark support for
* them in the pmc struct.
*/
arm_smccc_smc(MLXBF_PMC_SIP_SVC_VERSION, 0, 0, 0, 0, 0, 0, 0,
&res);
if (res.a0 == MLXBF_PMC_SVC_REQ_MAJOR &&
res.a1 >= MLXBF_PMC_SVC_MIN_MINOR)
pmc->svc_sreg_support = true;
else
return -EINVAL;
}
if (!strcmp(hid, "MLNXBFD0"))
pmc->event_set = MLXBF_PMC_EVENT_SET_BF1;
else if (!strcmp(hid, "MLNXBFD1"))
pmc->event_set = MLXBF_PMC_EVENT_SET_BF2;
else
return -ENODEV;
ret = device_property_read_u32(dev, "block_num", &pmc->total_blocks);
if (ret)
return ret;
ret = device_property_read_string_array(dev, "block_name",
pmc->block_name,
pmc->total_blocks);
if (ret != pmc->total_blocks)
return -EFAULT;
ret = device_property_read_u32(dev, "tile_num", &pmc->tile_count);
if (ret)
return ret;
pmc->pdev = pdev;
ret = mlxbf_pmc_map_counters(dev);
if (ret)
return ret;
pmc->hwmon_dev = devm_hwmon_device_register_with_groups(
	dev, "bfperf", pmc, pmc->groups);
if (IS_ERR(pmc->hwmon_dev))
	return PTR_ERR(pmc->hwmon_dev);
platform_set_drvdata(pdev, pmc);
return 0;
}
static const struct acpi_device_id mlxbf_pmc_acpi_ids[] = { { "MLNXBFD0", 0 },
{ "MLNXBFD1", 0 },
{}, };
MODULE_DEVICE_TABLE(acpi, mlxbf_pmc_acpi_ids);
static struct platform_driver pmc_driver = {
.driver = { .name = "mlxbf-pmc",
.acpi_match_table = ACPI_PTR(mlxbf_pmc_acpi_ids), },
.probe = mlxbf_pmc_probe,
};
module_platform_driver(pmc_driver);
MODULE_AUTHOR("Shravan Kumar Ramani <[email protected]>");
MODULE_DESCRIPTION("Mellanox PMC driver");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | drivers/platform/mellanox/mlxbf-pmc.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Mellanox BlueField SoC TmFifo driver
*
* Copyright (C) 2019 Mellanox Technologies
*/
#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/circ_buf.h>
#include <linux/efi.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/virtio_config.h>
#include <linux/virtio_console.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_net.h>
#include <linux/virtio_ring.h>
#include "mlxbf-tmfifo-regs.h"
/* Vring size. */
#define MLXBF_TMFIFO_VRING_SIZE SZ_1K
/* Console Tx buffer size. */
#define MLXBF_TMFIFO_CON_TX_BUF_SIZE SZ_32K
/* Console Tx buffer reserved space. */
#define MLXBF_TMFIFO_CON_TX_BUF_RSV_SIZE 8
/* House-keeping timer interval. */
#define MLXBF_TMFIFO_TIMER_INTERVAL (HZ / 10)
/* Virtual devices sharing the TM FIFO. */
#define MLXBF_TMFIFO_VDEV_MAX (VIRTIO_ID_CONSOLE + 1)
/*
* Reserve 1/16 of TmFifo space, so console messages are not starved by
* the networking traffic.
*/
#define MLXBF_TMFIFO_RESERVE_RATIO 16
/* Message with data needs at least two words (for header & data). */
#define MLXBF_TMFIFO_DATA_MIN_WORDS 2
/* ACPI UID for BlueField-3. */
#define TMFIFO_BF3_UID 1
struct mlxbf_tmfifo;
/**
* mlxbf_tmfifo_vring - Structure of the TmFifo virtual ring
* @va: virtual address of the ring
* @dma: dma address of the ring
* @vq: pointer to the virtio virtqueue
* @desc: current descriptor of the pending packet
* @desc_head: head descriptor of the pending packet
* @drop_desc: dummy desc for packet dropping
* @cur_len: processed length of the current descriptor
* @rem_len: remaining length of the pending packet
* @pkt_len: total length of the pending packet
* @next_avail: next avail descriptor id
* @num: vring size (number of descriptors)
* @align: vring alignment size
* @index: vring index
* @vdev_id: vring virtio id (VIRTIO_ID_xxx)
* @fifo: pointer to the tmfifo structure
*/
struct mlxbf_tmfifo_vring {
void *va;
dma_addr_t dma;
struct virtqueue *vq;
struct vring_desc *desc;
struct vring_desc *desc_head;
struct vring_desc drop_desc;
int cur_len;
int rem_len;
u32 pkt_len;
u16 next_avail;
int num;
int align;
int index;
int vdev_id;
struct mlxbf_tmfifo *fifo;
};
/* Check whether vring is in drop mode. */
#define IS_VRING_DROP(_r) ({ \
typeof(_r) (r) = (_r); \
(r->desc_head == &r->drop_desc ? true : false); })
/* A stub length to drop maximum length packet. */
#define VRING_DROP_DESC_MAX_LEN GENMASK(15, 0)
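/*
 * Editor's note (illustrative): GENMASK(15, 0) sets bits 15..0, i.e.
 * 0xffff (65535), the largest value the 16-bit header length field can
 * carry, so a drop-mode vring will consume any packet the FIFO can
 * legally describe.
 */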
/* Interrupt types. */
enum {
MLXBF_TM_RX_LWM_IRQ,
MLXBF_TM_RX_HWM_IRQ,
MLXBF_TM_TX_LWM_IRQ,
MLXBF_TM_TX_HWM_IRQ,
MLXBF_TM_MAX_IRQ
};
/* Ring types (Rx & Tx). */
enum {
MLXBF_TMFIFO_VRING_RX,
MLXBF_TMFIFO_VRING_TX,
MLXBF_TMFIFO_VRING_MAX
};
/**
* mlxbf_tmfifo_vdev - Structure of the TmFifo virtual device
* @vdev: virtio device, in which the vdev.id.device field has the
* VIRTIO_ID_xxx id to distinguish the virtual device.
* @status: status of the device
* @features: supported features of the device
* @vrings: array of tmfifo vrings of this device
* @config.cons: virtual console config -
* select if vdev.id.device is VIRTIO_ID_CONSOLE
* @config.net: virtual network config -
* select if vdev.id.device is VIRTIO_ID_NET
* @tx_buf: tx buffer used to buffer data before writing into the FIFO
*/
struct mlxbf_tmfifo_vdev {
struct virtio_device vdev;
u8 status;
u64 features;
struct mlxbf_tmfifo_vring vrings[MLXBF_TMFIFO_VRING_MAX];
union {
struct virtio_console_config cons;
struct virtio_net_config net;
} config;
struct circ_buf tx_buf;
};
/**
* mlxbf_tmfifo_irq_info - Structure of the interrupt information
* @fifo: pointer to the tmfifo structure
* @irq: interrupt number
* @index: index into the interrupt array
*/
struct mlxbf_tmfifo_irq_info {
struct mlxbf_tmfifo *fifo;
int irq;
int index;
};
/**
* mlxbf_tmfifo_io - Structure of the TmFifo IO resource (for both rx & tx)
* @ctl: control register offset (TMFIFO_RX_CTL / TMFIFO_TX_CTL)
* @sts: status register offset (TMFIFO_RX_STS / TMFIFO_TX_STS)
* @data: data register offset (TMFIFO_RX_DATA / TMFIFO_TX_DATA)
*/
struct mlxbf_tmfifo_io {
void __iomem *ctl;
void __iomem *sts;
void __iomem *data;
};
/**
* mlxbf_tmfifo - Structure of the TmFifo
* @vdev: array of the virtual devices running over the TmFifo
* @lock: lock to protect the TmFifo access
* @res0: mapped resource block 0
* @res1: mapped resource block 1
* @rx: rx io resource
* @tx: tx io resource
* @rx_fifo_size: number of entries of the Rx FIFO
* @tx_fifo_size: number of entries of the Tx FIFO
* @pend_events: pending bits for deferred events
* @irq_info: interrupt information
* @work: work struct for deferred process
* @timer: background timer
* @vring: Tx/Rx ring
* @spin_lock: Tx/Rx spin lock
* @is_ready: ready flag
*/
struct mlxbf_tmfifo {
struct mlxbf_tmfifo_vdev *vdev[MLXBF_TMFIFO_VDEV_MAX];
struct mutex lock; /* TmFifo lock */
void __iomem *res0;
void __iomem *res1;
struct mlxbf_tmfifo_io rx;
struct mlxbf_tmfifo_io tx;
int rx_fifo_size;
int tx_fifo_size;
unsigned long pend_events;
struct mlxbf_tmfifo_irq_info irq_info[MLXBF_TM_MAX_IRQ];
struct work_struct work;
struct timer_list timer;
struct mlxbf_tmfifo_vring *vring[2];
spinlock_t spin_lock[2]; /* spin lock */
bool is_ready;
};
/**
* mlxbf_tmfifo_msg_hdr - Structure of the TmFifo message header
* @type: message type
 * @len: payload length in network byte order. Messages sent into the FIFO
 *       will be read by the other side as a data stream in the same byte
 *       order. The length needs to be encoded in network order so that
 *       both sides can interpret it consistently.
*/
struct mlxbf_tmfifo_msg_hdr {
u8 type;
__be16 len;
u8 unused[5];
} __packed __aligned(sizeof(u64));
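/*
 * Editor's sketch (not part of the driver): because @len is big-endian on
 * the wire, either side can decode the header regardless of CPU
 * endianness. A minimal user-space mirror of this 8-byte layout, with
 * hypothetical names, might look like:
 */
#if 0
#include <stdint.h>
#include <arpa/inet.h>

struct tmfifo_hdr_example {
	uint8_t type;		/* VIRTIO_ID_NET or VIRTIO_ID_CONSOLE */
	uint16_t len_be;	/* payload length, big-endian */
	uint8_t unused[5];
} __attribute__((packed));

/* Convert the wire-format length to host byte order. */
static uint16_t tmfifo_hdr_payload_len(const struct tmfifo_hdr_example *hdr)
{
	return ntohs(hdr->len_be);
}
#endif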
/*
* Default MAC.
* This MAC address will be read from EFI persistent variable if configured.
* It can also be reconfigured with standard Linux tools.
*/
static u8 mlxbf_tmfifo_net_default_mac[ETH_ALEN] = {
0x00, 0x1A, 0xCA, 0xFF, 0xFF, 0x01
};
/* EFI variable name of the MAC address. */
static efi_char16_t mlxbf_tmfifo_efi_name[] = L"RshimMacAddr";
/* Maximum L2 header length. */
#define MLXBF_TMFIFO_NET_L2_OVERHEAD (ETH_HLEN + VLAN_HLEN)
/* Supported virtio-net features. */
#define MLXBF_TMFIFO_NET_FEATURES \
(BIT_ULL(VIRTIO_NET_F_MTU) | BIT_ULL(VIRTIO_NET_F_STATUS) | \
BIT_ULL(VIRTIO_NET_F_MAC))
#define mlxbf_vdev_to_tmfifo(d) container_of(d, struct mlxbf_tmfifo_vdev, vdev)
/* Free vrings of the FIFO device. */
static void mlxbf_tmfifo_free_vrings(struct mlxbf_tmfifo *fifo,
struct mlxbf_tmfifo_vdev *tm_vdev)
{
struct mlxbf_tmfifo_vring *vring;
int i, size;
for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
vring = &tm_vdev->vrings[i];
if (vring->va) {
size = vring_size(vring->num, vring->align);
dma_free_coherent(tm_vdev->vdev.dev.parent, size,
vring->va, vring->dma);
vring->va = NULL;
if (vring->vq) {
vring_del_virtqueue(vring->vq);
vring->vq = NULL;
}
}
}
}
/* Allocate vrings for the FIFO. */
static int mlxbf_tmfifo_alloc_vrings(struct mlxbf_tmfifo *fifo,
struct mlxbf_tmfifo_vdev *tm_vdev)
{
struct mlxbf_tmfifo_vring *vring;
struct device *dev;
dma_addr_t dma;
int i, size;
void *va;
for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
vring = &tm_vdev->vrings[i];
vring->fifo = fifo;
vring->num = MLXBF_TMFIFO_VRING_SIZE;
vring->align = SMP_CACHE_BYTES;
vring->index = i;
vring->vdev_id = tm_vdev->vdev.id.device;
vring->drop_desc.len = VRING_DROP_DESC_MAX_LEN;
dev = &tm_vdev->vdev.dev;
size = vring_size(vring->num, vring->align);
va = dma_alloc_coherent(dev->parent, size, &dma, GFP_KERNEL);
if (!va) {
mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
dev_err(dev->parent, "dma_alloc_coherent failed\n");
return -ENOMEM;
}
vring->va = va;
vring->dma = dma;
}
return 0;
}
/* Disable interrupts of the FIFO device. */
static void mlxbf_tmfifo_disable_irqs(struct mlxbf_tmfifo *fifo)
{
int i, irq;
for (i = 0; i < MLXBF_TM_MAX_IRQ; i++) {
irq = fifo->irq_info[i].irq;
fifo->irq_info[i].irq = 0;
disable_irq(irq);
}
}
/* Interrupt handler. */
static irqreturn_t mlxbf_tmfifo_irq_handler(int irq, void *arg)
{
struct mlxbf_tmfifo_irq_info *irq_info = arg;
if (!test_and_set_bit(irq_info->index, &irq_info->fifo->pend_events))
schedule_work(&irq_info->fifo->work);
return IRQ_HANDLED;
}
/* Get the next packet descriptor from the vring. */
static struct vring_desc *
mlxbf_tmfifo_get_next_desc(struct mlxbf_tmfifo_vring *vring)
{
const struct vring *vr = virtqueue_get_vring(vring->vq);
struct virtio_device *vdev = vring->vq->vdev;
unsigned int idx, head;
if (vring->next_avail == virtio16_to_cpu(vdev, vr->avail->idx))
return NULL;
/* Make sure 'avail->idx' is visible already. */
virtio_rmb(false);
idx = vring->next_avail % vr->num;
head = virtio16_to_cpu(vdev, vr->avail->ring[idx]);
if (WARN_ON(head >= vr->num))
return NULL;
vring->next_avail++;
return &vr->desc[head];
}
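/*
 * Editor's note (illustrative): 'next_avail' is a free-running 16-bit
 * counter, so the ring slot is next_avail % num. With num = 1024 and
 * next_avail = 1030, for example, the head index is read from
 * avail->ring[6].
 */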
/* Release virtio descriptor. */
static void mlxbf_tmfifo_release_desc(struct mlxbf_tmfifo_vring *vring,
struct vring_desc *desc, u32 len)
{
const struct vring *vr = virtqueue_get_vring(vring->vq);
struct virtio_device *vdev = vring->vq->vdev;
u16 idx, vr_idx;
vr_idx = virtio16_to_cpu(vdev, vr->used->idx);
idx = vr_idx % vr->num;
vr->used->ring[idx].id = cpu_to_virtio32(vdev, desc - vr->desc);
vr->used->ring[idx].len = cpu_to_virtio32(vdev, len);
/*
* Virtio could poll and check the 'idx' to decide whether the desc is
* done or not. Add a memory barrier here to make sure the update above
* completes before updating the idx.
*/
virtio_mb(false);
vr->used->idx = cpu_to_virtio16(vdev, vr_idx + 1);
}
/* Get the total length of the descriptor chain. */
static u32 mlxbf_tmfifo_get_pkt_len(struct mlxbf_tmfifo_vring *vring,
struct vring_desc *desc)
{
const struct vring *vr = virtqueue_get_vring(vring->vq);
struct virtio_device *vdev = vring->vq->vdev;
u32 len = 0, idx;
while (desc) {
len += virtio32_to_cpu(vdev, desc->len);
if (!(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT))
break;
idx = virtio16_to_cpu(vdev, desc->next);
desc = &vr->desc[idx];
}
return len;
}
static void mlxbf_tmfifo_release_pkt(struct mlxbf_tmfifo_vring *vring)
{
struct vring_desc *desc_head;
u32 len = 0;
if (vring->desc_head) {
desc_head = vring->desc_head;
len = vring->pkt_len;
} else {
desc_head = mlxbf_tmfifo_get_next_desc(vring);
len = mlxbf_tmfifo_get_pkt_len(vring, desc_head);
}
if (desc_head)
mlxbf_tmfifo_release_desc(vring, desc_head, len);
vring->pkt_len = 0;
vring->desc = NULL;
vring->desc_head = NULL;
}
static void mlxbf_tmfifo_init_net_desc(struct mlxbf_tmfifo_vring *vring,
struct vring_desc *desc, bool is_rx)
{
struct virtio_device *vdev = vring->vq->vdev;
struct virtio_net_hdr *net_hdr;
net_hdr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));
memset(net_hdr, 0, sizeof(*net_hdr));
}
/* Get and initialize the next packet. */
static struct vring_desc *
mlxbf_tmfifo_get_next_pkt(struct mlxbf_tmfifo_vring *vring, bool is_rx)
{
struct vring_desc *desc;
desc = mlxbf_tmfifo_get_next_desc(vring);
if (desc && is_rx && vring->vdev_id == VIRTIO_ID_NET)
mlxbf_tmfifo_init_net_desc(vring, desc, is_rx);
vring->desc_head = desc;
vring->desc = desc;
return desc;
}
/* House-keeping timer. */
static void mlxbf_tmfifo_timer(struct timer_list *t)
{
struct mlxbf_tmfifo *fifo = container_of(t, struct mlxbf_tmfifo, timer);
int rx, tx;
rx = !test_and_set_bit(MLXBF_TM_RX_HWM_IRQ, &fifo->pend_events);
tx = !test_and_set_bit(MLXBF_TM_TX_LWM_IRQ, &fifo->pend_events);
if (rx || tx)
schedule_work(&fifo->work);
mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL);
}
/* Copy one console packet into the output buffer. */
static void mlxbf_tmfifo_console_output_one(struct mlxbf_tmfifo_vdev *cons,
struct mlxbf_tmfifo_vring *vring,
struct vring_desc *desc)
{
const struct vring *vr = virtqueue_get_vring(vring->vq);
struct virtio_device *vdev = &cons->vdev;
u32 len, idx, seg;
void *addr;
while (desc) {
addr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));
len = virtio32_to_cpu(vdev, desc->len);
seg = CIRC_SPACE_TO_END(cons->tx_buf.head, cons->tx_buf.tail,
MLXBF_TMFIFO_CON_TX_BUF_SIZE);
if (len <= seg) {
memcpy(cons->tx_buf.buf + cons->tx_buf.head, addr, len);
} else {
memcpy(cons->tx_buf.buf + cons->tx_buf.head, addr, seg);
addr += seg;
memcpy(cons->tx_buf.buf, addr, len - seg);
}
cons->tx_buf.head = (cons->tx_buf.head + len) %
MLXBF_TMFIFO_CON_TX_BUF_SIZE;
if (!(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT))
break;
idx = virtio16_to_cpu(vdev, desc->next);
desc = &vr->desc[idx];
}
}
/* Copy console data into the output buffer. */
static void mlxbf_tmfifo_console_output(struct mlxbf_tmfifo_vdev *cons,
struct mlxbf_tmfifo_vring *vring)
{
struct vring_desc *desc;
u32 len, avail;
desc = mlxbf_tmfifo_get_next_desc(vring);
while (desc) {
/* Release the packet if not enough space. */
len = mlxbf_tmfifo_get_pkt_len(vring, desc);
avail = CIRC_SPACE(cons->tx_buf.head, cons->tx_buf.tail,
MLXBF_TMFIFO_CON_TX_BUF_SIZE);
if (len + MLXBF_TMFIFO_CON_TX_BUF_RSV_SIZE > avail) {
mlxbf_tmfifo_release_desc(vring, desc, len);
break;
}
mlxbf_tmfifo_console_output_one(cons, vring, desc);
mlxbf_tmfifo_release_desc(vring, desc, len);
desc = mlxbf_tmfifo_get_next_desc(vring);
}
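/*
 * Editor's note (illustrative): CIRC_CNT()/CIRC_SPACE() treat 'head' as
 * the producer index and 'tail' as the consumer index over a
 * power-of-two buffer. For example, with the 32 KiB buffer above,
 * head = 100 and tail = 200:
 *
 *	CIRC_CNT(100, 200, 32768)   == (100 - 200) & 32767 == 32668
 *	CIRC_SPACE(100, 200, 32768) == (200 - 101) & 32767 == 99
 *
 * so at most 99 more bytes may be buffered before the writer would
 * catch up with the reader.
 */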
}
/* Get the number of available words in Rx FIFO for receiving. */
static int mlxbf_tmfifo_get_rx_avail(struct mlxbf_tmfifo *fifo)
{
u64 sts;
sts = readq(fifo->rx.sts);
return FIELD_GET(MLXBF_TMFIFO_RX_STS__COUNT_MASK, sts);
}
/* Get the number of available words in the TmFifo for sending. */
static int mlxbf_tmfifo_get_tx_avail(struct mlxbf_tmfifo *fifo, int vdev_id)
{
int tx_reserve;
u32 count;
u64 sts;
/* Reserve some room in FIFO for console messages. */
if (vdev_id == VIRTIO_ID_NET)
tx_reserve = fifo->tx_fifo_size / MLXBF_TMFIFO_RESERVE_RATIO;
else
tx_reserve = 1;
sts = readq(fifo->tx.sts);
count = FIELD_GET(MLXBF_TMFIFO_TX_STS__COUNT_MASK, sts);
return fifo->tx_fifo_size - tx_reserve - count;
}
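/*
 * Editor's note (illustrative): with, say, a 256-entry Tx FIFO, a
 * network sender reserves 256 / MLXBF_TMFIFO_RESERVE_RATIO = 16 entries
 * for console traffic; if 100 words are already queued, it may push at
 * most 256 - 16 - 100 = 140 more words. The console itself reserves
 * only a single entry.
 */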
/* Console Tx (move data from the output buffer into the TmFifo). */
static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail)
{
struct mlxbf_tmfifo_msg_hdr hdr;
struct mlxbf_tmfifo_vdev *cons;
unsigned long flags;
int size, seg;
void *addr;
u64 data;
/* Return if not enough space available. */
if (avail < MLXBF_TMFIFO_DATA_MIN_WORDS)
return;
cons = fifo->vdev[VIRTIO_ID_CONSOLE];
if (!cons || !cons->tx_buf.buf)
return;
/* Return if no data to send. */
size = CIRC_CNT(cons->tx_buf.head, cons->tx_buf.tail,
MLXBF_TMFIFO_CON_TX_BUF_SIZE);
if (size == 0)
return;
/* Adjust the size to available space. */
if (size + sizeof(hdr) > avail * sizeof(u64))
size = avail * sizeof(u64) - sizeof(hdr);
/* Write header. */
hdr.type = VIRTIO_ID_CONSOLE;
hdr.len = htons(size);
writeq(*(u64 *)&hdr, fifo->tx.data);
/* Use spin-lock to protect the 'cons->tx_buf'. */
spin_lock_irqsave(&fifo->spin_lock[0], flags);
while (size > 0) {
addr = cons->tx_buf.buf + cons->tx_buf.tail;
seg = CIRC_CNT_TO_END(cons->tx_buf.head, cons->tx_buf.tail,
MLXBF_TMFIFO_CON_TX_BUF_SIZE);
if (seg >= sizeof(u64)) {
memcpy(&data, addr, sizeof(u64));
} else {
memcpy(&data, addr, seg);
memcpy((u8 *)&data + seg, cons->tx_buf.buf,
sizeof(u64) - seg);
}
writeq(data, fifo->tx.data);
if (size >= sizeof(u64)) {
cons->tx_buf.tail = (cons->tx_buf.tail + sizeof(u64)) %
MLXBF_TMFIFO_CON_TX_BUF_SIZE;
size -= sizeof(u64);
} else {
cons->tx_buf.tail = (cons->tx_buf.tail + size) %
MLXBF_TMFIFO_CON_TX_BUF_SIZE;
size = 0;
}
}
spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
}
/* Rx/Tx one word in the descriptor buffer. */
static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
struct vring_desc *desc,
bool is_rx, int len)
{
struct virtio_device *vdev = vring->vq->vdev;
struct mlxbf_tmfifo *fifo = vring->fifo;
void *addr;
u64 data;
/* Get the buffer address of this desc. */
addr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));
/* Read a word from FIFO for Rx. */
if (is_rx)
data = readq(fifo->rx.data);
if (vring->cur_len + sizeof(u64) <= len) {
/* The whole word. */
if (!IS_VRING_DROP(vring)) {
if (is_rx)
memcpy(addr + vring->cur_len, &data,
sizeof(u64));
else
memcpy(&data, addr + vring->cur_len,
sizeof(u64));
}
vring->cur_len += sizeof(u64);
} else {
/* Leftover bytes. */
if (!IS_VRING_DROP(vring)) {
if (is_rx)
memcpy(addr + vring->cur_len, &data,
len - vring->cur_len);
else
memcpy(&data, addr + vring->cur_len,
len - vring->cur_len);
}
vring->cur_len = len;
}
/* Write the word into FIFO for Tx. */
if (!is_rx)
writeq(data, fifo->tx.data);
}
/*
* Rx/Tx packet header.
*
* In Rx case, the packet might be found to belong to a different vring since
* the TmFifo is shared by different services. In such case, the 'vring_change'
* flag is set.
*/
static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
struct vring_desc **desc,
bool is_rx, bool *vring_change)
{
struct mlxbf_tmfifo *fifo = vring->fifo;
struct virtio_net_config *config;
struct mlxbf_tmfifo_msg_hdr hdr;
int vdev_id, hdr_len;
bool drop_rx = false;
/* Read/Write packet header. */
if (is_rx) {
/* Drain one word from the FIFO. */
*(u64 *)&hdr = readq(fifo->rx.data);
/* Skip zero-length (keepalive) packets. */
if (hdr.len == 0)
return;
/* Check packet type. */
if (hdr.type == VIRTIO_ID_NET) {
vdev_id = VIRTIO_ID_NET;
hdr_len = sizeof(struct virtio_net_hdr);
config = &fifo->vdev[vdev_id]->config.net;
/* A legacy-only interface for now. */
if (ntohs(hdr.len) >
__virtio16_to_cpu(virtio_legacy_is_little_endian(),
config->mtu) +
MLXBF_TMFIFO_NET_L2_OVERHEAD)
drop_rx = true;
} else {
vdev_id = VIRTIO_ID_CONSOLE;
hdr_len = 0;
}
/*
* Check whether the new packet still belongs to this vring.
* If not, update the pkt_len of the new vring.
*/
if (vdev_id != vring->vdev_id) {
struct mlxbf_tmfifo_vdev *tm_dev2 = fifo->vdev[vdev_id];
if (!tm_dev2)
return;
vring->desc = *desc;
vring = &tm_dev2->vrings[MLXBF_TMFIFO_VRING_RX];
*vring_change = true;
}
if (drop_rx && !IS_VRING_DROP(vring)) {
if (vring->desc_head)
mlxbf_tmfifo_release_pkt(vring);
*desc = &vring->drop_desc;
vring->desc_head = *desc;
vring->desc = *desc;
}
vring->pkt_len = ntohs(hdr.len) + hdr_len;
} else {
/* Network virtio has an extra header. */
hdr_len = (vring->vdev_id == VIRTIO_ID_NET) ?
sizeof(struct virtio_net_hdr) : 0;
vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, *desc);
hdr.type = (vring->vdev_id == VIRTIO_ID_NET) ?
VIRTIO_ID_NET : VIRTIO_ID_CONSOLE;
hdr.len = htons(vring->pkt_len - hdr_len);
writeq(*(u64 *)&hdr, fifo->tx.data);
}
vring->cur_len = hdr_len;
vring->rem_len = vring->pkt_len;
fifo->vring[is_rx] = vring;
}
/*
* Rx/Tx one descriptor.
*
* Return true to indicate more data available.
*/
static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
bool is_rx, int *avail)
{
const struct vring *vr = virtqueue_get_vring(vring->vq);
struct mlxbf_tmfifo *fifo = vring->fifo;
struct virtio_device *vdev;
bool vring_change = false;
struct vring_desc *desc;
unsigned long flags;
u32 len, idx;
vdev = &fifo->vdev[vring->vdev_id]->vdev;
/* Get the descriptor of the next packet. */
if (!vring->desc) {
desc = mlxbf_tmfifo_get_next_pkt(vring, is_rx);
if (!desc) {
/* Drop the next Rx packet to avoid getting stuck. */
if (is_rx) {
desc = &vring->drop_desc;
vring->desc_head = desc;
vring->desc = desc;
} else {
return false;
}
}
} else {
desc = vring->desc;
}
/* Beginning of a packet: Rx/Tx the packet header first. */
if (vring->pkt_len == 0) {
mlxbf_tmfifo_rxtx_header(vring, &desc, is_rx, &vring_change);
(*avail)--;
/* Return if new packet is for another ring. */
if (vring_change)
return false;
goto mlxbf_tmfifo_desc_done;
}
/* Get the length of this desc. */
len = virtio32_to_cpu(vdev, desc->len);
if (len > vring->rem_len)
len = vring->rem_len;
/* Rx/Tx one word (8 bytes) if not done. */
if (vring->cur_len < len) {
mlxbf_tmfifo_rxtx_word(vring, desc, is_rx, len);
(*avail)--;
}
/* Check again whether it's done. */
if (vring->cur_len == len) {
vring->cur_len = 0;
vring->rem_len -= len;
/* Get the next desc on the chain. */
if (!IS_VRING_DROP(vring) && vring->rem_len > 0 &&
(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT)) {
idx = virtio16_to_cpu(vdev, desc->next);
desc = &vr->desc[idx];
goto mlxbf_tmfifo_desc_done;
}
/* Done and release the packet. */
desc = NULL;
fifo->vring[is_rx] = NULL;
if (!IS_VRING_DROP(vring)) {
mlxbf_tmfifo_release_pkt(vring);
} else {
vring->pkt_len = 0;
vring->desc_head = NULL;
vring->desc = NULL;
return false;
}
/*
 * Make sure the loads/stores are ordered before
 * returning to virtio.
 */
virtio_mb(false);
/* Notify upper layer that packet is done. */
spin_lock_irqsave(&fifo->spin_lock[is_rx], flags);
vring_interrupt(0, vring->vq);
spin_unlock_irqrestore(&fifo->spin_lock[is_rx], flags);
}
mlxbf_tmfifo_desc_done:
/* Save the current desc. */
vring->desc = desc;
return true;
}
/* Rx & Tx processing of a queue. */
static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
{
int avail = 0, devid = vring->vdev_id;
struct mlxbf_tmfifo *fifo;
bool more;
fifo = vring->fifo;
/* Return if vdev is not ready. */
if (!fifo || !fifo->vdev[devid])
return;
/* Return if another vring is running. */
if (fifo->vring[is_rx] && fifo->vring[is_rx] != vring)
return;
/* Only handle console and network for now. */
if (WARN_ON(devid != VIRTIO_ID_NET && devid != VIRTIO_ID_CONSOLE))
return;
do {
/* Get available FIFO space. */
if (avail == 0) {
if (is_rx)
avail = mlxbf_tmfifo_get_rx_avail(fifo);
else
avail = mlxbf_tmfifo_get_tx_avail(fifo, devid);
if (avail <= 0)
break;
}
/* Console output always comes from the Tx buffer. */
if (!is_rx && devid == VIRTIO_ID_CONSOLE) {
mlxbf_tmfifo_console_tx(fifo, avail);
break;
}
/* Handle one descriptor. */
more = mlxbf_tmfifo_rxtx_one_desc(vring, is_rx, &avail);
} while (more);
}
/* Handle Rx or Tx queues. */
static void mlxbf_tmfifo_work_rxtx(struct mlxbf_tmfifo *fifo, int queue_id,
int irq_id, bool is_rx)
{
struct mlxbf_tmfifo_vdev *tm_vdev;
struct mlxbf_tmfifo_vring *vring;
int i;
if (!test_and_clear_bit(irq_id, &fifo->pend_events) ||
!fifo->irq_info[irq_id].irq)
return;
for (i = 0; i < MLXBF_TMFIFO_VDEV_MAX; i++) {
tm_vdev = fifo->vdev[i];
if (tm_vdev) {
vring = &tm_vdev->vrings[queue_id];
if (vring->vq)
mlxbf_tmfifo_rxtx(vring, is_rx);
}
}
}
/* Work handler for Rx and Tx case. */
static void mlxbf_tmfifo_work_handler(struct work_struct *work)
{
struct mlxbf_tmfifo *fifo;
fifo = container_of(work, struct mlxbf_tmfifo, work);
if (!fifo->is_ready)
return;
mutex_lock(&fifo->lock);
/* Tx (Send data to the TmFifo). */
mlxbf_tmfifo_work_rxtx(fifo, MLXBF_TMFIFO_VRING_TX,
MLXBF_TM_TX_LWM_IRQ, false);
/* Rx (Receive data from the TmFifo). */
mlxbf_tmfifo_work_rxtx(fifo, MLXBF_TMFIFO_VRING_RX,
MLXBF_TM_RX_HWM_IRQ, true);
mutex_unlock(&fifo->lock);
}
/* The notify function is called when new buffers are posted. */
static bool mlxbf_tmfifo_virtio_notify(struct virtqueue *vq)
{
struct mlxbf_tmfifo_vring *vring = vq->priv;
struct mlxbf_tmfifo_vdev *tm_vdev;
struct mlxbf_tmfifo *fifo;
unsigned long flags;
fifo = vring->fifo;
/*
 * Virtio maintains vrings in pairs: even-numbered rings for Rx
 * and odd-numbered rings for Tx.
 */
if (vring->index & BIT(0)) {
/*
 * The console can make a blocking call with interrupts
 * disabled. In that case the vring needs to be served right
 * away; otherwise, just set the TX LWM bit to start Tx in the
 * worker handler.
 */
if (vring->vdev_id == VIRTIO_ID_CONSOLE) {
spin_lock_irqsave(&fifo->spin_lock[0], flags);
tm_vdev = fifo->vdev[VIRTIO_ID_CONSOLE];
mlxbf_tmfifo_console_output(tm_vdev, vring);
spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
set_bit(MLXBF_TM_TX_LWM_IRQ, &fifo->pend_events);
} else if (test_and_set_bit(MLXBF_TM_TX_LWM_IRQ,
&fifo->pend_events)) {
return true;
}
} else {
if (test_and_set_bit(MLXBF_TM_RX_HWM_IRQ, &fifo->pend_events))
return true;
}
schedule_work(&fifo->work);
return true;
}
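/*
 * Editor's note (illustrative): the index test above relies on the
 * MLXBF_TMFIFO_VRING_RX/TX enum ordering, e.g. index 0 is the Rx ring
 * and index 1 the Tx ring of a device, so 'index & BIT(0)' is nonzero
 * exactly for Tx rings.
 */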
/* Get the array of feature bits for this device. */
static u64 mlxbf_tmfifo_virtio_get_features(struct virtio_device *vdev)
{
struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
return tm_vdev->features;
}
/* Confirm device features to use. */
static int mlxbf_tmfifo_virtio_finalize_features(struct virtio_device *vdev)
{
struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
tm_vdev->features = vdev->features;
return 0;
}
/* Free virtqueues found by find_vqs(). */
static void mlxbf_tmfifo_virtio_del_vqs(struct virtio_device *vdev)
{
struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
struct mlxbf_tmfifo_vring *vring;
struct virtqueue *vq;
int i;
for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
vring = &tm_vdev->vrings[i];
/* Release the pending packet. */
if (vring->desc)
mlxbf_tmfifo_release_pkt(vring);
vq = vring->vq;
if (vq) {
vring->vq = NULL;
vring_del_virtqueue(vq);
}
}
}
/* Create and initialize the virtual queues. */
static int mlxbf_tmfifo_virtio_find_vqs(struct virtio_device *vdev,
unsigned int nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char * const names[],
const bool *ctx,
struct irq_affinity *desc)
{
struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
struct mlxbf_tmfifo_vring *vring;
struct virtqueue *vq;
int i, ret, size;
if (nvqs > ARRAY_SIZE(tm_vdev->vrings))
return -EINVAL;
for (i = 0; i < nvqs; ++i) {
if (!names[i]) {
ret = -EINVAL;
goto error;
}
vring = &tm_vdev->vrings[i];
/* zero vring */
size = vring_size(vring->num, vring->align);
memset(vring->va, 0, size);
vq = vring_new_virtqueue(i, vring->num, vring->align, vdev,
false, false, vring->va,
mlxbf_tmfifo_virtio_notify,
callbacks[i], names[i]);
if (!vq) {
dev_err(&vdev->dev, "vring_new_virtqueue failed\n");
ret = -ENOMEM;
goto error;
}
vq->num_max = vring->num;
vq->priv = vring;
/* Make vq update visible before using it. */
virtio_mb(false);
vqs[i] = vq;
vring->vq = vq;
}
return 0;
error:
mlxbf_tmfifo_virtio_del_vqs(vdev);
return ret;
}
/* Read the status byte. */
static u8 mlxbf_tmfifo_virtio_get_status(struct virtio_device *vdev)
{
struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
return tm_vdev->status;
}
/* Write the status byte. */
static void mlxbf_tmfifo_virtio_set_status(struct virtio_device *vdev,
u8 status)
{
struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
tm_vdev->status = status;
}
/* Reset the device. Not much here for now. */
static void mlxbf_tmfifo_virtio_reset(struct virtio_device *vdev)
{
struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
tm_vdev->status = 0;
}
/* Read the value of a configuration field. */
static void mlxbf_tmfifo_virtio_get(struct virtio_device *vdev,
unsigned int offset,
void *buf,
unsigned int len)
{
struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
if ((u64)offset + len > sizeof(tm_vdev->config))
return;
memcpy(buf, (u8 *)&tm_vdev->config + offset, len);
}
/* Write the value of a configuration field. */
static void mlxbf_tmfifo_virtio_set(struct virtio_device *vdev,
unsigned int offset,
const void *buf,
unsigned int len)
{
struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
if ((u64)offset + len > sizeof(tm_vdev->config))
return;
memcpy((u8 *)&tm_vdev->config + offset, buf, len);
}
static void tmfifo_virtio_dev_release(struct device *device)
{
struct virtio_device *vdev =
container_of(device, struct virtio_device, dev);
struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
kfree(tm_vdev);
}
/* Virtio config operations. */
static const struct virtio_config_ops mlxbf_tmfifo_virtio_config_ops = {
.get_features = mlxbf_tmfifo_virtio_get_features,
.finalize_features = mlxbf_tmfifo_virtio_finalize_features,
.find_vqs = mlxbf_tmfifo_virtio_find_vqs,
.del_vqs = mlxbf_tmfifo_virtio_del_vqs,
.reset = mlxbf_tmfifo_virtio_reset,
.set_status = mlxbf_tmfifo_virtio_set_status,
.get_status = mlxbf_tmfifo_virtio_get_status,
.get = mlxbf_tmfifo_virtio_get,
.set = mlxbf_tmfifo_virtio_set,
};
/* Create vdev for the FIFO. */
static int mlxbf_tmfifo_create_vdev(struct device *dev,
struct mlxbf_tmfifo *fifo,
int vdev_id, u64 features,
void *config, u32 size)
{
struct mlxbf_tmfifo_vdev *tm_vdev, *reg_dev = NULL;
int ret;
mutex_lock(&fifo->lock);
tm_vdev = fifo->vdev[vdev_id];
if (tm_vdev) {
dev_err(dev, "vdev %d already exists\n", vdev_id);
ret = -EEXIST;
goto fail;
}
tm_vdev = kzalloc(sizeof(*tm_vdev), GFP_KERNEL);
if (!tm_vdev) {
ret = -ENOMEM;
goto fail;
}
tm_vdev->vdev.id.device = vdev_id;
tm_vdev->vdev.config = &mlxbf_tmfifo_virtio_config_ops;
tm_vdev->vdev.dev.parent = dev;
tm_vdev->vdev.dev.release = tmfifo_virtio_dev_release;
tm_vdev->features = features;
if (config)
memcpy(&tm_vdev->config, config, size);
if (mlxbf_tmfifo_alloc_vrings(fifo, tm_vdev)) {
dev_err(dev, "unable to allocate vring\n");
ret = -ENOMEM;
goto vdev_fail;
}
/* Allocate an output buffer for the console device. */
if (vdev_id == VIRTIO_ID_CONSOLE)
tm_vdev->tx_buf.buf = devm_kmalloc(dev,
MLXBF_TMFIFO_CON_TX_BUF_SIZE,
GFP_KERNEL);
fifo->vdev[vdev_id] = tm_vdev;
/* Register the virtio device. */
ret = register_virtio_device(&tm_vdev->vdev);
reg_dev = tm_vdev;
if (ret) {
dev_err(dev, "register_virtio_device failed\n");
goto vdev_fail;
}
mutex_unlock(&fifo->lock);
return 0;
vdev_fail:
mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
fifo->vdev[vdev_id] = NULL;
if (reg_dev)
put_device(&tm_vdev->vdev.dev);
else
kfree(tm_vdev);
fail:
mutex_unlock(&fifo->lock);
return ret;
}
/* Delete vdev for the FIFO. */
static int mlxbf_tmfifo_delete_vdev(struct mlxbf_tmfifo *fifo, int vdev_id)
{
struct mlxbf_tmfifo_vdev *tm_vdev;
mutex_lock(&fifo->lock);
/* Unregister vdev. */
tm_vdev = fifo->vdev[vdev_id];
if (tm_vdev) {
unregister_virtio_device(&tm_vdev->vdev);
mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
fifo->vdev[vdev_id] = NULL;
}
mutex_unlock(&fifo->lock);
return 0;
}
/* Read the configured network MAC address from the EFI variable. */
static void mlxbf_tmfifo_get_cfg_mac(u8 *mac)
{
efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
unsigned long size = ETH_ALEN;
u8 buf[ETH_ALEN];
efi_status_t rc;
rc = efi.get_variable(mlxbf_tmfifo_efi_name, &guid, NULL, &size, buf);
if (rc == EFI_SUCCESS && size == ETH_ALEN)
ether_addr_copy(mac, buf);
else
ether_addr_copy(mac, mlxbf_tmfifo_net_default_mac);
}
/* Set the TmFifo thresholds which are used to trigger interrupts. */
static void mlxbf_tmfifo_set_threshold(struct mlxbf_tmfifo *fifo)
{
u64 ctl;
/* Get Tx FIFO size and set the low/high watermark. */
ctl = readq(fifo->tx.ctl);
fifo->tx_fifo_size =
FIELD_GET(MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_MASK, ctl);
ctl = (ctl & ~MLXBF_TMFIFO_TX_CTL__LWM_MASK) |
FIELD_PREP(MLXBF_TMFIFO_TX_CTL__LWM_MASK,
fifo->tx_fifo_size / 2);
ctl = (ctl & ~MLXBF_TMFIFO_TX_CTL__HWM_MASK) |
FIELD_PREP(MLXBF_TMFIFO_TX_CTL__HWM_MASK,
fifo->tx_fifo_size - 1);
writeq(ctl, fifo->tx.ctl);
/* Get Rx FIFO size and set the low/high watermark. */
ctl = readq(fifo->rx.ctl);
fifo->rx_fifo_size =
FIELD_GET(MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_MASK, ctl);
ctl = (ctl & ~MLXBF_TMFIFO_RX_CTL__LWM_MASK) |
FIELD_PREP(MLXBF_TMFIFO_RX_CTL__LWM_MASK, 0);
ctl = (ctl & ~MLXBF_TMFIFO_RX_CTL__HWM_MASK) |
FIELD_PREP(MLXBF_TMFIFO_RX_CTL__HWM_MASK, 1);
writeq(ctl, fifo->rx.ctl);
}
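/*
 * Editor's sketch (not part of the driver): FIELD_GET()/FIELD_PREP()
 * shift a value out of/into the position described by a mask. A
 * user-space equivalent, assuming the mask has a contiguous run of set
 * bits, could be written as:
 */
#if 0
#include <stdint.h>

/* Extract the field selected by 'mask' (shift it down to bit 0). */
static uint64_t field_get(uint64_t mask, uint64_t reg)
{
	return (reg & mask) / (mask & -mask);
}

/* Place 'val' into the field selected by 'mask'. */
static uint64_t field_prep(uint64_t mask, uint64_t val)
{
	return (val * (mask & -mask)) & mask;
}
#endif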
static void mlxbf_tmfifo_cleanup(struct mlxbf_tmfifo *fifo)
{
int i;
fifo->is_ready = false;
del_timer_sync(&fifo->timer);
mlxbf_tmfifo_disable_irqs(fifo);
cancel_work_sync(&fifo->work);
for (i = 0; i < MLXBF_TMFIFO_VDEV_MAX; i++)
mlxbf_tmfifo_delete_vdev(fifo, i);
}
/* Probe the TMFIFO. */
static int mlxbf_tmfifo_probe(struct platform_device *pdev)
{
struct virtio_net_config net_config;
struct device *dev = &pdev->dev;
struct mlxbf_tmfifo *fifo;
u64 dev_id;
int i, rc;
rc = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &dev_id);
if (rc) {
dev_err(dev, "Cannot retrieve UID\n");
return rc;
}
fifo = devm_kzalloc(dev, sizeof(*fifo), GFP_KERNEL);
if (!fifo)
return -ENOMEM;
spin_lock_init(&fifo->spin_lock[0]);
spin_lock_init(&fifo->spin_lock[1]);
INIT_WORK(&fifo->work, mlxbf_tmfifo_work_handler);
mutex_init(&fifo->lock);
/* Get the resource of the Rx FIFO. */
fifo->res0 = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(fifo->res0))
return PTR_ERR(fifo->res0);
/* Get the resource of the Tx FIFO. */
fifo->res1 = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(fifo->res1))
return PTR_ERR(fifo->res1);
if (dev_id == TMFIFO_BF3_UID) {
fifo->rx.ctl = fifo->res1 + MLXBF_TMFIFO_RX_CTL_BF3;
fifo->rx.sts = fifo->res1 + MLXBF_TMFIFO_RX_STS_BF3;
fifo->rx.data = fifo->res0 + MLXBF_TMFIFO_RX_DATA_BF3;
fifo->tx.ctl = fifo->res1 + MLXBF_TMFIFO_TX_CTL_BF3;
fifo->tx.sts = fifo->res1 + MLXBF_TMFIFO_TX_STS_BF3;
fifo->tx.data = fifo->res0 + MLXBF_TMFIFO_TX_DATA_BF3;
} else {
fifo->rx.ctl = fifo->res0 + MLXBF_TMFIFO_RX_CTL;
fifo->rx.sts = fifo->res0 + MLXBF_TMFIFO_RX_STS;
fifo->rx.data = fifo->res0 + MLXBF_TMFIFO_RX_DATA;
fifo->tx.ctl = fifo->res1 + MLXBF_TMFIFO_TX_CTL;
fifo->tx.sts = fifo->res1 + MLXBF_TMFIFO_TX_STS;
fifo->tx.data = fifo->res1 + MLXBF_TMFIFO_TX_DATA;
}
platform_set_drvdata(pdev, fifo);
timer_setup(&fifo->timer, mlxbf_tmfifo_timer, 0);
for (i = 0; i < MLXBF_TM_MAX_IRQ; i++) {
fifo->irq_info[i].index = i;
fifo->irq_info[i].fifo = fifo;
fifo->irq_info[i].irq = platform_get_irq(pdev, i);
rc = devm_request_irq(dev, fifo->irq_info[i].irq,
mlxbf_tmfifo_irq_handler, 0,
"tmfifo", &fifo->irq_info[i]);
if (rc) {
dev_err(dev, "devm_request_irq failed\n");
fifo->irq_info[i].irq = 0;
return rc;
}
}
mlxbf_tmfifo_set_threshold(fifo);
/* Create the console vdev. */
rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_CONSOLE, 0, NULL, 0);
if (rc)
goto fail;
/* Create the network vdev. */
memset(&net_config, 0, sizeof(net_config));
/* A legacy-only interface for now. */
net_config.mtu = __cpu_to_virtio16(virtio_legacy_is_little_endian(),
ETH_DATA_LEN);
net_config.status = __cpu_to_virtio16(virtio_legacy_is_little_endian(),
VIRTIO_NET_S_LINK_UP);
mlxbf_tmfifo_get_cfg_mac(net_config.mac);
rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_NET,
MLXBF_TMFIFO_NET_FEATURES, &net_config,
sizeof(net_config));
if (rc)
goto fail;
mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL);
/* Make all updates visible before setting the 'is_ready' flag. */
virtio_mb(false);
fifo->is_ready = true;
return 0;
fail:
mlxbf_tmfifo_cleanup(fifo);
return rc;
}
/* Device remove function. */
static int mlxbf_tmfifo_remove(struct platform_device *pdev)
{
struct mlxbf_tmfifo *fifo = platform_get_drvdata(pdev);
mlxbf_tmfifo_cleanup(fifo);
return 0;
}
static const struct acpi_device_id mlxbf_tmfifo_acpi_match[] = {
{ "MLNXBF01", 0 },
{}
};
MODULE_DEVICE_TABLE(acpi, mlxbf_tmfifo_acpi_match);
static struct platform_driver mlxbf_tmfifo_driver = {
.probe = mlxbf_tmfifo_probe,
.remove = mlxbf_tmfifo_remove,
.driver = {
.name = "bf-tmfifo",
.acpi_match_table = mlxbf_tmfifo_acpi_match,
},
};
module_platform_driver(mlxbf_tmfifo_driver);
MODULE_DESCRIPTION("Mellanox BlueField SoC TmFifo Driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Mellanox Technologies");
| linux-master | drivers/platform/mellanox/mlxbf-tmfifo.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Generic Loongson processor based LAPTOP/ALL-IN-ONE driver
*
* Jianmin Lv <[email protected]>
* Huacai Chen <[email protected]>
*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/backlight.h>
#include <linux/device.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/types.h>
#include <acpi/video.h>
/* 1. Driver-wide structs and misc. variables */
/* ACPI HIDs */
#define LOONGSON_ACPI_EC_HID "PNP0C09"
#define LOONGSON_ACPI_HKEY_HID "LOON0000"
#define ACPI_LAPTOP_NAME "loongson-laptop"
#define ACPI_LAPTOP_ACPI_EVENT_PREFIX "loongson"
#define MAX_ACPI_ARGS 3
#define GENERIC_HOTKEY_MAP_MAX 64
#define GENERIC_EVENT_TYPE_OFF 12
#define GENERIC_EVENT_TYPE_MASK 0xF000
#define GENERIC_EVENT_CODE_MASK 0x0FFF
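/*
 * Editor's note (illustrative): an event word packs the type in bits
 * 15..12 and the scan code in bits 11..0. A hypothetical event 0x5023
 * therefore decodes as:
 *
 *	type = (0x5023 & GENERIC_EVENT_TYPE_MASK) >> GENERIC_EVENT_TYPE_OFF; // 0x5
 *	code = 0x5023 & GENERIC_EVENT_CODE_MASK;                             // 0x023
 */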
struct generic_sub_driver {
u32 type;
char *name;
acpi_handle *handle;
struct acpi_device *device;
struct platform_driver *driver;
int (*init)(struct generic_sub_driver *sub_driver);
void (*notify)(struct generic_sub_driver *sub_driver, u32 event);
u8 acpi_notify_installed;
};
static u32 input_device_registered;
static struct input_dev *generic_inputdev;
static acpi_handle hotkey_handle;
static struct key_entry hotkey_keycode_map[GENERIC_HOTKEY_MAP_MAX];
int loongson_laptop_turn_on_backlight(void);
int loongson_laptop_turn_off_backlight(void);
static int loongson_laptop_backlight_update(struct backlight_device *bd);
/* 2. ACPI Helpers and device model */
static int acpi_evalf(acpi_handle handle, int *res, char *method, char *fmt, ...)
{
char res_type;
char *fmt0 = fmt;
va_list ap;
int success, quiet;
acpi_status status;
struct acpi_object_list params;
struct acpi_buffer result, *resultp;
union acpi_object in_objs[MAX_ACPI_ARGS], out_obj;
if (!*fmt) {
pr_err("acpi_evalf() called with empty format\n");
return 0;
}
if (*fmt == 'q') {
quiet = 1;
fmt++;
} else
quiet = 0;
res_type = *(fmt++);
params.count = 0;
params.pointer = &in_objs[0];
va_start(ap, fmt);
while (*fmt) {
char c = *(fmt++);
switch (c) {
case 'd': /* int */
in_objs[params.count].integer.value = va_arg(ap, int);
in_objs[params.count++].type = ACPI_TYPE_INTEGER;
break;
/* add more types as needed */
default:
pr_err("acpi_evalf() called with invalid format character '%c'\n", c);
va_end(ap);
return 0;
}
}
va_end(ap);
if (res_type != 'v') {
result.length = sizeof(out_obj);
result.pointer = &out_obj;
resultp = &result;
} else
resultp = NULL;
status = acpi_evaluate_object(handle, method, ¶ms, resultp);
switch (res_type) {
case 'd': /* int */
success = (status == AE_OK && out_obj.type == ACPI_TYPE_INTEGER);
if (success && res)
*res = out_obj.integer.value;
break;
case 'v': /* void */
success = status == AE_OK;
break;
/* add more types as needed */
default:
pr_err("acpi_evalf() called with invalid format character '%c'\n", res_type);
return 0;
}
if (!success && !quiet)
pr_err("acpi_evalf(%s, %s, ...) failed: %s\n",
method, fmt0, acpi_format_exception(status));
return success;
}
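/*
 * Editor's note (illustrative): the format string names the result type
 * first, then one character per argument; an optional leading 'q'
 * suppresses error messages. So "d" reads a single integer result (as
 * in hotkey_status_get() below), while "vd" passes one integer argument
 * and expects no result (as in hotkey_backlight_set()).
 */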
static int hotkey_status_get(int *status)
{
if (!acpi_evalf(hotkey_handle, status, "GSWS", "d"))
return -EIO;
return 0;
}
static void dispatch_acpi_notify(acpi_handle handle, u32 event, void *data)
{
struct generic_sub_driver *sub_driver = data;
if (!sub_driver || !sub_driver->notify)
return;
sub_driver->notify(sub_driver, event);
}
static int __init setup_acpi_notify(struct generic_sub_driver *sub_driver)
{
acpi_status status;
if (!*sub_driver->handle)
return 0;
sub_driver->device = acpi_fetch_acpi_dev(*sub_driver->handle);
if (!sub_driver->device) {
pr_err("acpi_fetch_acpi_dev(%s) failed\n", sub_driver->name);
return -ENODEV;
}
sub_driver->device->driver_data = sub_driver;
sprintf(acpi_device_class(sub_driver->device), "%s/%s",
ACPI_LAPTOP_ACPI_EVENT_PREFIX, sub_driver->name);
status = acpi_install_notify_handler(*sub_driver->handle,
sub_driver->type, dispatch_acpi_notify, sub_driver);
if (ACPI_FAILURE(status)) {
if (status == AE_ALREADY_EXISTS) {
pr_notice("Another device driver is already "
"handling %s events\n", sub_driver->name);
} else {
pr_err("acpi_install_notify_handler(%s) failed: %s\n",
sub_driver->name, acpi_format_exception(status));
}
return -ENODEV;
}
sub_driver->acpi_notify_installed = 1;
return 0;
}
static int loongson_hotkey_suspend(struct device *dev)
{
return 0;
}
static int loongson_hotkey_resume(struct device *dev)
{
int status = 0;
struct key_entry ke;
struct backlight_device *bd;
bd = backlight_device_get_by_type(BACKLIGHT_PLATFORM);
if (bd) {
loongson_laptop_backlight_update(bd) ?
	pr_warn("Loongson_backlight: resume brightness failed\n") :
	pr_info("Loongson_backlight: resume brightness %d\n", bd->props.brightness);
}
/*
 * We can handle the lid event only if the firmware supports the SW_LID
 * event model. This guards against development boards without an EC.
 */
if (test_bit(SW_LID, generic_inputdev->swbit)) {
if (hotkey_status_get(&status) < 0)
return -EIO;
/*
 * The input device's sw element records the last lid status.
 * When the system is awakened by another wake-up source, a lid
 * event is reported as well; checking the SW_LID bit in the sw
 * element avoids acting on it.
 *
 * The input subsystem drops a lid event when its value matches
 * the last recorded lid status, so the laptop driver does not
 * report repeated events.
 *
 * Lid status is generally 0, but hardware exceptions are taken
 * into account, hence the additional lid status confirmation.
 */
if (test_bit(SW_LID, generic_inputdev->sw) && !(status & (1 << SW_LID))) {
ke.type = KE_SW;
ke.sw.value = (u8)status;
ke.sw.code = SW_LID;
sparse_keymap_report_entry(generic_inputdev, &ke, 1, true);
}
}
return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(loongson_hotkey_pm,
loongson_hotkey_suspend, loongson_hotkey_resume);
static int loongson_hotkey_probe(struct platform_device *pdev)
{
hotkey_handle = ACPI_HANDLE(&pdev->dev);
if (!hotkey_handle)
return -ENODEV;
return 0;
}
static const struct acpi_device_id loongson_device_ids[] = {
{LOONGSON_ACPI_HKEY_HID, 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, loongson_device_ids);
static struct platform_driver loongson_hotkey_driver = {
.probe = loongson_hotkey_probe,
.driver = {
.name = "loongson-hotkey",
.owner = THIS_MODULE,
.pm = pm_ptr(&loongson_hotkey_pm),
.acpi_match_table = loongson_device_ids,
},
};
static int hotkey_map(void)
{
u32 index;
acpi_status status;
struct acpi_buffer buf;
union acpi_object *pack;
buf.length = ACPI_ALLOCATE_BUFFER;
status = acpi_evaluate_object_typed(hotkey_handle, "KMAP", NULL, &buf, ACPI_TYPE_PACKAGE);
if (status != AE_OK) {
pr_err("ACPI exception: %s\n", acpi_format_exception(status));
return -1;
}
pack = buf.pointer;
for (index = 0; index < pack->package.count; index++) {
union acpi_object *element, *sub_pack;
sub_pack = &pack->package.elements[index];
element = &sub_pack->package.elements[0];
hotkey_keycode_map[index].type = element->integer.value;
element = &sub_pack->package.elements[1];
hotkey_keycode_map[index].code = element->integer.value;
element = &sub_pack->package.elements[2];
hotkey_keycode_map[index].keycode = element->integer.value;
}
return 0;
}
static int hotkey_backlight_set(bool enable)
{
if (!acpi_evalf(hotkey_handle, NULL, "VCBL", "vd", enable ? 1 : 0))
return -EIO;
return 0;
}
static int ec_get_brightness(void)
{
int status = 0;
if (!hotkey_handle)
return -ENXIO;
if (!acpi_evalf(hotkey_handle, &status, "ECBG", "d"))
return -EIO;
return status;
}
static int ec_set_brightness(int level)
{
int ret = 0;
if (!hotkey_handle)
return -ENXIO;
if (!acpi_evalf(hotkey_handle, NULL, "ECBS", "vd", level))
ret = -EIO;
return ret;
}
static int ec_backlight_level(u8 level)
{
int status = 0;
if (!hotkey_handle)
return -ENXIO;
if (!acpi_evalf(hotkey_handle, &status, "ECLL", "d"))
return -EIO;
if ((status < 0) || (level > status))
return status;
if (!acpi_evalf(hotkey_handle, &status, "ECSL", "d"))
return -EIO;
if ((status < 0) || (level < status))
return status;
return level;
}
static int loongson_laptop_backlight_update(struct backlight_device *bd)
{
int lvl = ec_backlight_level(bd->props.brightness);
if (lvl < 0)
return -EIO;
if (ec_set_brightness(lvl))
return -EIO;
return 0;
}
static int loongson_laptop_get_brightness(struct backlight_device *bd)
{
int level;
level = ec_get_brightness();
if (level < 0)
return -EIO;
return level;
}
static const struct backlight_ops backlight_laptop_ops = {
.update_status = loongson_laptop_backlight_update,
.get_brightness = loongson_laptop_get_brightness,
};
static int laptop_backlight_register(void)
{
int status = 0;
struct backlight_properties props;
memset(&props, 0, sizeof(props));
if (!acpi_evalf(hotkey_handle, &status, "ECLL", "d"))
return -EIO;
props.brightness = 1;
props.max_brightness = status;
props.type = BACKLIGHT_PLATFORM;
backlight_device_register("loongson_laptop",
NULL, NULL, &backlight_laptop_ops, &props);
return 0;
}
int loongson_laptop_turn_on_backlight(void)
{
int status;
union acpi_object arg0 = { ACPI_TYPE_INTEGER };
struct acpi_object_list args = { 1, &arg0 };
arg0.integer.value = 1;
status = acpi_evaluate_object(NULL, "\\BLSW", &args, NULL);
if (ACPI_FAILURE(status)) {
pr_info("Loongson lvds error: 0x%x\n", status);
return -ENODEV;
}
return 0;
}
int loongson_laptop_turn_off_backlight(void)
{
int status;
union acpi_object arg0 = { ACPI_TYPE_INTEGER };
struct acpi_object_list args = { 1, &arg0 };
arg0.integer.value = 0;
status = acpi_evaluate_object(NULL, "\\BLSW", &args, NULL);
if (ACPI_FAILURE(status)) {
pr_info("Loongson lvds error: 0x%x\n", status);
return -ENODEV;
}
return 0;
}
static int __init event_init(struct generic_sub_driver *sub_driver)
{
int ret;
ret = hotkey_map();
if (ret < 0) {
pr_err("Failed to parse keymap from DSDT\n");
return ret;
}
ret = sparse_keymap_setup(generic_inputdev, hotkey_keycode_map, NULL);
if (ret < 0) {
pr_err("Failed to setup input device keymap\n");
input_free_device(generic_inputdev);
generic_inputdev = NULL;
return ret;
}
/*
 * This hotkey driver handles backlight events only when
 * acpi_video_get_backlight_type() returns acpi_backlight_vendor.
 */
if (acpi_video_get_backlight_type() == acpi_backlight_vendor)
hotkey_backlight_set(true);
else
hotkey_backlight_set(false);
pr_info("ACPI: enabling firmware HKEY event interface...\n");
return ret;
}
static void event_notify(struct generic_sub_driver *sub_driver, u32 event)
{
int type, scan_code;
struct key_entry *ke = NULL;
scan_code = event & GENERIC_EVENT_CODE_MASK;
type = (event & GENERIC_EVENT_TYPE_MASK) >> GENERIC_EVENT_TYPE_OFF;
ke = sparse_keymap_entry_from_scancode(generic_inputdev, scan_code);
if (ke) {
if (type == KE_SW) {
int status = 0;
if (hotkey_status_get(&status) < 0)
return;
ke->sw.value = !!(status & (1 << ke->sw.code));
}
sparse_keymap_report_entry(generic_inputdev, ke, 1, true);
}
}
/* 3. Infrastructure */
static void generic_subdriver_exit(struct generic_sub_driver *sub_driver);
static int __init generic_subdriver_init(struct generic_sub_driver *sub_driver)
{
int ret;
if (!sub_driver || !sub_driver->driver)
return -EINVAL;
ret = platform_driver_register(sub_driver->driver);
if (ret)
return -EINVAL;
if (sub_driver->init) {
ret = sub_driver->init(sub_driver);
if (ret)
goto err_out;
}
if (sub_driver->notify) {
ret = setup_acpi_notify(sub_driver);
if (ret == -ENODEV) {
ret = 0;
goto err_out;
}
if (ret < 0)
goto err_out;
}
return 0;
err_out:
generic_subdriver_exit(sub_driver);
return ret;
}
static void generic_subdriver_exit(struct generic_sub_driver *sub_driver)
{
if (sub_driver->acpi_notify_installed) {
acpi_remove_notify_handler(*sub_driver->handle,
sub_driver->type, dispatch_acpi_notify);
sub_driver->acpi_notify_installed = 0;
}
platform_driver_unregister(sub_driver->driver);
}
static struct generic_sub_driver generic_sub_drivers[] __refdata = {
{
.name = "hotkey",
.init = event_init,
.notify = event_notify,
.handle = &hotkey_handle,
.type = ACPI_DEVICE_NOTIFY,
.driver = &loongson_hotkey_driver,
},
};
static int __init generic_acpi_laptop_init(void)
{
bool ec_found;
int i, ret, status;
if (acpi_disabled)
return -ENODEV;
/* The EC device is required */
ec_found = acpi_dev_found(LOONGSON_ACPI_EC_HID);
if (!ec_found)
return -ENODEV;
/* Enable SCI for EC */
acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
generic_inputdev = input_allocate_device();
if (!generic_inputdev) {
pr_err("Unable to allocate input device\n");
return -ENOMEM;
}
/* Prepare input device, but don't register */
generic_inputdev->name =
"Loongson Generic Laptop/All-in-One Extra Buttons";
generic_inputdev->phys = ACPI_LAPTOP_NAME "/input0";
generic_inputdev->id.bustype = BUS_HOST;
generic_inputdev->dev.parent = NULL;
/* Init subdrivers */
for (i = 0; i < ARRAY_SIZE(generic_sub_drivers); i++) {
ret = generic_subdriver_init(&generic_sub_drivers[i]);
if (ret < 0) {
input_free_device(generic_inputdev);
while (--i >= 0)
generic_subdriver_exit(&generic_sub_drivers[i]);
return ret;
}
}
ret = input_register_device(generic_inputdev);
if (ret < 0) {
input_free_device(generic_inputdev);
while (--i >= 0)
generic_subdriver_exit(&generic_sub_drivers[i]);
pr_err("Unable to register input device\n");
return ret;
}
input_device_registered = 1;
if (acpi_evalf(hotkey_handle, &status, "ECBG", "d")) {
pr_info("Loongson Laptop used, init brightness is 0x%x\n", status);
ret = laptop_backlight_register();
if (ret < 0)
pr_err("Loongson Laptop: laptop-backlight device register failed\n");
}
return 0;
}
static void __exit generic_acpi_laptop_exit(void)
{
if (generic_inputdev) {
if (input_device_registered)
input_unregister_device(generic_inputdev);
else
input_free_device(generic_inputdev);
}
}
module_init(generic_acpi_laptop_init);
module_exit(generic_acpi_laptop_exit);
MODULE_AUTHOR("Jianmin Lv <[email protected]>");
MODULE_AUTHOR("Huacai Chen <[email protected]>");
MODULE_DESCRIPTION("Loongson Laptop/All-in-One ACPI Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/loongarch/loongson-laptop.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/io.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/of.h>
#include <linux/platform_device.h>
static unsigned long acpi_iobase;
#define ACPI_PM_EVT_BLK (acpi_iobase + 0x00) /* 4 bytes */
#define ACPI_PM_CNT_BLK (acpi_iobase + 0x04) /* 2 bytes */
#define ACPI_PMA_CNT_BLK (acpi_iobase + 0x0F) /* 1 byte */
#define ACPI_PM_TMR_BLK (acpi_iobase + 0x18) /* 4 bytes */
#define ACPI_GPE0_BLK (acpi_iobase + 0x10) /* 8 bytes */
#define ACPI_END (acpi_iobase + 0x80)
#define PM_INDEX 0xCD6
#define PM_DATA 0xCD7
#define PM2_INDEX 0xCD0
#define PM2_DATA 0xCD1
static void pmio_write_index(u16 index, u8 reg, u8 value)
{
outb(reg, index);
outb(value, index + 1);
}
static u8 pmio_read_index(u16 index, u8 reg)
{
outb(reg, index);
return inb(index + 1);
}
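/*
 * Editor's note (illustrative): PM_INDEX/PM_DATA form a classic
 * index/data register pair: write the register number to the index
 * port, then transfer the byte through the data port at index + 1. A
 * read-modify-write of a single bit therefore takes two accesses, e.g.
 *
 *	pm_iowrite(0x10, pm_ioread(0x10) | 1);	// enable SCI generation
 */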
void pm_iowrite(u8 reg, u8 value)
{
pmio_write_index(PM_INDEX, reg, value);
}
EXPORT_SYMBOL(pm_iowrite);
u8 pm_ioread(u8 reg)
{
return pmio_read_index(PM_INDEX, reg);
}
EXPORT_SYMBOL(pm_ioread);
void pm2_iowrite(u8 reg, u8 value)
{
pmio_write_index(PM2_INDEX, reg, value);
}
EXPORT_SYMBOL(pm2_iowrite);
u8 pm2_ioread(u8 reg)
{
return pmio_read_index(PM2_INDEX, reg);
}
EXPORT_SYMBOL(pm2_ioread);
static void acpi_hw_clear_status(void)
{
u16 value;
/* PMStatus: Clear WakeStatus/PwrBtnStatus */
value = inw(ACPI_PM_EVT_BLK);
value |= (1 << 8 | 1 << 15);
outw(value, ACPI_PM_EVT_BLK);
/* GPEStatus: Clear all generated events */
outl(inl(ACPI_GPE0_BLK), ACPI_GPE0_BLK);
}
static void acpi_registers_setup(void)
{
u32 value;
/* PM Status Base */
pm_iowrite(0x20, ACPI_PM_EVT_BLK & 0xff);
pm_iowrite(0x21, ACPI_PM_EVT_BLK >> 8);
/* PM Control Base */
pm_iowrite(0x22, ACPI_PM_CNT_BLK & 0xff);
pm_iowrite(0x23, ACPI_PM_CNT_BLK >> 8);
/* GPM Base */
pm_iowrite(0x28, ACPI_GPE0_BLK & 0xff);
pm_iowrite(0x29, ACPI_GPE0_BLK >> 8);
/* ACPI End */
pm_iowrite(0x2e, ACPI_END & 0xff);
pm_iowrite(0x2f, ACPI_END >> 8);
/* IO Decode: when AcpiDecodeEnable is set, the South Bridge uses the
 * contents of the PM registers at index 0x20~0x2B to decode the ACPI
 * I/O address range. */
pm_iowrite(0x0e, 1 << 3);
/* SCI_EN set */
outw(1, ACPI_PM_CNT_BLK);
/* Enable to generate SCI */
pm_iowrite(0x10, pm_ioread(0x10) | 1);
/* GPM3/GPM9 enable */
value = inl(ACPI_GPE0_BLK + 4);
outl(value | (1 << 14) | (1 << 22), ACPI_GPE0_BLK + 4);
/* Set GPM9 as input */
pm_iowrite(0x8d, pm_ioread(0x8d) & (~(1 << 1)));
/* Set GPM9 as non-output */
pm_iowrite(0x94, pm_ioread(0x94) | (1 << 3));
/* GPM3 config ACPI trigger SCIOUT */
pm_iowrite(0x33, pm_ioread(0x33) & (~(3 << 4)));
/* GPM9 config ACPI trigger SCIOUT */
pm_iowrite(0x3d, pm_ioread(0x3d) & (~(3 << 2)));
/* GPM3 config falling edge trigger */
pm_iowrite(0x37, pm_ioread(0x37) & (~(1 << 6)));
/* No wait for STPGNT# in ACPI Sx state */
pm_iowrite(0x7c, pm_ioread(0x7c) | (1 << 6));
/* Set GPM3 pull-down enable */
value = pm2_ioread(0xf6);
value |= ((1 << 7) | (1 << 3));
pm2_iowrite(0xf6, value);
/* Set GPM9 pull-down enable */
value = pm2_ioread(0xf8);
value |= ((1 << 5) | (1 << 1));
pm2_iowrite(0xf8, value);
}
static int rs780e_acpi_probe(struct platform_device *pdev)
{
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!res)
return -ENODEV;
/* The SCI interrupt needs the ACPI I/O region, so request it here */
if (!request_region(res->start, resource_size(res), "acpi")) {
pr_err("RS780E-ACPI: Failed to request IO Region\n");
return -EBUSY;
}
acpi_iobase = res->start;
acpi_registers_setup();
acpi_hw_clear_status();
return 0;
}
static const struct of_device_id rs780e_acpi_match[] = {
{ .compatible = "loongson,rs780e-acpi" },
{},
};
static struct platform_driver rs780e_acpi_driver = {
.probe = rs780e_acpi_probe,
.driver = {
.name = "RS780E-ACPI",
.of_match_table = rs780e_acpi_match,
},
};
builtin_platform_driver(rs780e_acpi_driver);
| linux-master | drivers/platform/mips/rs780e-acpi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021, Qing Zhang <[email protected]>
* Loongson-2K1000 reset support
*/
#include <linux/of_address.h>
#include <linux/pm.h>
#include <asm/reboot.h>
#define PM1_STS 0x0c /* Power Management 1 Status Register */
#define PM1_CNT 0x14 /* Power Management 1 Control Register */
#define RST_CNT 0x30 /* Reset Control Register */
static void __iomem *base;
static void ls2k_restart(char *command)
{
writel(0x1, base + RST_CNT);
}
static void ls2k_poweroff(void)
{
/* Clear */
writel((readl(base + PM1_STS) & 0xffffffff), base + PM1_STS);
/* Sleep Enable | Soft Off*/
writel(GENMASK(12, 10) | BIT(13), base + PM1_CNT);
}
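/*
 * Editor's note (illustrative): GENMASK(12, 10) | BIT(13) == 0x3c00.
 * Per the ACPI PM1 control layout, bits 12:10 select the sleep type
 * (soft off here) and bit 13 is the sleep-enable strobe, so this single
 * 32-bit write to PM1_CNT requests power-off.
 */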
static int ls2k_reset_init(void)
{
struct device_node *np;
np = of_find_compatible_node(NULL, NULL, "loongson,ls2k-pm");
if (!np) {
pr_info("Failed to get PM node\n");
return -ENODEV;
}
base = of_iomap(np, 0);
of_node_put(np);
if (!base) {
pr_info("Failed to map PM register base address\n");
return -ENOMEM;
}
_machine_restart = ls2k_restart;
pm_power_off = ls2k_poweroff;
return 0;
}
arch_initcall(ls2k_reset_init);
| linux-master | drivers/platform/mips/ls2k-reset.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/err.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <loongson.h>
#include <boot_param.h>
#include <loongson_hwmon.h>
#include <loongson_regs.h>
static int csr_temp_enable;
/*
 * Loongson-3 series CPUs have two temperature sensors inside, each
 * reporting a value from 0 to 255; anything above 127 is dangerous.
 * Only sensor1 data is provided here, because it always runs hotter
 * than sensor0.
 */
int loongson3_cpu_temp(int cpu)
{
u32 reg, prid_rev;
if (csr_temp_enable) {
reg = (csr_readl(LOONGSON_CSR_CPUTEMP) & 0xff);
goto out;
}
reg = LOONGSON_CHIPTEMP(cpu);
prid_rev = read_c0_prid() & PRID_REV_MASK;
switch (prid_rev) {
case PRID_REV_LOONGSON3A_R1:
reg = (reg >> 8) & 0xff;
break;
case PRID_REV_LOONGSON3B_R1:
case PRID_REV_LOONGSON3B_R2:
case PRID_REV_LOONGSON3A_R2_0:
case PRID_REV_LOONGSON3A_R2_1:
reg = ((reg >> 8) & 0xff) - 100;
break;
case PRID_REV_LOONGSON3A_R3_0:
case PRID_REV_LOONGSON3A_R3_1:
default:
reg = (reg & 0xffff) * 731 / 0x4000 - 273;
break;
}
out:
return (int)reg * 1000;
}
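/*
 * Editor's note (illustrative): on the Loongson-3A R3 (default) path the
 * raw 16-bit reading converts as raw * 731 / 0x4000 - 273 degrees C.
 * For a raw value of 0x2000: 8192 * 731 / 16384 - 273 = 365 - 273 = 92,
 * which the function then reports as 92000 millidegrees.
 */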
static int nr_packages;
static struct device *cpu_hwmon_dev;
static ssize_t cpu_temp_label(struct device *dev,
struct device_attribute *attr, char *buf)
{
int id = (to_sensor_dev_attr(attr))->index - 1;
return sprintf(buf, "CPU %d Temperature\n", id);
}
static ssize_t get_cpu_temp(struct device *dev,
struct device_attribute *attr, char *buf)
{
int id = (to_sensor_dev_attr(attr))->index - 1;
int value = loongson3_cpu_temp(id);
return sprintf(buf, "%d\n", value);
}
static SENSOR_DEVICE_ATTR(temp1_input, 0444, get_cpu_temp, NULL, 1);
static SENSOR_DEVICE_ATTR(temp1_label, 0444, cpu_temp_label, NULL, 1);
static SENSOR_DEVICE_ATTR(temp2_input, 0444, get_cpu_temp, NULL, 2);
static SENSOR_DEVICE_ATTR(temp2_label, 0444, cpu_temp_label, NULL, 2);
static SENSOR_DEVICE_ATTR(temp3_input, 0444, get_cpu_temp, NULL, 3);
static SENSOR_DEVICE_ATTR(temp3_label, 0444, cpu_temp_label, NULL, 3);
static SENSOR_DEVICE_ATTR(temp4_input, 0444, get_cpu_temp, NULL, 4);
static SENSOR_DEVICE_ATTR(temp4_label, 0444, cpu_temp_label, NULL, 4);
static struct attribute *cpu_hwmon_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_label.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
&sensor_dev_attr_temp2_label.dev_attr.attr,
&sensor_dev_attr_temp3_input.dev_attr.attr,
&sensor_dev_attr_temp3_label.dev_attr.attr,
&sensor_dev_attr_temp4_input.dev_attr.attr,
&sensor_dev_attr_temp4_label.dev_attr.attr,
NULL
};
static umode_t cpu_hwmon_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{
int id = i / 2;
if (id < nr_packages)
return attr->mode;
return 0;
}
static struct attribute_group cpu_hwmon_group = {
.attrs = cpu_hwmon_attributes,
.is_visible = cpu_hwmon_is_visible,
};
static const struct attribute_group *cpu_hwmon_groups[] = {
&cpu_hwmon_group,
NULL
};
#define CPU_THERMAL_THRESHOLD 90000
static struct delayed_work thermal_work;
static void do_thermal_timer(struct work_struct *work)
{
int i, value;
for (i = 0; i < nr_packages; i++) {
value = loongson3_cpu_temp(i);
if (value > CPU_THERMAL_THRESHOLD) {
pr_emerg("Power off due to high temp: %d\n", value);
orderly_poweroff(true);
}
}
schedule_delayed_work(&thermal_work, msecs_to_jiffies(5000));
}
static int __init loongson_hwmon_init(void)
{
pr_info("Loongson Hwmon Enter...\n");
if (cpu_has_csr())
csr_temp_enable = csr_readl(LOONGSON_CSR_FEATURES) &
LOONGSON_CSRF_TEMP;
nr_packages = loongson_sysconf.nr_cpus /
loongson_sysconf.cores_per_package;
cpu_hwmon_dev = hwmon_device_register_with_groups(NULL, "cpu_hwmon",
NULL, cpu_hwmon_groups);
if (IS_ERR(cpu_hwmon_dev)) {
pr_err("hwmon_device_register fail!\n");
return PTR_ERR(cpu_hwmon_dev);
}
INIT_DEFERRABLE_WORK(&thermal_work, do_thermal_timer);
schedule_delayed_work(&thermal_work, msecs_to_jiffies(20000));
return 0;
}
static void __exit loongson_hwmon_exit(void)
{
cancel_delayed_work_sync(&thermal_work);
hwmon_device_unregister(cpu_hwmon_dev);
}
module_init(loongson_hwmon_init);
module_exit(loongson_hwmon_exit);
MODULE_AUTHOR("Yu Xiang <[email protected]>");
MODULE_AUTHOR("Huacai Chen <[email protected]>");
MODULE_DESCRIPTION("Loongson CPU Hwmon driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/mips/cpu_hwmon.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2009-2010 Intel Corporation
*
* Authors:
* Jesse Barnes <[email protected]>
*/
/*
* Some Intel Ibex Peak based platforms support so-called "intelligent
* power sharing", which allows the CPU and GPU to cooperate to maximize
* performance within a given TDP (thermal design point). This driver
* performs the coordination between the CPU and GPU, monitors thermal and
* power statistics in the platform, and initializes power monitoring
* hardware. It also provides a few tunables to control behavior. Its
* primary purpose is to safely allow CPU and GPU turbo modes to be enabled
* by tracking power and thermal budget; secondarily it can boost turbo
* performance by allocating more power or thermal budget to the CPU or GPU
* based on available headroom and activity.
*
* The basic algorithm is driven by a 5s moving average of temperature. If
* thermal headroom is available, the CPU and/or GPU power clamps may be
* adjusted upwards. If we hit the thermal ceiling or a thermal trigger,
* we scale back the clamp. Aside from trigger events (when we're critically
* close or over our TDP) we don't adjust the clamps more than once every
* five seconds.
*
* The thermal device (device 31, function 6) has a set of registers that
* are updated by the ME firmware. The ME should also take the clamp values
* written to those registers and write them to the CPU, but we currently
* bypass that functionality and write the CPU MSR directly.
*
* UNSUPPORTED:
* - dual MCP configs
*
* TODO:
* - handle CPU hotplug
* - provide turbo enable/disable api
*
* Related documents:
* - CDI 403777, 403778 - Auburndale EDS vol 1 & 2
* - CDI 401376 - Ibex Peak EDS
* - ref 26037, 26641 - IPS BIOS spec
* - ref 26489 - Nehalem BIOS writer's guide
* - ref 26921 - Ibex Peak BIOS Specification
*/
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/tick.h>
#include <linux/timer.h>
#include <linux/dmi.h>
#include <drm/i915_drm.h>
#include <asm/msr.h>
#include <asm/processor.h>
#include "intel_ips.h"
#include <linux/io-64-nonatomic-lo-hi.h>
#define PCI_DEVICE_ID_INTEL_THERMAL_SENSOR 0x3b32
/*
* Package level MSRs for monitor/control
*/
#define PLATFORM_INFO 0xce
#define PLATFORM_TDP (1<<29)
#define PLATFORM_RATIO (1<<28)
#define IA32_MISC_ENABLE 0x1a0
#define IA32_MISC_TURBO_EN (1ULL<<38)
#define TURBO_POWER_CURRENT_LIMIT 0x1ac
#define TURBO_TDC_OVR_EN (1UL<<31)
#define TURBO_TDC_MASK (0x000000007fff0000UL)
#define TURBO_TDC_SHIFT (16)
#define TURBO_TDP_OVR_EN (1UL<<15)
#define TURBO_TDP_MASK (0x0000000000003fffUL)
/*
* Core/thread MSRs for monitoring
*/
#define IA32_PERF_CTL 0x199
#define IA32_PERF_TURBO_DIS (1ULL<<32)
/*
* Thermal PCI device regs
*/
#define THM_CFG_TBAR 0x10
#define THM_CFG_TBAR_HI 0x14
#define THM_TSIU 0x00
#define THM_TSE 0x01
#define TSE_EN 0xb8
#define THM_TSS 0x02
#define THM_TSTR 0x03
#define THM_TSTTP 0x04
#define THM_TSCO 0x08
#define THM_TSES 0x0c
#define THM_TSGPEN 0x0d
#define TSGPEN_HOT_LOHI (1<<1)
#define TSGPEN_CRIT_LOHI (1<<2)
#define THM_TSPC 0x0e
#define THM_PPEC 0x10
#define THM_CTA 0x12
#define THM_PTA 0x14
#define PTA_SLOPE_MASK (0xff00)
#define PTA_SLOPE_SHIFT 8
#define PTA_OFFSET_MASK (0x00ff)
#define THM_MGTA 0x16
#define MGTA_SLOPE_MASK (0xff00)
#define MGTA_SLOPE_SHIFT 8
#define MGTA_OFFSET_MASK (0x00ff)
#define THM_TRC 0x1a
#define TRC_CORE2_EN (1<<15)
#define TRC_THM_EN (1<<12)
#define TRC_C6_WAR (1<<8)
#define TRC_CORE1_EN (1<<7)
#define TRC_CORE_PWR (1<<6)
#define TRC_PCH_EN (1<<5)
#define TRC_MCH_EN (1<<4)
#define TRC_DIMM4 (1<<3)
#define TRC_DIMM3 (1<<2)
#define TRC_DIMM2 (1<<1)
#define TRC_DIMM1 (1<<0)
#define THM_TES 0x20
#define THM_TEN 0x21
#define TEN_UPDATE_EN 1
#define THM_PSC 0x24
#define PSC_NTG (1<<0) /* No GFX turbo support */
#define PSC_NTPC (1<<1) /* No CPU turbo support */
#define PSC_PP_DEF (0<<2) /* Perf policy up to driver */
#define PSP_PP_PC (1<<2) /* BIOS prefers CPU perf */
#define PSP_PP_BAL (2<<2) /* BIOS wants balanced perf */
#define PSP_PP_GFX (3<<2) /* BIOS prefers GFX perf */
#define PSP_PBRT (1<<4) /* BIOS run time support */
#define THM_CTV1 0x30
#define CTV_TEMP_ERROR (1<<15)
#define CTV_TEMP_MASK 0x3f
#define CTV_
#define THM_CTV2 0x32
#define THM_CEC 0x34 /* undocumented power accumulator in joules */
#define THM_AE 0x3f
#define THM_HTS 0x50 /* 32 bits */
#define HTS_PCPL_MASK (0x7fe00000)
#define HTS_PCPL_SHIFT 21
#define HTS_GPL_MASK (0x001ff000)
#define HTS_GPL_SHIFT 12
#define HTS_PP_MASK (0x00000c00)
#define HTS_PP_SHIFT 10
#define HTS_PP_DEF 0
#define HTS_PP_PROC 1
#define HTS_PP_BAL 2
#define HTS_PP_GFX 3
#define HTS_PCTD_DIS (1<<9)
#define HTS_GTD_DIS (1<<8)
#define HTS_PTL_MASK (0x000000fe)
#define HTS_PTL_SHIFT 1
#define HTS_NVV (1<<0)
#define THM_HTSHI 0x54 /* 16 bits */
#define HTS2_PPL_MASK (0x03ff)
#define HTS2_PRST_MASK (0x3c00)
#define HTS2_PRST_SHIFT 10
#define HTS2_PRST_UNLOADED 0
#define HTS2_PRST_RUNNING 1
#define HTS2_PRST_TDISOP 2 /* turbo disabled due to power */
#define HTS2_PRST_TDISHT 3 /* turbo disabled due to high temp */
#define HTS2_PRST_TDISUSR 4 /* user disabled turbo */
#define HTS2_PRST_TDISPLAT 5 /* platform disabled turbo */
#define HTS2_PRST_TDISPM 6 /* power management disabled turbo */
#define HTS2_PRST_TDISERR 7 /* some kind of error disabled turbo */
#define THM_PTL 0x56
#define THM_MGTV 0x58
#define TV_MASK 0x000000000000ff00
#define TV_SHIFT 8
#define THM_PTV 0x60
#define PTV_MASK 0x00ff
#define THM_MMGPC 0x64
#define THM_MPPC 0x66
#define THM_MPCPC 0x68
#define THM_TSPIEN 0x82
#define TSPIEN_AUX_LOHI (1<<0)
#define TSPIEN_HOT_LOHI (1<<1)
#define TSPIEN_CRIT_LOHI (1<<2)
#define TSPIEN_AUX2_LOHI (1<<3)
#define THM_TSLOCK 0x83
#define THM_ATR 0x84
#define THM_TOF 0x87
#define THM_STS 0x98
#define STS_PCPL_MASK (0x7fe00000)
#define STS_PCPL_SHIFT 21
#define STS_GPL_MASK (0x001ff000)
#define STS_GPL_SHIFT 12
#define STS_PP_MASK (0x00000c00)
#define STS_PP_SHIFT 10
#define STS_PP_DEF 0
#define STS_PP_PROC 1
#define STS_PP_BAL 2
#define STS_PP_GFX 3
#define STS_PCTD_DIS (1<<9)
#define STS_GTD_DIS (1<<8)
#define STS_PTL_MASK (0x000000fe)
#define STS_PTL_SHIFT 1
#define STS_NVV (1<<0)
#define THM_SEC 0x9c
#define SEC_ACK (1<<0)
#define THM_TC3 0xa4
#define THM_TC1 0xa8
#define STS_PPL_MASK (0x0003ff00)
#define STS_PPL_SHIFT 16
#define THM_TC2 0xac
#define THM_DTV 0xb0
#define THM_ITV 0xd8
#define ITV_ME_SEQNO_MASK 0x00ff0000 /* ME should update every ~200ms */
#define ITV_ME_SEQNO_SHIFT (16)
#define ITV_MCH_TEMP_MASK 0x0000ff00
#define ITV_MCH_TEMP_SHIFT (8)
#define ITV_PCH_TEMP_MASK 0x000000ff
#define thm_readb(off) readb(ips->regmap + (off))
#define thm_readw(off) readw(ips->regmap + (off))
#define thm_readl(off) readl(ips->regmap + (off))
#define thm_readq(off) readq(ips->regmap + (off))
#define thm_writeb(off, val) writeb((val), ips->regmap + (off))
#define thm_writew(off, val) writew((val), ips->regmap + (off))
#define thm_writel(off, val) writel((val), ips->regmap + (off))
static const int IPS_ADJUST_PERIOD = 5000; /* ms */
static bool late_i915_load = false;
/* For initial average collection */
static const int IPS_SAMPLE_PERIOD = 200; /* ms */
static const int IPS_SAMPLE_WINDOW = 5000; /* 5s moving window of samples */
#define IPS_SAMPLE_COUNT (IPS_SAMPLE_WINDOW / IPS_SAMPLE_PERIOD)
/* Per-SKU limits */
struct ips_mcp_limits {
int mcp_power_limit; /* mW units */
int core_power_limit;
int mch_power_limit;
int core_temp_limit; /* degrees C */
int mch_temp_limit;
};
/* Max temps are set 10 degrees C below the PROCHOT# trip point */
static struct ips_mcp_limits ips_sv_limits = {
.mcp_power_limit = 35000,
.core_power_limit = 29000,
.mch_power_limit = 20000,
.core_temp_limit = 95,
.mch_temp_limit = 90
};
static struct ips_mcp_limits ips_lv_limits = {
.mcp_power_limit = 25000,
.core_power_limit = 21000,
.mch_power_limit = 13000,
.core_temp_limit = 95,
.mch_temp_limit = 90
};
static struct ips_mcp_limits ips_ulv_limits = {
.mcp_power_limit = 18000,
.core_power_limit = 14000,
.mch_power_limit = 11000,
.core_temp_limit = 95,
.mch_temp_limit = 90
};
struct ips_driver {
struct device *dev;
void __iomem *regmap;
int irq;
struct task_struct *monitor;
struct task_struct *adjust;
struct dentry *debug_root;
struct timer_list timer;
/* Average CPU core temps (all averages in .01 degrees C for precision) */
u16 ctv1_avg_temp;
u16 ctv2_avg_temp;
/* GMCH average */
u16 mch_avg_temp;
/* Average for the CPU (both cores?) */
u16 mcp_avg_temp;
/* Average power consumption (in mW) */
u32 cpu_avg_power;
u32 mch_avg_power;
/* Offset values */
u16 cta_val;
u16 pta_val;
u16 mgta_val;
/* Maximums & prefs, protected by turbo status lock */
spinlock_t turbo_status_lock;
u16 mcp_temp_limit;
u16 mcp_power_limit;
u16 core_power_limit;
u16 mch_power_limit;
bool cpu_turbo_enabled;
bool __cpu_turbo_on;
bool gpu_turbo_enabled;
bool __gpu_turbo_on;
bool gpu_preferred;
bool poll_turbo_status;
bool second_cpu;
bool turbo_toggle_allowed;
struct ips_mcp_limits *limits;
/* Optional MCH interfaces for if i915 is in use */
unsigned long (*read_mch_val)(void);
bool (*gpu_raise)(void);
bool (*gpu_lower)(void);
bool (*gpu_busy)(void);
bool (*gpu_turbo_disable)(void);
/* For restoration at unload */
u64 orig_turbo_limit;
u64 orig_turbo_ratios;
};
static bool
ips_gpu_turbo_enabled(struct ips_driver *ips);
/**
* ips_cpu_busy - is CPU busy?
* @ips: IPS driver struct
*
* Check CPU for load to see whether we should increase its thermal budget.
*
* RETURNS:
* True if the CPU could use more power, false otherwise.
*/
static bool ips_cpu_busy(struct ips_driver *ips)
{
if ((avenrun[0] >> FSHIFT) > 1)
return true;
return false;
}
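/*
 * avenrun[] holds fixed-point load averages with FSHIFT fractional bits,
 * so (avenrun[0] >> FSHIFT) is the integer part of the 1-minute load
 * average. Illustrative values: a load of 2.50 yields 2 here and reports
 * the CPU as busy; loads of 0.90 or 1.50 yield 0 and 1 and do not.
 */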
/**
* ips_cpu_raise - raise CPU power clamp
* @ips: IPS driver struct
*
* Raise the CPU power clamp by %IPS_CPU_STEP, in accordance with TDP for
* this platform.
*
* We do this by adjusting the TURBO_POWER_CURRENT_LIMIT MSR upwards (as
* long as we haven't hit the TDP limit for the SKU).
*/
static void ips_cpu_raise(struct ips_driver *ips)
{
u64 turbo_override;
u16 cur_tdp_limit, new_tdp_limit;
if (!ips->cpu_turbo_enabled)
return;
rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
cur_tdp_limit = turbo_override & TURBO_TDP_MASK;
new_tdp_limit = cur_tdp_limit + 8; /* 1W increase */
/* Clamp to SKU TDP limit */
if (((new_tdp_limit * 10) / 8) > ips->core_power_limit)
new_tdp_limit = cur_tdp_limit;
thm_writew(THM_MPCPC, (new_tdp_limit * 10) / 8);
turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN;
wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
turbo_override &= ~TURBO_TDP_MASK;
turbo_override |= new_tdp_limit;
wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
}
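/*
 * Unit check for the arithmetic above (illustrative numbers): the TDP
 * field is in 1/8 W units, so "+ 8" is a 1 W step and "* 10 / 8"
 * converts to the 0.1 W units of core_power_limit. A current field of
 * 232 (29 W) becomes 240 (30 W), i.e. 300 tenth-watts, and is kept only
 * if that does not exceed the SKU limit.
 */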
/**
* ips_cpu_lower - lower CPU power clamp
* @ips: IPS driver struct
*
 * Lower CPU power clamp by %IPS_CPU_STEP if possible.
 *
 * We do this by adjusting the TURBO_POWER_CURRENT_LIMIT MSR down, going
 * as low as the platform limits will allow (though we could go lower,
 * there wouldn't be much point).
*/
static void ips_cpu_lower(struct ips_driver *ips)
{
u64 turbo_override;
u16 cur_limit, new_limit;
rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
cur_limit = turbo_override & TURBO_TDP_MASK;
new_limit = cur_limit - 8; /* 1W decrease */
/* Clamp to SKU TDP limit */
if (new_limit < (ips->orig_turbo_limit & TURBO_TDP_MASK))
new_limit = ips->orig_turbo_limit & TURBO_TDP_MASK;
thm_writew(THM_MPCPC, (new_limit * 10) / 8);
turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN;
wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
turbo_override &= ~TURBO_TDP_MASK;
turbo_override |= new_limit;
wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
}
/**
* do_enable_cpu_turbo - internal turbo enable function
* @data: unused
*
* Internal function for actually updating MSRs. When we enable/disable
* turbo, we need to do it on each CPU; this function is the one called
* by on_each_cpu() when needed.
*/
static void do_enable_cpu_turbo(void *data)
{
u64 perf_ctl;
rdmsrl(IA32_PERF_CTL, perf_ctl);
if (perf_ctl & IA32_PERF_TURBO_DIS) {
perf_ctl &= ~IA32_PERF_TURBO_DIS;
wrmsrl(IA32_PERF_CTL, perf_ctl);
}
}
/**
* ips_enable_cpu_turbo - enable turbo mode on all CPUs
* @ips: IPS driver struct
*
* Enable turbo mode by clearing the disable bit in IA32_PERF_CTL on
* all logical threads.
*/
static void ips_enable_cpu_turbo(struct ips_driver *ips)
{
/* Already on, no need to mess with MSRs */
if (ips->__cpu_turbo_on)
return;
if (ips->turbo_toggle_allowed)
on_each_cpu(do_enable_cpu_turbo, ips, 1);
ips->__cpu_turbo_on = true;
}
/**
* do_disable_cpu_turbo - internal turbo disable function
* @data: unused
*
* Internal function for actually updating MSRs. When we enable/disable
* turbo, we need to do it on each CPU; this function is the one called
* by on_each_cpu() when needed.
*/
static void do_disable_cpu_turbo(void *data)
{
u64 perf_ctl;
rdmsrl(IA32_PERF_CTL, perf_ctl);
if (!(perf_ctl & IA32_PERF_TURBO_DIS)) {
perf_ctl |= IA32_PERF_TURBO_DIS;
wrmsrl(IA32_PERF_CTL, perf_ctl);
}
}
/**
* ips_disable_cpu_turbo - disable turbo mode on all CPUs
* @ips: IPS driver struct
*
* Disable turbo mode by setting the disable bit in IA32_PERF_CTL on
* all logical threads.
*/
static void ips_disable_cpu_turbo(struct ips_driver *ips)
{
/* Already off, leave it */
if (!ips->__cpu_turbo_on)
return;
if (ips->turbo_toggle_allowed)
on_each_cpu(do_disable_cpu_turbo, ips, 1);
ips->__cpu_turbo_on = false;
}
/**
* ips_gpu_busy - is GPU busy?
* @ips: IPS driver struct
*
* Check GPU for load to see whether we should increase its thermal budget.
* We need to call into the i915 driver in this case.
*
* RETURNS:
* True if the GPU could use more power, false otherwise.
*/
static bool ips_gpu_busy(struct ips_driver *ips)
{
if (!ips_gpu_turbo_enabled(ips))
return false;
return ips->gpu_busy();
}
/**
* ips_gpu_raise - raise GPU power clamp
* @ips: IPS driver struct
*
* Raise the GPU frequency/power if possible. We need to call into the
* i915 driver in this case.
*/
static void ips_gpu_raise(struct ips_driver *ips)
{
if (!ips_gpu_turbo_enabled(ips))
return;
if (!ips->gpu_raise())
ips->gpu_turbo_enabled = false;
return;
}
/**
* ips_gpu_lower - lower GPU power clamp
* @ips: IPS driver struct
*
* Lower GPU frequency/power if possible. Need to call i915.
*/
static void ips_gpu_lower(struct ips_driver *ips)
{
if (!ips_gpu_turbo_enabled(ips))
return;
if (!ips->gpu_lower())
ips->gpu_turbo_enabled = false;
return;
}
/**
* ips_enable_gpu_turbo - notify the gfx driver turbo is available
* @ips: IPS driver struct
*
* Call into the graphics driver indicating that it can safely use
* turbo mode.
*/
static void ips_enable_gpu_turbo(struct ips_driver *ips)
{
if (ips->__gpu_turbo_on)
return;
ips->__gpu_turbo_on = true;
}
/**
* ips_disable_gpu_turbo - notify the gfx driver to disable turbo mode
* @ips: IPS driver struct
*
* Request that the graphics driver disable turbo mode.
*/
static void ips_disable_gpu_turbo(struct ips_driver *ips)
{
/* Avoid calling i915 if turbo is already disabled */
if (!ips->__gpu_turbo_on)
return;
if (!ips->gpu_turbo_disable())
dev_err(ips->dev, "failed to disable graphics turbo\n");
else
ips->__gpu_turbo_on = false;
}
/**
* mcp_exceeded - check whether we're outside our thermal & power limits
* @ips: IPS driver struct
*
* Check whether the MCP is over its thermal or power budget.
*/
static bool mcp_exceeded(struct ips_driver *ips)
{
unsigned long flags;
bool ret = false;
u32 temp_limit;
u32 avg_power;
spin_lock_irqsave(&ips->turbo_status_lock, flags);
temp_limit = ips->mcp_temp_limit * 100;
if (ips->mcp_avg_temp > temp_limit)
ret = true;
avg_power = ips->cpu_avg_power + ips->mch_avg_power;
if (avg_power > ips->mcp_power_limit)
ret = true;
spin_unlock_irqrestore(&ips->turbo_status_lock, flags);
return ret;
}
/**
* cpu_exceeded - check whether a CPU core is outside its limits
* @ips: IPS driver struct
* @cpu: CPU number to check
*
 * Check whether a given CPU's average temp or power is over its limit.
*/
static bool cpu_exceeded(struct ips_driver *ips, int cpu)
{
unsigned long flags;
int avg;
bool ret = false;
spin_lock_irqsave(&ips->turbo_status_lock, flags);
avg = cpu ? ips->ctv2_avg_temp : ips->ctv1_avg_temp;
if (avg > (ips->limits->core_temp_limit * 100))
ret = true;
if (ips->cpu_avg_power > ips->core_power_limit * 100)
ret = true;
spin_unlock_irqrestore(&ips->turbo_status_lock, flags);
if (ret)
dev_info(ips->dev, "CPU power or thermal limit exceeded\n");
return ret;
}
/**
* mch_exceeded - check whether the GPU is over budget
* @ips: IPS driver struct
*
* Check the MCH temp & power against their maximums.
*/
static bool mch_exceeded(struct ips_driver *ips)
{
unsigned long flags;
bool ret = false;
spin_lock_irqsave(&ips->turbo_status_lock, flags);
if (ips->mch_avg_temp > (ips->limits->mch_temp_limit * 100))
ret = true;
if (ips->mch_avg_power > ips->mch_power_limit)
ret = true;
spin_unlock_irqrestore(&ips->turbo_status_lock, flags);
return ret;
}
/**
* verify_limits - verify BIOS provided limits
* @ips: IPS structure
*
* BIOS can optionally provide non-default limits for power and temp. Check
* them here and use the defaults if the BIOS values are not provided or
* are otherwise unusable.
*/
static void verify_limits(struct ips_driver *ips)
{
if (ips->mcp_power_limit < ips->limits->mcp_power_limit ||
ips->mcp_power_limit > 35000)
ips->mcp_power_limit = ips->limits->mcp_power_limit;
if (ips->mcp_temp_limit < ips->limits->core_temp_limit ||
ips->mcp_temp_limit < ips->limits->mch_temp_limit ||
ips->mcp_temp_limit > 150)
ips->mcp_temp_limit = min(ips->limits->core_temp_limit,
ips->limits->mch_temp_limit);
}
/**
* update_turbo_limits - get various limits & settings from regs
* @ips: IPS driver struct
*
* Update the IPS power & temp limits, along with turbo enable flags,
* based on latest register contents.
*
* Used at init time and for runtime BIOS support, which requires polling
* the regs for updates (as a result of AC->DC transition for example).
*
* LOCKING:
* Caller must hold turbo_status_lock (outside of init)
*/
static void update_turbo_limits(struct ips_driver *ips)
{
u32 hts = thm_readl(THM_HTS);
ips->cpu_turbo_enabled = !(hts & HTS_PCTD_DIS);
/*
* Disable turbo for now, until we can figure out why the power figures
* are wrong
*/
ips->cpu_turbo_enabled = false;
if (ips->gpu_busy)
ips->gpu_turbo_enabled = !(hts & HTS_GTD_DIS);
ips->core_power_limit = thm_readw(THM_MPCPC);
ips->mch_power_limit = thm_readw(THM_MMGPC);
ips->mcp_temp_limit = thm_readw(THM_PTL);
ips->mcp_power_limit = thm_readw(THM_MPPC);
verify_limits(ips);
/* Ignore BIOS CPU vs GPU pref */
}
/**
* ips_adjust - adjust power clamp based on thermal state
* @data: ips driver structure
*
* Wake up every 5s or so and check whether we should adjust the power clamp.
* Check CPU and GPU load to determine which needs adjustment. There are
* several things to consider here:
* - do we need to adjust up or down?
* - is CPU busy?
* - is GPU busy?
* - is CPU in turbo?
* - is GPU in turbo?
* - is CPU or GPU preferred? (CPU is default)
*
* So, given the above, we do the following:
* - up (TDP available)
* - CPU not busy, GPU not busy - nothing
* - CPU busy, GPU not busy - adjust CPU up
* - CPU not busy, GPU busy - adjust GPU up
* - CPU busy, GPU busy - adjust preferred unit up, taking headroom from
* non-preferred unit if necessary
* - down (at TDP limit)
* - adjust both CPU and GPU down if possible
*
 *                cpu+ gpu+        cpu+gpu-         cpu-gpu+         cpu-gpu-
 * cpu < gpu <    cpu+gpu+         cpu+             gpu+             nothing
 * cpu < gpu >=   cpu+gpu-(mcp<)   cpu+gpu-(mcp<)   gpu-             gpu-
 * cpu >= gpu <   cpu-gpu+(mcp<)   cpu-             cpu-gpu+(mcp<)   cpu-
 * cpu >= gpu >=  cpu-gpu-         cpu-gpu-         cpu-gpu-         cpu-gpu-
*
*/
static int ips_adjust(void *data)
{
struct ips_driver *ips = data;
unsigned long flags;
dev_dbg(ips->dev, "starting ips-adjust thread\n");
/*
* Adjust CPU and GPU clamps every 5s if needed. Doing it more
* often isn't recommended due to ME interaction.
*/
do {
bool cpu_busy = ips_cpu_busy(ips);
bool gpu_busy = ips_gpu_busy(ips);
spin_lock_irqsave(&ips->turbo_status_lock, flags);
if (ips->poll_turbo_status)
update_turbo_limits(ips);
spin_unlock_irqrestore(&ips->turbo_status_lock, flags);
/* Update turbo status if necessary */
if (ips->cpu_turbo_enabled)
ips_enable_cpu_turbo(ips);
else
ips_disable_cpu_turbo(ips);
if (ips->gpu_turbo_enabled)
ips_enable_gpu_turbo(ips);
else
ips_disable_gpu_turbo(ips);
/* We're outside our comfort zone, crank them down */
if (mcp_exceeded(ips)) {
ips_cpu_lower(ips);
ips_gpu_lower(ips);
goto sleep;
}
if (!cpu_exceeded(ips, 0) && cpu_busy)
ips_cpu_raise(ips);
else
ips_cpu_lower(ips);
if (!mch_exceeded(ips) && gpu_busy)
ips_gpu_raise(ips);
else
ips_gpu_lower(ips);
sleep:
schedule_timeout_interruptible(msecs_to_jiffies(IPS_ADJUST_PERIOD));
} while (!kthread_should_stop());
dev_dbg(ips->dev, "ips-adjust thread stopped\n");
return 0;
}
/*
* Helpers for reading out temp/power values and calculating their
* averages for the decision making and monitoring functions.
*/
static u16 calc_avg_temp(struct ips_driver *ips, u16 *array)
{
u64 total = 0;
int i;
u16 avg;
for (i = 0; i < IPS_SAMPLE_COUNT; i++)
total += (u64)(array[i] * 100);
do_div(total, IPS_SAMPLE_COUNT);
avg = (u16)total;
return avg;
}
static u16 read_mgtv(struct ips_driver *ips)
{
u16 __maybe_unused ret;
u64 slope, offset;
u64 val;
val = thm_readq(THM_MGTV);
val = (val & TV_MASK) >> TV_SHIFT;
slope = offset = thm_readw(THM_MGTA);
slope = (slope & MGTA_SLOPE_MASK) >> MGTA_SLOPE_SHIFT;
offset = offset & MGTA_OFFSET_MASK;
ret = ((val * slope + 0x40) >> 7) + offset;
return 0; /* MCH temp reporting buggy */
}
static u16 read_ptv(struct ips_driver *ips)
{
u16 val;
val = thm_readw(THM_PTV) & PTV_MASK;
return val;
}
static u16 read_ctv(struct ips_driver *ips, int cpu)
{
int reg = cpu ? THM_CTV2 : THM_CTV1;
u16 val;
val = thm_readw(reg);
if (!(val & CTV_TEMP_ERROR))
		val >>= 6; /* discard fractional component */
else
val = 0;
return val;
}
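/*
 * CTV readings carry six fractional bits, which the shift above
 * discards. Illustrative value: a raw reading of 0x0a80 (2688) becomes
 * 2688 >> 6 = 42 degrees C; a reading with CTV_TEMP_ERROR set is
 * reported as 0 instead.
 */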
static u32 get_cpu_power(struct ips_driver *ips, u32 *last, int period)
{
u32 val;
u32 ret;
/*
* CEC is in joules/65535. Take difference over time to
* get watts.
*/
val = thm_readl(THM_CEC);
/* period is in ms and we want mW */
ret = (((val - *last) * 1000) / period);
ret = (ret * 1000) / 65535;
*last = val;
return 0;
}
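/*
 * Worked example for the conversion above (illustrative numbers): each
 * THM_CEC count is 1/65535 J, so a delta of 65535 counts over a 1000 ms
 * period gives 65535 * 1000 / 1000 = 65535 counts/s, and then
 * 65535 * 1000 / 65535 = 1000 mW, i.e. 1 W. Note that the computed
 * value is currently discarded and 0 is returned, matching the
 * "power figures are wrong" workaround noted elsewhere in this file.
 */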
static const u16 temp_decay_factor = 2;
static u16 update_average_temp(u16 avg, u16 val)
{
u16 ret;
/* Multiply by 100 for extra precision */
ret = (val * 100 / temp_decay_factor) +
(((temp_decay_factor - 1) * avg) / temp_decay_factor);
return ret;
}
static const u16 power_decay_factor = 2;
static u16 update_average_power(u32 avg, u32 val)
{
u32 ret;
ret = (val / power_decay_factor) +
(((power_decay_factor - 1) * avg) / power_decay_factor);
return ret;
}
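/*
 * Worked example of the decaying averages above (illustrative numbers):
 * with a decay factor of 2, update_average_temp(4000, 42) computes
 * 42 * 100 / 2 + 4000 / 2 = 2100 + 2000 = 4100, i.e. the stored average
 * moves from 40.00 C halfway toward the new 42 C sample.
 */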
static u32 calc_avg_power(struct ips_driver *ips, u32 *array)
{
u64 total = 0;
u32 avg;
int i;
for (i = 0; i < IPS_SAMPLE_COUNT; i++)
total += array[i];
do_div(total, IPS_SAMPLE_COUNT);
avg = (u32)total;
return avg;
}
static void monitor_timeout(struct timer_list *t)
{
struct ips_driver *ips = from_timer(ips, t, timer);
wake_up_process(ips->monitor);
}
/**
* ips_monitor - temp/power monitoring thread
* @data: ips driver structure
*
* This is the main function for the IPS driver. It monitors power and
 * temperature in the MCP and adjusts CPU and GPU power clamps accordingly.
 *
 * We keep a 5s moving average of power consumption and temperature. Using
* that data, along with CPU vs GPU preference, we adjust the power clamps
* up or down.
*/
static int ips_monitor(void *data)
{
struct ips_driver *ips = data;
unsigned long seqno_timestamp, expire, last_msecs, last_sample_period;
int i;
u32 *cpu_samples, *mchp_samples, old_cpu_power;
u16 *mcp_samples, *ctv1_samples, *ctv2_samples, *mch_samples;
u8 cur_seqno, last_seqno;
mcp_samples = kcalloc(IPS_SAMPLE_COUNT, sizeof(u16), GFP_KERNEL);
ctv1_samples = kcalloc(IPS_SAMPLE_COUNT, sizeof(u16), GFP_KERNEL);
ctv2_samples = kcalloc(IPS_SAMPLE_COUNT, sizeof(u16), GFP_KERNEL);
mch_samples = kcalloc(IPS_SAMPLE_COUNT, sizeof(u16), GFP_KERNEL);
cpu_samples = kcalloc(IPS_SAMPLE_COUNT, sizeof(u32), GFP_KERNEL);
mchp_samples = kcalloc(IPS_SAMPLE_COUNT, sizeof(u32), GFP_KERNEL);
if (!mcp_samples || !ctv1_samples || !ctv2_samples || !mch_samples ||
!cpu_samples || !mchp_samples) {
dev_err(ips->dev,
"failed to allocate sample array, ips disabled\n");
kfree(mcp_samples);
kfree(ctv1_samples);
kfree(ctv2_samples);
kfree(mch_samples);
kfree(cpu_samples);
kfree(mchp_samples);
return -ENOMEM;
}
last_seqno = (thm_readl(THM_ITV) & ITV_ME_SEQNO_MASK) >>
ITV_ME_SEQNO_SHIFT;
seqno_timestamp = get_jiffies_64();
old_cpu_power = thm_readl(THM_CEC);
schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
/* Collect an initial average */
for (i = 0; i < IPS_SAMPLE_COUNT; i++) {
u32 mchp, cpu_power;
u16 val;
mcp_samples[i] = read_ptv(ips);
val = read_ctv(ips, 0);
ctv1_samples[i] = val;
val = read_ctv(ips, 1);
ctv2_samples[i] = val;
val = read_mgtv(ips);
mch_samples[i] = val;
cpu_power = get_cpu_power(ips, &old_cpu_power,
IPS_SAMPLE_PERIOD);
cpu_samples[i] = cpu_power;
if (ips->read_mch_val) {
mchp = ips->read_mch_val();
mchp_samples[i] = mchp;
}
schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
if (kthread_should_stop())
break;
}
ips->mcp_avg_temp = calc_avg_temp(ips, mcp_samples);
ips->ctv1_avg_temp = calc_avg_temp(ips, ctv1_samples);
ips->ctv2_avg_temp = calc_avg_temp(ips, ctv2_samples);
ips->mch_avg_temp = calc_avg_temp(ips, mch_samples);
ips->cpu_avg_power = calc_avg_power(ips, cpu_samples);
ips->mch_avg_power = calc_avg_power(ips, mchp_samples);
kfree(mcp_samples);
kfree(ctv1_samples);
kfree(ctv2_samples);
kfree(mch_samples);
kfree(cpu_samples);
kfree(mchp_samples);
/* Start the adjustment thread now that we have data */
wake_up_process(ips->adjust);
/*
* Ok, now we have an initial avg. From here on out, we track the
* running avg using a decaying average calculation. This allows
* us to reduce the sample frequency if the CPU and GPU are idle.
*/
old_cpu_power = thm_readl(THM_CEC);
schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
last_sample_period = IPS_SAMPLE_PERIOD;
timer_setup(&ips->timer, monitor_timeout, TIMER_DEFERRABLE);
do {
u32 cpu_val, mch_val;
u16 val;
/* MCP itself */
val = read_ptv(ips);
ips->mcp_avg_temp = update_average_temp(ips->mcp_avg_temp, val);
/* Processor 0 */
val = read_ctv(ips, 0);
ips->ctv1_avg_temp =
update_average_temp(ips->ctv1_avg_temp, val);
/* Power */
cpu_val = get_cpu_power(ips, &old_cpu_power,
last_sample_period);
ips->cpu_avg_power =
update_average_power(ips->cpu_avg_power, cpu_val);
if (ips->second_cpu) {
/* Processor 1 */
val = read_ctv(ips, 1);
ips->ctv2_avg_temp =
update_average_temp(ips->ctv2_avg_temp, val);
}
/* MCH */
val = read_mgtv(ips);
ips->mch_avg_temp = update_average_temp(ips->mch_avg_temp, val);
/* Power */
if (ips->read_mch_val) {
mch_val = ips->read_mch_val();
ips->mch_avg_power =
update_average_power(ips->mch_avg_power,
mch_val);
}
/*
* Make sure ME is updating thermal regs.
* Note:
* If it's been more than a second since the last update,
* the ME is probably hung.
*/
cur_seqno = (thm_readl(THM_ITV) & ITV_ME_SEQNO_MASK) >>
ITV_ME_SEQNO_SHIFT;
if (cur_seqno == last_seqno &&
time_after(jiffies, seqno_timestamp + HZ)) {
dev_warn(ips->dev,
"ME failed to update for more than 1s, likely hung\n");
} else {
seqno_timestamp = get_jiffies_64();
last_seqno = cur_seqno;
}
last_msecs = jiffies_to_msecs(jiffies);
expire = jiffies + msecs_to_jiffies(IPS_SAMPLE_PERIOD);
__set_current_state(TASK_INTERRUPTIBLE);
mod_timer(&ips->timer, expire);
schedule();
/* Calculate actual sample period for power averaging */
last_sample_period = jiffies_to_msecs(jiffies) - last_msecs;
if (!last_sample_period)
last_sample_period = 1;
} while (!kthread_should_stop());
del_timer_sync(&ips->timer);
dev_dbg(ips->dev, "ips-monitor thread stopped\n");
return 0;
}
#if 0
#define THM_DUMPW(reg) \
{ \
u16 val = thm_readw(reg); \
dev_dbg(ips->dev, #reg ": 0x%04x\n", val); \
}
#define THM_DUMPL(reg) \
{ \
u32 val = thm_readl(reg); \
dev_dbg(ips->dev, #reg ": 0x%08x\n", val); \
}
#define THM_DUMPQ(reg) \
{ \
u64 val = thm_readq(reg); \
dev_dbg(ips->dev, #reg ": 0x%016x\n", val); \
}
static void dump_thermal_info(struct ips_driver *ips)
{
u16 ptl;
ptl = thm_readw(THM_PTL);
dev_dbg(ips->dev, "Processor temp limit: %d\n", ptl);
THM_DUMPW(THM_CTA);
THM_DUMPW(THM_TRC);
THM_DUMPW(THM_CTV1);
THM_DUMPL(THM_STS);
THM_DUMPW(THM_PTV);
THM_DUMPQ(THM_MGTV);
}
#endif
/**
* ips_irq_handler - handle temperature triggers and other IPS events
* @irq: irq number
 * @arg: IPS driver struct
*
* Handle temperature limit trigger events, generally by lowering the clamps.
* If we're at a critical limit, we clamp back to the lowest possible value
* to prevent emergency shutdown.
*/
static irqreturn_t ips_irq_handler(int irq, void *arg)
{
struct ips_driver *ips = arg;
u8 tses = thm_readb(THM_TSES);
u8 tes = thm_readb(THM_TES);
if (!tses && !tes)
return IRQ_NONE;
dev_info(ips->dev, "TSES: 0x%02x\n", tses);
dev_info(ips->dev, "TES: 0x%02x\n", tes);
/* STS update from EC? */
if (tes & 1) {
u32 sts, tc1;
sts = thm_readl(THM_STS);
tc1 = thm_readl(THM_TC1);
if (sts & STS_NVV) {
spin_lock(&ips->turbo_status_lock);
ips->core_power_limit = (sts & STS_PCPL_MASK) >>
STS_PCPL_SHIFT;
ips->mch_power_limit = (sts & STS_GPL_MASK) >>
STS_GPL_SHIFT;
/* ignore EC CPU vs GPU pref */
ips->cpu_turbo_enabled = !(sts & STS_PCTD_DIS);
/*
* Disable turbo for now, until we can figure
* out why the power figures are wrong
*/
ips->cpu_turbo_enabled = false;
if (ips->gpu_busy)
ips->gpu_turbo_enabled = !(sts & STS_GTD_DIS);
ips->mcp_temp_limit = (sts & STS_PTL_MASK) >>
STS_PTL_SHIFT;
ips->mcp_power_limit = (tc1 & STS_PPL_MASK) >>
STS_PPL_SHIFT;
verify_limits(ips);
spin_unlock(&ips->turbo_status_lock);
thm_writeb(THM_SEC, SEC_ACK);
}
thm_writeb(THM_TES, tes);
}
/* Thermal trip */
if (tses) {
dev_warn(ips->dev, "thermal trip occurred, tses: 0x%04x\n",
tses);
thm_writeb(THM_TSES, tses);
}
return IRQ_HANDLED;
}
#ifndef CONFIG_DEBUG_FS
static void ips_debugfs_init(struct ips_driver *ips) { return; }
static void ips_debugfs_cleanup(struct ips_driver *ips) { return; }
#else
/* Expose current state and limits in debugfs if possible */
static int cpu_temp_show(struct seq_file *m, void *data)
{
struct ips_driver *ips = m->private;
seq_printf(m, "%d.%02d\n", ips->ctv1_avg_temp / 100,
ips->ctv1_avg_temp % 100);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(cpu_temp);
static int cpu_power_show(struct seq_file *m, void *data)
{
struct ips_driver *ips = m->private;
seq_printf(m, "%dmW\n", ips->cpu_avg_power);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(cpu_power);
static int cpu_clamp_show(struct seq_file *m, void *data)
{
u64 turbo_override;
int tdp, tdc;
rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
tdp = (int)(turbo_override & TURBO_TDP_MASK);
tdc = (int)((turbo_override & TURBO_TDC_MASK) >> TURBO_TDC_SHIFT);
/* Convert to .1W/A units */
tdp = tdp * 10 / 8;
tdc = tdc * 10 / 8;
/* Watts Amperes */
seq_printf(m, "%d.%dW %d.%dA\n", tdp / 10, tdp % 10,
tdc / 10, tdc % 10);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(cpu_clamp);
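/*
 * Example output from cpu_clamp above (illustrative numbers): a TDP
 * field of 280 (1/8 W units) and a TDC field of 200 (1/8 A units)
 * convert via "* 10 / 8" to 350 and 250, printed as "35.0W 25.0A".
 */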
static int mch_temp_show(struct seq_file *m, void *data)
{
struct ips_driver *ips = m->private;
seq_printf(m, "%d.%02d\n", ips->mch_avg_temp / 100,
ips->mch_avg_temp % 100);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(mch_temp);
static int mch_power_show(struct seq_file *m, void *data)
{
struct ips_driver *ips = m->private;
seq_printf(m, "%dmW\n", ips->mch_avg_power);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(mch_power);
static void ips_debugfs_cleanup(struct ips_driver *ips)
{
debugfs_remove_recursive(ips->debug_root);
}
static void ips_debugfs_init(struct ips_driver *ips)
{
ips->debug_root = debugfs_create_dir("ips", NULL);
debugfs_create_file("cpu_temp", 0444, ips->debug_root, ips, &cpu_temp_fops);
debugfs_create_file("cpu_power", 0444, ips->debug_root, ips, &cpu_power_fops);
debugfs_create_file("cpu_clamp", 0444, ips->debug_root, ips, &cpu_clamp_fops);
debugfs_create_file("mch_temp", 0444, ips->debug_root, ips, &mch_temp_fops);
debugfs_create_file("mch_power", 0444, ips->debug_root, ips, &mch_power_fops);
}
#endif /* CONFIG_DEBUG_FS */
/**
* ips_detect_cpu - detect whether CPU supports IPS
 * @ips: IPS driver struct
 *
* Walk our list and see if we're on a supported CPU. If we find one,
* return the limits for it.
*/
static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
{
u64 turbo_power, misc_en;
struct ips_mcp_limits *limits = NULL;
u16 tdp;
if (!(boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 37)) {
dev_info(ips->dev, "Non-IPS CPU detected.\n");
return NULL;
}
rdmsrl(IA32_MISC_ENABLE, misc_en);
/*
* If the turbo enable bit isn't set, we shouldn't try to enable/disable
* turbo manually or we'll get an illegal MSR access, even though
* turbo will still be available.
*/
if (misc_en & IA32_MISC_TURBO_EN)
ips->turbo_toggle_allowed = true;
else
ips->turbo_toggle_allowed = false;
if (strstr(boot_cpu_data.x86_model_id, "CPU M"))
limits = &ips_sv_limits;
else if (strstr(boot_cpu_data.x86_model_id, "CPU L"))
limits = &ips_lv_limits;
else if (strstr(boot_cpu_data.x86_model_id, "CPU U"))
limits = &ips_ulv_limits;
else {
dev_info(ips->dev, "No CPUID match found.\n");
return NULL;
}
rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_power);
tdp = turbo_power & TURBO_TDP_MASK;
/* Sanity check TDP against CPU */
if (limits->core_power_limit != (tdp / 8) * 1000) {
dev_info(ips->dev,
"CPU TDP doesn't match expected value (found %d, expected %d)\n",
tdp / 8, limits->core_power_limit / 1000);
limits->core_power_limit = (tdp / 8) * 1000;
}
return limits;
}
/**
* ips_get_i915_syms - try to get GPU control methods from i915 driver
* @ips: IPS driver
*
* The i915 driver exports several interfaces to allow the IPS driver to
* monitor and control graphics turbo mode. If we can find them, we can
* enable graphics turbo, otherwise we must disable it to avoid exceeding
* thermal and power limits in the MCP.
*/
static bool ips_get_i915_syms(struct ips_driver *ips)
{
ips->read_mch_val = symbol_get(i915_read_mch_val);
if (!ips->read_mch_val)
goto out_err;
ips->gpu_raise = symbol_get(i915_gpu_raise);
if (!ips->gpu_raise)
goto out_put_mch;
ips->gpu_lower = symbol_get(i915_gpu_lower);
if (!ips->gpu_lower)
goto out_put_raise;
ips->gpu_busy = symbol_get(i915_gpu_busy);
if (!ips->gpu_busy)
goto out_put_lower;
ips->gpu_turbo_disable = symbol_get(i915_gpu_turbo_disable);
if (!ips->gpu_turbo_disable)
goto out_put_busy;
return true;
out_put_busy:
symbol_put(i915_gpu_busy);
out_put_lower:
symbol_put(i915_gpu_lower);
out_put_raise:
symbol_put(i915_gpu_raise);
out_put_mch:
symbol_put(i915_read_mch_val);
out_err:
return false;
}
static bool
ips_gpu_turbo_enabled(struct ips_driver *ips)
{
if (!ips->gpu_busy && late_i915_load) {
if (ips_get_i915_syms(ips)) {
dev_info(ips->dev,
"i915 driver attached, reenabling gpu turbo\n");
ips->gpu_turbo_enabled = !(thm_readl(THM_HTS) & HTS_GTD_DIS);
}
}
return ips->gpu_turbo_enabled;
}
void
ips_link_to_i915_driver(void)
{
/* We can't cleanly get at the various ips_driver structs from
* this caller (the i915 driver), so just set a flag saying
* that it's time to try getting the symbols again.
*/
late_i915_load = true;
}
EXPORT_SYMBOL_GPL(ips_link_to_i915_driver);
static const struct pci_device_id ips_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_THERMAL_SENSOR), },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, ips_id_table);
static int ips_blacklist_callback(const struct dmi_system_id *id)
{
pr_info("Blacklisted intel_ips for %s\n", id->ident);
return 1;
}
static const struct dmi_system_id ips_blacklist[] = {
{
.callback = ips_blacklist_callback,
.ident = "HP ProBook",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook"),
},
},
{ } /* terminating entry */
};
static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
u64 platform_info;
struct ips_driver *ips;
u32 hts;
int ret = 0;
u16 htshi, trc, trc_required_mask;
u8 tse;
if (dmi_check_system(ips_blacklist))
return -ENODEV;
ips = devm_kzalloc(&dev->dev, sizeof(*ips), GFP_KERNEL);
if (!ips)
return -ENOMEM;
spin_lock_init(&ips->turbo_status_lock);
ips->dev = &dev->dev;
ips->limits = ips_detect_cpu(ips);
if (!ips->limits) {
dev_info(&dev->dev, "IPS not supported on this CPU\n");
return -ENXIO;
}
ret = pcim_enable_device(dev);
if (ret) {
dev_err(&dev->dev, "can't enable PCI device, aborting\n");
return ret;
}
ret = pcim_iomap_regions(dev, 1 << 0, pci_name(dev));
if (ret) {
dev_err(&dev->dev, "failed to map thermal regs, aborting\n");
return ret;
}
ips->regmap = pcim_iomap_table(dev)[0];
pci_set_drvdata(dev, ips);
tse = thm_readb(THM_TSE);
if (tse != TSE_EN) {
dev_err(&dev->dev, "thermal device not enabled (0x%02x), aborting\n", tse);
return -ENXIO;
}
trc = thm_readw(THM_TRC);
trc_required_mask = TRC_CORE1_EN | TRC_CORE_PWR | TRC_MCH_EN;
if ((trc & trc_required_mask) != trc_required_mask) {
dev_err(&dev->dev, "thermal reporting for required devices not enabled, aborting\n");
return -ENXIO;
}
if (trc & TRC_CORE2_EN)
ips->second_cpu = true;
update_turbo_limits(ips);
dev_dbg(&dev->dev, "max cpu power clamp: %dW\n",
ips->mcp_power_limit / 10);
dev_dbg(&dev->dev, "max core power clamp: %dW\n",
ips->core_power_limit / 10);
/* BIOS may update limits at runtime */
if (thm_readl(THM_PSC) & PSP_PBRT)
ips->poll_turbo_status = true;
if (!ips_get_i915_syms(ips)) {
dev_info(&dev->dev, "failed to get i915 symbols, graphics turbo disabled until i915 loads\n");
ips->gpu_turbo_enabled = false;
} else {
dev_dbg(&dev->dev, "graphics turbo enabled\n");
ips->gpu_turbo_enabled = true;
}
/*
* Check PLATFORM_INFO MSR to make sure this chip is
* turbo capable.
*/
rdmsrl(PLATFORM_INFO, platform_info);
if (!(platform_info & PLATFORM_TDP)) {
dev_err(&dev->dev, "platform indicates TDP override unavailable, aborting\n");
return -ENODEV;
}
/*
* IRQ handler for ME interaction
* Note: don't use MSI here as the PCH has bugs.
*/
ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY);
if (ret < 0)
return ret;
ips->irq = pci_irq_vector(dev, 0);
ret = request_irq(ips->irq, ips_irq_handler, IRQF_SHARED, "ips", ips);
if (ret) {
dev_err(&dev->dev, "request irq failed, aborting\n");
return ret;
}
/* Enable aux, hot & critical interrupts */
thm_writeb(THM_TSPIEN, TSPIEN_AUX2_LOHI | TSPIEN_CRIT_LOHI |
TSPIEN_HOT_LOHI | TSPIEN_AUX_LOHI);
thm_writeb(THM_TEN, TEN_UPDATE_EN);
/* Collect adjustment values */
ips->cta_val = thm_readw(THM_CTA);
ips->pta_val = thm_readw(THM_PTA);
ips->mgta_val = thm_readw(THM_MGTA);
/* Save turbo limits & ratios */
rdmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit);
ips_disable_cpu_turbo(ips);
ips->cpu_turbo_enabled = false;
/* Create thermal adjust thread */
ips->adjust = kthread_create(ips_adjust, ips, "ips-adjust");
if (IS_ERR(ips->adjust)) {
dev_err(&dev->dev,
"failed to create thermal adjust thread, aborting\n");
ret = -ENOMEM;
goto error_free_irq;
}
/*
* Set up the work queue and monitor thread. The monitor thread
* will wake up ips_adjust thread.
*/
ips->monitor = kthread_run(ips_monitor, ips, "ips-monitor");
if (IS_ERR(ips->monitor)) {
dev_err(&dev->dev,
"failed to create thermal monitor thread, aborting\n");
ret = -ENOMEM;
goto error_thread_cleanup;
}
hts = (ips->core_power_limit << HTS_PCPL_SHIFT) |
(ips->mcp_temp_limit << HTS_PTL_SHIFT) | HTS_NVV;
htshi = HTS2_PRST_RUNNING << HTS2_PRST_SHIFT;
thm_writew(THM_HTSHI, htshi);
thm_writel(THM_HTS, hts);
ips_debugfs_init(ips);
dev_info(&dev->dev, "IPS driver initialized, MCP temp limit %d\n",
ips->mcp_temp_limit);
return ret;
error_thread_cleanup:
kthread_stop(ips->adjust);
error_free_irq:
free_irq(ips->irq, ips);
pci_free_irq_vectors(dev);
return ret;
}
static void ips_remove(struct pci_dev *dev)
{
struct ips_driver *ips = pci_get_drvdata(dev);
u64 turbo_override;
ips_debugfs_cleanup(ips);
/* Release i915 driver */
if (ips->read_mch_val)
symbol_put(i915_read_mch_val);
if (ips->gpu_raise)
symbol_put(i915_gpu_raise);
if (ips->gpu_lower)
symbol_put(i915_gpu_lower);
if (ips->gpu_busy)
symbol_put(i915_gpu_busy);
if (ips->gpu_turbo_disable)
symbol_put(i915_gpu_turbo_disable);
rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
turbo_override &= ~(TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN);
wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
wrmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit);
free_irq(ips->irq, ips);
pci_free_irq_vectors(dev);
if (ips->adjust)
kthread_stop(ips->adjust);
if (ips->monitor)
kthread_stop(ips->monitor);
dev_dbg(&dev->dev, "IPS driver removed\n");
}
static struct pci_driver ips_pci_driver = {
.name = "intel ips",
.id_table = ips_id_table,
.probe = ips_probe,
.remove = ips_remove,
};
module_pci_driver(ips_pci_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jesse Barnes <[email protected]>");
MODULE_DESCRIPTION("Intelligent Power Sharing Driver");
| linux-master | drivers/platform/x86/intel_ips.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*-*-linux-c-*-*/
/*
Copyright (C) 2006 Lennart Poettering <mzxreary (at) 0pointer (dot) de>
*/
/*
* msi-laptop.c - MSI S270 laptop support. This laptop is sold under
* various brands, including "Cytron/TCM/Medion/Tchibo MD96100".
*
* Driver also supports S271, S420 models.
*
* This driver exports a few files in /sys/devices/platform/msi-laptop-pf/:
*
* lcd_level - Screen brightness: contains a single integer in the
* range 0..8. (rw)
*
* auto_brightness - Enable automatic brightness control: contains
* either 0 or 1. If set to 1 the hardware adjusts the screen
* brightness automatically when the power cord is
* plugged/unplugged. (rw)
*
* wlan - WLAN subsystem enabled: contains either 0 or 1. (ro)
*
* bluetooth - Bluetooth subsystem enabled: contains either 0 or 1
* Please note that this file is constantly 0 if no Bluetooth
* hardware is available. (ro)
*
* In addition to these platform device attributes the driver
* registers itself in the Linux backlight control subsystem and is
* available to userspace under /sys/class/backlight/msi-laptop-bl/.
*
* This driver might work on other laptops produced by MSI. If you
* want to try it you can pass force=1 as argument to the module which
* will force it to load even when the DMI data doesn't identify the
* laptop as MSI S270. YMMV.
*/
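/*
 * Illustrative shell usage of the attributes documented above:
 *
 *   cat /sys/devices/platform/msi-laptop-pf/lcd_level
 *   echo 4 > /sys/devices/platform/msi-laptop-pf/lcd_level
 *   echo 1 > /sys/devices/platform/msi-laptop-pf/auto_brightness
 */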
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/backlight.h>
#include <linux/platform_device.h>
#include <linux/rfkill.h>
#include <linux/i8042.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <acpi/video.h>
#define MSI_LCD_LEVEL_MAX 9
#define MSI_EC_COMMAND_WIRELESS 0x10
#define MSI_EC_COMMAND_LCD_LEVEL 0x11
#define MSI_STANDARD_EC_COMMAND_ADDRESS 0x2e
#define MSI_STANDARD_EC_BLUETOOTH_MASK (1 << 0)
#define MSI_STANDARD_EC_WEBCAM_MASK (1 << 1)
#define MSI_STANDARD_EC_WLAN_MASK (1 << 3)
#define MSI_STANDARD_EC_3G_MASK (1 << 4)
/* For set SCM load flag to disable BIOS fn key */
#define MSI_STANDARD_EC_SCM_LOAD_ADDRESS 0x2d
#define MSI_STANDARD_EC_SCM_LOAD_MASK (1 << 0)
#define MSI_STANDARD_EC_FUNCTIONS_ADDRESS 0xe4
/* Power LED is orange - Turbo mode */
#define MSI_STANDARD_EC_TURBO_MASK (1 << 1)
/* Power LED is green - ECO mode */
#define MSI_STANDARD_EC_ECO_MASK (1 << 3)
/* Touchpad is turned on */
#define MSI_STANDARD_EC_TOUCHPAD_MASK (1 << 4)
/* If this bit != bit 1, turbo mode can't be toggled */
#define MSI_STANDARD_EC_TURBO_COOLDOWN_MASK (1 << 7)
#define MSI_STANDARD_EC_FAN_ADDRESS 0x33
/* If zero, fan rotates at maximal speed */
#define MSI_STANDARD_EC_AUTOFAN_MASK (1 << 0)
#ifdef CONFIG_PM_SLEEP
static int msi_laptop_resume(struct device *device);
#endif
static SIMPLE_DEV_PM_OPS(msi_laptop_pm, NULL, msi_laptop_resume);
#define MSI_STANDARD_EC_DEVICES_EXISTS_ADDRESS 0x2f
static bool force;
module_param(force, bool, 0);
MODULE_PARM_DESC(force, "Force driver load, ignore DMI data");
static int auto_brightness;
module_param(auto_brightness, int, 0);
MODULE_PARM_DESC(auto_brightness, "Enable automatic brightness control (0: disabled; 1: enabled; 2: don't touch)");
static const struct key_entry msi_laptop_keymap[] = {
{KE_KEY, KEY_TOUCHPAD_ON, {KEY_TOUCHPAD_ON} }, /* Touch Pad On */
	{KE_KEY, KEY_TOUCHPAD_OFF, {KEY_TOUCHPAD_OFF} }, /* Touch Pad Off */
{KE_END, 0}
};
static struct input_dev *msi_laptop_input_dev;
static int wlan_s, bluetooth_s, threeg_s;
static int threeg_exists;
static struct rfkill *rfk_wlan, *rfk_bluetooth, *rfk_threeg;
/* MSI laptop quirks */
struct quirk_entry {
bool old_ec_model;
	/* Some MSI 3G netbooks only have one fn key to control
	 * Wlan/Bluetooth/3G. Those netbooks load the SCM (a Windows app) to
	 * disable the BIOS's own Wlan/Bluetooth control when the fn key is
	 * pressed, and then toggle Wlan/Bluetooth/3G through the SCM (i.e. in
	 * software, by the OS). Without the SCM, users can't turn the 3G
	 * module on or off on those netbooks at all. On Linux, the msi-laptop
	 * driver disables the BIOS control in the same way; a userland
	 * application (e.g. HAL) may then be needed to provide the software
	 * control that the SCM would normally perform.
	 * e.g. MSI N034 netbook
	 */
bool load_scm_model;
/* Some MSI laptops need delay before reading from EC */
bool ec_delay;
	/* Some MSI Wind netbooks (e.g. MSI Wind U100) need the SCM loaded to
	 * get some features working (e.g. ECO mode), but we cannot change the
	 * Wlan/Bluetooth state in software; we can only read it.
*/
bool ec_read_only;
};
static struct quirk_entry *quirks;
/* Hardware access */
static int set_lcd_level(int level)
{
u8 buf[2];
if (level < 0 || level >= MSI_LCD_LEVEL_MAX)
return -EINVAL;
buf[0] = 0x80;
buf[1] = (u8) (level*31);
return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, buf, sizeof(buf),
NULL, 0);
}
static int get_lcd_level(void)
{
u8 wdata = 0, rdata;
int result;
result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1,
&rdata, 1);
if (result < 0)
return result;
return (int) rdata / 31;
}
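/*
 * The EC stores the brightness as level * 31, so the nine levels 0..8
 * map onto the byte range 0..248. Illustrative round trip: level 4 is
 * written as 124, and a read-back of 124 / 31 recovers level 4.
 */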
static int get_auto_brightness(void)
{
u8 wdata = 4, rdata;
int result;
result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1,
&rdata, 1);
if (result < 0)
return result;
return !!(rdata & 8);
}
static int set_auto_brightness(int enable)
{
u8 wdata[2], rdata;
int result;
wdata[0] = 4;
result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 1,
&rdata, 1);
if (result < 0)
return result;
wdata[0] = 0x84;
wdata[1] = (rdata & 0xF7) | (enable ? 8 : 0);
return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 2,
NULL, 0);
}
static ssize_t set_device_state(const char *buf, size_t count, u8 mask)
{
int status;
u8 wdata = 0, rdata;
int result;
if (sscanf(buf, "%i", &status) != 1 || (status < 0 || status > 1))
return -EINVAL;
if (quirks->ec_read_only)
return 0;
/* read current device state */
result = ec_read(MSI_STANDARD_EC_COMMAND_ADDRESS, &rdata);
if (result < 0)
return result;
if (!!(rdata & mask) != status) {
/* reverse device bit */
if (rdata & mask)
wdata = rdata & ~mask;
else
wdata = rdata | mask;
result = ec_write(MSI_STANDARD_EC_COMMAND_ADDRESS, wdata);
if (result < 0)
return result;
}
return count;
}
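/*
 * Illustrative walk-through of the helper above: writing "1" with
 * MSI_STANDARD_EC_WLAN_MASK (bit 3) while the EC byte reads 0x01 flips
 * it to 0x09, enabling WLAN without touching the Bluetooth bit; on
 * ec_read_only models the write is skipped entirely.
 */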
static int get_wireless_state(int *wlan, int *bluetooth)
{
u8 wdata = 0, rdata;
int result;
result = ec_transaction(MSI_EC_COMMAND_WIRELESS, &wdata, 1, &rdata, 1);
if (result < 0)
return result;
if (wlan)
*wlan = !!(rdata & 8);
if (bluetooth)
*bluetooth = !!(rdata & 128);
return 0;
}
static int get_wireless_state_ec_standard(void)
{
u8 rdata;
int result;
result = ec_read(MSI_STANDARD_EC_COMMAND_ADDRESS, &rdata);
if (result < 0)
return result;
wlan_s = !!(rdata & MSI_STANDARD_EC_WLAN_MASK);
bluetooth_s = !!(rdata & MSI_STANDARD_EC_BLUETOOTH_MASK);
threeg_s = !!(rdata & MSI_STANDARD_EC_3G_MASK);
return 0;
}
static int get_threeg_exists(void)
{
u8 rdata;
int result;
result = ec_read(MSI_STANDARD_EC_DEVICES_EXISTS_ADDRESS, &rdata);
if (result < 0)
return result;
threeg_exists = !!(rdata & MSI_STANDARD_EC_3G_MASK);
return 0;
}
/* Backlight device stuff */
static int bl_get_brightness(struct backlight_device *b)
{
return get_lcd_level();
}
static int bl_update_status(struct backlight_device *b)
{
return set_lcd_level(b->props.brightness);
}
static const struct backlight_ops msibl_ops = {
.get_brightness = bl_get_brightness,
.update_status = bl_update_status,
};
static struct backlight_device *msibl_device;
/* Platform device */
static ssize_t show_wlan(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret, enabled = 0;
if (quirks->old_ec_model) {
ret = get_wireless_state(&enabled, NULL);
} else {
ret = get_wireless_state_ec_standard();
enabled = wlan_s;
}
if (ret < 0)
return ret;
return sprintf(buf, "%i\n", enabled);
}
static ssize_t store_wlan(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
return set_device_state(buf, count, MSI_STANDARD_EC_WLAN_MASK);
}
static ssize_t show_bluetooth(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret, enabled = 0;
if (quirks->old_ec_model) {
ret = get_wireless_state(NULL, &enabled);
} else {
ret = get_wireless_state_ec_standard();
enabled = bluetooth_s;
}
if (ret < 0)
return ret;
return sprintf(buf, "%i\n", enabled);
}
static ssize_t store_bluetooth(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
return set_device_state(buf, count, MSI_STANDARD_EC_BLUETOOTH_MASK);
}
static ssize_t show_threeg(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
/* old msi ec not support 3G */
if (quirks->old_ec_model)
return -ENODEV;
ret = get_wireless_state_ec_standard();
if (ret < 0)
return ret;
return sprintf(buf, "%i\n", threeg_s);
}
static ssize_t store_threeg(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
return set_device_state(buf, count, MSI_STANDARD_EC_3G_MASK);
}
static ssize_t show_lcd_level(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
ret = get_lcd_level();
if (ret < 0)
return ret;
return sprintf(buf, "%i\n", ret);
}
static ssize_t store_lcd_level(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
int level, ret;
if (sscanf(buf, "%i", &level) != 1 ||
(level < 0 || level >= MSI_LCD_LEVEL_MAX))
return -EINVAL;
ret = set_lcd_level(level);
if (ret < 0)
return ret;
return count;
}
static ssize_t show_auto_brightness(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
ret = get_auto_brightness();
if (ret < 0)
return ret;
return sprintf(buf, "%i\n", ret);
}
static ssize_t store_auto_brightness(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
int enable, ret;
if (sscanf(buf, "%i", &enable) != 1 || (enable != (enable & 1)))
return -EINVAL;
ret = set_auto_brightness(enable);
if (ret < 0)
return ret;
return count;
}
static ssize_t show_touchpad(struct device *dev,
struct device_attribute *attr, char *buf)
{
u8 rdata;
int result;
result = ec_read(MSI_STANDARD_EC_FUNCTIONS_ADDRESS, &rdata);
if (result < 0)
return result;
return sprintf(buf, "%i\n", !!(rdata & MSI_STANDARD_EC_TOUCHPAD_MASK));
}
static ssize_t show_turbo(struct device *dev,
struct device_attribute *attr, char *buf)
{
u8 rdata;
int result;
result = ec_read(MSI_STANDARD_EC_FUNCTIONS_ADDRESS, &rdata);
if (result < 0)
return result;
return sprintf(buf, "%i\n", !!(rdata & MSI_STANDARD_EC_TURBO_MASK));
}
static ssize_t show_eco(struct device *dev,
struct device_attribute *attr, char *buf)
{
u8 rdata;
int result;
result = ec_read(MSI_STANDARD_EC_FUNCTIONS_ADDRESS, &rdata);
if (result < 0)
return result;
return sprintf(buf, "%i\n", !!(rdata & MSI_STANDARD_EC_ECO_MASK));
}
static ssize_t show_turbo_cooldown(struct device *dev,
struct device_attribute *attr, char *buf)
{
u8 rdata;
int result;
result = ec_read(MSI_STANDARD_EC_FUNCTIONS_ADDRESS, &rdata);
if (result < 0)
return result;
return sprintf(buf, "%i\n", (!!(rdata & MSI_STANDARD_EC_TURBO_MASK)) |
(!!(rdata & MSI_STANDARD_EC_TURBO_COOLDOWN_MASK) << 1));
}
static ssize_t show_auto_fan(struct device *dev,
struct device_attribute *attr, char *buf)
{
u8 rdata;
int result;
result = ec_read(MSI_STANDARD_EC_FAN_ADDRESS, &rdata);
if (result < 0)
return result;
return sprintf(buf, "%i\n", !!(rdata & MSI_STANDARD_EC_AUTOFAN_MASK));
}
static ssize_t store_auto_fan(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
int enable, result;
if (sscanf(buf, "%i", &enable) != 1 || (enable != (enable & 1)))
return -EINVAL;
result = ec_write(MSI_STANDARD_EC_FAN_ADDRESS, enable);
if (result < 0)
return result;
return count;
}
static DEVICE_ATTR(lcd_level, 0644, show_lcd_level, store_lcd_level);
static DEVICE_ATTR(auto_brightness, 0644, show_auto_brightness,
store_auto_brightness);
static DEVICE_ATTR(bluetooth, 0444, show_bluetooth, NULL);
static DEVICE_ATTR(wlan, 0444, show_wlan, NULL);
static DEVICE_ATTR(threeg, 0444, show_threeg, NULL);
static DEVICE_ATTR(touchpad, 0444, show_touchpad, NULL);
static DEVICE_ATTR(turbo_mode, 0444, show_turbo, NULL);
static DEVICE_ATTR(eco_mode, 0444, show_eco, NULL);
static DEVICE_ATTR(turbo_cooldown, 0444, show_turbo_cooldown, NULL);
static DEVICE_ATTR(auto_fan, 0644, show_auto_fan, store_auto_fan);
static struct attribute *msipf_attributes[] = {
&dev_attr_bluetooth.attr,
&dev_attr_wlan.attr,
&dev_attr_touchpad.attr,
&dev_attr_turbo_mode.attr,
&dev_attr_eco_mode.attr,
&dev_attr_turbo_cooldown.attr,
&dev_attr_auto_fan.attr,
NULL
};
static struct attribute *msipf_old_attributes[] = {
&dev_attr_lcd_level.attr,
&dev_attr_auto_brightness.attr,
NULL
};
static const struct attribute_group msipf_attribute_group = {
.attrs = msipf_attributes
};
static const struct attribute_group msipf_old_attribute_group = {
.attrs = msipf_old_attributes
};
static struct platform_driver msipf_driver = {
.driver = {
.name = "msi-laptop-pf",
.pm = &msi_laptop_pm,
},
};
static struct platform_device *msipf_device;
/* Initialization */
static struct quirk_entry quirk_old_ec_model = {
.old_ec_model = true,
};
static struct quirk_entry quirk_load_scm_model = {
.load_scm_model = true,
.ec_delay = true,
};
static struct quirk_entry quirk_load_scm_ro_model = {
.load_scm_model = true,
.ec_read_only = true,
};
static int dmi_check_cb(const struct dmi_system_id *dmi)
{
pr_info("Identified laptop model '%s'\n", dmi->ident);
quirks = dmi->driver_data;
return 1;
}
static unsigned long msi_work_delay(int msecs)
{
if (quirks->ec_delay)
return msecs_to_jiffies(msecs);
return 0;
}
static const struct dmi_system_id msi_dmi_table[] __initconst = {
{
.ident = "MSI S270",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT"),
DMI_MATCH(DMI_PRODUCT_NAME, "MS-1013"),
DMI_MATCH(DMI_PRODUCT_VERSION, "0131"),
DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR INT")
},
.driver_data = &quirk_old_ec_model,
.callback = dmi_check_cb
},
{
.ident = "MSI S271",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"),
DMI_MATCH(DMI_PRODUCT_NAME, "MS-1058"),
DMI_MATCH(DMI_PRODUCT_VERSION, "0581"),
DMI_MATCH(DMI_BOARD_NAME, "MS-1058")
},
.driver_data = &quirk_old_ec_model,
.callback = dmi_check_cb
},
{
.ident = "MSI S420",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"),
DMI_MATCH(DMI_PRODUCT_NAME, "MS-1412"),
DMI_MATCH(DMI_BOARD_VENDOR, "MSI"),
DMI_MATCH(DMI_BOARD_NAME, "MS-1412")
},
.driver_data = &quirk_old_ec_model,
.callback = dmi_check_cb
},
{
.ident = "Medion MD96100",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "NOTEBOOK"),
DMI_MATCH(DMI_PRODUCT_NAME, "SAM2000"),
DMI_MATCH(DMI_PRODUCT_VERSION, "0131"),
DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR INT")
},
.driver_data = &quirk_old_ec_model,
.callback = dmi_check_cb
},
{
.ident = "MSI N034",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR,
"MICRO-STAR INTERNATIONAL CO., LTD"),
DMI_MATCH(DMI_PRODUCT_NAME, "MS-N034"),
DMI_MATCH(DMI_CHASSIS_VENDOR,
"MICRO-STAR INTERNATIONAL CO., LTD")
},
.driver_data = &quirk_load_scm_model,
.callback = dmi_check_cb
},
{
.ident = "MSI N051",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR,
"MICRO-STAR INTERNATIONAL CO., LTD"),
DMI_MATCH(DMI_PRODUCT_NAME, "MS-N051"),
DMI_MATCH(DMI_CHASSIS_VENDOR,
"MICRO-STAR INTERNATIONAL CO., LTD")
},
.driver_data = &quirk_load_scm_model,
.callback = dmi_check_cb
},
{
.ident = "MSI N014",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR,
"MICRO-STAR INTERNATIONAL CO., LTD"),
DMI_MATCH(DMI_PRODUCT_NAME, "MS-N014"),
},
.driver_data = &quirk_load_scm_model,
.callback = dmi_check_cb
},
{
.ident = "MSI CR620",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR,
"Micro-Star International"),
DMI_MATCH(DMI_PRODUCT_NAME, "CR620"),
},
.driver_data = &quirk_load_scm_model,
.callback = dmi_check_cb
},
{
.ident = "MSI U270",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR,
"Micro-Star International Co., Ltd."),
DMI_MATCH(DMI_PRODUCT_NAME, "U270 series"),
},
.driver_data = &quirk_load_scm_model,
.callback = dmi_check_cb
},
{
.ident = "MSI U90/U100",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR,
"MICRO-STAR INTERNATIONAL CO., LTD"),
DMI_MATCH(DMI_PRODUCT_NAME, "U90/U100"),
},
.driver_data = &quirk_load_scm_ro_model,
.callback = dmi_check_cb
},
{ }
};
MODULE_DEVICE_TABLE(dmi, msi_dmi_table);
static int rfkill_bluetooth_set(void *data, bool blocked)
{
/*
 * blocked == false means the radio is on,
 * blocked == true means the radio is off.
 */
int result = set_device_state(blocked ? "0" : "1", 0,
MSI_STANDARD_EC_BLUETOOTH_MASK);
return min(result, 0);
}
static int rfkill_wlan_set(void *data, bool blocked)
{
int result = set_device_state(blocked ? "0" : "1", 0,
MSI_STANDARD_EC_WLAN_MASK);
return min(result, 0);
}
static int rfkill_threeg_set(void *data, bool blocked)
{
int result = set_device_state(blocked ? "0" : "1", 0,
MSI_STANDARD_EC_3G_MASK);
return min(result, 0);
}
static const struct rfkill_ops rfkill_bluetooth_ops = {
.set_block = rfkill_bluetooth_set
};
static const struct rfkill_ops rfkill_wlan_ops = {
.set_block = rfkill_wlan_set
};
static const struct rfkill_ops rfkill_threeg_ops = {
.set_block = rfkill_threeg_set
};
static void rfkill_cleanup(void)
{
if (rfk_bluetooth) {
rfkill_unregister(rfk_bluetooth);
rfkill_destroy(rfk_bluetooth);
}
if (rfk_threeg) {
rfkill_unregister(rfk_threeg);
rfkill_destroy(rfk_threeg);
}
if (rfk_wlan) {
rfkill_unregister(rfk_wlan);
rfkill_destroy(rfk_wlan);
}
}
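/*
 * On ec_read_only models the wireless state cannot be changed from
 * software, so report it as a hardware block; otherwise mirror it as
 * a soft block that userspace may toggle.
 */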
static bool msi_rfkill_set_state(struct rfkill *rfkill, bool blocked)
{
if (quirks->ec_read_only)
return rfkill_set_hw_state(rfkill, blocked);
else
return rfkill_set_sw_state(rfkill, blocked);
}
static void msi_update_rfkill(struct work_struct *ignored)
{
get_wireless_state_ec_standard();
if (rfk_wlan)
msi_rfkill_set_state(rfk_wlan, !wlan_s);
if (rfk_bluetooth)
msi_rfkill_set_state(rfk_bluetooth, !bluetooth_s);
if (rfk_threeg)
msi_rfkill_set_state(rfk_threeg, !threeg_s);
}
static DECLARE_DELAYED_WORK(msi_rfkill_dwork, msi_update_rfkill);
static void msi_send_touchpad_key(struct work_struct *ignored)
{
u8 rdata;
int result;
result = ec_read(MSI_STANDARD_EC_FUNCTIONS_ADDRESS, &rdata);
if (result < 0)
return;
sparse_keymap_report_event(msi_laptop_input_dev,
(rdata & MSI_STANDARD_EC_TOUCHPAD_MASK) ?
KEY_TOUCHPAD_ON : KEY_TOUCHPAD_OFF, 1, true);
}
static DECLARE_DELAYED_WORK(msi_touchpad_dwork, msi_send_touchpad_key);
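/*
 * The EC reports hotkey presses as 0xe0-prefixed scancodes on the
 * keyboard port. The filter below only peeks at the byte following
 * the 0xe0 prefix in order to schedule deferred work, and it always
 * returns false, so every event is still passed on to the normal
 * keyboard driver.
 */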
static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str,
struct serio *port)
{
static bool extended;
if (str & I8042_STR_AUXDATA)
return false;
/* 0x54 wwan, 0x62 bluetooth, 0x76 wlan, 0xE4 touchpad toggle */
if (unlikely(data == 0xe0)) {
extended = true;
return false;
} else if (unlikely(extended)) {
extended = false;
switch (data) {
case 0xE4:
schedule_delayed_work(&msi_touchpad_dwork, msi_work_delay(500));
break;
case 0x54:
case 0x62:
case 0x76:
schedule_delayed_work(&msi_rfkill_dwork, msi_work_delay(500));
break;
}
}
return false;
}
static void msi_init_rfkill(struct work_struct *ignored)
{
if (rfk_wlan) {
msi_rfkill_set_state(rfk_wlan, !wlan_s);
rfkill_wlan_set(NULL, !wlan_s);
}
if (rfk_bluetooth) {
msi_rfkill_set_state(rfk_bluetooth, !bluetooth_s);
rfkill_bluetooth_set(NULL, !bluetooth_s);
}
if (rfk_threeg) {
msi_rfkill_set_state(rfk_threeg, !threeg_s);
rfkill_threeg_set(NULL, !threeg_s);
}
}
static DECLARE_DELAYED_WORK(msi_rfkill_init, msi_init_rfkill);
static int rfkill_init(struct platform_device *sdev)
{
/* add rfkill */
int retval;
/* read the current hardware wireless state */
get_wireless_state_ec_standard();
rfk_bluetooth = rfkill_alloc("msi-bluetooth", &sdev->dev,
RFKILL_TYPE_BLUETOOTH,
&rfkill_bluetooth_ops, NULL);
if (!rfk_bluetooth) {
retval = -ENOMEM;
goto err_bluetooth;
}
retval = rfkill_register(rfk_bluetooth);
if (retval)
goto err_bluetooth;
rfk_wlan = rfkill_alloc("msi-wlan", &sdev->dev, RFKILL_TYPE_WLAN,
&rfkill_wlan_ops, NULL);
if (!rfk_wlan) {
retval = -ENOMEM;
goto err_wlan;
}
retval = rfkill_register(rfk_wlan);
if (retval)
goto err_wlan;
if (threeg_exists) {
rfk_threeg = rfkill_alloc("msi-threeg", &sdev->dev,
RFKILL_TYPE_WWAN, &rfkill_threeg_ops, NULL);
if (!rfk_threeg) {
retval = -ENOMEM;
goto err_threeg;
}
retval = rfkill_register(rfk_threeg);
if (retval)
goto err_threeg;
}
/* schedule the initial rfkill state update */
schedule_delayed_work(&msi_rfkill_init, msi_work_delay(1000));
return 0;
err_threeg:
rfkill_destroy(rfk_threeg);
if (rfk_wlan)
rfkill_unregister(rfk_wlan);
err_wlan:
rfkill_destroy(rfk_wlan);
if (rfk_bluetooth)
rfkill_unregister(rfk_bluetooth);
err_bluetooth:
rfkill_destroy(rfk_bluetooth);
return retval;
}
static int msi_scm_disable_hw_fn_handling(void)
{
u8 data;
int result;
if (!quirks->load_scm_model)
return 0;
/* set load SCM to disable hardware control by fn key */
result = ec_read(MSI_STANDARD_EC_SCM_LOAD_ADDRESS, &data);
if (result < 0)
return result;
result = ec_write(MSI_STANDARD_EC_SCM_LOAD_ADDRESS,
data | MSI_STANDARD_EC_SCM_LOAD_MASK);
if (result < 0)
return result;
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int msi_laptop_resume(struct device *device)
{
return msi_scm_disable_hw_fn_handling();
}
#endif
static int __init msi_laptop_input_setup(void)
{
int err;
msi_laptop_input_dev = input_allocate_device();
if (!msi_laptop_input_dev)
return -ENOMEM;
msi_laptop_input_dev->name = "MSI Laptop hotkeys";
msi_laptop_input_dev->phys = "msi-laptop/input0";
msi_laptop_input_dev->id.bustype = BUS_HOST;
err = sparse_keymap_setup(msi_laptop_input_dev,
msi_laptop_keymap, NULL);
if (err)
goto err_free_dev;
err = input_register_device(msi_laptop_input_dev);
if (err)
goto err_free_dev;
return 0;
err_free_dev:
input_free_device(msi_laptop_input_dev);
return err;
}
static int __init load_scm_model_init(struct platform_device *sdev)
{
int result;
if (!quirks->ec_read_only) {
/* allow userland to write the sysfs files */
dev_attr_bluetooth.store = store_bluetooth;
dev_attr_wlan.store = store_wlan;
dev_attr_threeg.store = store_threeg;
dev_attr_bluetooth.attr.mode |= S_IWUSR;
dev_attr_wlan.attr.mode |= S_IWUSR;
dev_attr_threeg.attr.mode |= S_IWUSR;
}
/* disable hardware control by fn key */
result = msi_scm_disable_hw_fn_handling();
if (result < 0)
return result;
/* initialize rfkill */
result = rfkill_init(sdev);
if (result < 0)
goto fail_rfkill;
/* setup input device */
result = msi_laptop_input_setup();
if (result)
goto fail_input;
result = i8042_install_filter(msi_laptop_i8042_filter);
if (result) {
pr_err("Unable to install key filter\n");
goto fail_filter;
}
return 0;
fail_filter:
input_unregister_device(msi_laptop_input_dev);
fail_input:
rfkill_cleanup();
fail_rfkill:
return result;
}
static void msi_scm_model_exit(void)
{
if (!quirks->load_scm_model)
return;
i8042_remove_filter(msi_laptop_i8042_filter);
cancel_delayed_work_sync(&msi_touchpad_dwork);
input_unregister_device(msi_laptop_input_dev);
cancel_delayed_work_sync(&msi_rfkill_dwork);
rfkill_cleanup();
}
static int __init msi_init(void)
{
int ret;
if (acpi_disabled)
return -ENODEV;
dmi_check_system(msi_dmi_table);
if (!quirks)
/* quirks may be NULL if no match in DMI table */
quirks = &quirk_load_scm_model;
if (force)
quirks = &quirk_old_ec_model;
if (!quirks->old_ec_model)
get_threeg_exists();
if (auto_brightness < 0 || auto_brightness > 2)
return -EINVAL;
/* Register backlight stuff */
if (quirks->old_ec_model &&
acpi_video_get_backlight_type() == acpi_backlight_vendor) {
struct backlight_properties props;
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_PLATFORM;
props.max_brightness = MSI_LCD_LEVEL_MAX - 1;
msibl_device = backlight_device_register("msi-laptop-bl", NULL,
NULL, &msibl_ops,
&props);
if (IS_ERR(msibl_device))
return PTR_ERR(msibl_device);
}
ret = platform_driver_register(&msipf_driver);
if (ret)
goto fail_backlight;
/* Register platform stuff */
msipf_device = platform_device_alloc("msi-laptop-pf", PLATFORM_DEVID_NONE);
if (!msipf_device) {
ret = -ENOMEM;
goto fail_platform_driver;
}
ret = platform_device_add(msipf_device);
if (ret)
goto fail_device_add;
if (quirks->load_scm_model && (load_scm_model_init(msipf_device) < 0)) {
ret = -EINVAL;
goto fail_scm_model_init;
}
ret = sysfs_create_group(&msipf_device->dev.kobj,
&msipf_attribute_group);
if (ret)
goto fail_create_group;
if (!quirks->old_ec_model) {
if (threeg_exists)
ret = device_create_file(&msipf_device->dev,
&dev_attr_threeg);
if (ret)
goto fail_create_attr;
} else {
ret = sysfs_create_group(&msipf_device->dev.kobj,
&msipf_old_attribute_group);
if (ret)
goto fail_create_attr;
/*
* Disable automatic brightness control by default because this
* module was probably loaded to do brightness control in software.
*/
if (auto_brightness != 2)
set_auto_brightness(auto_brightness);
}
return 0;
fail_create_attr:
sysfs_remove_group(&msipf_device->dev.kobj, &msipf_attribute_group);
fail_create_group:
msi_scm_model_exit();
fail_scm_model_init:
platform_device_del(msipf_device);
fail_device_add:
platform_device_put(msipf_device);
fail_platform_driver:
platform_driver_unregister(&msipf_driver);
fail_backlight:
backlight_device_unregister(msibl_device);
return ret;
}
static void __exit msi_cleanup(void)
{
msi_scm_model_exit();
sysfs_remove_group(&msipf_device->dev.kobj, &msipf_attribute_group);
if (!quirks->old_ec_model && threeg_exists)
device_remove_file(&msipf_device->dev, &dev_attr_threeg);
platform_device_unregister(msipf_device);
platform_driver_unregister(&msipf_driver);
backlight_device_unregister(msibl_device);
if (quirks->old_ec_model) {
/* Enable automatic brightness control again */
if (auto_brightness != 2)
set_auto_brightness(1);
}
}
module_init(msi_init);
module_exit(msi_cleanup);
MODULE_AUTHOR("Lennart Poettering");
MODULE_DESCRIPTION("MSI Laptop Support");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/x86/msi-laptop.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* eeepc-laptop.c - Asus Eee PC extras
*
* Based on asus_acpi.c as patched for the Eee PC by Asus:
* ftp://ftp.asus.com/pub/ASUS/EeePC/701/ASUS_ACPI_071126.rar
* Based on eee.c from eeepc-linux
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/backlight.h>
#include <linux/fb.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/uaccess.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/rfkill.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/leds.h>
#include <linux/dmi.h>
#include <acpi/video.h>
#define EEEPC_LAPTOP_VERSION "0.1"
#define EEEPC_LAPTOP_NAME "Eee PC Hotkey Driver"
#define EEEPC_LAPTOP_FILE "eeepc"
#define EEEPC_ACPI_CLASS "hotkey"
#define EEEPC_ACPI_DEVICE_NAME "Hotkey"
#define EEEPC_ACPI_HID "ASUS010"
MODULE_AUTHOR("Corentin Chary, Eric Cooper");
MODULE_DESCRIPTION(EEEPC_LAPTOP_NAME);
MODULE_LICENSE("GPL");
static bool hotplug_disabled;
module_param(hotplug_disabled, bool, 0444);
MODULE_PARM_DESC(hotplug_disabled,
"Disable hotplug for wireless device. "
"If your laptop need that, please report to "
"[email protected].");
/*
* Definitions for Asus EeePC
*/
#define NOTIFY_BRN_MIN 0x20
#define NOTIFY_BRN_MAX 0x2f
enum {
DISABLE_ASL_WLAN = 0x0001,
DISABLE_ASL_BLUETOOTH = 0x0002,
DISABLE_ASL_IRDA = 0x0004,
DISABLE_ASL_CAMERA = 0x0008,
DISABLE_ASL_TV = 0x0010,
DISABLE_ASL_GPS = 0x0020,
DISABLE_ASL_DISPLAYSWITCH = 0x0040,
DISABLE_ASL_MODEM = 0x0080,
DISABLE_ASL_CARDREADER = 0x0100,
DISABLE_ASL_3G = 0x0200,
DISABLE_ASL_WIMAX = 0x0400,
DISABLE_ASL_HWCF = 0x0800
};
enum {
CM_ASL_WLAN = 0,
CM_ASL_BLUETOOTH,
CM_ASL_IRDA,
CM_ASL_1394,
CM_ASL_CAMERA,
CM_ASL_TV,
CM_ASL_GPS,
CM_ASL_DVDROM,
CM_ASL_DISPLAYSWITCH,
CM_ASL_PANELBRIGHT,
CM_ASL_BIOSFLASH,
CM_ASL_ACPIFLASH,
CM_ASL_CPUFV,
CM_ASL_CPUTEMPERATURE,
CM_ASL_FANCPU,
CM_ASL_FANCHASSIS,
CM_ASL_USBPORT1,
CM_ASL_USBPORT2,
CM_ASL_USBPORT3,
CM_ASL_MODEM,
CM_ASL_CARDREADER,
CM_ASL_3G,
CM_ASL_WIMAX,
CM_ASL_HWCF,
CM_ASL_LID,
CM_ASL_TYPE,
CM_ASL_PANELPOWER, /*P901*/
CM_ASL_TPD
};
static const char *cm_getv[] = {
"WLDG", "BTHG", NULL, NULL,
"CAMG", NULL, NULL, NULL,
NULL, "PBLG", NULL, NULL,
"CFVG", NULL, NULL, NULL,
"USBG", NULL, NULL, "MODG",
"CRDG", "M3GG", "WIMG", "HWCF",
"LIDG", "TYPE", "PBPG", "TPDG"
};
static const char *cm_setv[] = {
"WLDS", "BTHS", NULL, NULL,
"CAMS", NULL, NULL, NULL,
"SDSP", "PBLS", "HDPS", NULL,
"CFVS", NULL, NULL, NULL,
"USBG", NULL, NULL, "MODS",
"CRDS", "M3GS", "WIMS", NULL,
NULL, NULL, "PBPS", "TPDS"
};
static const struct key_entry eeepc_keymap[] = {
{ KE_KEY, 0x10, { KEY_WLAN } },
{ KE_KEY, 0x11, { KEY_WLAN } },
{ KE_KEY, 0x12, { KEY_PROG1 } },
{ KE_KEY, 0x13, { KEY_MUTE } },
{ KE_KEY, 0x14, { KEY_VOLUMEDOWN } },
{ KE_KEY, 0x15, { KEY_VOLUMEUP } },
{ KE_KEY, 0x16, { KEY_DISPLAY_OFF } },
{ KE_KEY, 0x1a, { KEY_COFFEE } },
{ KE_KEY, 0x1b, { KEY_ZOOM } },
{ KE_KEY, 0x1c, { KEY_PROG2 } },
{ KE_KEY, 0x1d, { KEY_PROG3 } },
{ KE_KEY, NOTIFY_BRN_MIN, { KEY_BRIGHTNESSDOWN } },
{ KE_KEY, NOTIFY_BRN_MAX, { KEY_BRIGHTNESSUP } },
{ KE_KEY, 0x30, { KEY_SWITCHVIDEOMODE } },
{ KE_KEY, 0x31, { KEY_SWITCHVIDEOMODE } },
{ KE_KEY, 0x32, { KEY_SWITCHVIDEOMODE } },
{ KE_KEY, 0x37, { KEY_F13 } }, /* Disable Touchpad */
{ KE_KEY, 0x38, { KEY_F14 } },
{ KE_IGNORE, 0x50, { KEY_RESERVED } }, /* AC plugged */
{ KE_IGNORE, 0x51, { KEY_RESERVED } }, /* AC unplugged */
{ KE_END, 0 },
};
/*
* This is the main structure, we can use it to store useful information
*/
struct eeepc_laptop {
acpi_handle handle; /* the handle of the acpi device */
u32 cm_supported; /* the control methods supported
by this BIOS */
bool cpufv_disabled;
bool hotplug_disabled;
u16 event_count[128]; /* count for each event */
struct platform_device *platform_device;
struct acpi_device *device; /* the device we are in */
struct backlight_device *backlight_device;
struct input_dev *inputdev;
struct rfkill *wlan_rfkill;
struct rfkill *bluetooth_rfkill;
struct rfkill *wwan3g_rfkill;
struct rfkill *wimax_rfkill;
struct hotplug_slot hotplug_slot;
struct mutex hotplug_lock;
struct led_classdev tpd_led;
int tpd_led_wk;
struct workqueue_struct *led_workqueue;
struct work_struct tpd_led_work;
};
/*
* ACPI Helpers
*/
static int write_acpi_int(acpi_handle handle, const char *method, int val)
{
acpi_status status;
status = acpi_execute_simple_method(handle, (char *)method, val);
return (status == AE_OK ? 0 : -1);
}
static int read_acpi_int(acpi_handle handle, const char *method, int *val)
{
acpi_status status;
unsigned long long result;
status = acpi_evaluate_integer(handle, (char *)method, NULL, &result);
if (ACPI_FAILURE(status)) {
*val = -1;
return -1;
} else {
*val = result;
return 0;
}
}
static int set_acpi(struct eeepc_laptop *eeepc, int cm, int value)
{
const char *method = cm_setv[cm];
if (method == NULL)
return -ENODEV;
if ((eeepc->cm_supported & (0x1 << cm)) == 0)
return -ENODEV;
if (write_acpi_int(eeepc->handle, method, value))
pr_warn("Error writing %s\n", method);
return 0;
}
static int get_acpi(struct eeepc_laptop *eeepc, int cm)
{
const char *method = cm_getv[cm];
int value;
if (method == NULL)
return -ENODEV;
if ((eeepc->cm_supported & (0x1 << cm)) == 0)
return -ENODEV;
if (read_acpi_int(eeepc->handle, method, &value))
pr_warn("Error reading %s\n", method);
return value;
}
static int acpi_setter_handle(struct eeepc_laptop *eeepc, int cm,
acpi_handle *handle)
{
const char *method = cm_setv[cm];
acpi_status status;
if (method == NULL)
return -ENODEV;
if ((eeepc->cm_supported & (0x1 << cm)) == 0)
return -ENODEV;
status = acpi_get_handle(eeepc->handle, (char *)method,
handle);
if (status != AE_OK) {
pr_warn("Error finding %s\n", method);
return -ENODEV;
}
return 0;
}
/*
* Sys helpers
*/
static int parse_arg(const char *buf, int *val)
{
if (sscanf(buf, "%i", val) != 1)
return -EINVAL;
return 0;
}
static ssize_t store_sys_acpi(struct device *dev, int cm,
const char *buf, size_t count)
{
struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
int rv, value;
rv = parse_arg(buf, &value);
if (rv < 0)
return rv;
rv = set_acpi(eeepc, cm, value);
if (rv < 0)
return -EIO;
return count;
}
static ssize_t show_sys_acpi(struct device *dev, int cm, char *buf)
{
struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
int value = get_acpi(eeepc, cm);
if (value < 0)
return -EIO;
return sprintf(buf, "%d\n", value);
}
#define EEEPC_ACPI_SHOW_FUNC(_name, _cm) \
static ssize_t _name##_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
return show_sys_acpi(dev, _cm, buf); \
}
#define EEEPC_ACPI_STORE_FUNC(_name, _cm) \
static ssize_t _name##_store(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
return store_sys_acpi(dev, _cm, buf, count); \
}
#define EEEPC_CREATE_DEVICE_ATTR_RW(_name, _cm) \
EEEPC_ACPI_SHOW_FUNC(_name, _cm) \
EEEPC_ACPI_STORE_FUNC(_name, _cm) \
static DEVICE_ATTR_RW(_name)
#define EEEPC_CREATE_DEVICE_ATTR_WO(_name, _cm) \
EEEPC_ACPI_STORE_FUNC(_name, _cm) \
static DEVICE_ATTR_WO(_name)
EEEPC_CREATE_DEVICE_ATTR_RW(camera, CM_ASL_CAMERA);
EEEPC_CREATE_DEVICE_ATTR_RW(cardr, CM_ASL_CARDREADER);
EEEPC_CREATE_DEVICE_ATTR_WO(disp, CM_ASL_DISPLAYSWITCH);
struct eeepc_cpufv {
int num;
int cur;
};
static int get_cpufv(struct eeepc_laptop *eeepc, struct eeepc_cpufv *c)
{
c->cur = get_acpi(eeepc, CM_ASL_CPUFV);
if (c->cur < 0)
return -ENODEV;
c->num = (c->cur >> 8) & 0xff;
c->cur &= 0xff;
if (c->num == 0 || c->num > 12)
return -ENODEV;
return 0;
}
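/*
 * Worked example: a CM_ASL_CPUFV reading of 0x301 decodes to three
 * supported presets (num = 3) with preset 1 currently selected, so
 * available_cpufv below reports "0 1 2" and cpufv reports 0x301.
 */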
static ssize_t available_cpufv_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
struct eeepc_cpufv c;
int i;
ssize_t len = 0;
if (get_cpufv(eeepc, &c))
return -ENODEV;
for (i = 0; i < c.num; i++)
len += sprintf(buf + len, "%d ", i);
len += sprintf(buf + len, "\n");
return len;
}
static ssize_t cpufv_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
struct eeepc_cpufv c;
if (get_cpufv(eeepc, &c))
return -ENODEV;
return sprintf(buf, "%#x\n", (c.num << 8) | c.cur);
}
static ssize_t cpufv_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
struct eeepc_cpufv c;
int rv, value;
if (eeepc->cpufv_disabled)
return -EPERM;
if (get_cpufv(eeepc, &c))
return -ENODEV;
rv = parse_arg(buf, &value);
if (rv < 0)
return rv;
if (value < 0 || value >= c.num)
return -EINVAL;
rv = set_acpi(eeepc, CM_ASL_CPUFV, value);
if (rv)
return rv;
return count;
}
static ssize_t cpufv_disabled_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", eeepc->cpufv_disabled);
}
static ssize_t cpufv_disabled_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
int rv, value;
rv = parse_arg(buf, &value);
if (rv < 0)
return rv;
switch (value) {
case 0:
if (eeepc->cpufv_disabled)
pr_warn("cpufv enabled (not officially supported on this model)\n");
eeepc->cpufv_disabled = false;
return count;
case 1:
return -EPERM;
default:
return -EINVAL;
}
}
static DEVICE_ATTR_RW(cpufv);
static DEVICE_ATTR_RO(available_cpufv);
static DEVICE_ATTR_RW(cpufv_disabled);
static struct attribute *platform_attributes[] = {
&dev_attr_camera.attr,
&dev_attr_cardr.attr,
&dev_attr_disp.attr,
&dev_attr_cpufv.attr,
&dev_attr_available_cpufv.attr,
&dev_attr_cpufv_disabled.attr,
NULL
};
static const struct attribute_group platform_attribute_group = {
.attrs = platform_attributes
};
static int eeepc_platform_init(struct eeepc_laptop *eeepc)
{
int result;
eeepc->platform_device = platform_device_alloc(EEEPC_LAPTOP_FILE, PLATFORM_DEVID_NONE);
if (!eeepc->platform_device)
return -ENOMEM;
platform_set_drvdata(eeepc->platform_device, eeepc);
result = platform_device_add(eeepc->platform_device);
if (result)
goto fail_platform_device;
result = sysfs_create_group(&eeepc->platform_device->dev.kobj,
&platform_attribute_group);
if (result)
goto fail_sysfs;
return 0;
fail_sysfs:
platform_device_del(eeepc->platform_device);
fail_platform_device:
platform_device_put(eeepc->platform_device);
return result;
}
static void eeepc_platform_exit(struct eeepc_laptop *eeepc)
{
sysfs_remove_group(&eeepc->platform_device->dev.kobj,
&platform_attribute_group);
platform_device_unregister(eeepc->platform_device);
}
/*
* LEDs
*/
/*
* These functions actually update the LEDs, and are called from a
* workqueue. By doing this as separate work rather than when the LED
* subsystem asks, we avoid messing with the Asus ACPI stuff during a
* potentially bad time, such as a timer interrupt.
*/
static void tpd_led_update(struct work_struct *work)
{
struct eeepc_laptop *eeepc;
eeepc = container_of(work, struct eeepc_laptop, tpd_led_work);
set_acpi(eeepc, CM_ASL_TPD, eeepc->tpd_led_wk);
}
static void tpd_led_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct eeepc_laptop *eeepc;
eeepc = container_of(led_cdev, struct eeepc_laptop, tpd_led);
eeepc->tpd_led_wk = (value > 0) ? 1 : 0;
queue_work(eeepc->led_workqueue, &eeepc->tpd_led_work);
}
static enum led_brightness tpd_led_get(struct led_classdev *led_cdev)
{
struct eeepc_laptop *eeepc;
eeepc = container_of(led_cdev, struct eeepc_laptop, tpd_led);
return get_acpi(eeepc, CM_ASL_TPD);
}
static int eeepc_led_init(struct eeepc_laptop *eeepc)
{
int rv;
if (get_acpi(eeepc, CM_ASL_TPD) == -ENODEV)
return 0;
eeepc->led_workqueue = create_singlethread_workqueue("led_workqueue");
if (!eeepc->led_workqueue)
return -ENOMEM;
INIT_WORK(&eeepc->tpd_led_work, tpd_led_update);
eeepc->tpd_led.name = "eeepc::touchpad";
eeepc->tpd_led.brightness_set = tpd_led_set;
if (get_acpi(eeepc, CM_ASL_TPD) >= 0) /* if method is available */
eeepc->tpd_led.brightness_get = tpd_led_get;
eeepc->tpd_led.max_brightness = 1;
rv = led_classdev_register(&eeepc->platform_device->dev,
&eeepc->tpd_led);
if (rv) {
destroy_workqueue(eeepc->led_workqueue);
return rv;
}
return 0;
}
static void eeepc_led_exit(struct eeepc_laptop *eeepc)
{
led_classdev_unregister(&eeepc->tpd_led);
if (eeepc->led_workqueue)
destroy_workqueue(eeepc->led_workqueue);
}
/*
* PCI hotplug (for wlan rfkill)
*/
static bool eeepc_wlan_rfkill_blocked(struct eeepc_laptop *eeepc)
{
if (get_acpi(eeepc, CM_ASL_WLAN) == 1)
return false;
return true;
}
static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle)
{
struct pci_dev *port;
struct pci_dev *dev;
struct pci_bus *bus;
bool blocked = eeepc_wlan_rfkill_blocked(eeepc);
bool absent;
u32 l;
if (eeepc->wlan_rfkill)
rfkill_set_sw_state(eeepc->wlan_rfkill, blocked);
mutex_lock(&eeepc->hotplug_lock);
pci_lock_rescan_remove();
if (!eeepc->hotplug_slot.ops)
goto out_unlock;
port = acpi_get_pci_dev(handle);
if (!port) {
pr_warn("Unable to find port\n");
goto out_unlock;
}
bus = port->subordinate;
if (!bus) {
pr_warn("Unable to find PCI bus 1?\n");
goto out_put_dev;
}
if (pci_bus_read_config_dword(bus, 0, PCI_VENDOR_ID, &l)) {
pr_err("Unable to read PCI config space?\n");
goto out_put_dev;
}
absent = (l == 0xffffffff);
if (blocked != absent) {
pr_warn("BIOS says wireless lan is %s, but the pci device is %s\n",
blocked ? "blocked" : "unblocked",
absent ? "absent" : "present");
pr_warn("skipped wireless hotplug as probably inappropriate for this model\n");
goto out_put_dev;
}
if (!blocked) {
dev = pci_get_slot(bus, 0);
if (dev) {
/* Device already present */
pci_dev_put(dev);
goto out_put_dev;
}
dev = pci_scan_single_device(bus, 0);
if (dev) {
pci_bus_assign_resources(bus);
pci_bus_add_device(dev);
}
} else {
dev = pci_get_slot(bus, 0);
if (dev) {
pci_stop_and_remove_bus_device(dev);
pci_dev_put(dev);
}
}
out_put_dev:
pci_dev_put(port);
out_unlock:
pci_unlock_rescan_remove();
mutex_unlock(&eeepc->hotplug_lock);
}
static void eeepc_rfkill_hotplug_update(struct eeepc_laptop *eeepc, char *node)
{
acpi_status status = AE_OK;
acpi_handle handle;
status = acpi_get_handle(NULL, node, &handle);
if (ACPI_SUCCESS(status))
eeepc_rfkill_hotplug(eeepc, handle);
}
static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
{
struct eeepc_laptop *eeepc = data;
if (event != ACPI_NOTIFY_BUS_CHECK)
return;
eeepc_rfkill_hotplug(eeepc, handle);
}
static int eeepc_register_rfkill_notifier(struct eeepc_laptop *eeepc,
char *node)
{
acpi_status status;
acpi_handle handle;
status = acpi_get_handle(NULL, node, &handle);
if (ACPI_FAILURE(status))
return -ENODEV;
status = acpi_install_notify_handler(handle,
ACPI_SYSTEM_NOTIFY,
eeepc_rfkill_notify,
eeepc);
if (ACPI_FAILURE(status))
pr_warn("Failed to register notify on %s\n", node);
/*
* Refresh pci hotplug in case the rfkill state was
* changed during setup.
*/
eeepc_rfkill_hotplug(eeepc, handle);
return 0;
}
static void eeepc_unregister_rfkill_notifier(struct eeepc_laptop *eeepc,
char *node)
{
acpi_status status = AE_OK;
acpi_handle handle;
status = acpi_get_handle(NULL, node, &handle);
if (ACPI_FAILURE(status))
return;
status = acpi_remove_notify_handler(handle,
ACPI_SYSTEM_NOTIFY,
eeepc_rfkill_notify);
if (ACPI_FAILURE(status))
pr_err("Error removing rfkill notify handler %s\n",
node);
/*
* Refresh pci hotplug in case the rfkill state was changed while
* the notify handler was being removed.
*/
eeepc_rfkill_hotplug(eeepc, handle);
}
static int eeepc_get_adapter_status(struct hotplug_slot *hotplug_slot,
u8 *value)
{
struct eeepc_laptop *eeepc;
int val;
eeepc = container_of(hotplug_slot, struct eeepc_laptop, hotplug_slot);
val = get_acpi(eeepc, CM_ASL_WLAN);
if (val == 1 || val == 0)
*value = val;
else
return -EINVAL;
return 0;
}
static const struct hotplug_slot_ops eeepc_hotplug_slot_ops = {
.get_adapter_status = eeepc_get_adapter_status,
.get_power_status = eeepc_get_adapter_status,
};
static int eeepc_setup_pci_hotplug(struct eeepc_laptop *eeepc)
{
int ret = -ENOMEM;
struct pci_bus *bus = pci_find_bus(0, 1);
if (!bus) {
pr_err("Unable to find wifi PCI bus\n");
return -ENODEV;
}
eeepc->hotplug_slot.ops = &eeepc_hotplug_slot_ops;
ret = pci_hp_register(&eeepc->hotplug_slot, bus, 0, "eeepc-wifi");
if (ret) {
pr_err("Unable to register hotplug slot - %d\n", ret);
goto error_register;
}
return 0;
error_register:
eeepc->hotplug_slot.ops = NULL;
return ret;
}
/*
* Rfkill devices
*/
static int eeepc_rfkill_set(void *data, bool blocked)
{
acpi_handle handle = data;
return write_acpi_int(handle, NULL, !blocked);
}
static const struct rfkill_ops eeepc_rfkill_ops = {
.set_block = eeepc_rfkill_set,
};
static int eeepc_new_rfkill(struct eeepc_laptop *eeepc,
struct rfkill **rfkill,
const char *name,
enum rfkill_type type, int cm)
{
acpi_handle handle;
int result;
result = acpi_setter_handle(eeepc, cm, &handle);
if (result < 0)
return result;
*rfkill = rfkill_alloc(name, &eeepc->platform_device->dev, type,
&eeepc_rfkill_ops, handle);
if (!*rfkill)
return -EINVAL;
rfkill_init_sw_state(*rfkill, get_acpi(eeepc, cm) != 1);
result = rfkill_register(*rfkill);
if (result) {
rfkill_destroy(*rfkill);
*rfkill = NULL;
return result;
}
return 0;
}
static char EEEPC_RFKILL_NODE_1[] = "\\_SB.PCI0.P0P5";
static char EEEPC_RFKILL_NODE_2[] = "\\_SB.PCI0.P0P6";
static char EEEPC_RFKILL_NODE_3[] = "\\_SB.PCI0.P0P7";
static void eeepc_rfkill_exit(struct eeepc_laptop *eeepc)
{
eeepc_unregister_rfkill_notifier(eeepc, EEEPC_RFKILL_NODE_1);
eeepc_unregister_rfkill_notifier(eeepc, EEEPC_RFKILL_NODE_2);
eeepc_unregister_rfkill_notifier(eeepc, EEEPC_RFKILL_NODE_3);
if (eeepc->wlan_rfkill) {
rfkill_unregister(eeepc->wlan_rfkill);
rfkill_destroy(eeepc->wlan_rfkill);
eeepc->wlan_rfkill = NULL;
}
if (eeepc->hotplug_slot.ops)
pci_hp_deregister(&eeepc->hotplug_slot);
if (eeepc->bluetooth_rfkill) {
rfkill_unregister(eeepc->bluetooth_rfkill);
rfkill_destroy(eeepc->bluetooth_rfkill);
eeepc->bluetooth_rfkill = NULL;
}
if (eeepc->wwan3g_rfkill) {
rfkill_unregister(eeepc->wwan3g_rfkill);
rfkill_destroy(eeepc->wwan3g_rfkill);
eeepc->wwan3g_rfkill = NULL;
}
if (eeepc->wimax_rfkill) {
rfkill_unregister(eeepc->wimax_rfkill);
rfkill_destroy(eeepc->wimax_rfkill);
eeepc->wimax_rfkill = NULL;
}
}
static int eeepc_rfkill_init(struct eeepc_laptop *eeepc)
{
int result = 0;
mutex_init(&eeepc->hotplug_lock);
result = eeepc_new_rfkill(eeepc, &eeepc->wlan_rfkill,
"eeepc-wlan", RFKILL_TYPE_WLAN,
CM_ASL_WLAN);
if (result && result != -ENODEV)
goto exit;
result = eeepc_new_rfkill(eeepc, &eeepc->bluetooth_rfkill,
"eeepc-bluetooth", RFKILL_TYPE_BLUETOOTH,
CM_ASL_BLUETOOTH);
if (result && result != -ENODEV)
goto exit;
result = eeepc_new_rfkill(eeepc, &eeepc->wwan3g_rfkill,
"eeepc-wwan3g", RFKILL_TYPE_WWAN,
CM_ASL_3G);
if (result && result != -ENODEV)
goto exit;
result = eeepc_new_rfkill(eeepc, &eeepc->wimax_rfkill,
"eeepc-wimax", RFKILL_TYPE_WIMAX,
CM_ASL_WIMAX);
if (result && result != -ENODEV)
goto exit;
if (eeepc->hotplug_disabled)
return 0;
result = eeepc_setup_pci_hotplug(eeepc);
/*
* If we get -EBUSY then something else is handling the PCI hotplug -
* don't fail in this case
*/
if (result == -EBUSY)
result = 0;
eeepc_register_rfkill_notifier(eeepc, EEEPC_RFKILL_NODE_1);
eeepc_register_rfkill_notifier(eeepc, EEEPC_RFKILL_NODE_2);
eeepc_register_rfkill_notifier(eeepc, EEEPC_RFKILL_NODE_3);
exit:
if (result && result != -ENODEV)
eeepc_rfkill_exit(eeepc);
return result;
}
/*
* Platform driver - hibernate/resume callbacks
*/
static int eeepc_hotk_thaw(struct device *device)
{
struct eeepc_laptop *eeepc = dev_get_drvdata(device);
if (eeepc->wlan_rfkill) {
int wlan;
/*
* Work around bios bug - acpi _PTS turns off the wireless led
* during suspend. Normally it restores it on resume, but
* we should kick it ourselves in case hibernation is aborted.
*/
wlan = get_acpi(eeepc, CM_ASL_WLAN);
if (wlan >= 0)
set_acpi(eeepc, CM_ASL_WLAN, wlan);
}
return 0;
}
static int eeepc_hotk_restore(struct device *device)
{
struct eeepc_laptop *eeepc = dev_get_drvdata(device);
/* Refresh both wlan rfkill state and pci hotplug */
if (eeepc->wlan_rfkill) {
eeepc_rfkill_hotplug_update(eeepc, EEEPC_RFKILL_NODE_1);
eeepc_rfkill_hotplug_update(eeepc, EEEPC_RFKILL_NODE_2);
eeepc_rfkill_hotplug_update(eeepc, EEEPC_RFKILL_NODE_3);
}
if (eeepc->bluetooth_rfkill)
rfkill_set_sw_state(eeepc->bluetooth_rfkill,
get_acpi(eeepc, CM_ASL_BLUETOOTH) != 1);
if (eeepc->wwan3g_rfkill)
rfkill_set_sw_state(eeepc->wwan3g_rfkill,
get_acpi(eeepc, CM_ASL_3G) != 1);
if (eeepc->wimax_rfkill)
rfkill_set_sw_state(eeepc->wimax_rfkill,
get_acpi(eeepc, CM_ASL_WIMAX) != 1);
return 0;
}
static const struct dev_pm_ops eeepc_pm_ops = {
.thaw = eeepc_hotk_thaw,
.restore = eeepc_hotk_restore,
};
static struct platform_driver platform_driver = {
.driver = {
.name = EEEPC_LAPTOP_FILE,
.pm = &eeepc_pm_ops,
}
};
/*
* Hwmon device
*/
#define EEEPC_EC_SC00 0x61
#define EEEPC_EC_FAN_PWM (EEEPC_EC_SC00 + 2) /* Fan PWM duty cycle (%) */
#define EEEPC_EC_FAN_HRPM (EEEPC_EC_SC00 + 5) /* High byte, fan speed (RPM) */
#define EEEPC_EC_FAN_LRPM (EEEPC_EC_SC00 + 6) /* Low byte, fan speed (RPM) */
#define EEEPC_EC_SFB0 0xD0
#define EEEPC_EC_FAN_CTRL (EEEPC_EC_SFB0 + 3) /* Byte containing SF25 */
static inline int eeepc_pwm_to_lmsensors(int value)
{
return value * 255 / 100;
}
static inline int eeepc_lmsensors_to_pwm(int value)
{
value = clamp_val(value, 0, 255);
return value * 100 / 255;
}
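/*
 * Worked example: the EC stores the fan duty cycle as a percentage,
 * while the hwmon/lm-sensors convention is 0-255. An EC reading of
 * 50 (%) is reported as 50 * 255 / 100 = 127, and a sysfs write of
 * 255 is clamped and stored as 100 (%).
 */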
static int eeepc_get_fan_pwm(void)
{
u8 value = 0;
ec_read(EEEPC_EC_FAN_PWM, &value);
return eeepc_pwm_to_lmsensors(value);
}
static void eeepc_set_fan_pwm(int value)
{
value = eeepc_lmsensors_to_pwm(value);
ec_write(EEEPC_EC_FAN_PWM, value);
}
static int eeepc_get_fan_rpm(void)
{
u8 high = 0;
u8 low = 0;
ec_read(EEEPC_EC_FAN_HRPM, &high);
ec_read(EEEPC_EC_FAN_LRPM, &low);
return high << 8 | low;
}
#define EEEPC_EC_FAN_CTRL_BIT 0x02
#define EEEPC_FAN_CTRL_MANUAL 1
#define EEEPC_FAN_CTRL_AUTO 2
static int eeepc_get_fan_ctrl(void)
{
u8 value = 0;
ec_read(EEEPC_EC_FAN_CTRL, &value);
if (value & EEEPC_EC_FAN_CTRL_BIT)
return EEEPC_FAN_CTRL_MANUAL;
else
return EEEPC_FAN_CTRL_AUTO;
}
static void eeepc_set_fan_ctrl(int manual)
{
u8 value = 0;
ec_read(EEEPC_EC_FAN_CTRL, &value);
if (manual == EEEPC_FAN_CTRL_MANUAL)
value |= EEEPC_EC_FAN_CTRL_BIT;
else
value &= ~EEEPC_EC_FAN_CTRL_BIT;
ec_write(EEEPC_EC_FAN_CTRL, value);
}
static ssize_t store_sys_hwmon(void (*set)(int), const char *buf, size_t count)
{
int rv, value;
rv = parse_arg(buf, &value);
if (rv < 0)
return rv;
set(value);
return count;
}
static ssize_t show_sys_hwmon(int (*get)(void), char *buf)
{
return sprintf(buf, "%d\n", get());
}
#define EEEPC_SENSOR_SHOW_FUNC(_name, _get) \
static ssize_t _name##_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
return show_sys_hwmon(_get, buf); \
}
#define EEEPC_SENSOR_STORE_FUNC(_name, _set) \
static ssize_t _name##_store(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
return store_sys_hwmon(_set, buf, count); \
}
#define EEEPC_CREATE_SENSOR_ATTR_RW(_name, _get, _set) \
EEEPC_SENSOR_SHOW_FUNC(_name, _get) \
EEEPC_SENSOR_STORE_FUNC(_name, _set) \
static DEVICE_ATTR_RW(_name)
#define EEEPC_CREATE_SENSOR_ATTR_RO(_name, _get) \
EEEPC_SENSOR_SHOW_FUNC(_name, _get) \
static DEVICE_ATTR_RO(_name)
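/*
 * For instance, EEEPC_CREATE_SENSOR_ATTR_RO(fan1_input, ...) below
 * expands to a fan1_input_show() wrapper around eeepc_get_fan_rpm()
 * plus the dev_attr_fan1_input attribute used in hwmon_attrs[].
 */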
EEEPC_CREATE_SENSOR_ATTR_RO(fan1_input, eeepc_get_fan_rpm);
EEEPC_CREATE_SENSOR_ATTR_RW(pwm1, eeepc_get_fan_pwm,
eeepc_set_fan_pwm);
EEEPC_CREATE_SENSOR_ATTR_RW(pwm1_enable, eeepc_get_fan_ctrl,
eeepc_set_fan_ctrl);
static struct attribute *hwmon_attrs[] = {
&dev_attr_pwm1.attr,
&dev_attr_fan1_input.attr,
&dev_attr_pwm1_enable.attr,
NULL
};
ATTRIBUTE_GROUPS(hwmon);
static int eeepc_hwmon_init(struct eeepc_laptop *eeepc)
{
struct device *dev = &eeepc->platform_device->dev;
struct device *hwmon;
hwmon = devm_hwmon_device_register_with_groups(dev, "eeepc", NULL,
hwmon_groups);
if (IS_ERR(hwmon)) {
pr_err("Could not register eeepc hwmon device\n");
return PTR_ERR(hwmon);
}
return 0;
}
/*
* Backlight device
*/
static int read_brightness(struct backlight_device *bd)
{
struct eeepc_laptop *eeepc = bl_get_data(bd);
return get_acpi(eeepc, CM_ASL_PANELBRIGHT);
}
static int set_brightness(struct backlight_device *bd, int value)
{
struct eeepc_laptop *eeepc = bl_get_data(bd);
return set_acpi(eeepc, CM_ASL_PANELBRIGHT, value);
}
static int update_bl_status(struct backlight_device *bd)
{
return set_brightness(bd, bd->props.brightness);
}
static const struct backlight_ops eeepcbl_ops = {
.get_brightness = read_brightness,
.update_status = update_bl_status,
};
static int eeepc_backlight_notify(struct eeepc_laptop *eeepc)
{
struct backlight_device *bd = eeepc->backlight_device;
int old = bd->props.brightness;
backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY);
return old;
}
static int eeepc_backlight_init(struct eeepc_laptop *eeepc)
{
struct backlight_properties props;
struct backlight_device *bd;
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_PLATFORM;
props.max_brightness = 15;
bd = backlight_device_register(EEEPC_LAPTOP_FILE,
&eeepc->platform_device->dev, eeepc,
&eeepcbl_ops, &props);
if (IS_ERR(bd)) {
pr_err("Could not register eeepc backlight device\n");
eeepc->backlight_device = NULL;
return PTR_ERR(bd);
}
eeepc->backlight_device = bd;
bd->props.brightness = read_brightness(bd);
bd->props.power = FB_BLANK_UNBLANK;
backlight_update_status(bd);
return 0;
}
static void eeepc_backlight_exit(struct eeepc_laptop *eeepc)
{
backlight_device_unregister(eeepc->backlight_device);
eeepc->backlight_device = NULL;
}
/*
* Input device (i.e. hotkeys)
*/
static int eeepc_input_init(struct eeepc_laptop *eeepc)
{
struct input_dev *input;
int error;
input = input_allocate_device();
if (!input)
return -ENOMEM;
input->name = "Asus EeePC extra buttons";
input->phys = EEEPC_LAPTOP_FILE "/input0";
input->id.bustype = BUS_HOST;
input->dev.parent = &eeepc->platform_device->dev;
error = sparse_keymap_setup(input, eeepc_keymap, NULL);
if (error) {
pr_err("Unable to setup input device keymap\n");
goto err_free_dev;
}
error = input_register_device(input);
if (error) {
pr_err("Unable to register input device\n");
goto err_free_dev;
}
eeepc->inputdev = input;
return 0;
err_free_dev:
input_free_device(input);
return error;
}
static void eeepc_input_exit(struct eeepc_laptop *eeepc)
{
if (eeepc->inputdev)
input_unregister_device(eeepc->inputdev);
eeepc->inputdev = NULL;
}
/*
* ACPI driver
*/
static void eeepc_input_notify(struct eeepc_laptop *eeepc, int event)
{
if (!eeepc->inputdev)
return;
if (!sparse_keymap_report_event(eeepc->inputdev, event, 1, true))
pr_info("Unknown key %x pressed\n", event);
}
static void eeepc_acpi_notify(struct acpi_device *device, u32 event)
{
struct eeepc_laptop *eeepc = acpi_driver_data(device);
int old_brightness, new_brightness;
u16 count;
if (event > ACPI_MAX_SYS_NOTIFY)
return;
count = eeepc->event_count[event % 128]++;
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event,
count);
/* Brightness events are special */
if (event < NOTIFY_BRN_MIN || event > NOTIFY_BRN_MAX) {
eeepc_input_notify(eeepc, event);
return;
}
/* Ignore them completely if the acpi video driver is used */
if (!eeepc->backlight_device)
return;
/* Update the backlight device. */
old_brightness = eeepc_backlight_notify(eeepc);
/* Convert event to keypress (obsolescent hack) */
new_brightness = event - NOTIFY_BRN_MIN;
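/*
 * Worked example: event 0x25 gives new_brightness 5; if the previous
 * brightness was 7, the event is reported below as KEY_BRIGHTNESSDOWN.
 */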
if (new_brightness < old_brightness) {
event = NOTIFY_BRN_MIN; /* brightness down */
} else if (new_brightness > old_brightness) {
event = NOTIFY_BRN_MAX; /* brightness up */
} else {
/*
* no change in brightness - already at min/max,
* event will be desired value (or else ignored)
*/
}
eeepc_input_notify(eeepc, event);
}
static void eeepc_dmi_check(struct eeepc_laptop *eeepc)
{
const char *model;
model = dmi_get_system_info(DMI_PRODUCT_NAME);
if (!model)
return;
/*
* Blacklist for setting cpufv (cpu speed).
*
* EeePC 4G ("701") implements CFVS, but it is not supported
* by the pre-installed OS, and the original option to change it
* in the BIOS setup screen was removed in later versions.
*
* Judging by the lack of "Super Hybrid Engine" on Asus product pages,
* this applies to all "701" models (4G/4G Surf/2G Surf).
*
* So Asus made a deliberate decision not to support it on this model.
* We have several reports that using it can cause the system to hang.
*
* The hang has also been reported on a "702" (Model name "8G"?).
*
* We avoid dmi_check_system() / dmi_match(), because they use
* substring matching. We don't want to affect the "701SD"
* and "701SDX" models, because they do support S.H.E.
*/
if (strcmp(model, "701") == 0 || strcmp(model, "702") == 0) {
eeepc->cpufv_disabled = true;
pr_info("model %s does not officially support setting cpu speed\n",
model);
pr_info("cpufv disabled to avoid instability\n");
}
/*
* Blacklist for wlan hotplug
*
* The Eeepc 1005HA doesn't work like other models and doesn't need
* the hotplug code. In fact, the current hotplug code seems to
* unplug another device...
*/
if (strcmp(model, "1005HA") == 0 || strcmp(model, "1201N") == 0 ||
strcmp(model, "1005PE") == 0) {
eeepc->hotplug_disabled = true;
pr_info("wlan hotplug disabled\n");
}
}
static void cmsg_quirk(struct eeepc_laptop *eeepc, int cm, const char *name)
{
int dummy;
/*
* Some BIOSes do not report cm although it is available. Check if
* cm_getv[cm] works and, if yes, assume cm should be set.
*/
if (!(eeepc->cm_supported & (1 << cm))
&& !read_acpi_int(eeepc->handle, cm_getv[cm], &dummy)) {
pr_info("%s (%x) not reported by BIOS, enabling anyway\n",
name, 1 << cm);
eeepc->cm_supported |= 1 << cm;
}
}
static void cmsg_quirks(struct eeepc_laptop *eeepc)
{
cmsg_quirk(eeepc, CM_ASL_LID, "LID");
cmsg_quirk(eeepc, CM_ASL_TYPE, "TYPE");
cmsg_quirk(eeepc, CM_ASL_PANELPOWER, "PANELPOWER");
cmsg_quirk(eeepc, CM_ASL_TPD, "TPD");
}
static int eeepc_acpi_init(struct eeepc_laptop *eeepc)
{
unsigned int init_flags;
int result;
result = acpi_bus_get_status(eeepc->device);
if (result)
return result;
if (!eeepc->device->status.present) {
pr_err("Hotkey device not present, aborting\n");
return -ENODEV;
}
init_flags = DISABLE_ASL_WLAN | DISABLE_ASL_DISPLAYSWITCH;
pr_notice("Hotkey init flags 0x%x\n", init_flags);
if (write_acpi_int(eeepc->handle, "INIT", init_flags)) {
pr_err("Hotkey initialization failed\n");
return -ENODEV;
}
/* get control methods supported */
if (read_acpi_int(eeepc->handle, "CMSG", &eeepc->cm_supported)) {
pr_err("Get control methods supported failed\n");
return -ENODEV;
}
cmsg_quirks(eeepc);
pr_info("Get control methods supported: 0x%x\n", eeepc->cm_supported);
return 0;
}
static void eeepc_enable_camera(struct eeepc_laptop *eeepc)
{
/*
* If the following call to set_acpi() fails, it's because there's no
* camera so we can ignore the error.
*/
if (get_acpi(eeepc, CM_ASL_CAMERA) == 0)
set_acpi(eeepc, CM_ASL_CAMERA, 1);
}
static bool eeepc_device_present;
static int eeepc_acpi_add(struct acpi_device *device)
{
struct eeepc_laptop *eeepc;
int result;
pr_notice(EEEPC_LAPTOP_NAME "\n");
eeepc = kzalloc(sizeof(struct eeepc_laptop), GFP_KERNEL);
if (!eeepc)
return -ENOMEM;
eeepc->handle = device->handle;
strcpy(acpi_device_name(device), EEEPC_ACPI_DEVICE_NAME);
strcpy(acpi_device_class(device), EEEPC_ACPI_CLASS);
device->driver_data = eeepc;
eeepc->device = device;
eeepc->hotplug_disabled = hotplug_disabled;
eeepc_dmi_check(eeepc);
result = eeepc_acpi_init(eeepc);
if (result)
goto fail_platform;
eeepc_enable_camera(eeepc);
/*
* Register the platform device first. It is used as a parent for the
* sub-devices below.
*
* Note that if there are multiple instances of this ACPI device it
* will bail out, because the platform device is registered with a
* fixed name. Of course it doesn't make sense to have more than one,
* and machine-specific scripts find the fixed name convenient. But
* it's also good for us to exclude multiple instances because both
* our hwmon and our wlan rfkill subdevice use global ACPI objects
* (the EC and the PCI wlan slot respectively).
*/
result = eeepc_platform_init(eeepc);
if (result)
goto fail_platform;
if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
result = eeepc_backlight_init(eeepc);
if (result)
goto fail_backlight;
}
result = eeepc_input_init(eeepc);
if (result)
goto fail_input;
result = eeepc_hwmon_init(eeepc);
if (result)
goto fail_hwmon;
result = eeepc_led_init(eeepc);
if (result)
goto fail_led;
result = eeepc_rfkill_init(eeepc);
if (result)
goto fail_rfkill;
eeepc_device_present = true;
return 0;
fail_rfkill:
eeepc_led_exit(eeepc);
fail_led:
fail_hwmon:
eeepc_input_exit(eeepc);
fail_input:
eeepc_backlight_exit(eeepc);
fail_backlight:
eeepc_platform_exit(eeepc);
fail_platform:
kfree(eeepc);
return result;
}
static void eeepc_acpi_remove(struct acpi_device *device)
{
struct eeepc_laptop *eeepc = acpi_driver_data(device);
eeepc_backlight_exit(eeepc);
eeepc_rfkill_exit(eeepc);
eeepc_input_exit(eeepc);
eeepc_led_exit(eeepc);
eeepc_platform_exit(eeepc);
kfree(eeepc);
}
static const struct acpi_device_id eeepc_device_ids[] = {
{EEEPC_ACPI_HID, 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, eeepc_device_ids);
static struct acpi_driver eeepc_acpi_driver = {
.name = EEEPC_LAPTOP_NAME,
.class = EEEPC_ACPI_CLASS,
.owner = THIS_MODULE,
.ids = eeepc_device_ids,
.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
.add = eeepc_acpi_add,
.remove = eeepc_acpi_remove,
.notify = eeepc_acpi_notify,
},
};
static int __init eeepc_laptop_init(void)
{
int result;
result = platform_driver_register(&platform_driver);
if (result < 0)
return result;
result = acpi_bus_register_driver(&eeepc_acpi_driver);
if (result < 0)
goto fail_acpi_driver;
if (!eeepc_device_present) {
result = -ENODEV;
goto fail_no_device;
}
return 0;
fail_no_device:
acpi_bus_unregister_driver(&eeepc_acpi_driver);
fail_acpi_driver:
platform_driver_unregister(&platform_driver);
return result;
}
static void __exit eeepc_laptop_exit(void)
{
acpi_bus_unregister_driver(&eeepc_acpi_driver);
platform_driver_unregister(&platform_driver);
}
module_init(eeepc_laptop_init);
module_exit(eeepc_laptop_exit);
| linux-master | drivers/platform/x86/eeepc-laptop.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* WMI embedded Binary MOF driver
*
* Copyright (c) 2015 Andrew Lutomirski
* Copyright (C) 2017 VMware, Inc. All Rights Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/wmi.h>
#define WMI_BMOF_GUID "05901221-D566-11D1-B2F0-00A0C9062910"
struct bmof_priv {
union acpi_object *bmofdata;
struct bin_attribute bmof_bin_attr;
};
static ssize_t read_bmof(struct file *filp, struct kobject *kobj, struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct bmof_priv *priv = container_of(attr, struct bmof_priv, bmof_bin_attr);
return memory_read_from_buffer(buf, count, &off, priv->bmofdata->buffer.pointer,
priv->bmofdata->buffer.length);
}
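/*
 * The buffer queried in probe below is exposed as a read-only "bmof"
 * sysfs attribute, typically at
 * /sys/bus/wmi/devices/05901221-D566-11D1-B2F0-00A0C9062910/bmof
 * (path shown for illustration); out-of-tree tools such as bmfdec can
 * decompile the blob into readable MOF.
 */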
static int wmi_bmof_probe(struct wmi_device *wdev, const void *context)
{
struct bmof_priv *priv;
int ret;
priv = devm_kzalloc(&wdev->dev, sizeof(struct bmof_priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
dev_set_drvdata(&wdev->dev, priv);
priv->bmofdata = wmidev_block_query(wdev, 0);
if (!priv->bmofdata) {
dev_err(&wdev->dev, "failed to read Binary MOF\n");
return -EIO;
}
if (priv->bmofdata->type != ACPI_TYPE_BUFFER) {
dev_err(&wdev->dev, "Binary MOF is not a buffer\n");
ret = -EIO;
goto err_free;
}
sysfs_bin_attr_init(&priv->bmof_bin_attr);
priv->bmof_bin_attr.attr.name = "bmof";
priv->bmof_bin_attr.attr.mode = 0400;
priv->bmof_bin_attr.read = read_bmof;
priv->bmof_bin_attr.size = priv->bmofdata->buffer.length;
ret = device_create_bin_file(&wdev->dev, &priv->bmof_bin_attr);
if (ret)
goto err_free;
return 0;
err_free:
kfree(priv->bmofdata);
return ret;
}
static void wmi_bmof_remove(struct wmi_device *wdev)
{
struct bmof_priv *priv = dev_get_drvdata(&wdev->dev);
device_remove_bin_file(&wdev->dev, &priv->bmof_bin_attr);
kfree(priv->bmofdata);
}
static const struct wmi_device_id wmi_bmof_id_table[] = {
{ .guid_string = WMI_BMOF_GUID },
{ },
};
static struct wmi_driver wmi_bmof_driver = {
.driver = {
.name = "wmi-bmof",
},
.probe = wmi_bmof_probe,
.remove = wmi_bmof_remove,
.id_table = wmi_bmof_id_table,
};
module_wmi_driver(wmi_bmof_driver);
MODULE_DEVICE_TABLE(wmi, wmi_bmof_id_table);
MODULE_AUTHOR("Andrew Lutomirski <[email protected]>");
MODULE_DESCRIPTION("WMI embedded Binary MOF driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/x86/wmi-bmof.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the Intel SCU IPC mechanism
*
* (C) Copyright 2008-2010 Intel Corporation
* Author: Sreedhara DS ([email protected])
*
* This driver provides IOCTL interfaces to call Intel SCU IPC driver API.
*/
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <asm/intel_scu_ipc.h>
static int major;
static struct intel_scu_ipc_dev *scu;
static DEFINE_MUTEX(scu_lock);
/* IOCTL commands */
#define INTE_SCU_IPC_REGISTER_READ 0
#define INTE_SCU_IPC_REGISTER_WRITE 1
#define INTE_SCU_IPC_REGISTER_UPDATE 2
struct scu_ipc_data {
u32 count; /* No. of registers */
u16 addr[5]; /* Register addresses */
u8 data[5]; /* Register data */
u8 mask; /* Valid for read-modify-write */
};
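/*
 * Illustrative userspace sketch (not an ABI document; the /dev node
 * name and the register address are assumptions -- the node must be
 * created by userspace from the dynamic major assigned below):
 *
 *	struct scu_ipc_data data = { .count = 1, .addr = { 0x24 } };
 *	int fd = open("/dev/intel_mid_scu", O_RDWR);
 *
 *	if (ioctl(fd, INTE_SCU_IPC_REGISTER_READ, &data) == 0)
 *		printf("reg %#x = %#x\n", data.addr[0], data.data[0]);
 */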
/**
* scu_reg_access - implement register access ioctls
* @cmd: command we are doing (read/write/update)
* @data: kernel copy of ioctl data
*
* Allow the user to perform register accesses on the SCU via the
* kernel interface
*/
static int scu_reg_access(u32 cmd, struct scu_ipc_data *data)
{
unsigned int count = data->count;
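/* Only transfers of 1, 2 or 4 registers are accepted. */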
if (count == 0 || count == 3 || count > 4)
return -EINVAL;
switch (cmd) {
case INTE_SCU_IPC_REGISTER_READ:
return intel_scu_ipc_dev_readv(scu, data->addr, data->data, count);
case INTE_SCU_IPC_REGISTER_WRITE:
return intel_scu_ipc_dev_writev(scu, data->addr, data->data, count);
case INTE_SCU_IPC_REGISTER_UPDATE:
return intel_scu_ipc_dev_update(scu, data->addr[0], data->data[0],
data->mask);
default:
return -ENOTTY;
}
}
/**
* scu_ipc_ioctl - control ioctls for the SCU
* @fp: file handle of the SCU device
* @cmd: ioctl code
* @arg: pointer to user passed structure
*
* Support the register read/write/update interfaces of the SCU
*/
static long scu_ipc_ioctl(struct file *fp, unsigned int cmd,
unsigned long arg)
{
int ret;
struct scu_ipc_data data;
void __user *argp = (void __user *)arg;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
if (copy_from_user(&data, argp, sizeof(struct scu_ipc_data)))
return -EFAULT;
ret = scu_reg_access(cmd, &data);
if (ret < 0)
return ret;
if (copy_to_user(argp, &data, sizeof(struct scu_ipc_data)))
return -EFAULT;
return 0;
}
static int scu_ipc_open(struct inode *inode, struct file *file)
{
int ret = 0;
/* Only a single open at a time */
mutex_lock(&scu_lock);
if (scu) {
ret = -EBUSY;
goto unlock;
}
scu = intel_scu_ipc_dev_get();
if (!scu)
ret = -ENODEV;
unlock:
mutex_unlock(&scu_lock);
return ret;
}
static int scu_ipc_release(struct inode *inode, struct file *file)
{
mutex_lock(&scu_lock);
intel_scu_ipc_dev_put(scu);
scu = NULL;
mutex_unlock(&scu_lock);
return 0;
}
static const struct file_operations scu_ipc_fops = {
.unlocked_ioctl = scu_ipc_ioctl,
.open = scu_ipc_open,
.release = scu_ipc_release,
};
static int __init ipc_module_init(void)
{
major = register_chrdev(0, "intel_mid_scu", &scu_ipc_fops);
if (major < 0)
return major;
return 0;
}
static void __exit ipc_module_exit(void)
{
unregister_chrdev(major, "intel_mid_scu");
}
module_init(ipc_module_init);
module_exit(ipc_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Utility driver for intel scu ipc");
MODULE_AUTHOR("Sreedhara <[email protected]>");
| linux-master | drivers/platform/x86/intel_scu_ipcutil.c |
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/*
* Mellanox platform driver
*
* Copyright (C) 2016-2018 Mellanox Technologies
* Copyright (C) 2016-2018 Vadim Pasternak <[email protected]>
*/
#include <linux/device.h>
#include <linux/dmi.h>
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/platform_data/i2c-mux-reg.h>
#include <linux/platform_data/mlxreg.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
#define MLX_PLAT_DEVICE_NAME "mlxplat"
/* LPC bus IO offsets */
#define MLXPLAT_CPLD_LPC_I2C_BASE_ADRR 0x2000
#define MLXPLAT_CPLD_LPC_REG_BASE_ADRR 0x2500
#define MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET 0x00
#define MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET 0x01
#define MLXPLAT_CPLD_LPC_REG_CPLD3_VER_OFFSET 0x02
#define MLXPLAT_CPLD_LPC_REG_CPLD4_VER_OFFSET 0x03
#define MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET 0x04
#define MLXPLAT_CPLD_LPC_REG_CPLD1_PN1_OFFSET 0x05
#define MLXPLAT_CPLD_LPC_REG_CPLD2_PN_OFFSET 0x06
#define MLXPLAT_CPLD_LPC_REG_CPLD2_PN1_OFFSET 0x07
#define MLXPLAT_CPLD_LPC_REG_CPLD3_PN_OFFSET 0x08
#define MLXPLAT_CPLD_LPC_REG_CPLD3_PN1_OFFSET 0x09
#define MLXPLAT_CPLD_LPC_REG_CPLD4_PN_OFFSET 0x0a
#define MLXPLAT_CPLD_LPC_REG_CPLD4_PN1_OFFSET 0x0b
#define MLXPLAT_CPLD_LPC_REG_RESET_GP1_OFFSET 0x17
#define MLXPLAT_CPLD_LPC_REG_RESET_GP2_OFFSET 0x19
#define MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET 0x1c
#define MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET 0x1d
#define MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET 0x1e
#define MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET 0x1f
#define MLXPLAT_CPLD_LPC_REG_LED1_OFFSET 0x20
#define MLXPLAT_CPLD_LPC_REG_LED2_OFFSET 0x21
#define MLXPLAT_CPLD_LPC_REG_LED3_OFFSET 0x22
#define MLXPLAT_CPLD_LPC_REG_LED4_OFFSET 0x23
#define MLXPLAT_CPLD_LPC_REG_LED5_OFFSET 0x24
#define MLXPLAT_CPLD_LPC_REG_LED6_OFFSET 0x25
#define MLXPLAT_CPLD_LPC_REG_LED7_OFFSET 0x26
#define MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION 0x2a
#define MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET 0x2b
#define MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET 0x2d
#define MLXPLAT_CPLD_LPC_REG_GP0_OFFSET 0x2e
#define MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET 0x2f
#define MLXPLAT_CPLD_LPC_REG_GP1_OFFSET 0x30
#define MLXPLAT_CPLD_LPC_REG_WP1_OFFSET 0x31
#define MLXPLAT_CPLD_LPC_REG_GP2_OFFSET 0x32
#define MLXPLAT_CPLD_LPC_REG_WP2_OFFSET 0x33
#define MLXPLAT_CPLD_LPC_REG_FIELD_UPGRADE 0x34
#define MLXPLAT_CPLD_LPC_SAFE_BIOS_OFFSET 0x35
#define MLXPLAT_CPLD_LPC_SAFE_BIOS_WP_OFFSET 0x36
#define MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET 0x37
#define MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET 0x3a
#define MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET 0x3b
#define MLXPLAT_CPLD_LPC_REG_FU_CAP_OFFSET 0x3c
#define MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET 0x40
#define MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET 0x41
#define MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET 0x42
#define MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET 0x43
#define MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET 0x44
#define MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET 0x45
#define MLXPLAT_CPLD_LPC_REG_BRD_OFFSET 0x47
#define MLXPLAT_CPLD_LPC_REG_BRD_EVENT_OFFSET 0x48
#define MLXPLAT_CPLD_LPC_REG_BRD_MASK_OFFSET 0x49
#define MLXPLAT_CPLD_LPC_REG_GWP_OFFSET 0x4a
#define MLXPLAT_CPLD_LPC_REG_GWP_EVENT_OFFSET 0x4b
#define MLXPLAT_CPLD_LPC_REG_GWP_MASK_OFFSET 0x4c
#define MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET 0x50
#define MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET 0x51
#define MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET 0x52
#define MLXPLAT_CPLD_LPC_REG_ASIC2_HEALTH_OFFSET 0x53
#define MLXPLAT_CPLD_LPC_REG_ASIC2_EVENT_OFFSET 0x54
#define MLXPLAT_CPLD_LPC_REG_ASIC2_MASK_OFFSET 0x55
#define MLXPLAT_CPLD_LPC_REG_AGGRLC_OFFSET 0x56
#define MLXPLAT_CPLD_LPC_REG_AGGRLC_MASK_OFFSET 0x57
#define MLXPLAT_CPLD_LPC_REG_PSU_OFFSET 0x58
#define MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET 0x59
#define MLXPLAT_CPLD_LPC_REG_PSU_MASK_OFFSET 0x5a
#define MLXPLAT_CPLD_LPC_REG_PWR_OFFSET 0x64
#define MLXPLAT_CPLD_LPC_REG_PWR_EVENT_OFFSET 0x65
#define MLXPLAT_CPLD_LPC_REG_PWR_MASK_OFFSET 0x66
#define MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET 0x70
#define MLXPLAT_CPLD_LPC_REG_LC_IN_EVENT_OFFSET 0x71
#define MLXPLAT_CPLD_LPC_REG_LC_IN_MASK_OFFSET 0x72
#define MLXPLAT_CPLD_LPC_REG_FAN_OFFSET 0x88
#define MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET 0x89
#define MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET 0x8a
#define MLXPLAT_CPLD_LPC_REG_CPLD5_VER_OFFSET 0x8e
#define MLXPLAT_CPLD_LPC_REG_CPLD5_PN_OFFSET 0x8f
#define MLXPLAT_CPLD_LPC_REG_CPLD5_PN1_OFFSET 0x90
#define MLXPLAT_CPLD_LPC_REG_EROT_OFFSET 0x91
#define MLXPLAT_CPLD_LPC_REG_EROT_EVENT_OFFSET 0x92
#define MLXPLAT_CPLD_LPC_REG_EROT_MASK_OFFSET 0x93
#define MLXPLAT_CPLD_LPC_REG_EROTE_OFFSET 0x94
#define MLXPLAT_CPLD_LPC_REG_EROTE_EVENT_OFFSET 0x95
#define MLXPLAT_CPLD_LPC_REG_EROTE_MASK_OFFSET 0x96
#define MLXPLAT_CPLD_LPC_REG_PWRB_OFFSET 0x97
#define MLXPLAT_CPLD_LPC_REG_PWRB_EVENT_OFFSET 0x98
#define MLXPLAT_CPLD_LPC_REG_PWRB_MASK_OFFSET 0x99
#define MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET 0x9a
#define MLXPLAT_CPLD_LPC_REG_LC_VR_EVENT_OFFSET 0x9b
#define MLXPLAT_CPLD_LPC_REG_LC_VR_MASK_OFFSET 0x9c
#define MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET 0x9d
#define MLXPLAT_CPLD_LPC_REG_LC_PG_EVENT_OFFSET 0x9e
#define MLXPLAT_CPLD_LPC_REG_LC_PG_MASK_OFFSET 0x9f
#define MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET 0xa0
#define MLXPLAT_CPLD_LPC_REG_LC_RD_EVENT_OFFSET 0xa1
#define MLXPLAT_CPLD_LPC_REG_LC_RD_MASK_OFFSET 0xa2
#define MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET 0xa3
#define MLXPLAT_CPLD_LPC_REG_LC_SN_EVENT_OFFSET 0xa4
#define MLXPLAT_CPLD_LPC_REG_LC_SN_MASK_OFFSET 0xa5
#define MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET 0xa6
#define MLXPLAT_CPLD_LPC_REG_LC_OK_EVENT_OFFSET 0xa7
#define MLXPLAT_CPLD_LPC_REG_LC_OK_MASK_OFFSET 0xa8
#define MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET 0xa9
#define MLXPLAT_CPLD_LPC_REG_LC_SD_EVENT_OFFSET 0xaa
#define MLXPLAT_CPLD_LPC_REG_LC_SD_MASK_OFFSET 0xab
#define MLXPLAT_CPLD_LPC_REG_LC_PWR_ON 0xb2
#define MLXPLAT_CPLD_LPC_REG_DBG1_OFFSET 0xb6
#define MLXPLAT_CPLD_LPC_REG_DBG2_OFFSET 0xb7
#define MLXPLAT_CPLD_LPC_REG_DBG3_OFFSET 0xb8
#define MLXPLAT_CPLD_LPC_REG_DBG4_OFFSET 0xb9
#define MLXPLAT_CPLD_LPC_REG_GP4_RO_OFFSET 0xc2
#define MLXPLAT_CPLD_LPC_REG_SPI_CHNL_SELECT 0xc3
#define MLXPLAT_CPLD_LPC_REG_CPLD5_MVER_OFFSET 0xc4
#define MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET 0xc7
#define MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET 0xc8
#define MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET 0xc9
#define MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET 0xcb
#define MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET 0xcd
#define MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET 0xce
#define MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET 0xcf
#define MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET 0xd1
#define MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET 0xd2
#define MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET 0xd3
#define MLXPLAT_CPLD_LPC_REG_DBG_CTRL_OFFSET 0xd9
#define MLXPLAT_CPLD_LPC_REG_I2C_CH1_OFFSET 0xdb
#define MLXPLAT_CPLD_LPC_REG_I2C_CH2_OFFSET 0xda
#define MLXPLAT_CPLD_LPC_REG_I2C_CH3_OFFSET 0xdc
#define MLXPLAT_CPLD_LPC_REG_I2C_CH4_OFFSET 0xdd
#define MLXPLAT_CPLD_LPC_REG_CPLD1_MVER_OFFSET 0xde
#define MLXPLAT_CPLD_LPC_REG_CPLD2_MVER_OFFSET 0xdf
#define MLXPLAT_CPLD_LPC_REG_CPLD3_MVER_OFFSET 0xe0
#define MLXPLAT_CPLD_LPC_REG_CPLD4_MVER_OFFSET 0xe1
#define MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET 0xe2
#define MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET 0xe3
#define MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET 0xe4
#define MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET 0xe5
#define MLXPLAT_CPLD_LPC_REG_TACHO3_OFFSET 0xe6
#define MLXPLAT_CPLD_LPC_REG_TACHO4_OFFSET 0xe7
#define MLXPLAT_CPLD_LPC_REG_TACHO5_OFFSET 0xe8
#define MLXPLAT_CPLD_LPC_REG_TACHO6_OFFSET 0xe9
#define MLXPLAT_CPLD_LPC_REG_PWM2_OFFSET 0xea
#define MLXPLAT_CPLD_LPC_REG_TACHO7_OFFSET 0xeb
#define MLXPLAT_CPLD_LPC_REG_TACHO8_OFFSET 0xec
#define MLXPLAT_CPLD_LPC_REG_TACHO9_OFFSET 0xed
#define MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET 0xee
#define MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET 0xef
#define MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET 0xf0
#define MLXPLAT_CPLD_LPC_REG_TACHO13_OFFSET 0xf1
#define MLXPLAT_CPLD_LPC_REG_TACHO14_OFFSET 0xf2
#define MLXPLAT_CPLD_LPC_REG_PWM3_OFFSET 0xf3
#define MLXPLAT_CPLD_LPC_REG_PWM4_OFFSET 0xf4
#define MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET 0xf5
#define MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET 0xf6
#define MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET 0xf7
#define MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET 0xf8
#define MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET 0xf9
#define MLXPLAT_CPLD_LPC_REG_SLOT_QTY_OFFSET 0xfa
#define MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET 0xfb
#define MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET 0xfc
#define MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET 0xfd
#define MLXPLAT_CPLD_LPC_IO_RANGE 0x100
#define MLXPLAT_CPLD_LPC_PIO_OFFSET 0x10000UL
#define MLXPLAT_CPLD_LPC_REG1 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
MLXPLAT_CPLD_LPC_REG_I2C_CH1_OFFSET) | \
MLXPLAT_CPLD_LPC_PIO_OFFSET)
#define MLXPLAT_CPLD_LPC_REG2 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
MLXPLAT_CPLD_LPC_REG_I2C_CH2_OFFSET) | \
MLXPLAT_CPLD_LPC_PIO_OFFSET)
#define MLXPLAT_CPLD_LPC_REG3 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
MLXPLAT_CPLD_LPC_REG_I2C_CH3_OFFSET) | \
MLXPLAT_CPLD_LPC_PIO_OFFSET)
#define MLXPLAT_CPLD_LPC_REG4 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
MLXPLAT_CPLD_LPC_REG_I2C_CH4_OFFSET) | \
MLXPLAT_CPLD_LPC_PIO_OFFSET)
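/*
 * MLXPLAT_CPLD_LPC_REG1..REG4 above are the absolute LPC addresses of the
 * mux channel-select registers: the LPC register base plus the per-CPLD
 * I2C channel offset. OR'ing in MLXPLAT_CPLD_LPC_PIO_OFFSET appears to
 * mirror the encoding used by ioport_map(), so that the values, once cast
 * to (void __iomem *) for the i2c-mux-reg .reg field, are written out as
 * port I/O rather than memory-mapped I/O.
 */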
/* Masks for aggregation, PSU, power and fan events in CPLD-related registers. */
#define MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF 0x04
#define MLXPLAT_CPLD_AGGR_PSU_MASK_DEF 0x08
#define MLXPLAT_CPLD_AGGR_PWR_MASK_DEF 0x08
#define MLXPLAT_CPLD_AGGR_FAN_MASK_DEF 0x40
#define MLXPLAT_CPLD_AGGR_MASK_DEF (MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF | \
MLXPLAT_CPLD_AGGR_PSU_MASK_DEF | \
MLXPLAT_CPLD_AGGR_FAN_MASK_DEF)
#define MLXPLAT_CPLD_AGGR_ASIC_MASK_NG 0x01
#define MLXPLAT_CPLD_AGGR_MASK_NG_DEF 0x04
#define MLXPLAT_CPLD_AGGR_MASK_COMEX BIT(0)
#define MLXPLAT_CPLD_AGGR_MASK_LC BIT(3)
#define MLXPLAT_CPLD_AGGR_MASK_MODULAR (MLXPLAT_CPLD_AGGR_MASK_NG_DEF | \
MLXPLAT_CPLD_AGGR_MASK_COMEX | \
MLXPLAT_CPLD_AGGR_MASK_LC)
#define MLXPLAT_CPLD_AGGR_MASK_LC_PRSNT BIT(0)
#define MLXPLAT_CPLD_AGGR_MASK_LC_RDY BIT(1)
#define MLXPLAT_CPLD_AGGR_MASK_LC_PG BIT(2)
#define MLXPLAT_CPLD_AGGR_MASK_LC_SCRD BIT(3)
#define MLXPLAT_CPLD_AGGR_MASK_LC_SYNC BIT(4)
#define MLXPLAT_CPLD_AGGR_MASK_LC_ACT BIT(5)
#define MLXPLAT_CPLD_AGGR_MASK_LC_SDWN BIT(6)
#define MLXPLAT_CPLD_AGGR_MASK_LC_LOW (MLXPLAT_CPLD_AGGR_MASK_LC_PRSNT | \
MLXPLAT_CPLD_AGGR_MASK_LC_RDY | \
MLXPLAT_CPLD_AGGR_MASK_LC_PG | \
MLXPLAT_CPLD_AGGR_MASK_LC_SCRD | \
MLXPLAT_CPLD_AGGR_MASK_LC_SYNC | \
MLXPLAT_CPLD_AGGR_MASK_LC_ACT | \
MLXPLAT_CPLD_AGGR_MASK_LC_SDWN)
#define MLXPLAT_CPLD_LOW_AGGR_MASK_LOW 0xc1
#define MLXPLAT_CPLD_LOW_AGGR_MASK_ASIC2 BIT(2)
#define MLXPLAT_CPLD_LOW_AGGR_MASK_PWR_BUT GENMASK(5, 4)
#define MLXPLAT_CPLD_LOW_AGGR_MASK_I2C BIT(6)
#define MLXPLAT_CPLD_PSU_MASK GENMASK(1, 0)
#define MLXPLAT_CPLD_PWR_MASK GENMASK(1, 0)
#define MLXPLAT_CPLD_PSU_EXT_MASK GENMASK(3, 0)
#define MLXPLAT_CPLD_PWR_EXT_MASK GENMASK(3, 0)
#define MLXPLAT_CPLD_FAN_MASK GENMASK(3, 0)
#define MLXPLAT_CPLD_ASIC_MASK GENMASK(1, 0)
#define MLXPLAT_CPLD_FAN_NG_MASK GENMASK(6, 0)
#define MLXPLAT_CPLD_LED_LO_NIBBLE_MASK GENMASK(7, 4)
#define MLXPLAT_CPLD_LED_HI_NIBBLE_MASK GENMASK(3, 0)
#define MLXPLAT_CPLD_VOLTREG_UPD_MASK GENMASK(5, 4)
#define MLXPLAT_CPLD_GWP_MASK GENMASK(0, 0)
#define MLXPLAT_CPLD_EROT_MASK GENMASK(1, 0)
#define MLXPLAT_CPLD_FU_CAP_MASK GENMASK(1, 0)
#define MLXPLAT_CPLD_PWR_BUTTON_MASK BIT(0)
#define MLXPLAT_CPLD_LATCH_RST_MASK BIT(6)
#define MLXPLAT_CPLD_THERMAL1_PDB_MASK BIT(3)
#define MLXPLAT_CPLD_THERMAL2_PDB_MASK BIT(4)
#define MLXPLAT_CPLD_INTRUSION_MASK BIT(6)
#define MLXPLAT_CPLD_PWM_PG_MASK BIT(7)
#define MLXPLAT_CPLD_L1_CHA_HEALTH_MASK (MLXPLAT_CPLD_THERMAL1_PDB_MASK | \
MLXPLAT_CPLD_THERMAL2_PDB_MASK | \
MLXPLAT_CPLD_INTRUSION_MASK |\
MLXPLAT_CPLD_PWM_PG_MASK)
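/*
 * I2C capability field: MLXPLAT_CPLD_I2C_CAP_BIT (4) is the field shift and
 * the mask below covers bits [5:4], i.e. GENMASK(5, 4) == 0x30.
 */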
#define MLXPLAT_CPLD_I2C_CAP_BIT 0x04
#define MLXPLAT_CPLD_I2C_CAP_MASK GENMASK(5, MLXPLAT_CPLD_I2C_CAP_BIT)
#define MLXPLAT_CPLD_SYS_RESET_MASK BIT(0)
/* Masks for aggregation for comex carriers */
#define MLXPLAT_CPLD_AGGR_MASK_CARRIER BIT(1)
#define MLXPLAT_CPLD_AGGR_MASK_CARR_DEF (MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF | \
MLXPLAT_CPLD_AGGR_MASK_CARRIER)
#define MLXPLAT_CPLD_LOW_AGGRCX_MASK 0xc1
/* Masks for aggregation for modular systems */
#define MLXPLAT_CPLD_LPC_LC_MASK GENMASK(7, 0)
#define MLXPLAT_CPLD_HALT_MASK BIT(3)
#define MLXPLAT_CPLD_RESET_MASK GENMASK(7, 1)
/* Default I2C parent bus number */
#define MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR 1
/* Maximum number of physical buses that can be equipped on a system */
#define MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM 16
#define MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM 24
/* Number of channels in group */
#define MLXPLAT_CPLD_GRP_CHNL_NUM 8
/* Start channel numbers */
#define MLXPLAT_CPLD_CH1 2
#define MLXPLAT_CPLD_CH2 10
#define MLXPLAT_CPLD_CH3 18
#define MLXPLAT_CPLD_CH2_ETH_MODULAR 3
#define MLXPLAT_CPLD_CH3_ETH_MODULAR 43
#define MLXPLAT_CPLD_CH4_ETH_MODULAR 51
#define MLXPLAT_CPLD_CH2_RACK_SWITCH 18
#define MLXPLAT_CPLD_CH2_NG800 34
/* Number of LPC-attached mux platform devices */
#define MLXPLAT_CPLD_LPC_MUX_DEVS 4
/* Hotplug devices adapter numbers */
#define MLXPLAT_CPLD_NR_NONE -1
#define MLXPLAT_CPLD_PSU_DEFAULT_NR 10
#define MLXPLAT_CPLD_PSU_MSNXXXX_NR 4
#define MLXPLAT_CPLD_FAN1_DEFAULT_NR 11
#define MLXPLAT_CPLD_FAN2_DEFAULT_NR 12
#define MLXPLAT_CPLD_FAN3_DEFAULT_NR 13
#define MLXPLAT_CPLD_FAN4_DEFAULT_NR 14
#define MLXPLAT_CPLD_NR_ASIC 3
#define MLXPLAT_CPLD_NR_LC_BASE 34
#define MLXPLAT_CPLD_NR_LC_SET(nr) (MLXPLAT_CPLD_NR_LC_BASE + (nr))
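/*
 * Example: MLXPLAT_CPLD_NR_LC_SET(0) == 34 and MLXPLAT_CPLD_NR_LC_SET(7) ==
 * 41, the I2C bus numbers assigned to line card slots 1..8 below.
 */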
#define MLXPLAT_CPLD_LC_ADDR 0x32
/* Masks and default values for watchdogs */
#define MLXPLAT_CPLD_WD1_CLEAR_MASK GENMASK(7, 1)
#define MLXPLAT_CPLD_WD2_CLEAR_MASK (GENMASK(7, 0) & ~BIT(1))
#define MLXPLAT_CPLD_WD_TYPE1_TO_MASK GENMASK(7, 4)
#define MLXPLAT_CPLD_WD_TYPE2_TO_MASK 0
#define MLXPLAT_CPLD_WD_RESET_ACT_MASK GENMASK(7, 1)
#define MLXPLAT_CPLD_WD_FAN_ACT_MASK (GENMASK(7, 0) & ~BIT(4))
#define MLXPLAT_CPLD_WD_COUNT_ACT_MASK (GENMASK(7, 0) & ~BIT(7))
#define MLXPLAT_CPLD_WD_CPBLTY_MASK (GENMASK(7, 0) & ~BIT(6))
#define MLXPLAT_CPLD_WD_DFLT_TIMEOUT 30
#define MLXPLAT_CPLD_WD3_DFLT_TIMEOUT 600
#define MLXPLAT_CPLD_WD_MAX_DEVS 2
#define MLXPLAT_CPLD_LPC_SYSIRQ 17
/* Minimum power required to turn on an Ethernet modular system (in watts) */
#define MLXPLAT_CPLD_ETH_MODULAR_PWR_MIN 50
/* Default value for PWM control register for rack switch system */
#define MLXPLAT_REGMAP_NVSWITCH_PWM_DEFAULT 0xf4
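/* 0xf4 on an assumed 8-bit PWM scale (0x00..0xff) is roughly 96% duty cycle. */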
#define MLXPLAT_I2C_MAIN_BUS_NOTIFIED 0x01
#define MLXPLAT_I2C_MAIN_BUS_HANDLE_CREATED 0x02
/* Lattice FPGA PCI configuration */
#define PCI_VENDOR_ID_LATTICE 0x1204
#define PCI_DEVICE_ID_LATTICE_I2C_BRIDGE 0x9c2f
#define PCI_DEVICE_ID_LATTICE_JTAG_BRIDGE 0x9c30
#define PCI_DEVICE_ID_LATTICE_LPC_BRIDGE 0x9c32
/* mlxplat_priv - platform private data
 * @pdev_i2c: I2C controller platform device
 * @pdev_mux: array of mux platform devices
 * @pdev_hotplug: hotplug platform devices
 * @pdev_led: LED platform devices
 * @pdev_io_regs: register access platform devices
 * @pdev_fan: fan platform devices
 * @pdev_wd: array of watchdog platform devices
 * @regmap: device register map
 * @hotplug_resources: system hotplug resources
 * @hotplug_resources_size: size of system hotplug resources
 * @i2c_main_init_status: init status of I2C main bus
 * @irq_fpga: FPGA IRQ number
 */
struct mlxplat_priv {
struct platform_device *pdev_i2c;
struct platform_device *pdev_mux[MLXPLAT_CPLD_LPC_MUX_DEVS];
struct platform_device *pdev_hotplug;
struct platform_device *pdev_led;
struct platform_device *pdev_io_regs;
struct platform_device *pdev_fan;
struct platform_device *pdev_wd[MLXPLAT_CPLD_WD_MAX_DEVS];
void *regmap;
struct resource *hotplug_resources;
unsigned int hotplug_resources_size;
u8 i2c_main_init_status;
int irq_fpga;
};
static struct platform_device *mlxplat_dev;
static int mlxplat_i2c_main_complition_notify(void *handle, int id);
static void __iomem *i2c_bridge_addr, *jtag_bridge_addr;
/* Regions for LPC I2C controller and LPC base register space */
static const struct resource mlxplat_lpc_resources[] = {
[0] = DEFINE_RES_NAMED(MLXPLAT_CPLD_LPC_I2C_BASE_ADRR,
MLXPLAT_CPLD_LPC_IO_RANGE,
"mlxplat_cpld_lpc_i2c_ctrl", IORESOURCE_IO),
[1] = DEFINE_RES_NAMED(MLXPLAT_CPLD_LPC_REG_BASE_ADRR,
MLXPLAT_CPLD_LPC_IO_RANGE,
"mlxplat_cpld_lpc_regs",
IORESOURCE_IO),
};
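/* Both regions span MLXPLAT_CPLD_LPC_IO_RANGE (0x100) bytes of I/O port space. */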
/* I2C platform data for default systems */
static struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_i2c_default_data = {
.completion_notify = mlxplat_i2c_main_complition_notify,
};
/* I2C items data for next generation systems */
static struct mlxreg_core_data mlxplat_mlxcpld_i2c_ng_items_data[] = {
{
.reg = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
.mask = MLXPLAT_CPLD_I2C_CAP_MASK,
.bit = MLXPLAT_CPLD_I2C_CAP_BIT,
},
};
static struct mlxreg_core_item mlxplat_mlxcpld_i2c_ng_items[] = {
{
.data = mlxplat_mlxcpld_i2c_ng_items_data,
},
};
/* I2C platform data for next generation systems */
static struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_i2c_ng_data = {
.items = mlxplat_mlxcpld_i2c_ng_items,
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_COMEX,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET,
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_I2C,
.completion_notify = mlxplat_i2c_main_complition_notify,
};
/* Platform default channels */
static const int mlxplat_default_channels[][MLXPLAT_CPLD_GRP_CHNL_NUM] = {
	{
		MLXPLAT_CPLD_CH1, MLXPLAT_CPLD_CH1 + 1, MLXPLAT_CPLD_CH1 + 2,
		MLXPLAT_CPLD_CH1 + 3, MLXPLAT_CPLD_CH1 + 4, MLXPLAT_CPLD_CH1 + 5,
		MLXPLAT_CPLD_CH1 + 6, MLXPLAT_CPLD_CH1 + 7
	},
	{
		MLXPLAT_CPLD_CH2, MLXPLAT_CPLD_CH2 + 1, MLXPLAT_CPLD_CH2 + 2,
		MLXPLAT_CPLD_CH2 + 3, MLXPLAT_CPLD_CH2 + 4, MLXPLAT_CPLD_CH2 + 5,
		MLXPLAT_CPLD_CH2 + 6, MLXPLAT_CPLD_CH2 + 7
	},
};
/* Platform channels for MSN21xx system family */
static const int mlxplat_msn21xx_channels[] = { 1, 2, 3, 4, 5, 6, 7, 8 };
/* Platform mux data */
static struct i2c_mux_reg_platform_data mlxplat_default_mux_data[] = {
{
.parent = 1,
.base_nr = MLXPLAT_CPLD_CH1,
.write_only = 1,
.reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG1,
.reg_size = 1,
.idle_in_use = 1,
},
{
.parent = 1,
.base_nr = MLXPLAT_CPLD_CH2,
.write_only = 1,
.reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG2,
.reg_size = 1,
.idle_in_use = 1,
},
};
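/*
 * The .values/.n_values channel lists for the two entries above are left
 * unset here; they are presumably filled in from mlxplat_default_channels
 * once the matching system type is detected.
 */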
/* Platform mux configuration variables */
static int mlxplat_max_adap_num;
static int mlxplat_mux_num;
static struct i2c_mux_reg_platform_data *mlxplat_mux_data;
static struct notifier_block *mlxplat_reboot_nb;
/* Platform extended mux data */
static struct i2c_mux_reg_platform_data mlxplat_extended_mux_data[] = {
{
.parent = 1,
.base_nr = MLXPLAT_CPLD_CH1,
.write_only = 1,
.reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG1,
.reg_size = 1,
.idle_in_use = 1,
},
{
.parent = 1,
.base_nr = MLXPLAT_CPLD_CH2,
.write_only = 1,
.reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG3,
.reg_size = 1,
.idle_in_use = 1,
},
{
.parent = 1,
.base_nr = MLXPLAT_CPLD_CH3,
.write_only = 1,
.reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG2,
.reg_size = 1,
.idle_in_use = 1,
},
};
/* Platform channels for modular system family */
static const int mlxplat_modular_upper_channel[] = { 1 };
static const int mlxplat_modular_channels[] = {
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
38, 39, 40
};
/* Platform modular mux data */
static struct i2c_mux_reg_platform_data mlxplat_modular_mux_data[] = {
{
.parent = 1,
.base_nr = MLXPLAT_CPLD_CH1,
.write_only = 1,
.reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG4,
.reg_size = 1,
.idle_in_use = 1,
.values = mlxplat_modular_upper_channel,
.n_values = ARRAY_SIZE(mlxplat_modular_upper_channel),
},
{
.parent = 1,
.base_nr = MLXPLAT_CPLD_CH2_ETH_MODULAR,
.write_only = 1,
.reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG1,
.reg_size = 1,
.idle_in_use = 1,
.values = mlxplat_modular_channels,
.n_values = ARRAY_SIZE(mlxplat_modular_channels),
},
{
.parent = MLXPLAT_CPLD_CH1,
.base_nr = MLXPLAT_CPLD_CH3_ETH_MODULAR,
.write_only = 1,
.reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG3,
.reg_size = 1,
.idle_in_use = 1,
.values = mlxplat_msn21xx_channels,
.n_values = ARRAY_SIZE(mlxplat_msn21xx_channels),
},
{
.parent = 1,
.base_nr = MLXPLAT_CPLD_CH4_ETH_MODULAR,
.write_only = 1,
.reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG2,
.reg_size = 1,
.idle_in_use = 1,
.values = mlxplat_msn21xx_channels,
.n_values = ARRAY_SIZE(mlxplat_msn21xx_channels),
},
};
/* Platform channels for rack switch system family */
static const int mlxplat_rack_switch_channels[] = {
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
};
/* Platform rack switch mux data */
static struct i2c_mux_reg_platform_data mlxplat_rack_switch_mux_data[] = {
{
.parent = 1,
.base_nr = MLXPLAT_CPLD_CH1,
.write_only = 1,
.reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG1,
.reg_size = 1,
.idle_in_use = 1,
.values = mlxplat_rack_switch_channels,
.n_values = ARRAY_SIZE(mlxplat_rack_switch_channels),
},
{
.parent = 1,
.base_nr = MLXPLAT_CPLD_CH2_RACK_SWITCH,
.write_only = 1,
.reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG2,
.reg_size = 1,
.idle_in_use = 1,
.values = mlxplat_msn21xx_channels,
.n_values = ARRAY_SIZE(mlxplat_msn21xx_channels),
},
};
/* Platform channels for ng800 system family */
static const int mlxplat_ng800_channels[] = {
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32
};
/* Platform ng800 mux data */
static struct i2c_mux_reg_platform_data mlxplat_ng800_mux_data[] = {
{
.parent = 1,
.base_nr = MLXPLAT_CPLD_CH1,
.write_only = 1,
.reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG1,
.reg_size = 1,
.idle_in_use = 1,
.values = mlxplat_ng800_channels,
.n_values = ARRAY_SIZE(mlxplat_ng800_channels),
},
{
.parent = 1,
.base_nr = MLXPLAT_CPLD_CH2_NG800,
.write_only = 1,
.reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG2,
.reg_size = 1,
.idle_in_use = 1,
.values = mlxplat_msn21xx_channels,
.n_values = ARRAY_SIZE(mlxplat_msn21xx_channels),
},
};
/* Platform hotplug devices */
static struct i2c_board_info mlxplat_mlxcpld_pwr[] = {
{
I2C_BOARD_INFO("dps460", 0x59),
},
{
I2C_BOARD_INFO("dps460", 0x58),
},
};
static struct i2c_board_info mlxplat_mlxcpld_ext_pwr[] = {
{
I2C_BOARD_INFO("dps460", 0x5b),
},
{
I2C_BOARD_INFO("dps460", 0x5a),
},
};
static struct i2c_board_info mlxplat_mlxcpld_pwr_ng800[] = {
{
I2C_BOARD_INFO("dps460", 0x59),
},
{
I2C_BOARD_INFO("dps460", 0x5a),
},
};
static struct i2c_board_info mlxplat_mlxcpld_fan[] = {
{
I2C_BOARD_INFO("24c32", 0x50),
},
{
I2C_BOARD_INFO("24c32", 0x50),
},
{
I2C_BOARD_INFO("24c32", 0x50),
},
{
I2C_BOARD_INFO("24c32", 0x50),
},
};
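/*
 * All four fan EEPROMs respond at address 0x50; they are distinguished by
 * the muxed bus each sits on (adapters MLXPLAT_CPLD_FAN1_DEFAULT_NR to
 * MLXPLAT_CPLD_FAN4_DEFAULT_NR).
 */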
/* Platform hotplug comex carrier system family data */
static struct mlxreg_core_data mlxplat_mlxcpld_comex_psu_items_data[] = {
{
.label = "psu1",
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = BIT(0),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "psu2",
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = BIT(1),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
};
/* Platform hotplug default data */
static struct mlxreg_core_data mlxplat_mlxcpld_default_psu_items_data[] = {
{
.label = "psu1",
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = BIT(0),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "psu2",
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = BIT(1),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
};
static struct mlxreg_core_data mlxplat_mlxcpld_default_pwr_items_data[] = {
{
.label = "pwr1",
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = BIT(0),
.hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0],
.hpdev.nr = MLXPLAT_CPLD_PSU_DEFAULT_NR,
},
{
.label = "pwr2",
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = BIT(1),
.hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1],
.hpdev.nr = MLXPLAT_CPLD_PSU_DEFAULT_NR,
},
};
static struct mlxreg_core_data mlxplat_mlxcpld_default_pwr_wc_items_data[] = {
{
.label = "pwr1",
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = BIT(0),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "pwr2",
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = BIT(1),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
};
static struct mlxreg_core_data mlxplat_mlxcpld_default_pwr_ng800_items_data[] = {
{
.label = "pwr1",
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = BIT(0),
.hpdev.brdinfo = &mlxplat_mlxcpld_pwr_ng800[0],
.hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
},
{
.label = "pwr2",
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = BIT(1),
.hpdev.brdinfo = &mlxplat_mlxcpld_pwr_ng800[1],
.hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
},
};
static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_items_data[] = {
{
.label = "fan1",
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = BIT(0),
.hpdev.brdinfo = &mlxplat_mlxcpld_fan[0],
.hpdev.nr = MLXPLAT_CPLD_FAN1_DEFAULT_NR,
},
{
.label = "fan2",
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = BIT(1),
.hpdev.brdinfo = &mlxplat_mlxcpld_fan[1],
.hpdev.nr = MLXPLAT_CPLD_FAN2_DEFAULT_NR,
},
{
.label = "fan3",
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = BIT(2),
.hpdev.brdinfo = &mlxplat_mlxcpld_fan[2],
.hpdev.nr = MLXPLAT_CPLD_FAN3_DEFAULT_NR,
},
{
.label = "fan4",
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = BIT(3),
.hpdev.brdinfo = &mlxplat_mlxcpld_fan[3],
.hpdev.nr = MLXPLAT_CPLD_FAN4_DEFAULT_NR,
},
};
static struct mlxreg_core_data mlxplat_mlxcpld_default_asic_items_data[] = {
{
.label = "asic1",
.reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
.mask = MLXPLAT_CPLD_ASIC_MASK,
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
};
static struct mlxreg_core_data mlxplat_mlxcpld_default_asic2_items_data[] = {
{
.label = "asic2",
.reg = MLXPLAT_CPLD_LPC_REG_ASIC2_HEALTH_OFFSET,
.mask = MLXPLAT_CPLD_ASIC_MASK,
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
};
static struct mlxreg_core_item mlxplat_mlxcpld_default_items[] = {
{
.data = mlxplat_mlxcpld_default_psu_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_PSU_MASK_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = MLXPLAT_CPLD_PSU_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_default_psu_items_data),
.inversed = 1,
.health = false,
},
{
.data = mlxplat_mlxcpld_default_pwr_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_PWR_MASK_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = MLXPLAT_CPLD_PWR_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_default_pwr_items_data),
.inversed = 0,
.health = false,
},
{
.data = mlxplat_mlxcpld_default_fan_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_FAN_MASK_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = MLXPLAT_CPLD_FAN_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_default_fan_items_data),
.inversed = 1,
.health = false,
},
{
.data = mlxplat_mlxcpld_default_asic_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
.mask = MLXPLAT_CPLD_ASIC_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
.inversed = 0,
.health = true,
},
};
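/*
 * The .inversed flag appears to encode signal polarity: presence-style
 * signals (PSU, fan) report an inserted device with a cleared bit, while
 * power-good and ASIC health signals report with a set bit.
 */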
static struct mlxreg_core_item mlxplat_mlxcpld_comex_items[] = {
{
.data = mlxplat_mlxcpld_comex_psu_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER,
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = MLXPLAT_CPLD_PSU_MASK,
		.count = ARRAY_SIZE(mlxplat_mlxcpld_comex_psu_items_data),
.inversed = 1,
.health = false,
},
{
.data = mlxplat_mlxcpld_default_pwr_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER,
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = MLXPLAT_CPLD_PWR_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_default_pwr_items_data),
.inversed = 0,
.health = false,
},
{
.data = mlxplat_mlxcpld_default_fan_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER,
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = MLXPLAT_CPLD_FAN_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_default_fan_items_data),
.inversed = 1,
.health = false,
},
{
.data = mlxplat_mlxcpld_default_asic_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
.mask = MLXPLAT_CPLD_ASIC_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
.inversed = 0,
.health = true,
},
};
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_data = {
.items = mlxplat_mlxcpld_default_items,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_default_items),
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_DEF,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
};
static struct mlxreg_core_item mlxplat_mlxcpld_default_wc_items[] = {
{
.data = mlxplat_mlxcpld_comex_psu_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER,
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = MLXPLAT_CPLD_PSU_MASK,
		.count = ARRAY_SIZE(mlxplat_mlxcpld_comex_psu_items_data),
.inversed = 1,
.health = false,
},
{
.data = mlxplat_mlxcpld_default_pwr_wc_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER,
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = MLXPLAT_CPLD_PWR_MASK,
		.count = ARRAY_SIZE(mlxplat_mlxcpld_default_pwr_wc_items_data),
.inversed = 0,
.health = false,
},
{
.data = mlxplat_mlxcpld_default_asic_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
.mask = MLXPLAT_CPLD_ASIC_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
.inversed = 0,
.health = true,
},
};
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_wc_data = {
.items = mlxplat_mlxcpld_default_wc_items,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_default_wc_items),
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_DEF,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
};
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_comex_data = {
.items = mlxplat_mlxcpld_comex_items,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_comex_items),
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_CARR_DEF,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET,
.mask_low = MLXPLAT_CPLD_LOW_AGGRCX_MASK,
};
static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_pwr_items_data[] = {
{
.label = "pwr1",
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = BIT(0),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "pwr2",
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = BIT(1),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
};
/* Platform hotplug MSN21xx system family data */
static struct mlxreg_core_item mlxplat_mlxcpld_msn21xx_items[] = {
{
.data = mlxplat_mlxcpld_msn21xx_pwr_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_PWR_MASK_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = MLXPLAT_CPLD_PWR_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_msn21xx_pwr_items_data),
.inversed = 0,
.health = false,
},
{
.data = mlxplat_mlxcpld_default_asic_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
.mask = MLXPLAT_CPLD_ASIC_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
.inversed = 0,
.health = true,
},
};
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_msn21xx_data = {
.items = mlxplat_mlxcpld_msn21xx_items,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_msn21xx_items),
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_DEF,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
};
/* Platform hotplug MSN274x system family data */
static struct mlxreg_core_data mlxplat_mlxcpld_msn274x_psu_items_data[] = {
{
.label = "psu1",
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = BIT(0),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "psu2",
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = BIT(1),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
};
static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_pwr_items_data[] = {
{
.label = "pwr1",
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = BIT(0),
.hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0],
.hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
},
{
.label = "pwr2",
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = BIT(1),
.hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1],
.hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
},
};
static struct mlxreg_core_data mlxplat_mlxcpld_msn274x_fan_items_data[] = {
{
.label = "fan1",
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = BIT(0),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "fan2",
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = BIT(1),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "fan3",
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = BIT(2),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "fan4",
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = BIT(3),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
};
static struct mlxreg_core_item mlxplat_mlxcpld_msn274x_items[] = {
{
.data = mlxplat_mlxcpld_msn274x_psu_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = MLXPLAT_CPLD_PSU_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_msn274x_psu_items_data),
.inversed = 1,
.health = false,
},
{
.data = mlxplat_mlxcpld_default_ng_pwr_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = MLXPLAT_CPLD_PWR_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_pwr_items_data),
.inversed = 0,
.health = false,
},
{
.data = mlxplat_mlxcpld_msn274x_fan_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = MLXPLAT_CPLD_FAN_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_msn274x_fan_items_data),
.inversed = 1,
.health = false,
},
{
.data = mlxplat_mlxcpld_default_asic_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
.mask = MLXPLAT_CPLD_ASIC_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
.inversed = 0,
.health = true,
},
};
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_msn274x_data = {
.items = mlxplat_mlxcpld_msn274x_items,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_msn274x_items),
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
};
/* Platform hotplug MSN201x system family data */
static struct mlxreg_core_data mlxplat_mlxcpld_msn201x_pwr_items_data[] = {
{
.label = "pwr1",
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = BIT(0),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "pwr2",
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = BIT(1),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
};
static struct mlxreg_core_item mlxplat_mlxcpld_msn201x_items[] = {
{
.data = mlxplat_mlxcpld_msn201x_pwr_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_PWR_MASK_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = MLXPLAT_CPLD_PWR_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_msn201x_pwr_items_data),
.inversed = 0,
.health = false,
},
{
.data = mlxplat_mlxcpld_default_asic_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
.mask = MLXPLAT_CPLD_ASIC_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
.inversed = 0,
.health = true,
},
};
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_msn201x_data = {
.items = mlxplat_mlxcpld_msn201x_items,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_msn201x_items),
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_DEF,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
};
/* Platform hotplug next generation system family data */
static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_psu_items_data[] = {
{
.label = "psu1",
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = BIT(0),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "psu2",
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = BIT(1),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
};
static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_fan_items_data[] = {
{
.label = "fan1",
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = BIT(0),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(0),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "fan2",
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = BIT(1),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(1),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "fan3",
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = BIT(2),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(2),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "fan4",
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = BIT(3),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(3),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "fan5",
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = BIT(4),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(4),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "fan6",
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = BIT(5),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(5),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "fan7",
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = BIT(6),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(6),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
};
static struct mlxreg_core_item mlxplat_mlxcpld_default_ng_items[] = {
{
.data = mlxplat_mlxcpld_default_ng_psu_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = MLXPLAT_CPLD_PSU_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_psu_items_data),
.inversed = 1,
.health = false,
},
{
.data = mlxplat_mlxcpld_default_ng_pwr_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = MLXPLAT_CPLD_PWR_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_pwr_items_data),
.inversed = 0,
.health = false,
},
{
.data = mlxplat_mlxcpld_default_ng_fan_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = MLXPLAT_CPLD_FAN_NG_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_fan_items_data),
.inversed = 1,
.health = false,
},
{
.data = mlxplat_mlxcpld_default_asic_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
.mask = MLXPLAT_CPLD_ASIC_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
.inversed = 0,
.health = true,
},
};
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_ng_data = {
.items = mlxplat_mlxcpld_default_ng_items,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_items),
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF | MLXPLAT_CPLD_AGGR_MASK_COMEX,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
};
/* Platform hotplug extended system family data */
static struct mlxreg_core_data mlxplat_mlxcpld_ext_psu_items_data[] = {
{
.label = "psu1",
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = BIT(0),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "psu2",
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = BIT(1),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "psu3",
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = BIT(2),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "psu4",
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = BIT(3),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
};
static struct mlxreg_core_data mlxplat_mlxcpld_ext_pwr_items_data[] = {
{
.label = "pwr1",
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = BIT(0),
.hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0],
.hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
},
{
.label = "pwr2",
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = BIT(1),
.hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1],
.hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
},
{
.label = "pwr3",
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = BIT(2),
.hpdev.brdinfo = &mlxplat_mlxcpld_ext_pwr[0],
.hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
},
{
.label = "pwr4",
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = BIT(3),
.hpdev.brdinfo = &mlxplat_mlxcpld_ext_pwr[1],
.hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
},
};
static struct mlxreg_core_item mlxplat_mlxcpld_ext_items[] = {
{
.data = mlxplat_mlxcpld_ext_psu_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = MLXPLAT_CPLD_PSU_EXT_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
.count = ARRAY_SIZE(mlxplat_mlxcpld_ext_psu_items_data),
.inversed = 1,
.health = false,
},
{
.data = mlxplat_mlxcpld_ext_pwr_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = MLXPLAT_CPLD_PWR_EXT_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
.count = ARRAY_SIZE(mlxplat_mlxcpld_ext_pwr_items_data),
.inversed = 0,
.health = false,
},
{
.data = mlxplat_mlxcpld_default_ng_fan_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = MLXPLAT_CPLD_FAN_NG_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_fan_items_data),
.inversed = 1,
.health = false,
},
{
.data = mlxplat_mlxcpld_default_asic_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
.mask = MLXPLAT_CPLD_ASIC_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
.inversed = 0,
.health = true,
},
{
.data = mlxplat_mlxcpld_default_asic2_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_ASIC2_HEALTH_OFFSET,
.mask = MLXPLAT_CPLD_ASIC_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic2_items_data),
.inversed = 0,
.health = true,
	},
};
static struct mlxreg_core_item mlxplat_mlxcpld_ng800_items[] = {
{
.data = mlxplat_mlxcpld_default_ng_psu_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = MLXPLAT_CPLD_PSU_EXT_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
.count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_psu_items_data),
.inversed = 1,
.health = false,
},
{
.data = mlxplat_mlxcpld_default_pwr_ng800_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = MLXPLAT_CPLD_PWR_EXT_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
.count = ARRAY_SIZE(mlxplat_mlxcpld_default_pwr_ng800_items_data),
.inversed = 0,
.health = false,
},
{
.data = mlxplat_mlxcpld_default_ng_fan_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = MLXPLAT_CPLD_FAN_NG_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_fan_items_data),
.inversed = 1,
.health = false,
},
{
.data = mlxplat_mlxcpld_default_asic_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
.mask = MLXPLAT_CPLD_ASIC_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
.inversed = 0,
.health = true,
},
};
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_ext_data = {
.items = mlxplat_mlxcpld_ext_items,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_ext_items),
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF | MLXPLAT_CPLD_AGGR_MASK_COMEX,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW | MLXPLAT_CPLD_LOW_AGGR_MASK_ASIC2,
};
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_ng800_data = {
.items = mlxplat_mlxcpld_ng800_items,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_ng800_items),
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF | MLXPLAT_CPLD_AGGR_MASK_COMEX,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW | MLXPLAT_CPLD_LOW_AGGR_MASK_ASIC2,
};
static struct mlxreg_core_data mlxplat_mlxcpld_modular_pwr_items_data[] = {
{
.label = "pwr1",
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = BIT(0),
.hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0],
.hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
},
{
.label = "pwr2",
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = BIT(1),
.hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1],
.hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
},
{
.label = "pwr3",
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = BIT(2),
.hpdev.brdinfo = &mlxplat_mlxcpld_ext_pwr[0],
.hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
},
{
.label = "pwr4",
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = BIT(3),
.hpdev.brdinfo = &mlxplat_mlxcpld_ext_pwr[1],
.hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
},
};
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_lc_act = {
.irq = MLXPLAT_CPLD_LPC_SYSIRQ,
};
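/*
 * Line card platform data: only the shared system IRQ line is passed down
 * to the mlxreg-lc devices below.
 */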
static struct mlxreg_core_data mlxplat_mlxcpld_modular_asic_items_data[] = {
{
.label = "asic1",
.reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
.mask = MLXPLAT_CPLD_ASIC_MASK,
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
};
static struct i2c_board_info mlxplat_mlxcpld_lc_i2c_dev[] = {
{
I2C_BOARD_INFO("mlxreg-lc", MLXPLAT_CPLD_LC_ADDR),
.platform_data = &mlxplat_mlxcpld_lc_act,
},
{
I2C_BOARD_INFO("mlxreg-lc", MLXPLAT_CPLD_LC_ADDR),
.platform_data = &mlxplat_mlxcpld_lc_act,
},
{
I2C_BOARD_INFO("mlxreg-lc", MLXPLAT_CPLD_LC_ADDR),
.platform_data = &mlxplat_mlxcpld_lc_act,
},
{
I2C_BOARD_INFO("mlxreg-lc", MLXPLAT_CPLD_LC_ADDR),
.platform_data = &mlxplat_mlxcpld_lc_act,
},
{
I2C_BOARD_INFO("mlxreg-lc", MLXPLAT_CPLD_LC_ADDR),
.platform_data = &mlxplat_mlxcpld_lc_act,
},
{
I2C_BOARD_INFO("mlxreg-lc", MLXPLAT_CPLD_LC_ADDR),
.platform_data = &mlxplat_mlxcpld_lc_act,
},
{
I2C_BOARD_INFO("mlxreg-lc", MLXPLAT_CPLD_LC_ADDR),
.platform_data = &mlxplat_mlxcpld_lc_act,
},
{
I2C_BOARD_INFO("mlxreg-lc", MLXPLAT_CPLD_LC_ADDR),
.platform_data = &mlxplat_mlxcpld_lc_act,
},
};
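/*
 * All eight line card devices use the same I2C address
 * (MLXPLAT_CPLD_LC_ADDR); they are distinguished by the per-slot bus
 * numbers MLXPLAT_CPLD_NR_LC_SET(0)..MLXPLAT_CPLD_NR_LC_SET(7).
 */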
static struct mlxreg_core_hotplug_notifier mlxplat_mlxcpld_modular_lc_notifier[] = {
{
.identity = "lc1",
},
{
.identity = "lc2",
},
{
.identity = "lc3",
},
{
.identity = "lc4",
},
{
.identity = "lc5",
},
{
.identity = "lc6",
},
{
.identity = "lc7",
},
{
.identity = "lc8",
},
};
static struct mlxreg_core_data mlxplat_mlxcpld_modular_lc_pr_items_data[] = {
{
.label = "lc1_present",
.reg = MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET,
.mask = BIT(0),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[0],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(0),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[0],
.slot = 1,
},
{
.label = "lc2_present",
.reg = MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET,
.mask = BIT(1),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[1],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(1),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[1],
.slot = 2,
},
{
.label = "lc3_present",
.reg = MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET,
.mask = BIT(2),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[2],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(2),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[2],
.slot = 3,
},
{
.label = "lc4_present",
.reg = MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET,
.mask = BIT(3),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[3],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(3),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[3],
.slot = 4,
},
{
.label = "lc5_present",
.reg = MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET,
.mask = BIT(4),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[4],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(4),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[4],
.slot = 5,
},
{
.label = "lc6_present",
.reg = MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET,
.mask = BIT(5),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[5],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(5),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[5],
.slot = 6,
},
{
.label = "lc7_present",
.reg = MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET,
.mask = BIT(6),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[6],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(6),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[6],
.slot = 7,
},
{
.label = "lc8_present",
.reg = MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET,
.mask = BIT(7),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[7],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(7),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[7],
.slot = 8,
},
};
static struct mlxreg_core_data mlxplat_mlxcpld_modular_lc_ver_items_data[] = {
{
.label = "lc1_verified",
.reg = MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET,
.mask = BIT(0),
.reg_prsnt = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
.reg_sync = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
.reg_pwr = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
.reg_ena = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[0],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(0),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[0],
.slot = 1,
},
{
.label = "lc2_verified",
.reg = MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET,
.mask = BIT(1),
.reg_prsnt = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
.reg_sync = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
.reg_pwr = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
.reg_ena = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[1],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(1),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[1],
.slot = 2,
},
{
.label = "lc3_verified",
.reg = MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET,
.mask = BIT(2),
.reg_prsnt = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
.reg_sync = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
.reg_pwr = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
.reg_ena = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[2],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(2),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[2],
.slot = 3,
},
{
.label = "lc4_verified",
.reg = MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET,
.mask = BIT(3),
.reg_prsnt = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
.reg_sync = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
.reg_pwr = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
.reg_ena = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[3],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(3),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[3],
.slot = 4,
},
{
.label = "lc5_verified",
.reg = MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET,
.mask = BIT(4),
.reg_prsnt = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
.reg_sync = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
.reg_pwr = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
.reg_ena = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[4],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(4),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[4],
.slot = 5,
},
{
.label = "lc6_verified",
.reg = MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET,
.mask = BIT(5),
.reg_prsnt = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
.reg_sync = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
.reg_pwr = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
.reg_ena = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[5],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(5),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[5],
.slot = 6,
},
{
.label = "lc7_verified",
.reg = MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET,
.mask = BIT(6),
.reg_prsnt = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
.reg_sync = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
.reg_pwr = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
.reg_ena = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[6],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(6),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[6],
.slot = 7,
},
{
.label = "lc8_verified",
.reg = MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET,
.mask = BIT(7),
.reg_prsnt = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
.reg_sync = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
.reg_pwr = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
.reg_ena = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[7],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(7),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[7],
.slot = 8,
},
};
static struct mlxreg_core_data mlxplat_mlxcpld_modular_lc_pg_data[] = {
{
.label = "lc1_powered",
.reg = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
.mask = BIT(0),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[0],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(0),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[0],
.slot = 1,
},
{
.label = "lc2_powered",
.reg = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
.mask = BIT(1),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[1],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(1),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[1],
.slot = 2,
},
{
.label = "lc3_powered",
.reg = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
.mask = BIT(2),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[2],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(2),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[2],
.slot = 3,
},
{
.label = "lc4_powered",
.reg = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
.mask = BIT(3),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[3],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(3),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[3],
.slot = 4,
},
{
.label = "lc5_powered",
.reg = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
.mask = BIT(4),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[4],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(4),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[4],
.slot = 5,
},
{
.label = "lc6_powered",
.reg = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
.mask = BIT(5),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[5],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(5),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[5],
.slot = 6,
},
{
.label = "lc7_powered",
.reg = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
.mask = BIT(6),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[6],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(6),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[6],
.slot = 7,
},
{
.label = "lc8_powered",
.reg = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
.mask = BIT(7),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[7],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(7),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[7],
.slot = 8,
},
};
static struct mlxreg_core_data mlxplat_mlxcpld_modular_lc_ready_data[] = {
{
.label = "lc1_ready",
.reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
.mask = BIT(0),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[0],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(0),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[0],
.slot = 1,
},
{
.label = "lc2_ready",
.reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
.mask = BIT(1),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[1],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(1),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[1],
.slot = 2,
},
{
.label = "lc3_ready",
.reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
.mask = BIT(2),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[2],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(2),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[2],
.slot = 3,
},
{
.label = "lc4_ready",
.reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
.mask = BIT(3),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[3],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(3),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[3],
.slot = 4,
},
{
.label = "lc5_ready",
.reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
.mask = BIT(4),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[4],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(4),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[4],
.slot = 5,
},
{
.label = "lc6_ready",
.reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
.mask = BIT(5),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[5],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(5),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[5],
.slot = 6,
},
{
.label = "lc7_ready",
.reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
.mask = BIT(6),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[6],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(6),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[6],
.slot = 7,
},
{
.label = "lc8_ready",
.reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
.mask = BIT(7),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[7],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(7),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[7],
.slot = 8,
},
};
static struct mlxreg_core_data mlxplat_mlxcpld_modular_lc_synced_data[] = {
{
.label = "lc1_synced",
.reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
.mask = BIT(0),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[0],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(0),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[0],
.slot = 1,
},
{
.label = "lc2_synced",
.reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
.mask = BIT(1),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[1],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(1),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[1],
.slot = 2,
},
{
.label = "lc3_synced",
.reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
.mask = BIT(2),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[2],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(2),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[2],
.slot = 3,
},
{
.label = "lc4_synced",
.reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
.mask = BIT(3),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[3],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(3),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[3],
.slot = 4,
},
{
.label = "lc5_synced",
.reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
.mask = BIT(4),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[4],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(4),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[4],
.slot = 5,
},
{
.label = "lc6_synced",
.reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
.mask = BIT(5),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[5],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(5),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[5],
.slot = 6,
},
{
.label = "lc7_synced",
.reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
.mask = BIT(6),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[6],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(6),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[6],
.slot = 7,
},
{
.label = "lc8_synced",
.reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
.mask = BIT(7),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[7],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(7),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[7],
.slot = 8,
},
};
static struct mlxreg_core_data mlxplat_mlxcpld_modular_lc_act_data[] = {
{
.label = "lc1_active",
.reg = MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET,
.mask = BIT(0),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[0],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(0),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[0],
.slot = 1,
},
{
.label = "lc2_active",
.reg = MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET,
.mask = BIT(1),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[1],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(1),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[1],
.slot = 2,
},
{
.label = "lc3_active",
.reg = MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET,
.mask = BIT(2),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[2],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(2),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[2],
.slot = 3,
},
{
.label = "lc4_active",
.reg = MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET,
.mask = BIT(3),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[3],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(3),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[3],
.slot = 4,
},
{
.label = "lc5_active",
.reg = MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET,
.mask = BIT(4),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[4],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(4),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[4],
.slot = 5,
},
{
.label = "lc6_active",
.reg = MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET,
.mask = BIT(5),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[5],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(5),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[5],
.slot = 6,
},
{
.label = "lc7_active",
.reg = MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET,
.mask = BIT(6),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[6],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(6),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[6],
.slot = 7,
},
{
.label = "lc8_active",
.reg = MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET,
.mask = BIT(7),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[7],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(7),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[7],
.slot = 8,
},
};
static struct mlxreg_core_data mlxplat_mlxcpld_modular_lc_sd_data[] = {
{
.label = "lc1_shutdown",
.reg = MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET,
.mask = BIT(0),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[0],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(0),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[0],
.slot = 1,
},
{
.label = "lc2_shutdown",
.reg = MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET,
.mask = BIT(1),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[1],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(1),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[1],
.slot = 2,
},
{
.label = "lc3_shutdown",
.reg = MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET,
.mask = BIT(2),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[2],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(2),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[2],
.slot = 3,
},
{
.label = "lc4_shutdown",
.reg = MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET,
.mask = BIT(3),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[3],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(3),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[3],
.slot = 4,
},
{
.label = "lc5_shutdown",
.reg = MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET,
.mask = BIT(4),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[4],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(4),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[4],
.slot = 5,
},
{
.label = "lc6_shutdown",
.reg = MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET,
.mask = BIT(5),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[5],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(5),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[5],
.slot = 6,
},
{
.label = "lc7_shutdown",
.reg = MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET,
.mask = BIT(6),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[6],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(6),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[6],
.slot = 7,
},
{
.label = "lc8_shutdown",
.reg = MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET,
.mask = BIT(7),
.hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[7],
.hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(7),
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[7],
.slot = 8,
},
};
static struct mlxreg_core_item mlxplat_mlxcpld_modular_items[] = {
{
.data = mlxplat_mlxcpld_ext_psu_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = MLXPLAT_CPLD_PSU_EXT_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
.count = ARRAY_SIZE(mlxplat_mlxcpld_ext_psu_items_data),
.inversed = 1,
.health = false,
},
{
.data = mlxplat_mlxcpld_modular_pwr_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = MLXPLAT_CPLD_PWR_EXT_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
		.count = ARRAY_SIZE(mlxplat_mlxcpld_modular_pwr_items_data),
.inversed = 0,
.health = false,
},
{
.data = mlxplat_mlxcpld_default_ng_fan_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = MLXPLAT_CPLD_FAN_NG_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_fan_items_data),
.inversed = 1,
.health = false,
},
{
.data = mlxplat_mlxcpld_modular_asic_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
.mask = MLXPLAT_CPLD_ASIC_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_modular_asic_items_data),
.inversed = 0,
.health = true,
},
{
.data = mlxplat_mlxcpld_modular_lc_pr_items_data,
.kind = MLXREG_HOTPLUG_LC_PRESENT,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_LC,
.reg = MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET,
.mask = MLXPLAT_CPLD_LPC_LC_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_modular_lc_pr_items_data),
.inversed = 1,
.health = false,
},
{
.data = mlxplat_mlxcpld_modular_lc_ver_items_data,
.kind = MLXREG_HOTPLUG_LC_VERIFIED,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_LC,
.reg = MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET,
.mask = MLXPLAT_CPLD_LPC_LC_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_modular_lc_ver_items_data),
.inversed = 0,
.health = false,
},
{
.data = mlxplat_mlxcpld_modular_lc_pg_data,
.kind = MLXREG_HOTPLUG_LC_POWERED,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_LC,
.reg = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
.mask = MLXPLAT_CPLD_LPC_LC_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_modular_lc_pg_data),
.inversed = 0,
.health = false,
},
{
.data = mlxplat_mlxcpld_modular_lc_ready_data,
.kind = MLXREG_HOTPLUG_LC_READY,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_LC,
.reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
.mask = MLXPLAT_CPLD_LPC_LC_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_modular_lc_ready_data),
.inversed = 0,
.health = false,
},
{
.data = mlxplat_mlxcpld_modular_lc_synced_data,
.kind = MLXREG_HOTPLUG_LC_SYNCED,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_LC,
.reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
.mask = MLXPLAT_CPLD_LPC_LC_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_modular_lc_synced_data),
.inversed = 0,
.health = false,
},
{
.data = mlxplat_mlxcpld_modular_lc_act_data,
.kind = MLXREG_HOTPLUG_LC_ACTIVE,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_LC,
.reg = MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET,
.mask = MLXPLAT_CPLD_LPC_LC_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_modular_lc_act_data),
.inversed = 0,
.health = false,
},
{
.data = mlxplat_mlxcpld_modular_lc_sd_data,
.kind = MLXREG_HOTPLUG_LC_THERMAL,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_LC,
.reg = MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET,
.mask = MLXPLAT_CPLD_LPC_LC_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_modular_lc_sd_data),
.inversed = 0,
.health = false,
},
};
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_modular_data = {
.items = mlxplat_mlxcpld_modular_items,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_modular_items),
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_MODULAR,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
};
/* Platform hotplug for NVLink blade systems family data */
static struct mlxreg_core_data mlxplat_mlxcpld_global_wp_items_data[] = {
{
.label = "global_wp_grant",
.reg = MLXPLAT_CPLD_LPC_REG_GWP_OFFSET,
.mask = MLXPLAT_CPLD_GWP_MASK,
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
};
static struct mlxreg_core_item mlxplat_mlxcpld_chassis_blade_items[] = {
{
.data = mlxplat_mlxcpld_global_wp_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_GWP_OFFSET,
.mask = MLXPLAT_CPLD_GWP_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_global_wp_items_data),
.inversed = 0,
.health = false,
},
};
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_chassis_blade_data = {
.items = mlxplat_mlxcpld_chassis_blade_items,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_chassis_blade_items),
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_COMEX,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
};
/* Platform hotplug for rack switch systems family data */
static struct mlxreg_core_data mlxplat_mlxcpld_erot_ap_items_data[] = {
{
.label = "erot1_ap",
.reg = MLXPLAT_CPLD_LPC_REG_EROT_OFFSET,
.mask = BIT(0),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "erot2_ap",
.reg = MLXPLAT_CPLD_LPC_REG_EROT_OFFSET,
.mask = BIT(1),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
};
static struct mlxreg_core_data mlxplat_mlxcpld_erot_error_items_data[] = {
{
.label = "erot1_error",
.reg = MLXPLAT_CPLD_LPC_REG_EROTE_OFFSET,
.mask = BIT(0),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "erot2_error",
.reg = MLXPLAT_CPLD_LPC_REG_EROTE_OFFSET,
.mask = BIT(1),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
};
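/*
 * Rack switch hotplug item table: reuses the extended PSU/power and fan
 * event data and adds the eROT (presumably "external root of trust") AP and
 * error events defined above.
 */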
static struct mlxreg_core_item mlxplat_mlxcpld_rack_switch_items[] = {
{
.data = mlxplat_mlxcpld_ext_psu_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = MLXPLAT_CPLD_PSU_EXT_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
.count = ARRAY_SIZE(mlxplat_mlxcpld_ext_psu_items_data),
.inversed = 1,
.health = false,
},
{
.data = mlxplat_mlxcpld_ext_pwr_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = MLXPLAT_CPLD_PWR_EXT_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
.count = ARRAY_SIZE(mlxplat_mlxcpld_ext_pwr_items_data),
.inversed = 0,
.health = false,
},
{
.data = mlxplat_mlxcpld_default_ng_fan_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = MLXPLAT_CPLD_FAN_NG_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_fan_items_data),
.inversed = 1,
.health = false,
},
{
.data = mlxplat_mlxcpld_erot_ap_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_EROT_OFFSET,
.mask = MLXPLAT_CPLD_EROT_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_erot_ap_items_data),
.inversed = 1,
.health = false,
},
{
.data = mlxplat_mlxcpld_erot_error_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_EROTE_OFFSET,
.mask = MLXPLAT_CPLD_EROT_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_erot_error_items_data),
.inversed = 1,
.health = false,
},
};
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_rack_switch_data = {
.items = mlxplat_mlxcpld_rack_switch_items,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_rack_switch_items),
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF | MLXPLAT_CPLD_AGGR_MASK_COMEX,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
};
/* Callback performs graceful shutdown after notification about a power button event */
static int
mlxplat_mlxcpld_l1_switch_pwr_events_handler(void *handle, enum mlxreg_hotplug_kind kind,
u8 action)
{
if (action) {
dev_info(&mlxplat_dev->dev, "System shutdown due to short press of power button");
kernel_power_off();
}
return 0;
}
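/*
 * Notifier through which the hotplug core delivers power button events to
 * the handler above; it is wired into the event data below via
 * .hpdev.notifier.
 */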
static struct mlxreg_core_hotplug_notifier mlxplat_mlxcpld_l1_switch_pwr_events_notifier = {
.user_handler = mlxplat_mlxcpld_l1_switch_pwr_events_handler,
};
/* Platform hotplug for l1 switch systems family data */
static struct mlxreg_core_data mlxplat_mlxcpld_l1_switch_pwr_events_items_data[] = {
{
.label = "power_button",
.reg = MLXPLAT_CPLD_LPC_REG_PWRB_OFFSET,
.mask = MLXPLAT_CPLD_PWR_BUTTON_MASK,
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_l1_switch_pwr_events_notifier,
},
};
/* Callback activates latch reset flow after notification about an intrusion event */
static int
mlxplat_mlxcpld_l1_switch_intrusion_events_handler(void *handle, enum mlxreg_hotplug_kind kind,
u8 action)
{
struct mlxplat_priv *priv = platform_get_drvdata(mlxplat_dev);
u32 regval;
int err;
	err = regmap_read(priv->regmap, MLXPLAT_CPLD_LPC_REG_GP1_OFFSET, &regval);
	if (err)
		goto fail_regmap_access;
	if (action) {
		dev_info(&mlxplat_dev->dev, "Detected intrusion - system latch is opened\n");
		err = regmap_write(priv->regmap, MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
				   regval | MLXPLAT_CPLD_LATCH_RST_MASK);
	} else {
		dev_info(&mlxplat_dev->dev, "System latch is properly closed\n");
		err = regmap_write(priv->regmap, MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
				   regval & ~MLXPLAT_CPLD_LATCH_RST_MASK);
	}
	if (err)
		goto fail_regmap_access;
	return 0;
fail_regmap_access:
	dev_err(&mlxplat_dev->dev, "Register access failed\n");
	return err;
}
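/*
 * Notifier through which the hotplug core delivers intrusion events to the
 * latch reset handler above.
 */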
static struct mlxreg_core_hotplug_notifier mlxplat_mlxcpld_l1_switch_intrusion_events_notifier = {
.user_handler = mlxplat_mlxcpld_l1_switch_intrusion_events_handler,
};
static struct mlxreg_core_data mlxplat_mlxcpld_l1_switch_health_events_items_data[] = {
{
.label = "thermal1_pdb",
.reg = MLXPLAT_CPLD_LPC_REG_BRD_OFFSET,
.mask = MLXPLAT_CPLD_THERMAL1_PDB_MASK,
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "thermal2_pdb",
.reg = MLXPLAT_CPLD_LPC_REG_BRD_OFFSET,
.mask = MLXPLAT_CPLD_THERMAL2_PDB_MASK,
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
{
.label = "intrusion",
.reg = MLXPLAT_CPLD_LPC_REG_BRD_OFFSET,
.mask = MLXPLAT_CPLD_INTRUSION_MASK,
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
.hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_l1_switch_intrusion_events_notifier,
},
{
.label = "pwm_pg",
.reg = MLXPLAT_CPLD_LPC_REG_BRD_OFFSET,
.mask = MLXPLAT_CPLD_PWM_PG_MASK,
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
};
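/*
 * L1 switch event item table: fan, eROT and power button events plus the
 * board health events above (thermal PDB, intrusion and PWM power good).
 * The 'ind' field of the last item presumably selects a non-default index
 * for this item within the aggregation layout.
 */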
static struct mlxreg_core_item mlxplat_mlxcpld_l1_switch_events_items[] = {
{
.data = mlxplat_mlxcpld_default_ng_fan_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = MLXPLAT_CPLD_FAN_NG_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_fan_items_data),
.inversed = 1,
.health = false,
},
{
.data = mlxplat_mlxcpld_erot_ap_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_EROT_OFFSET,
.mask = MLXPLAT_CPLD_EROT_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_erot_ap_items_data),
.inversed = 1,
.health = false,
},
{
.data = mlxplat_mlxcpld_erot_error_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_EROTE_OFFSET,
.mask = MLXPLAT_CPLD_EROT_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_erot_error_items_data),
.inversed = 1,
.health = false,
},
{
.data = mlxplat_mlxcpld_l1_switch_pwr_events_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_PWRB_OFFSET,
.mask = MLXPLAT_CPLD_PWR_BUTTON_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_l1_switch_pwr_events_items_data),
.inversed = 1,
.health = false,
},
{
.data = mlxplat_mlxcpld_l1_switch_health_events_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_BRD_OFFSET,
.mask = MLXPLAT_CPLD_L1_CHA_HEALTH_MASK,
.count = ARRAY_SIZE(mlxplat_mlxcpld_l1_switch_health_events_items_data),
.inversed = 1,
.health = false,
.ind = 8,
},
};
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_l1_switch_data = {
.items = mlxplat_mlxcpld_l1_switch_events_items,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_l1_switch_events_items),
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF | MLXPLAT_CPLD_AGGR_MASK_COMEX,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW | MLXPLAT_CPLD_LOW_AGGR_MASK_PWR_BUT,
};
/* Platform led default data */
static struct mlxreg_core_data mlxplat_mlxcpld_default_led_data[] = {
{
.label = "status:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
{
.label = "status:red",
.reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
		.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
{
.label = "psu:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
},
{
.label = "psu:red",
.reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
},
{
.label = "fan1:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
{
.label = "fan1:red",
.reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
{
.label = "fan2:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
},
{
.label = "fan2:red",
.reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
},
{
.label = "fan3:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
{
.label = "fan3:red",
.reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
{
.label = "fan4:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
},
{
.label = "fan4:red",
.reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
},
};
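/*
 * Note the green/red pair of each LED above shares a single register nibble
 * (MLXPLAT_CPLD_LED_LO/HI_NIBBLE_MASK), so the color is presumably selected
 * by the value written into that nibble rather than by the mask itself.
 */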
static struct mlxreg_core_platform_data mlxplat_default_led_data = {
.data = mlxplat_mlxcpld_default_led_data,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_default_led_data),
};
/* Platform led default data for water cooling */
static struct mlxreg_core_data mlxplat_mlxcpld_default_led_wc_data[] = {
{
.label = "status:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
{
.label = "status:red",
.reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
		.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
{
.label = "psu:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
},
{
.label = "psu:red",
.reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
},
};
static struct mlxreg_core_platform_data mlxplat_default_led_wc_data = {
.data = mlxplat_mlxcpld_default_led_wc_data,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_default_led_wc_data),
};
/* Platform led default data for water cooling Ethernet switch blade */
static struct mlxreg_core_data mlxplat_mlxcpld_default_led_eth_wc_blade_data[] = {
{
.label = "status:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
{
.label = "status:red",
.reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
		.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
};
static struct mlxreg_core_platform_data mlxplat_default_led_eth_wc_blade_data = {
.data = mlxplat_mlxcpld_default_led_eth_wc_blade_data,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_default_led_eth_wc_blade_data),
};
/* Platform led MSN21xx system family data */
static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_led_data[] = {
{
.label = "status:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
{
.label = "status:red",
.reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
		.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
{
.label = "fan:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
{
.label = "fan:red",
.reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
{
.label = "psu1:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
{
.label = "psu1:red",
.reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
{
.label = "psu2:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
},
{
.label = "psu2:red",
.reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
},
{
.label = "uid:blue",
.reg = MLXPLAT_CPLD_LPC_REG_LED5_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
};
static struct mlxreg_core_platform_data mlxplat_msn21xx_led_data = {
.data = mlxplat_mlxcpld_msn21xx_led_data,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_msn21xx_led_data),
};
/* Platform led default data for 200GbE systems */
static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_led_data[] = {
{
.label = "status:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
{
.label = "status:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
		.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
{
.label = "psu:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
},
{
.label = "psu:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
},
{
.label = "fan1:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(0),
},
{
.label = "fan1:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(0),
},
{
.label = "fan2:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(1),
},
{
.label = "fan2:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(1),
},
{
.label = "fan3:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(2),
},
{
.label = "fan3:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(2),
},
{
.label = "fan4:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(3),
},
{
.label = "fan4:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(3),
},
{
.label = "fan5:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(4),
},
{
.label = "fan5:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(4),
},
{
.label = "fan6:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(5),
},
{
.label = "fan6:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(5),
},
{
.label = "fan7:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED6_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(6),
},
{
.label = "fan7:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED6_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(6),
},
{
.label = "uid:blue",
.reg = MLXPLAT_CPLD_LPC_REG_LED5_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
};
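/*
 * The fan LEDs above carry a 'capability' register and bit. Assuming the
 * common mlxreg convention, the LED is instantiated only when the matching
 * bit is set in the fan drawer capability register.
 */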
static struct mlxreg_core_platform_data mlxplat_default_ng_led_data = {
.data = mlxplat_mlxcpld_default_ng_led_data,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_led_data),
};
/* Platform led data for Comex-based 100GbE systems */
static struct mlxreg_core_data mlxplat_mlxcpld_comex_100G_led_data[] = {
{
.label = "status:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
{
.label = "status:red",
.reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
		.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
{
.label = "psu:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
},
{
.label = "psu:red",
.reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
},
{
.label = "fan1:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
{
.label = "fan1:red",
.reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
{
.label = "fan2:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
},
{
.label = "fan2:red",
.reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
},
{
.label = "fan3:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
{
.label = "fan3:red",
.reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
{
.label = "fan4:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
},
{
.label = "fan4:red",
.reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
},
{
.label = "uid:blue",
.reg = MLXPLAT_CPLD_LPC_REG_LED5_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
};
static struct mlxreg_core_platform_data mlxplat_comex_100G_led_data = {
.data = mlxplat_mlxcpld_comex_100G_led_data,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_comex_100G_led_data),
};
/* Platform led data for modular systems */
static struct mlxreg_core_data mlxplat_mlxcpld_modular_led_data[] = {
{
.label = "status:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
{
.label = "status:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
		.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
{
.label = "psu:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
},
{
.label = "psu:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
},
{
.label = "fan1:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(0),
},
{
.label = "fan1:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(0),
},
{
.label = "fan2:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(1),
},
{
.label = "fan2:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(1),
},
{
.label = "fan3:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(2),
},
{
.label = "fan3:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(2),
},
{
.label = "fan4:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(3),
},
{
.label = "fan4:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(3),
},
{
.label = "fan5:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(4),
},
{
.label = "fan5:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(4),
},
{
.label = "fan6:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(5),
},
{
.label = "fan6:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(5),
},
{
.label = "fan7:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED6_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(6),
},
{
.label = "fan7:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED6_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(6),
},
{
.label = "uid:blue",
.reg = MLXPLAT_CPLD_LPC_REG_LED5_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
{
.label = "fan_front:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED6_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
{
.label = "fan_front:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED6_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
{
.label = "mgmt:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED7_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
{
.label = "mgmt:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED7_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
};
static struct mlxreg_core_platform_data mlxplat_modular_led_data = {
.data = mlxplat_mlxcpld_modular_led_data,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_modular_led_data),
};
/* Platform led data for l1 switch systems family */
static struct mlxreg_core_data mlxplat_mlxcpld_l1_switch_led_data[] = {
{
.label = "status:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
{
.label = "status:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
		.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
{
.label = "fan1:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(0),
},
{
.label = "fan1:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(0),
},
{
.label = "fan2:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(1),
},
{
.label = "fan2:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(1),
},
{
.label = "fan3:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(2),
},
{
.label = "fan3:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(2),
},
{
.label = "fan4:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(3),
},
{
.label = "fan4:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(3),
},
{
.label = "fan5:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(4),
},
{
.label = "fan5:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(4),
},
{
.label = "fan6:green",
.reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(5),
},
{
.label = "fan6:orange",
.reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
.mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
.bit = BIT(5),
},
{
.label = "uid:blue",
.reg = MLXPLAT_CPLD_LPC_REG_LED5_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
},
};
static struct mlxreg_core_platform_data mlxplat_l1_switch_led_data = {
.data = mlxplat_mlxcpld_l1_switch_led_data,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_l1_switch_led_data),
};
/* Platform register access default */
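/*
 * Attribute conventions used throughout the register access tables below:
 * '.bit = GENMASK(7, 0)' exposes a whole read-only byte, '.mask =
 * GENMASK(7, 0) & ~BIT(n)' exposes single bit n (the mask apparently lists
 * the bits to preserve on update), '.regnum = 2' spans two consecutive
 * registers, and '.mode' is the sysfs attribute permission.
 */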
static struct mlxreg_core_data mlxplat_mlxcpld_default_regs_io_data[] = {
{
.label = "cpld1_version",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "cpld2_version",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "cpld1_pn",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET,
.bit = GENMASK(15, 0),
.mode = 0444,
.regnum = 2,
},
{
.label = "cpld2_pn",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD2_PN_OFFSET,
.bit = GENMASK(15, 0),
.mode = 0444,
.regnum = 2,
},
{
.label = "cpld1_version_min",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD1_MVER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "cpld2_version_min",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD2_MVER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "reset_long_pb",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
.mode = 0444,
},
{
.label = "reset_short_pb",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(1),
.mode = 0444,
},
{
.label = "reset_aux_pwr_or_ref",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(2),
.mode = 0444,
},
{
.label = "reset_main_pwr_fail",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
.mode = 0444,
},
{
.label = "reset_sw_reset",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(4),
.mode = 0444,
},
{
.label = "reset_fw_reset",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(5),
.mode = 0444,
},
{
.label = "reset_hotswap_or_wd",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
.mode = 0444,
},
{
.label = "reset_asic_thermal",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(7),
.mode = 0444,
},
{
.label = "psu1_on",
.reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
.mode = 0200,
},
{
.label = "psu2_on",
.reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(1),
.mode = 0200,
},
{
.label = "pwr_cycle",
.reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(2),
.mode = 0200,
},
{
.label = "pwr_down",
.reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
.mode = 0200,
},
{
.label = "select_iio",
.reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
.mode = 0644,
},
{
.label = "asic_health",
.reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
.mask = MLXPLAT_CPLD_ASIC_MASK,
.bit = 1,
.mode = 0444,
},
};
static struct mlxreg_core_platform_data mlxplat_default_regs_io_data = {
.data = mlxplat_mlxcpld_default_regs_io_data,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_default_regs_io_data),
};
/* Platform register access MSN21xx, MSN201x, MSN274x systems families data */
static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_regs_io_data[] = {
{
.label = "cpld1_version",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "cpld2_version",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "cpld1_pn",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET,
.bit = GENMASK(15, 0),
.mode = 0444,
.regnum = 2,
},
{
.label = "cpld2_pn",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD2_PN_OFFSET,
.bit = GENMASK(15, 0),
.mode = 0444,
.regnum = 2,
},
{
.label = "cpld1_version_min",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD1_MVER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "cpld2_version_min",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD2_MVER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "reset_long_pb",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
.mode = 0444,
},
{
.label = "reset_short_pb",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(1),
.mode = 0444,
},
{
.label = "reset_aux_pwr_or_ref",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(2),
.mode = 0444,
},
{
.label = "reset_sw_reset",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
.mode = 0444,
},
{
.label = "reset_main_pwr_fail",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(4),
.mode = 0444,
},
{
.label = "reset_asic_thermal",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(5),
.mode = 0444,
},
{
.label = "reset_hotswap_or_halt",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
.mode = 0444,
},
{
.label = "reset_sff_wd",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
.mode = 0444,
},
{
.label = "psu1_on",
.reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
.mode = 0200,
},
{
.label = "psu2_on",
.reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(1),
.mode = 0200,
},
{
.label = "pwr_cycle",
.reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(2),
.mode = 0200,
},
{
.label = "pwr_down",
.reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
.mode = 0200,
},
{
.label = "select_iio",
.reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
.mode = 0644,
},
{
.label = "asic_health",
.reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
.mask = MLXPLAT_CPLD_ASIC_MASK,
.bit = 1,
.mode = 0444,
},
};
static struct mlxreg_core_platform_data mlxplat_msn21xx_regs_io_data = {
.data = mlxplat_mlxcpld_msn21xx_regs_io_data,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_msn21xx_regs_io_data),
};
/* Platform register access for next generation systems families data */
static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
{
.label = "cpld1_version",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "cpld2_version",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "cpld3_version",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD3_VER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "cpld4_version",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD4_VER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "cpld5_version",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD5_VER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "cpld1_pn",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET,
.bit = GENMASK(15, 0),
.mode = 0444,
.regnum = 2,
},
{
.label = "cpld2_pn",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD2_PN_OFFSET,
.bit = GENMASK(15, 0),
.mode = 0444,
.regnum = 2,
},
{
.label = "cpld3_pn",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD3_PN_OFFSET,
.bit = GENMASK(15, 0),
.mode = 0444,
.regnum = 2,
},
{
.label = "cpld4_pn",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD4_PN_OFFSET,
.bit = GENMASK(15, 0),
.mode = 0444,
.regnum = 2,
},
{
.label = "cpld5_pn",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD5_PN_OFFSET,
.bit = GENMASK(15, 0),
.mode = 0444,
.regnum = 2,
},
{
.label = "cpld1_version_min",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD1_MVER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "cpld2_version_min",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD2_MVER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "cpld3_version_min",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD3_MVER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "cpld4_version_min",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD4_MVER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "cpld5_version_min",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD5_MVER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "asic_reset",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_GP2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
.mode = 0200,
},
{
.label = "asic2_reset",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_GP2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(2),
.mode = 0200,
},
{
.label = "erot1_reset",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_GP2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
.mode = 0644,
},
{
.label = "erot2_reset",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_GP2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(7),
.mode = 0644,
},
{
.label = "clk_brd_prog_en",
.reg = MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(1),
.mode = 0644,
.secured = 1,
},
{
.label = "erot1_recovery",
.reg = MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
.mode = 0644,
},
{
.label = "erot2_recovery",
.reg = MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(7),
.mode = 0644,
},
{
.label = "erot1_wp",
.reg = MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(4),
.mode = 0644,
.secured = 1,
},
{
.label = "erot2_wp",
.reg = MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(5),
.mode = 0644,
.secured = 1,
},
{
.label = "reset_long_pb",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
.mode = 0444,
},
{
.label = "reset_short_pb",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(1),
.mode = 0444,
},
{
.label = "reset_aux_pwr_or_ref",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(2),
.mode = 0444,
},
{
.label = "reset_swb_dc_dc_pwr_fail",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
.mode = 0444,
},
{
.label = "reset_from_asic",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(5),
.mode = 0444,
},
{
.label = "reset_swb_wd",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
.mode = 0444,
},
{
.label = "reset_asic_thermal",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(7),
.mode = 0444,
},
{
.label = "reset_sw_reset",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
.mode = 0444,
},
{
.label = "reset_comex_pwr_fail",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
.mode = 0444,
},
{
.label = "reset_platform",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(4),
.mode = 0444,
},
{
.label = "reset_soc",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(5),
.mode = 0444,
},
{
.label = "reset_comex_wd",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
.mode = 0444,
},
{
.label = "reset_pwr_converter_fail",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
.mode = 0444,
},
{
.label = "reset_system",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(1),
.mode = 0444,
},
{
.label = "reset_sw_pwr_off",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(2),
.mode = 0444,
},
{
.label = "reset_comex_thermal",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
.mode = 0444,
},
{
.label = "reset_reload_bios",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(5),
.mode = 0444,
},
{
.label = "reset_ac_pwr_fail",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
.mode = 0444,
},
{
.label = "reset_ac_ok_fail",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(7),
.mode = 0444,
},
{
.label = "psu1_on",
.reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
.mode = 0200,
},
{
.label = "psu2_on",
.reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(1),
.mode = 0200,
},
{
.label = "pwr_cycle",
.reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(2),
.mode = 0200,
},
{
.label = "pwr_down",
.reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
.mode = 0200,
},
{
.label = "deep_pwr_cycle",
.reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(5),
.mode = 0200,
},
{
.label = "latch_reset",
.reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
.mode = 0200,
},
{
.label = "jtag_cap",
.reg = MLXPLAT_CPLD_LPC_REG_FU_CAP_OFFSET,
.mask = MLXPLAT_CPLD_FU_CAP_MASK,
.bit = 1,
.mode = 0444,
},
{
.label = "jtag_enable",
.reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(4),
.mode = 0644,
},
{
.label = "dbg1",
.reg = MLXPLAT_CPLD_LPC_REG_DBG1_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0644,
},
{
.label = "dbg2",
.reg = MLXPLAT_CPLD_LPC_REG_DBG2_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0644,
},
{
.label = "dbg3",
.reg = MLXPLAT_CPLD_LPC_REG_DBG3_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0644,
},
{
.label = "dbg4",
.reg = MLXPLAT_CPLD_LPC_REG_DBG4_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0644,
},
{
.label = "asic_health",
.reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
.mask = MLXPLAT_CPLD_ASIC_MASK,
.bit = 1,
.mode = 0444,
},
{
.label = "asic2_health",
.reg = MLXPLAT_CPLD_LPC_REG_ASIC2_HEALTH_OFFSET,
.mask = MLXPLAT_CPLD_ASIC_MASK,
.bit = 1,
.mode = 0444,
},
{
.label = "fan_dir",
.reg = MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "bios_safe_mode",
.reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(4),
.mode = 0444,
},
{
.label = "bios_active_image",
.reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(5),
.mode = 0444,
},
{
.label = "bios_auth_fail",
.reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
.mode = 0444,
},
{
.label = "bios_upgrade_fail",
.reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(7),
.mode = 0444,
},
{
.label = "voltreg_update_status",
.reg = MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET,
.mask = MLXPLAT_CPLD_VOLTREG_UPD_MASK,
.bit = 5,
.mode = 0444,
},
{
.label = "pwr_converter_prog_en",
.reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
.mode = 0644,
.secured = 1,
},
{
.label = "vpd_wp",
.reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
.mode = 0644,
},
{
.label = "pcie_asic_reset_dis",
.reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(4),
.mode = 0644,
},
{
.label = "erot1_ap_reset",
.reg = MLXPLAT_CPLD_LPC_REG_GP4_RO_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
.mode = 0444,
},
{
.label = "erot2_ap_reset",
.reg = MLXPLAT_CPLD_LPC_REG_GP4_RO_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(1),
.mode = 0444,
},
{
.label = "lid_open",
.reg = MLXPLAT_CPLD_LPC_REG_GP4_RO_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(2),
.mode = 0444,
},
{
.label = "clk_brd1_boot_fail",
.reg = MLXPLAT_CPLD_LPC_REG_GP4_RO_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(4),
.mode = 0444,
},
{
.label = "clk_brd2_boot_fail",
.reg = MLXPLAT_CPLD_LPC_REG_GP4_RO_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(5),
.mode = 0444,
},
{
.label = "clk_brd_fail",
.reg = MLXPLAT_CPLD_LPC_REG_GP4_RO_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
.mode = 0444,
},
{
.label = "asic_pg_fail",
.reg = MLXPLAT_CPLD_LPC_REG_GP4_RO_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(7),
.mode = 0444,
},
{
.label = "spi_chnl_select",
.reg = MLXPLAT_CPLD_LPC_REG_SPI_CHNL_SELECT,
.mask = GENMASK(7, 0),
.bit = 1,
.mode = 0644,
},
{
.label = "config1",
.reg = MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "config2",
.reg = MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "config3",
.reg = MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "ufm_version",
.reg = MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
};
static struct mlxreg_core_platform_data mlxplat_default_ng_regs_io_data = {
.data = mlxplat_mlxcpld_default_ng_regs_io_data,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_regs_io_data),
};
/* Platform register access for modular systems families data */
static struct mlxreg_core_data mlxplat_mlxcpld_modular_regs_io_data[] = {
{
.label = "cpld1_version",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "cpld2_version",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "cpld3_version",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD3_VER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "cpld4_version",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD4_VER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "cpld1_pn",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET,
.bit = GENMASK(15, 0),
.mode = 0444,
.regnum = 2,
},
{
.label = "cpld2_pn",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD2_PN_OFFSET,
.bit = GENMASK(15, 0),
.mode = 0444,
.regnum = 2,
},
{
.label = "cpld3_pn",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD3_PN_OFFSET,
.bit = GENMASK(15, 0),
.mode = 0444,
.regnum = 2,
},
{
.label = "cpld4_pn",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD4_PN_OFFSET,
.bit = GENMASK(15, 0),
.mode = 0444,
.regnum = 2,
},
{
.label = "cpld1_version_min",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD1_MVER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "cpld2_version_min",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD2_MVER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "cpld3_version_min",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD3_MVER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "cpld4_version_min",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD4_MVER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "lc1_enable",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
.mode = 0644,
},
{
.label = "lc2_enable",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(1),
.mode = 0644,
},
{
.label = "lc3_enable",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(2),
.mode = 0644,
},
{
.label = "lc4_enable",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
.mode = 0644,
},
{
.label = "lc5_enable",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(4),
.mode = 0644,
},
{
.label = "lc6_enable",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(5),
.mode = 0644,
},
{
.label = "lc7_enable",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
.mode = 0644,
},
{
.label = "lc8_enable",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(7),
.mode = 0644,
},
{
.label = "reset_long_pb",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
.mode = 0444,
},
{
.label = "reset_short_pb",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(1),
.mode = 0444,
},
{
.label = "reset_aux_pwr_or_fu",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(2),
.mode = 0444,
},
{
.label = "reset_mgmt_dc_dc_pwr_fail",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
.mode = 0444,
},
{
.label = "reset_sys_comex_bios",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(5),
.mode = 0444,
},
{
.label = "reset_sw_reset",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
.mode = 0444,
},
{
.label = "reset_aux_pwr_or_reload",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(2),
.mode = 0444,
},
{
.label = "reset_comex_pwr_fail",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
.mode = 0444,
},
{
.label = "reset_platform",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(4),
.mode = 0444,
},
{
.label = "reset_soc",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(5),
.mode = 0444,
},
{
.label = "reset_pwr_off_from_carrier",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(7),
.mode = 0444,
},
{
.label = "reset_swb_wd",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
.mode = 0444,
},
{
.label = "reset_swb_aux_pwr_or_fu",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(2),
.mode = 0444,
},
{
.label = "reset_swb_dc_dc_pwr_fail",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
.mode = 0444,
},
{
.label = "reset_swb_12v_fail",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(4),
.mode = 0444,
},
{
.label = "reset_system",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(5),
.mode = 0444,
},
{
.label = "reset_thermal_spc_or_pciesw",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(7),
.mode = 0444,
},
{
.label = "bios_safe_mode",
.reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(4),
.mode = 0444,
},
{
.label = "bios_active_image",
.reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(5),
.mode = 0444,
},
{
.label = "bios_auth_fail",
.reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
.mode = 0444,
},
{
.label = "bios_upgrade_fail",
.reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(7),
.mode = 0444,
},
{
.label = "voltreg_update_status",
.reg = MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET,
.mask = MLXPLAT_CPLD_VOLTREG_UPD_MASK,
.bit = 5,
.mode = 0444,
},
{
.label = "vpd_wp",
.reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
.mode = 0644,
},
{
.label = "pcie_asic_reset_dis",
.reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(4),
.mode = 0644,
},
{
.label = "shutdown_unlock",
.reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(5),
.mode = 0644,
},
{
.label = "lc1_rst_mask",
.reg = MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
.mode = 0200,
},
{
.label = "lc2_rst_mask",
.reg = MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(1),
.mode = 0200,
},
{
.label = "lc3_rst_mask",
.reg = MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(2),
.mode = 0200,
},
{
.label = "lc4_rst_mask",
.reg = MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
.mode = 0200,
},
{
.label = "lc5_rst_mask",
.reg = MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(4),
.mode = 0200,
},
{
.label = "lc6_rst_mask",
.reg = MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(5),
.mode = 0200,
},
{
.label = "lc7_rst_mask",
.reg = MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
.mode = 0200,
},
{
.label = "lc8_rst_mask",
.reg = MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(7),
.mode = 0200,
},
{
.label = "psu1_on",
.reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
.mode = 0200,
},
{
.label = "psu2_on",
.reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(1),
.mode = 0200,
},
{
.label = "pwr_cycle",
.reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(2),
.mode = 0200,
},
{
.label = "pwr_down",
.reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
.mode = 0200,
},
{
.label = "psu3_on",
.reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(4),
.mode = 0200,
},
{
.label = "psu4_on",
.reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(5),
.mode = 0200,
},
{
.label = "auto_power_mode",
.reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
.mode = 0644,
},
{
.label = "pm_mgmt_en",
.reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(7),
.mode = 0644,
},
{
.label = "jtag_enable",
.reg = MLXPLAT_CPLD_LPC_REG_FIELD_UPGRADE,
.mask = GENMASK(3, 0),
.bit = 1,
.mode = 0644,
},
{
.label = "safe_bios_dis",
.reg = MLXPLAT_CPLD_LPC_SAFE_BIOS_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(5),
.mode = 0644,
},
{
.label = "safe_bios_dis_wp",
.reg = MLXPLAT_CPLD_LPC_SAFE_BIOS_WP_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(5),
.mode = 0644,
},
{
.label = "asic_health",
.reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
.mask = MLXPLAT_CPLD_ASIC_MASK,
.bit = 1,
.mode = 0444,
},
{
.label = "fan_dir",
.reg = MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "lc1_pwr",
.reg = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
.mask = GENMASK(7, 0) & ~BIT(0),
.mode = 0644,
},
{
.label = "lc2_pwr",
.reg = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
.mask = GENMASK(7, 0) & ~BIT(1),
.mode = 0644,
},
{
.label = "lc3_pwr",
.reg = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
.mask = GENMASK(7, 0) & ~BIT(2),
.mode = 0644,
},
{
.label = "lc4_pwr",
.reg = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
.mask = GENMASK(7, 0) & ~BIT(3),
.mode = 0644,
},
{
.label = "lc5_pwr",
.reg = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
.mask = GENMASK(7, 0) & ~BIT(4),
.mode = 0644,
},
{
.label = "lc6_pwr",
.reg = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
.mask = GENMASK(7, 0) & ~BIT(5),
.mode = 0644,
},
{
.label = "lc7_pwr",
.reg = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
.mask = GENMASK(7, 0) & ~BIT(6),
.mode = 0644,
},
{
.label = "lc8_pwr",
.reg = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
.mask = GENMASK(7, 0) & ~BIT(7),
.mode = 0644,
},
{
.label = "config1",
.reg = MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "config2",
.reg = MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "config3",
.reg = MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "ufm_version",
.reg = MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
};
static struct mlxreg_core_platform_data mlxplat_modular_regs_io_data = {
.data = mlxplat_mlxcpld_modular_regs_io_data,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_modular_regs_io_data),
};
/* Platform register access for chassis blade systems family data */
static struct mlxreg_core_data mlxplat_mlxcpld_chassis_blade_regs_io_data[] = {
{
.label = "cpld1_version",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "cpld1_pn",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET,
.bit = GENMASK(15, 0),
.mode = 0444,
.regnum = 2,
},
{
.label = "cpld1_version_min",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD1_MVER_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "reset_aux_pwr_or_ref",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(2),
.mode = 0444,
},
{
.label = "reset_from_comex",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(4),
.mode = 0444,
},
{
.label = "reset_comex_pwr_fail",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
.mode = 0444,
},
{
.label = "reset_platform",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(4),
.mode = 0444,
},
{
.label = "reset_soc",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(5),
.mode = 0444,
},
{
.label = "reset_comex_wd",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
.mode = 0444,
},
{
.label = "reset_voltmon_upgrade_fail",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
.mode = 0444,
},
{
.label = "reset_system",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(1),
.mode = 0444,
},
{
.label = "reset_sw_pwr_off",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(2),
.mode = 0444,
},
{
.label = "reset_comex_thermal",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
.mode = 0444,
},
{
.label = "reset_reload_bios",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(5),
.mode = 0444,
},
{
.label = "reset_ac_pwr_fail",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
.mode = 0444,
},
{
.label = "reset_long_pwr_pb",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(7),
.mode = 0444,
},
{
.label = "pwr_cycle",
.reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(2),
.mode = 0200,
},
{
.label = "pwr_down",
.reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
.mode = 0200,
},
{
.label = "global_wp_request",
.reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
.mode = 0644,
},
{
.label = "jtag_enable",
.reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(4),
.mode = 0644,
},
{
.label = "comm_chnl_ready",
.reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
.mode = 0200,
},
{
.label = "bios_safe_mode",
.reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(4),
.mode = 0444,
},
{
.label = "bios_active_image",
.reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(5),
.mode = 0444,
},
{
.label = "bios_auth_fail",
.reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
.mode = 0444,
},
{
.label = "bios_upgrade_fail",
.reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(7),
.mode = 0444,
},
{
.label = "voltreg_update_status",
.reg = MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET,
.mask = MLXPLAT_CPLD_VOLTREG_UPD_MASK,
.bit = 5,
.mode = 0444,
},
{
.label = "vpd_wp",
.reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
.mode = 0644,
},
{
.label = "pcie_asic_reset_dis",
.reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(4),
.mode = 0644,
},
{
.label = "global_wp_response",
.reg = MLXPLAT_CPLD_LPC_REG_GWP_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
.mode = 0444,
},
{
.label = "config1",
.reg = MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "config2",
.reg = MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "config3",
.reg = MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
{
.label = "ufm_version",
.reg = MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET,
.bit = GENMASK(7, 0),
.mode = 0444,
},
};
static struct mlxreg_core_platform_data mlxplat_chassis_blade_regs_io_data = {
.data = mlxplat_mlxcpld_chassis_blade_regs_io_data,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_chassis_blade_regs_io_data),
};
/* Platform FAN default */
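/*
 * Each tachometer entry below maps a reading register to its capability bit
 * in the fan capability registers and, where set, to the fan drawer
 * presence register ('reg_prsnt'), so readings are presumably exposed only
 * for populated fan slots.
 */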
static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_data[] = {
{
.label = "pwm1",
.reg = MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET,
},
{
.label = "pwm2",
.reg = MLXPLAT_CPLD_LPC_REG_PWM2_OFFSET,
},
{
.label = "pwm3",
.reg = MLXPLAT_CPLD_LPC_REG_PWM3_OFFSET,
},
{
.label = "pwm4",
.reg = MLXPLAT_CPLD_LPC_REG_PWM4_OFFSET,
},
{
.label = "tacho1",
.reg = MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET,
.mask = GENMASK(7, 0),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
.bit = BIT(0),
.reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
},
{
.label = "tacho2",
.reg = MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET,
.mask = GENMASK(7, 0),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
.bit = BIT(1),
.reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
},
{
.label = "tacho3",
.reg = MLXPLAT_CPLD_LPC_REG_TACHO3_OFFSET,
.mask = GENMASK(7, 0),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
.bit = BIT(2),
.reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
},
{
.label = "tacho4",
.reg = MLXPLAT_CPLD_LPC_REG_TACHO4_OFFSET,
.mask = GENMASK(7, 0),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
.bit = BIT(3),
.reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
},
{
.label = "tacho5",
.reg = MLXPLAT_CPLD_LPC_REG_TACHO5_OFFSET,
.mask = GENMASK(7, 0),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
.bit = BIT(4),
.reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
},
{
.label = "tacho6",
.reg = MLXPLAT_CPLD_LPC_REG_TACHO6_OFFSET,
.mask = GENMASK(7, 0),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
.bit = BIT(5),
.reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
},
{
.label = "tacho7",
.reg = MLXPLAT_CPLD_LPC_REG_TACHO7_OFFSET,
.mask = GENMASK(7, 0),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
.bit = BIT(6),
.reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
},
{
.label = "tacho8",
.reg = MLXPLAT_CPLD_LPC_REG_TACHO8_OFFSET,
.mask = GENMASK(7, 0),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET,
.bit = BIT(7),
.reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
},
{
.label = "tacho9",
.reg = MLXPLAT_CPLD_LPC_REG_TACHO9_OFFSET,
.mask = GENMASK(7, 0),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET,
.bit = BIT(0),
.reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
},
{
.label = "tacho10",
.reg = MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET,
.mask = GENMASK(7, 0),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET,
.bit = BIT(1),
.reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
},
{
.label = "tacho11",
.reg = MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET,
.mask = GENMASK(7, 0),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET,
.bit = BIT(2),
.reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
},
{
.label = "tacho12",
.reg = MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET,
.mask = GENMASK(7, 0),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET,
.bit = BIT(3),
.reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
},
{
.label = "tacho13",
.reg = MLXPLAT_CPLD_LPC_REG_TACHO13_OFFSET,
.mask = GENMASK(7, 0),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET,
.bit = BIT(4),
},
{
.label = "tacho14",
.reg = MLXPLAT_CPLD_LPC_REG_TACHO14_OFFSET,
.mask = GENMASK(7, 0),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET,
.bit = BIT(5),
},
{
.label = "conf",
.capability = MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET,
},
};
static struct mlxreg_core_platform_data mlxplat_default_fan_data = {
.data = mlxplat_mlxcpld_default_fan_data,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_default_fan_data),
.capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
};
/* Watchdog type1: hardware implementation version1
* (MSN2700, MSN2410, MSN2740, MSN2100 and MSN2140 systems).
*/
static struct mlxreg_core_data mlxplat_mlxcpld_wd_main_regs_type1[] = {
{
.label = "action",
.reg = MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET,
.mask = MLXPLAT_CPLD_WD_RESET_ACT_MASK,
.bit = 0,
},
{
.label = "timeout",
.reg = MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET,
.mask = MLXPLAT_CPLD_WD_TYPE1_TO_MASK,
.health_cntr = MLXPLAT_CPLD_WD_DFLT_TIMEOUT,
},
{
.label = "ping",
.reg = MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET,
.mask = MLXPLAT_CPLD_WD1_CLEAR_MASK,
.bit = 0,
},
{
.label = "reset",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
.bit = 6,
},
};
static struct mlxreg_core_data mlxplat_mlxcpld_wd_aux_regs_type1[] = {
{
.label = "action",
.reg = MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET,
.mask = MLXPLAT_CPLD_WD_FAN_ACT_MASK,
.bit = 4,
},
{
.label = "timeout",
.reg = MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET,
.mask = MLXPLAT_CPLD_WD_TYPE1_TO_MASK,
.health_cntr = MLXPLAT_CPLD_WD_DFLT_TIMEOUT,
},
{
.label = "ping",
.reg = MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET,
.mask = MLXPLAT_CPLD_WD1_CLEAR_MASK,
.bit = 1,
},
};
static struct mlxreg_core_platform_data mlxplat_mlxcpld_wd_set_type1[] = {
{
.data = mlxplat_mlxcpld_wd_main_regs_type1,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_main_regs_type1),
.version = MLX_WDT_TYPE1,
.identity = "mlx-wdt-main",
},
{
.data = mlxplat_mlxcpld_wd_aux_regs_type1,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_aux_regs_type1),
.version = MLX_WDT_TYPE1,
.identity = "mlx-wdt-aux",
},
};
/* Watchdog type2: hardware implementation version 2
 * (all systems except MSN2700, MSN2410, MSN2740, MSN2100 and MSN2140).
 */
static struct mlxreg_core_data mlxplat_mlxcpld_wd_main_regs_type2[] = {
{
.label = "action",
.reg = MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET,
.mask = MLXPLAT_CPLD_WD_RESET_ACT_MASK,
.bit = 0,
},
{
.label = "timeout",
.reg = MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET,
.mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
.health_cntr = MLXPLAT_CPLD_WD_DFLT_TIMEOUT,
},
{
.label = "timeleft",
.reg = MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET,
.mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
},
{
.label = "ping",
.reg = MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET,
.mask = MLXPLAT_CPLD_WD_RESET_ACT_MASK,
.bit = 0,
},
{
.label = "reset",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
.bit = 6,
},
};
static struct mlxreg_core_data mlxplat_mlxcpld_wd_aux_regs_type2[] = {
{
.label = "action",
.reg = MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET,
.mask = MLXPLAT_CPLD_WD_FAN_ACT_MASK,
.bit = 4,
},
{
.label = "timeout",
.reg = MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET,
.mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
.health_cntr = MLXPLAT_CPLD_WD_DFLT_TIMEOUT,
},
{
.label = "timeleft",
.reg = MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET,
.mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
},
{
.label = "ping",
.reg = MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET,
.mask = MLXPLAT_CPLD_WD_FAN_ACT_MASK,
.bit = 4,
},
};
static struct mlxreg_core_platform_data mlxplat_mlxcpld_wd_set_type2[] = {
{
.data = mlxplat_mlxcpld_wd_main_regs_type2,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_main_regs_type2),
.version = MLX_WDT_TYPE2,
.identity = "mlx-wdt-main",
},
{
.data = mlxplat_mlxcpld_wd_aux_regs_type2,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_aux_regs_type2),
.version = MLX_WDT_TYPE2,
.identity = "mlx-wdt-aux",
},
};
/* Watchdog type3: hardware implementation version 3.
 * Can be present on all systems. It is differentiated by the WD
 * capability bit. Old systems (MSN2700, MSN2410, MSN2740, MSN2100
 * and MSN2140) still have only one main watchdog.
 */
static struct mlxreg_core_data mlxplat_mlxcpld_wd_main_regs_type3[] = {
{
.label = "action",
.reg = MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET,
.mask = MLXPLAT_CPLD_WD_RESET_ACT_MASK,
.bit = 0,
},
{
.label = "timeout",
.reg = MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET,
.mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
.health_cntr = MLXPLAT_CPLD_WD3_DFLT_TIMEOUT,
},
{
.label = "timeleft",
.reg = MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET,
.mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
},
{
.label = "ping",
.reg = MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET,
.mask = MLXPLAT_CPLD_WD_RESET_ACT_MASK,
.bit = 0,
},
{
.label = "reset",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
.bit = 6,
},
};
static struct mlxreg_core_data mlxplat_mlxcpld_wd_aux_regs_type3[] = {
{
.label = "action",
.reg = MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET,
.mask = MLXPLAT_CPLD_WD_FAN_ACT_MASK,
.bit = 4,
},
{
.label = "timeout",
.reg = MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET,
.mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
.health_cntr = MLXPLAT_CPLD_WD3_DFLT_TIMEOUT,
},
{
.label = "timeleft",
.reg = MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET,
.mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
},
{
.label = "ping",
.reg = MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET,
.mask = MLXPLAT_CPLD_WD_FAN_ACT_MASK,
.bit = 4,
},
};
static struct mlxreg_core_platform_data mlxplat_mlxcpld_wd_set_type3[] = {
{
.data = mlxplat_mlxcpld_wd_main_regs_type3,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_main_regs_type3),
.version = MLX_WDT_TYPE3,
.identity = "mlx-wdt-main",
},
{
.data = mlxplat_mlxcpld_wd_aux_regs_type3,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_aux_regs_type3),
.version = MLX_WDT_TYPE3,
.identity = "mlx-wdt-aux",
},
};
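/* Regmap access tables. A CPLD register must be listed in the relevant
 * callback below to be writeable, readable or treated as volatile
 * (i.e. always read from hardware, bypassing the register cache).
 */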
static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case MLXPLAT_CPLD_LPC_REG_RESET_GP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED6_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED7_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FIELD_UPGRADE:
case MLXPLAT_CPLD_LPC_SAFE_BIOS_OFFSET:
case MLXPLAT_CPLD_LPC_SAFE_BIOS_WP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FU_CAP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_DBG1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_DBG2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_DBG3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_DBG4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GWP_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GWP_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_BRD_OFFSET:
case MLXPLAT_CPLD_LPC_REG_BRD_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_BRD_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC2_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC2_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWR_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWR_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_EROT_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_EROT_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_EROTE_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_EROTE_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWRB_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWRB_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLC_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_IN_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_IN_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_VR_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_VR_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_PG_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_PG_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_RD_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_RD_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_OK_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_OK_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_SN_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_SN_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_SD_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_SD_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_PWR_ON:
case MLXPLAT_CPLD_LPC_REG_SPI_CHNL_SELECT:
case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_DBG_CTRL_OFFSET:
case MLXPLAT_CPLD_LPC_REG_I2C_CH1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_I2C_CH2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_I2C_CH3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_I2C_CH4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET:
return true;
}
return false;
}
static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD3_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD4_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD5_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD1_PN1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD2_PN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD2_PN1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD3_PN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD3_PN1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD4_PN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD4_PN1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD5_PN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD5_PN1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RESET_GP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED6_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED7_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION:
case MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FIELD_UPGRADE:
case MLXPLAT_CPLD_LPC_SAFE_BIOS_OFFSET:
case MLXPLAT_CPLD_LPC_SAFE_BIOS_WP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FU_CAP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_DBG1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_DBG2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_DBG3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_DBG4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GWP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GWP_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GWP_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_BRD_OFFSET:
case MLXPLAT_CPLD_LPC_REG_BRD_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_BRD_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC2_HEALTH_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC2_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC2_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWR_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWR_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_EROT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_EROT_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_EROT_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_EROTE_OFFSET:
case MLXPLAT_CPLD_LPC_REG_EROTE_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_EROTE_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWRB_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWRB_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWRB_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLC_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLC_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_IN_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_IN_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_VR_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_VR_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_PG_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_PG_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_RD_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_RD_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_OK_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_OK_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_SN_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_SN_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_SD_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_SD_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_PWR_ON:
case MLXPLAT_CPLD_LPC_REG_GP4_RO_OFFSET:
case MLXPLAT_CPLD_LPC_REG_SPI_CHNL_SELECT:
case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_DBG_CTRL_OFFSET:
case MLXPLAT_CPLD_LPC_REG_I2C_CH1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_I2C_CH2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_I2C_CH3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_I2C_CH4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD1_MVER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD2_MVER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD3_MVER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD4_MVER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD5_MVER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO5_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO6_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO7_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO8_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO9_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO13_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO14_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_SLOT_QTY_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET:
return true;
}
return false;
}
static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD3_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD4_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD5_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD1_PN1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD2_PN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD2_PN1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD3_PN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD3_PN1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD4_PN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD4_PN1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD5_PN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD5_PN1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RESET_GP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED6_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED7_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION:
case MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FIELD_UPGRADE:
case MLXPLAT_CPLD_LPC_SAFE_BIOS_OFFSET:
case MLXPLAT_CPLD_LPC_SAFE_BIOS_WP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FU_CAP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_DBG1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_DBG2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_DBG3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_DBG4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GWP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GWP_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GWP_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_BRD_OFFSET:
case MLXPLAT_CPLD_LPC_REG_BRD_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_BRD_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC2_HEALTH_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC2_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC2_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWR_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWR_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_EROT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_EROT_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_EROT_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_EROTE_OFFSET:
case MLXPLAT_CPLD_LPC_REG_EROTE_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_EROTE_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWRB_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWRB_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWRB_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLC_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLC_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_IN_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_IN_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_VR_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_VR_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_PG_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_PG_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_RD_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_RD_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_OK_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_OK_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_SN_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_SN_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_SD_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_SD_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_PWR_ON:
case MLXPLAT_CPLD_LPC_REG_GP4_RO_OFFSET:
case MLXPLAT_CPLD_LPC_REG_SPI_CHNL_SELECT:
case MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_DBG_CTRL_OFFSET:
case MLXPLAT_CPLD_LPC_REG_I2C_CH1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_I2C_CH2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_I2C_CH3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_I2C_CH4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD1_MVER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD2_MVER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD3_MVER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD4_MVER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD5_MVER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO5_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO6_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO7_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO8_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO9_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO13_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO14_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_SLOT_QTY_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET:
return true;
}
return false;
}
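/* Per-system register default tables. The probe routine writes these
 * values to the CPLD before synchronizing the register cache.
 */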
static const struct reg_default mlxplat_mlxcpld_regmap_default[] = {
{ MLXPLAT_CPLD_LPC_REG_WP1_OFFSET, 0x00 },
{ MLXPLAT_CPLD_LPC_REG_WP2_OFFSET, 0x00 },
{ MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 },
{ MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET, 0x00 },
};
static const struct reg_default mlxplat_mlxcpld_regmap_ng[] = {
{ MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 },
{ MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET, 0x00 },
};
static const struct reg_default mlxplat_mlxcpld_regmap_comex_default[] = {
{ MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET,
MLXPLAT_CPLD_LOW_AGGRCX_MASK },
{ MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 },
};
static const struct reg_default mlxplat_mlxcpld_regmap_ng400[] = {
{ MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 },
{ MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET, 0x00 },
{ MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET, 0x00 },
{ MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET, 0x00 },
};
static const struct reg_default mlxplat_mlxcpld_regmap_rack_switch[] = {
{ MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, MLXPLAT_REGMAP_NVSWITCH_PWM_DEFAULT },
{ MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET, 0x00 },
{ MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET, 0x00 },
{ MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET, 0x00 },
};
static const struct reg_default mlxplat_mlxcpld_regmap_eth_modular[] = {
{ MLXPLAT_CPLD_LPC_REG_GP2_OFFSET, 0x61 },
{ MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 },
{ MLXPLAT_CPLD_LPC_REG_PWM2_OFFSET, 0x00 },
{ MLXPLAT_CPLD_LPC_REG_PWM3_OFFSET, 0x00 },
{ MLXPLAT_CPLD_LPC_REG_PWM4_OFFSET, 0x00 },
{ MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET, 0x00 },
{ MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET, 0x00 },
{ MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET, 0x00 },
{ MLXPLAT_CPLD_LPC_REG_AGGRLC_MASK_OFFSET,
MLXPLAT_CPLD_AGGR_MASK_LC_LOW },
};
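/* Context for raw CPLD register access through the mapped LPC or PCIe
 * window, used by the regmap read/write callbacks below.
 */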
struct mlxplat_mlxcpld_regmap_context {
void __iomem *base;
};
static struct mlxplat_mlxcpld_regmap_context mlxplat_mlxcpld_regmap_ctx;
static int
mlxplat_mlxcpld_reg_read(void *context, unsigned int reg, unsigned int *val)
{
struct mlxplat_mlxcpld_regmap_context *ctx = context;
*val = ioread8(ctx->base + reg);
return 0;
}
static int
mlxplat_mlxcpld_reg_write(void *context, unsigned int reg, unsigned int val)
{
struct mlxplat_mlxcpld_regmap_context *ctx = context;
iowrite8(val, ctx->base + reg);
return 0;
}
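/* Regmap configurations. All variants describe the same 8-bit register
 * space and share the access callbacks; they differ only in their
 * register default tables.
 */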
static const struct regmap_config mlxplat_mlxcpld_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 255,
.cache_type = REGCACHE_FLAT,
.writeable_reg = mlxplat_mlxcpld_writeable_reg,
.readable_reg = mlxplat_mlxcpld_readable_reg,
.volatile_reg = mlxplat_mlxcpld_volatile_reg,
.reg_defaults = mlxplat_mlxcpld_regmap_default,
.num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_default),
.reg_read = mlxplat_mlxcpld_reg_read,
.reg_write = mlxplat_mlxcpld_reg_write,
};
static const struct regmap_config mlxplat_mlxcpld_regmap_config_ng = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 255,
.cache_type = REGCACHE_FLAT,
.writeable_reg = mlxplat_mlxcpld_writeable_reg,
.readable_reg = mlxplat_mlxcpld_readable_reg,
.volatile_reg = mlxplat_mlxcpld_volatile_reg,
.reg_defaults = mlxplat_mlxcpld_regmap_ng,
.num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_ng),
.reg_read = mlxplat_mlxcpld_reg_read,
.reg_write = mlxplat_mlxcpld_reg_write,
};
static const struct regmap_config mlxplat_mlxcpld_regmap_config_comex = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 255,
.cache_type = REGCACHE_FLAT,
.writeable_reg = mlxplat_mlxcpld_writeable_reg,
.readable_reg = mlxplat_mlxcpld_readable_reg,
.volatile_reg = mlxplat_mlxcpld_volatile_reg,
.reg_defaults = mlxplat_mlxcpld_regmap_comex_default,
.num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_comex_default),
.reg_read = mlxplat_mlxcpld_reg_read,
.reg_write = mlxplat_mlxcpld_reg_write,
};
static const struct regmap_config mlxplat_mlxcpld_regmap_config_ng400 = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 255,
.cache_type = REGCACHE_FLAT,
.writeable_reg = mlxplat_mlxcpld_writeable_reg,
.readable_reg = mlxplat_mlxcpld_readable_reg,
.volatile_reg = mlxplat_mlxcpld_volatile_reg,
.reg_defaults = mlxplat_mlxcpld_regmap_ng400,
.num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_ng400),
.reg_read = mlxplat_mlxcpld_reg_read,
.reg_write = mlxplat_mlxcpld_reg_write,
};
static const struct regmap_config mlxplat_mlxcpld_regmap_config_rack_switch = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 255,
.cache_type = REGCACHE_FLAT,
.writeable_reg = mlxplat_mlxcpld_writeable_reg,
.readable_reg = mlxplat_mlxcpld_readable_reg,
.volatile_reg = mlxplat_mlxcpld_volatile_reg,
.reg_defaults = mlxplat_mlxcpld_regmap_rack_switch,
.num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_rack_switch),
.reg_read = mlxplat_mlxcpld_reg_read,
.reg_write = mlxplat_mlxcpld_reg_write,
};
static const struct regmap_config mlxplat_mlxcpld_regmap_config_eth_modular = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 255,
.cache_type = REGCACHE_FLAT,
.writeable_reg = mlxplat_mlxcpld_writeable_reg,
.readable_reg = mlxplat_mlxcpld_readable_reg,
.volatile_reg = mlxplat_mlxcpld_volatile_reg,
.reg_defaults = mlxplat_mlxcpld_regmap_eth_modular,
.num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_eth_modular),
.reg_read = mlxplat_mlxcpld_reg_read,
.reg_write = mlxplat_mlxcpld_reg_write,
};
static struct resource mlxplat_mlxcpld_resources[] = {
[0] = DEFINE_RES_IRQ_NAMED(MLXPLAT_CPLD_LPC_SYSIRQ, "mlxreg-hotplug"),
};
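/* Platform data pointers, filled by the DMI match callbacks below
 * according to the detected system type.
 */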
static struct mlxreg_core_hotplug_platform_data *mlxplat_i2c;
static struct mlxreg_core_hotplug_platform_data *mlxplat_hotplug;
static struct mlxreg_core_platform_data *mlxplat_led;
static struct mlxreg_core_platform_data *mlxplat_regs_io;
static struct mlxreg_core_platform_data *mlxplat_fan;
static struct mlxreg_core_platform_data
*mlxplat_wd_data[MLXPLAT_CPLD_WD_MAX_DEVS];
static const struct regmap_config *mlxplat_regmap_config;
static struct pci_dev *lpc_bridge;
static struct pci_dev *i2c_bridge;
static struct pci_dev *jtag_bridge;
/* Platform default reset function */
static int mlxplat_reboot_notifier(struct notifier_block *nb, unsigned long action, void *unused)
{
struct mlxplat_priv *priv = platform_get_drvdata(mlxplat_dev);
u32 regval;
int ret;
	ret = regmap_read(priv->regmap, MLXPLAT_CPLD_LPC_REG_RESET_GP1_OFFSET, &regval);
if (action == SYS_RESTART && !ret && regval & MLXPLAT_CPLD_SYS_RESET_MASK)
regmap_write(priv->regmap, MLXPLAT_CPLD_LPC_REG_RESET_GP1_OFFSET,
MLXPLAT_CPLD_RESET_MASK);
return NOTIFY_DONE;
}
static struct notifier_block mlxplat_reboot_default_nb = {
.notifier_call = mlxplat_reboot_notifier,
};
/* Platform default poweroff function */
static void mlxplat_poweroff(void)
{
struct mlxplat_priv *priv = platform_get_drvdata(mlxplat_dev);
if (mlxplat_reboot_nb)
unregister_reboot_notifier(mlxplat_reboot_nb);
regmap_write(priv->regmap, MLXPLAT_CPLD_LPC_REG_GP1_OFFSET, MLXPLAT_CPLD_HALT_MASK);
kernel_halt();
}
static int __init mlxplat_register_platform_device(void)
{
mlxplat_dev = platform_device_register_simple(MLX_PLAT_DEVICE_NAME, -1,
mlxplat_lpc_resources,
ARRAY_SIZE(mlxplat_lpc_resources));
if (IS_ERR(mlxplat_dev))
return PTR_ERR(mlxplat_dev);
else
return 1;
}
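/* DMI match callbacks. Each callback selects the mux, hotplug, LED,
 * register I/O, fan and watchdog platform data for its system family
 * and then registers the "mlxplat" platform device. The non-zero
 * return value stops further DMI table scanning.
 */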
static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi)
{
int i;
mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
mlxplat_mux_data = mlxplat_default_mux_data;
for (i = 0; i < mlxplat_mux_num; i++) {
mlxplat_mux_data[i].values = mlxplat_default_channels[i];
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_default_channels[i]);
}
mlxplat_hotplug = &mlxplat_mlxcpld_default_data;
mlxplat_hotplug->deferred_nr =
mlxplat_default_channels[i - 1][MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
mlxplat_led = &mlxplat_default_led_data;
mlxplat_regs_io = &mlxplat_default_regs_io_data;
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
mlxplat_i2c = &mlxplat_mlxcpld_i2c_default_data;
return mlxplat_register_platform_device();
}
static int __init mlxplat_dmi_default_wc_matched(const struct dmi_system_id *dmi)
{
int i;
mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
mlxplat_mux_data = mlxplat_default_mux_data;
for (i = 0; i < mlxplat_mux_num; i++) {
mlxplat_mux_data[i].values = mlxplat_default_channels[i];
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_default_channels[i]);
}
mlxplat_hotplug = &mlxplat_mlxcpld_default_wc_data;
mlxplat_hotplug->deferred_nr =
mlxplat_default_channels[i - 1][MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
mlxplat_led = &mlxplat_default_led_wc_data;
mlxplat_regs_io = &mlxplat_default_regs_io_data;
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
mlxplat_i2c = &mlxplat_mlxcpld_i2c_default_data;
return mlxplat_register_platform_device();
}
static int __init mlxplat_dmi_default_eth_wc_blade_matched(const struct dmi_system_id *dmi)
{
int i;
mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
mlxplat_mux_data = mlxplat_default_mux_data;
for (i = 0; i < mlxplat_mux_num; i++) {
mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_msn21xx_channels);
}
mlxplat_hotplug = &mlxplat_mlxcpld_default_wc_data;
mlxplat_hotplug->deferred_nr =
mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
mlxplat_led = &mlxplat_default_led_eth_wc_blade_data;
mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng;
return mlxplat_register_platform_device();
}
static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi)
{
int i;
mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
mlxplat_mux_data = mlxplat_default_mux_data;
for (i = 0; i < mlxplat_mux_num; i++) {
mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_msn21xx_channels);
}
mlxplat_hotplug = &mlxplat_mlxcpld_msn21xx_data;
mlxplat_hotplug->deferred_nr =
mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
mlxplat_led = &mlxplat_msn21xx_led_data;
mlxplat_regs_io = &mlxplat_msn21xx_regs_io_data;
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
mlxplat_i2c = &mlxplat_mlxcpld_i2c_default_data;
return mlxplat_register_platform_device();
}
static int __init mlxplat_dmi_msn274x_matched(const struct dmi_system_id *dmi)
{
int i;
mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
mlxplat_mux_data = mlxplat_default_mux_data;
for (i = 0; i < mlxplat_mux_num; i++) {
mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_msn21xx_channels);
}
mlxplat_hotplug = &mlxplat_mlxcpld_msn274x_data;
mlxplat_hotplug->deferred_nr =
mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
mlxplat_led = &mlxplat_default_led_data;
mlxplat_regs_io = &mlxplat_msn21xx_regs_io_data;
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
mlxplat_i2c = &mlxplat_mlxcpld_i2c_default_data;
return mlxplat_register_platform_device();
}
static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi)
{
int i;
mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
mlxplat_mux_data = mlxplat_default_mux_data;
for (i = 0; i < mlxplat_mux_num; i++) {
mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_msn21xx_channels);
}
mlxplat_hotplug = &mlxplat_mlxcpld_msn201x_data;
mlxplat_hotplug->deferred_nr =
mlxplat_default_channels[i - 1][MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
mlxplat_led = &mlxplat_msn21xx_led_data;
mlxplat_regs_io = &mlxplat_msn21xx_regs_io_data;
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
mlxplat_i2c = &mlxplat_mlxcpld_i2c_default_data;
return mlxplat_register_platform_device();
}
static int __init mlxplat_dmi_qmb7xx_matched(const struct dmi_system_id *dmi)
{
int i;
mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
mlxplat_mux_data = mlxplat_default_mux_data;
for (i = 0; i < mlxplat_mux_num; i++) {
mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_msn21xx_channels);
}
mlxplat_hotplug = &mlxplat_mlxcpld_default_ng_data;
mlxplat_hotplug->deferred_nr =
mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
mlxplat_led = &mlxplat_default_ng_led_data;
mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
mlxplat_fan = &mlxplat_default_fan_data;
for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng;
return mlxplat_register_platform_device();
}
static int __init mlxplat_dmi_comex_matched(const struct dmi_system_id *dmi)
{
int i;
mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM;
mlxplat_mux_num = ARRAY_SIZE(mlxplat_extended_mux_data);
mlxplat_mux_data = mlxplat_extended_mux_data;
for (i = 0; i < mlxplat_mux_num; i++) {
mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_msn21xx_channels);
}
mlxplat_hotplug = &mlxplat_mlxcpld_comex_data;
mlxplat_hotplug->deferred_nr = MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM;
mlxplat_led = &mlxplat_comex_100G_led_data;
mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
mlxplat_fan = &mlxplat_default_fan_data;
for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
mlxplat_i2c = &mlxplat_mlxcpld_i2c_default_data;
mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_comex;
return mlxplat_register_platform_device();
}
static int __init mlxplat_dmi_ng400_matched(const struct dmi_system_id *dmi)
{
int i;
mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
mlxplat_mux_data = mlxplat_default_mux_data;
for (i = 0; i < mlxplat_mux_num; i++) {
mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_msn21xx_channels);
}
mlxplat_hotplug = &mlxplat_mlxcpld_ext_data;
mlxplat_hotplug->deferred_nr =
mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
mlxplat_led = &mlxplat_default_ng_led_data;
mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
mlxplat_fan = &mlxplat_default_fan_data;
for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng400;
return mlxplat_register_platform_device();
}
static int __init mlxplat_dmi_modular_matched(const struct dmi_system_id *dmi)
{
int i;
mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
mlxplat_mux_num = ARRAY_SIZE(mlxplat_modular_mux_data);
mlxplat_mux_data = mlxplat_modular_mux_data;
mlxplat_hotplug = &mlxplat_mlxcpld_modular_data;
mlxplat_hotplug->deferred_nr = MLXPLAT_CPLD_CH4_ETH_MODULAR;
mlxplat_led = &mlxplat_modular_led_data;
mlxplat_regs_io = &mlxplat_modular_regs_io_data;
mlxplat_fan = &mlxplat_default_fan_data;
for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_eth_modular;
return mlxplat_register_platform_device();
}
static int __init mlxplat_dmi_chassis_blade_matched(const struct dmi_system_id *dmi)
{
int i;
mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
mlxplat_mux_data = mlxplat_default_mux_data;
mlxplat_hotplug = &mlxplat_mlxcpld_chassis_blade_data;
mlxplat_hotplug->deferred_nr =
mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
for (i = 0; i < mlxplat_mux_num; i++) {
mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_msn21xx_channels);
}
mlxplat_regs_io = &mlxplat_chassis_blade_regs_io_data;
mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng400;
return mlxplat_register_platform_device();
}
static int __init mlxplat_dmi_rack_switch_matched(const struct dmi_system_id *dmi)
{
int i;
mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
mlxplat_mux_num = ARRAY_SIZE(mlxplat_rack_switch_mux_data);
mlxplat_mux_data = mlxplat_rack_switch_mux_data;
mlxplat_hotplug = &mlxplat_mlxcpld_rack_switch_data;
mlxplat_hotplug->deferred_nr =
mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
mlxplat_led = &mlxplat_default_ng_led_data;
mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
mlxplat_fan = &mlxplat_default_fan_data;
for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_rack_switch;
return mlxplat_register_platform_device();
}
static int __init mlxplat_dmi_ng800_matched(const struct dmi_system_id *dmi)
{
int i;
mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
mlxplat_mux_num = ARRAY_SIZE(mlxplat_ng800_mux_data);
mlxplat_mux_data = mlxplat_ng800_mux_data;
mlxplat_hotplug = &mlxplat_mlxcpld_ng800_data;
mlxplat_hotplug->deferred_nr =
mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
mlxplat_led = &mlxplat_default_ng_led_data;
mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
mlxplat_fan = &mlxplat_default_fan_data;
for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng400;
return mlxplat_register_platform_device();
}
static int __init mlxplat_dmi_l1_switch_matched(const struct dmi_system_id *dmi)
{
int i;
mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
mlxplat_mux_num = ARRAY_SIZE(mlxplat_rack_switch_mux_data);
mlxplat_mux_data = mlxplat_rack_switch_mux_data;
mlxplat_hotplug = &mlxplat_mlxcpld_l1_switch_data;
mlxplat_hotplug->deferred_nr =
mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
mlxplat_led = &mlxplat_l1_switch_led_data;
mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
mlxplat_fan = &mlxplat_default_fan_data;
for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_rack_switch;
pm_power_off = mlxplat_poweroff;
mlxplat_reboot_nb = &mlxplat_reboot_default_nb;
return mlxplat_register_platform_device();
}
static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
{
.callback = mlxplat_dmi_default_wc_matched,
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "VMOD0001"),
DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "HI138"),
},
},
{
.callback = mlxplat_dmi_default_matched,
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "VMOD0001"),
},
},
{
.callback = mlxplat_dmi_msn21xx_matched,
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "VMOD0002"),
},
},
{
.callback = mlxplat_dmi_msn274x_matched,
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "VMOD0003"),
},
},
{
.callback = mlxplat_dmi_msn201x_matched,
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "VMOD0004"),
},
},
{
.callback = mlxplat_dmi_default_eth_wc_blade_matched,
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "VMOD0005"),
DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "HI139"),
},
},
{
.callback = mlxplat_dmi_qmb7xx_matched,
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "VMOD0005"),
},
},
{
.callback = mlxplat_dmi_qmb7xx_matched,
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "VMOD0007"),
},
},
{
.callback = mlxplat_dmi_comex_matched,
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "VMOD0009"),
},
},
{
.callback = mlxplat_dmi_rack_switch_matched,
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "VMOD0010"),
DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "HI142"),
},
},
{
.callback = mlxplat_dmi_ng400_matched,
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "VMOD0010"),
},
},
{
.callback = mlxplat_dmi_modular_matched,
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "VMOD0011"),
},
},
{
.callback = mlxplat_dmi_ng800_matched,
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "VMOD0013"),
},
},
{
.callback = mlxplat_dmi_chassis_blade_matched,
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "VMOD0015"),
},
},
{
.callback = mlxplat_dmi_l1_switch_matched,
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "VMOD0017"),
},
},
{
.callback = mlxplat_dmi_msn274x_matched,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
DMI_MATCH(DMI_PRODUCT_NAME, "MSN274"),
},
},
{
.callback = mlxplat_dmi_default_matched,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
DMI_MATCH(DMI_PRODUCT_NAME, "MSN24"),
},
},
{
.callback = mlxplat_dmi_default_matched,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
DMI_MATCH(DMI_PRODUCT_NAME, "MSN27"),
},
},
{
.callback = mlxplat_dmi_default_matched,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
DMI_MATCH(DMI_PRODUCT_NAME, "MSB"),
},
},
{
.callback = mlxplat_dmi_default_matched,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
DMI_MATCH(DMI_PRODUCT_NAME, "MSX"),
},
},
{
.callback = mlxplat_dmi_msn21xx_matched,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
DMI_MATCH(DMI_PRODUCT_NAME, "MSN21"),
},
},
{
.callback = mlxplat_dmi_msn201x_matched,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
DMI_MATCH(DMI_PRODUCT_NAME, "MSN201"),
},
},
{
.callback = mlxplat_dmi_qmb7xx_matched,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
DMI_MATCH(DMI_PRODUCT_NAME, "MQM87"),
},
},
{
.callback = mlxplat_dmi_qmb7xx_matched,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
DMI_MATCH(DMI_PRODUCT_NAME, "MSN37"),
},
},
{
.callback = mlxplat_dmi_qmb7xx_matched,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
DMI_MATCH(DMI_PRODUCT_NAME, "MSN34"),
},
},
{
.callback = mlxplat_dmi_qmb7xx_matched,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
DMI_MATCH(DMI_PRODUCT_NAME, "MSN38"),
},
},
{ }
};
MODULE_DEVICE_TABLE(dmi, mlxplat_dmi_table);
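/* Find a free physical I2C adapter number for the main bus. If the
 * expected parent adapter number is already taken, shift the mux
 * parent and base adapter numbers accordingly.
 */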
static int mlxplat_mlxcpld_verify_bus_topology(int *nr)
{
struct i2c_adapter *search_adap;
int i, shift = 0;
/* Scan adapters from expected id to verify it is free. */
*nr = MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR;
for (i = MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR; i <
mlxplat_max_adap_num; i++) {
search_adap = i2c_get_adapter(i);
if (search_adap) {
i2c_put_adapter(search_adap);
continue;
}
/* Return if expected parent adapter is free. */
if (i == MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR)
return 0;
break;
}
	/* Return an error if no free adapter id was found. */
if (i == mlxplat_max_adap_num)
return -ENODEV;
/* Shift adapter ids, since expected parent adapter is not free. */
*nr = i;
for (i = 0; i < mlxplat_mux_num; i++) {
shift = *nr - mlxplat_mux_data[i].parent;
mlxplat_mux_data[i].parent = *nr;
mlxplat_mux_data[i].base_nr += shift;
}
if (shift > 0)
mlxplat_hotplug->shift_nr = shift;
return 0;
}
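/* Replace the preselected watchdog platform data with the type3
 * variant when the watchdog capability register indicates type3
 * hardware.
 */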
static int mlxplat_mlxcpld_check_wd_capability(void *regmap)
{
u32 regval;
int i, rc;
rc = regmap_read(regmap, MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
			 &regval);
if (rc)
return rc;
if (!(regval & ~MLXPLAT_CPLD_WD_CPBLTY_MASK)) {
for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type3); i++) {
if (mlxplat_wd_data[i])
mlxplat_wd_data[i] =
&mlxplat_mlxcpld_wd_set_type3[i];
}
}
return 0;
}
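/* Map the LPC I/O region used for CPLD register access and expose the
 * hotplug interrupt resource.
 */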
static int mlxplat_lpc_cpld_device_init(struct resource **hotplug_resources,
unsigned int *hotplug_resources_size)
{
int err;
mlxplat_mlxcpld_regmap_ctx.base = devm_ioport_map(&mlxplat_dev->dev,
mlxplat_lpc_resources[1].start, 1);
if (!mlxplat_mlxcpld_regmap_ctx.base) {
err = -ENOMEM;
goto fail_devm_ioport_map;
}
*hotplug_resources = mlxplat_mlxcpld_resources;
*hotplug_resources_size = ARRAY_SIZE(mlxplat_mlxcpld_resources);
return 0;
fail_devm_ioport_map:
return err;
}
static void mlxplat_lpc_cpld_device_exit(void)
{
}
static int
mlxplat_pci_fpga_device_init(unsigned int device, const char *res_name, struct pci_dev **pci_bridge,
void __iomem **pci_bridge_addr)
{
void __iomem *pci_mem_addr;
struct pci_dev *pci_dev;
int err;
pci_dev = pci_get_device(PCI_VENDOR_ID_LATTICE, device, NULL);
if (!pci_dev)
return -ENODEV;
err = pci_enable_device(pci_dev);
if (err) {
dev_err(&pci_dev->dev, "pci_enable_device failed with error %d\n", err);
goto fail_pci_enable_device;
}
err = pci_request_region(pci_dev, 0, res_name);
if (err) {
dev_err(&pci_dev->dev, "pci_request_regions failed with error %d\n", err);
goto fail_pci_request_regions;
}
err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
if (err) {
err = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pci_dev->dev, "dma_set_mask failed with error %d\n", err);
goto fail_pci_set_dma_mask;
}
}
pci_set_master(pci_dev);
pci_mem_addr = devm_ioremap(&pci_dev->dev, pci_resource_start(pci_dev, 0),
pci_resource_len(pci_dev, 0));
if (!pci_mem_addr) {
dev_err(&mlxplat_dev->dev, "ioremap failed\n");
err = -EIO;
goto fail_ioremap;
}
*pci_bridge = pci_dev;
*pci_bridge_addr = pci_mem_addr;
return 0;
fail_ioremap:
fail_pci_set_dma_mask:
pci_release_regions(pci_dev);
fail_pci_request_regions:
pci_disable_device(pci_dev);
fail_pci_enable_device:
return err;
}
static void
mlxplat_pci_fpga_device_exit(struct pci_dev *pci_bridge,
void __iomem *pci_bridge_addr)
{
iounmap(pci_bridge_addr);
pci_release_regions(pci_bridge);
pci_disable_device(pci_bridge);
}
static int
mlxplat_pci_fpga_devices_init(struct resource **hotplug_resources,
unsigned int *hotplug_resources_size)
{
int err;
err = mlxplat_pci_fpga_device_init(PCI_DEVICE_ID_LATTICE_LPC_BRIDGE,
"mlxplat_lpc_bridge", &lpc_bridge,
&mlxplat_mlxcpld_regmap_ctx.base);
if (err)
goto mlxplat_pci_fpga_device_init_lpc_fail;
err = mlxplat_pci_fpga_device_init(PCI_DEVICE_ID_LATTICE_I2C_BRIDGE,
"mlxplat_i2c_bridge", &i2c_bridge,
&i2c_bridge_addr);
if (err)
goto mlxplat_pci_fpga_device_init_i2c_fail;
err = mlxplat_pci_fpga_device_init(PCI_DEVICE_ID_LATTICE_JTAG_BRIDGE,
"mlxplat_jtag_bridge", &jtag_bridge,
&jtag_bridge_addr);
if (err)
goto mlxplat_pci_fpga_device_init_jtag_fail;
return 0;
mlxplat_pci_fpga_device_init_jtag_fail:
mlxplat_pci_fpga_device_exit(i2c_bridge, i2c_bridge_addr);
mlxplat_pci_fpga_device_init_i2c_fail:
mlxplat_pci_fpga_device_exit(lpc_bridge, mlxplat_mlxcpld_regmap_ctx.base);
mlxplat_pci_fpga_device_init_lpc_fail:
return err;
}
static void mlxplat_pci_fpga_devices_exit(void)
{
mlxplat_pci_fpga_device_exit(jtag_bridge, jtag_bridge_addr);
mlxplat_pci_fpga_device_exit(i2c_bridge, i2c_bridge_addr);
mlxplat_pci_fpga_device_exit(lpc_bridge, mlxplat_mlxcpld_regmap_ctx.base);
}
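/* Probe the FPGA PCIe bridge devices first; fall back to LPC-attached
 * CPLD access when no such devices are found.
 */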
static int
mlxplat_pre_init(struct resource **hotplug_resources, unsigned int *hotplug_resources_size)
{
int err;
err = mlxplat_pci_fpga_devices_init(hotplug_resources, hotplug_resources_size);
if (err == -ENODEV)
return mlxplat_lpc_cpld_device_init(hotplug_resources, hotplug_resources_size);
return err;
}
static void mlxplat_post_exit(void)
{
if (lpc_bridge)
mlxplat_pci_fpga_devices_exit();
else
mlxplat_lpc_cpld_device_exit();
}
static int mlxplat_post_init(struct mlxplat_priv *priv)
{
int i = 0, err;
/* Add hotplug driver */
if (mlxplat_hotplug) {
mlxplat_hotplug->regmap = priv->regmap;
if (priv->irq_fpga)
mlxplat_hotplug->irq = priv->irq_fpga;
priv->pdev_hotplug =
platform_device_register_resndata(&mlxplat_dev->dev,
"mlxreg-hotplug", PLATFORM_DEVID_NONE,
priv->hotplug_resources,
priv->hotplug_resources_size,
mlxplat_hotplug, sizeof(*mlxplat_hotplug));
if (IS_ERR(priv->pdev_hotplug)) {
err = PTR_ERR(priv->pdev_hotplug);
goto fail_platform_hotplug_register;
}
}
/* Add LED driver. */
if (mlxplat_led) {
mlxplat_led->regmap = priv->regmap;
priv->pdev_led =
platform_device_register_resndata(&mlxplat_dev->dev, "leds-mlxreg",
PLATFORM_DEVID_NONE, NULL, 0, mlxplat_led,
sizeof(*mlxplat_led));
if (IS_ERR(priv->pdev_led)) {
err = PTR_ERR(priv->pdev_led);
goto fail_platform_leds_register;
}
}
/* Add registers io access driver. */
if (mlxplat_regs_io) {
mlxplat_regs_io->regmap = priv->regmap;
priv->pdev_io_regs = platform_device_register_resndata(&mlxplat_dev->dev,
"mlxreg-io",
PLATFORM_DEVID_NONE, NULL,
0, mlxplat_regs_io,
sizeof(*mlxplat_regs_io));
if (IS_ERR(priv->pdev_io_regs)) {
err = PTR_ERR(priv->pdev_io_regs);
goto fail_platform_io_register;
}
}
/* Add FAN driver. */
if (mlxplat_fan) {
mlxplat_fan->regmap = priv->regmap;
priv->pdev_fan = platform_device_register_resndata(&mlxplat_dev->dev, "mlxreg-fan",
PLATFORM_DEVID_NONE, NULL, 0,
mlxplat_fan,
sizeof(*mlxplat_fan));
if (IS_ERR(priv->pdev_fan)) {
err = PTR_ERR(priv->pdev_fan);
goto fail_platform_fan_register;
}
}
/* Add WD drivers. */
err = mlxplat_mlxcpld_check_wd_capability(priv->regmap);
if (err)
goto fail_platform_wd_register;
for (i = 0; i < MLXPLAT_CPLD_WD_MAX_DEVS; i++) {
if (mlxplat_wd_data[i]) {
mlxplat_wd_data[i]->regmap = priv->regmap;
priv->pdev_wd[i] =
platform_device_register_resndata(&mlxplat_dev->dev, "mlx-wdt", i,
NULL, 0, mlxplat_wd_data[i],
sizeof(*mlxplat_wd_data[i]));
if (IS_ERR(priv->pdev_wd[i])) {
err = PTR_ERR(priv->pdev_wd[i]);
goto fail_platform_wd_register;
}
}
}
return 0;
fail_platform_wd_register:
	while (--i >= 0)
		platform_device_unregister(priv->pdev_wd[i]);
	if (mlxplat_fan)
		platform_device_unregister(priv->pdev_fan);
fail_platform_fan_register:
if (mlxplat_regs_io)
platform_device_unregister(priv->pdev_io_regs);
fail_platform_io_register:
if (mlxplat_led)
platform_device_unregister(priv->pdev_led);
fail_platform_leds_register:
if (mlxplat_hotplug)
platform_device_unregister(priv->pdev_hotplug);
fail_platform_hotplug_register:
return err;
}
static void mlxplat_pre_exit(struct mlxplat_priv *priv)
{
int i;
	for (i = MLXPLAT_CPLD_WD_MAX_DEVS - 1; i >= 0; i--)
platform_device_unregister(priv->pdev_wd[i]);
if (priv->pdev_fan)
platform_device_unregister(priv->pdev_fan);
if (priv->pdev_io_regs)
platform_device_unregister(priv->pdev_io_regs);
if (priv->pdev_led)
platform_device_unregister(priv->pdev_led);
if (priv->pdev_hotplug)
platform_device_unregister(priv->pdev_hotplug);
}
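/* Completion callback invoked once the I2C mux topology is ready; it
 * triggers registration of the register-based child devices.
 */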
static int
mlxplat_i2c_mux_complition_notify(void *handle, struct i2c_adapter *parent,
struct i2c_adapter *adapters[])
{
struct mlxplat_priv *priv = handle;
return mlxplat_post_init(priv);
}
static int mlxplat_i2c_mux_topology_init(struct mlxplat_priv *priv)
{
int i, err;
if (!priv->pdev_i2c) {
priv->i2c_main_init_status = MLXPLAT_I2C_MAIN_BUS_NOTIFIED;
return 0;
}
priv->i2c_main_init_status = MLXPLAT_I2C_MAIN_BUS_HANDLE_CREATED;
for (i = 0; i < mlxplat_mux_num; i++) {
priv->pdev_mux[i] = platform_device_register_resndata(&priv->pdev_i2c->dev,
"i2c-mux-reg", i, NULL, 0,
&mlxplat_mux_data[i],
sizeof(mlxplat_mux_data[i]));
if (IS_ERR(priv->pdev_mux[i])) {
err = PTR_ERR(priv->pdev_mux[i]);
goto fail_platform_mux_register;
}
}
return mlxplat_i2c_mux_complition_notify(priv, NULL, NULL);
fail_platform_mux_register:
while (--i >= 0)
platform_device_unregister(priv->pdev_mux[i]);
return err;
}
static void mlxplat_i2c_mux_topology_exit(struct mlxplat_priv *priv)
{
int i;
	for (i = mlxplat_mux_num - 1; i >= 0; i--) {
if (priv->pdev_mux[i])
platform_device_unregister(priv->pdev_mux[i]);
}
}
static int mlxplat_i2c_main_complition_notify(void *handle, int id)
{
struct mlxplat_priv *priv = handle;
return mlxplat_i2c_mux_topology_init(priv);
}
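/*
 * Register the main "i2c_mlxcpld" bus device. The adapter number comes from
 * the verified bus topology; mlxplat_max_adap_num is mapped to -1, i.e. a
 * dynamically assigned adapter number.
 */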
static int mlxplat_i2c_main_init(struct mlxplat_priv *priv)
{
int nr, err;
if (!mlxplat_i2c)
return 0;
err = mlxplat_mlxcpld_verify_bus_topology(&nr);
if (nr < 0)
goto fail_mlxplat_mlxcpld_verify_bus_topology;
nr = (nr == mlxplat_max_adap_num) ? -1 : nr;
mlxplat_i2c->regmap = priv->regmap;
mlxplat_i2c->handle = priv;
/* Set mapped base address of I2C-LPC bridge over PCIe */
if (lpc_bridge)
mlxplat_i2c->addr = i2c_bridge_addr;
priv->pdev_i2c = platform_device_register_resndata(&mlxplat_dev->dev, "i2c_mlxcpld",
nr, priv->hotplug_resources,
priv->hotplug_resources_size,
mlxplat_i2c, sizeof(*mlxplat_i2c));
if (IS_ERR(priv->pdev_i2c)) {
err = PTR_ERR(priv->pdev_i2c);
goto fail_platform_i2c_register;
}
if (priv->i2c_main_init_status == MLXPLAT_I2C_MAIN_BUS_NOTIFIED) {
err = mlxplat_i2c_mux_topology_init(priv);
if (err)
goto fail_mlxplat_i2c_mux_topology_init;
}
return 0;
fail_mlxplat_i2c_mux_topology_init:
fail_platform_i2c_register:
fail_mlxplat_mlxcpld_verify_bus_topology:
return err;
}
static void mlxplat_i2c_main_exit(struct mlxplat_priv *priv)
{
mlxplat_i2c_mux_topology_exit(priv);
if (priv->pdev_i2c)
platform_device_unregister(priv->pdev_i2c);
}
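/*
 * Main probe flow: run the platform pre-init, create the register access
 * regmap, write the register defaults, bring up the main I2C bus and mux
 * topology (which in turn registers the mlxreg platform devices), sync the
 * regmap cache and, when a reboot notifier is defined, register it.
 */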
static int mlxplat_probe(struct platform_device *pdev)
{
unsigned int hotplug_resources_size = 0;
struct resource *hotplug_resources = NULL;
struct acpi_device *acpi_dev;
struct mlxplat_priv *priv;
int irq_fpga = 0, i, err;
acpi_dev = ACPI_COMPANION(&pdev->dev);
if (acpi_dev) {
irq_fpga = acpi_dev_gpio_irq_get(acpi_dev, 0);
if (irq_fpga < 0)
return -ENODEV;
mlxplat_dev = pdev;
}
err = mlxplat_pre_init(&hotplug_resources, &hotplug_resources_size);
if (err)
return err;
priv = devm_kzalloc(&mlxplat_dev->dev, sizeof(struct mlxplat_priv),
GFP_KERNEL);
if (!priv) {
err = -ENOMEM;
goto fail_alloc;
}
platform_set_drvdata(mlxplat_dev, priv);
priv->hotplug_resources = hotplug_resources;
priv->hotplug_resources_size = hotplug_resources_size;
priv->irq_fpga = irq_fpga;
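/*
 * Fall back to the default CPLD regmap configuration when the earlier
 * DMI/ACPI setup did not select a model-specific one.
 */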
if (!mlxplat_regmap_config)
mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config;
priv->regmap = devm_regmap_init(&mlxplat_dev->dev, NULL,
&mlxplat_mlxcpld_regmap_ctx,
mlxplat_regmap_config);
if (IS_ERR(priv->regmap)) {
err = PTR_ERR(priv->regmap);
goto fail_alloc;
}
/* Write the default register values. */
for (i = 0; i < mlxplat_regmap_config->num_reg_defaults; i++) {
err = regmap_write(priv->regmap,
mlxplat_regmap_config->reg_defaults[i].reg,
mlxplat_regmap_config->reg_defaults[i].def);
if (err)
goto fail_regmap_write;
}
err = mlxplat_i2c_main_init(priv);
if (err)
goto fail_mlxplat_i2c_main_init;
/* Sync registers with hardware. */
regcache_mark_dirty(priv->regmap);
err = regcache_sync(priv->regmap);
if (err)
goto fail_regcache_sync;
if (mlxplat_reboot_nb) {
err = register_reboot_notifier(mlxplat_reboot_nb);
if (err)
goto fail_register_reboot_notifier;
}
return 0;
fail_register_reboot_notifier:
fail_regcache_sync:
mlxplat_pre_exit(priv);
fail_mlxplat_i2c_main_init:
fail_regmap_write:
fail_alloc:
mlxplat_post_exit();
return err;
}
static int mlxplat_remove(struct platform_device *pdev)
{
struct mlxplat_priv *priv = platform_get_drvdata(mlxplat_dev);
if (pm_power_off)
pm_power_off = NULL;
if (mlxplat_reboot_nb)
unregister_reboot_notifier(mlxplat_reboot_nb);
mlxplat_pre_exit(priv);
mlxplat_i2c_main_exit(priv);
mlxplat_post_exit();
return 0;
}
static const struct acpi_device_id mlxplat_acpi_table[] = {
{ "MLNXBF49", 0 },
{}
};
MODULE_DEVICE_TABLE(acpi, mlxplat_acpi_table);
static struct platform_driver mlxplat_driver = {
.driver = {
.name = "mlxplat",
.acpi_match_table = mlxplat_acpi_table,
.probe_type = PROBE_FORCE_SYNCHRONOUS,
},
.probe = mlxplat_probe,
.remove = mlxplat_remove,
};
static int __init mlxplat_init(void)
{
int err;
if (!dmi_check_system(mlxplat_dmi_table))
return -ENODEV;
err = platform_driver_register(&mlxplat_driver);
if (err)
return err;
return 0;
}
module_init(mlxplat_init);
static void __exit mlxplat_exit(void)
{
if (mlxplat_dev)
platform_device_unregister(mlxplat_dev);
platform_driver_unregister(&mlxplat_driver);
}
module_exit(mlxplat_exit);
MODULE_AUTHOR("Vadim Pasternak ([email protected])");
MODULE_DESCRIPTION("Mellanox platform driver");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | drivers/platform/x86/mlx-platform.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* MXM WMI driver
*
* Copyright(C) 2010 Red Hat.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mxm-wmi.h>
#include <linux/acpi.h>
MODULE_AUTHOR("Dave Airlie");
MODULE_DESCRIPTION("MXM WMI Driver");
MODULE_LICENSE("GPL");
#define MXM_WMMX_GUID "F6CB5C3C-9CAE-4EBD-B577-931EA32A2CC0"
MODULE_ALIAS("wmi:"MXM_WMMX_GUID);
#define MXM_WMMX_FUNC_MXDS 0x5344584D /* "MXDS" */
#define MXM_WMMX_FUNC_MXMX 0x53445344 /* "MXMX" (value decodes as "DSDS") */
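/* Argument buffer handed to the WMMX WMI method as the ACPI input buffer. */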
struct mxds_args {
u32 func;
u32 args;
u32 xarg;
};
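/*
 * Invoke the WMMX MXDS method to switch the active graphics mux to the given
 * adapter. Returns 0 on success or the raw ACPI status on failure.
 */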
int mxm_wmi_call_mxds(int adapter)
{
struct mxds_args args = {
.func = MXM_WMMX_FUNC_MXDS,
.args = 0,
.xarg = 1,
};
struct acpi_buffer input = { (acpi_size)sizeof(args), &args };
acpi_status status;
printk("calling mux switch %d\n", adapter);
status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input, NULL);
if (ACPI_FAILURE(status))
return status;
printk("mux switched %d\n", status);
return 0;
}
EXPORT_SYMBOL_GPL(mxm_wmi_call_mxds);
int mxm_wmi_call_mxmx(int adapter)
{
struct mxds_args args = {
.func = MXM_WMMX_FUNC_MXMX,
.args = 0,
.xarg = 1,
};
struct acpi_buffer input = { (acpi_size)sizeof(args), &args };
acpi_status status;
printk("calling mux switch %d\n", adapter);
status = wmi_evaluate_method(MXM_WMMX_GUID, 0x0, adapter, &input, NULL);
if (ACPI_FAILURE(status))
return status;
printk("mux mutex set switched %d\n", status);
return 0;
}
EXPORT_SYMBOL_GPL(mxm_wmi_call_mxmx);
bool mxm_wmi_supported(void)
{
bool guid_valid;
guid_valid = wmi_has_guid(MXM_WMMX_GUID);
return guid_valid;
}
EXPORT_SYMBOL_GPL(mxm_wmi_supported);
static int __init mxm_wmi_init(void)
{
return 0;
}
static void __exit mxm_wmi_exit(void)
{
}
module_init(mxm_wmi_init);
module_exit(mxm_wmi_exit);
| linux-master | drivers/platform/x86/mxm-wmi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Touchscreen driver DMI based configuration code
*
* Copyright (c) 2017 Red Hat Inc.
*
* Red Hat authors:
* Hans de Goede <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/dmi.h>
#include <linux/efi_embedded_fw.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/property.h>
#include <linux/string.h>
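/*
 * Per-model quirk data: the ACPI instance name of the touchscreen's i2c
 * client plus the device properties to attach to it. The optional
 * embedded_fw descriptor lets the EFI embedded-firmware code locate the
 * touchscreen firmware inside the UEFI boot image by prefix and hash.
 */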
struct ts_dmi_data {
/* The EFI embedded-fw code expects this to be the first member! */
struct efi_embedded_fw_desc embedded_fw;
const char *acpi_name;
const struct property_entry *properties;
};
/* NOTE: Please keep all entries sorted alphabetically */
static const struct property_entry archos_101_cesium_educ_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1850),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-archos-101-cesium-educ.fw"),
{ }
};
static const struct ts_dmi_data archos_101_cesium_educ_data = {
.acpi_name = "MSSL1680:00",
.properties = archos_101_cesium_educ_props,
};
static const struct property_entry chuwi_hi8_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1665),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_BOOL("silead,home-button"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi8.fw"),
{ }
};
static const struct ts_dmi_data chuwi_hi8_data = {
.acpi_name = "MSSL0001:00",
.properties = chuwi_hi8_props,
};
static const struct property_entry chuwi_hi8_air_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-chuwi-hi8-air.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
static const struct ts_dmi_data chuwi_hi8_air_data = {
.acpi_name = "MSSL1680:00",
.properties = chuwi_hi8_air_props,
};
static const struct property_entry chuwi_hi8_pro_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 6),
PROPERTY_ENTRY_U32("touchscreen-min-y", 3),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-chuwi-hi8-pro.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data chuwi_hi8_pro_data = {
.embedded_fw = {
.name = "silead/gsl3680-chuwi-hi8-pro.fw",
.prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
.length = 39864,
.sha256 = { 0xc0, 0x88, 0xc5, 0xef, 0xd1, 0x70, 0x77, 0x59,
0x4e, 0xe9, 0xc4, 0xd8, 0x2e, 0xcd, 0xbf, 0x95,
0x32, 0xd9, 0x03, 0x28, 0x0d, 0x48, 0x9f, 0x92,
0x35, 0x37, 0xf6, 0x8b, 0x2a, 0xe4, 0x73, 0xff },
},
.acpi_name = "MSSL1680:00",
.properties = chuwi_hi8_pro_props,
};
static const struct property_entry chuwi_hi10_air_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1981),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1271),
PROPERTY_ENTRY_U32("touchscreen-min-x", 99),
PROPERTY_ENTRY_U32("touchscreen-min-y", 9),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_U32("touchscreen-fuzz-x", 5),
PROPERTY_ENTRY_U32("touchscreen-fuzz-y", 4),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10-air.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data chuwi_hi10_air_data = {
.acpi_name = "MSSL1680:00",
.properties = chuwi_hi10_air_props,
};
static const struct property_entry chuwi_hi10_plus_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 12),
PROPERTY_ENTRY_U32("touchscreen-min-y", 10),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1908),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1270),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10plus.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
PROPERTY_ENTRY_BOOL("silead,pen-supported"),
PROPERTY_ENTRY_U32("silead,pen-resolution-x", 8),
PROPERTY_ENTRY_U32("silead,pen-resolution-y", 8),
{ }
};
static const struct ts_dmi_data chuwi_hi10_plus_data = {
.embedded_fw = {
.name = "silead/gsl1680-chuwi-hi10plus.fw",
.prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
.length = 34056,
.sha256 = { 0xfd, 0x0a, 0x08, 0x08, 0x3c, 0xa6, 0x34, 0x4e,
0x2c, 0x49, 0x9c, 0xcd, 0x7d, 0x44, 0x9d, 0x38,
0x10, 0x68, 0xb5, 0xbd, 0xb7, 0x2a, 0x63, 0xb5,
0x67, 0x0b, 0x96, 0xbd, 0x89, 0x67, 0x85, 0x09 },
},
.acpi_name = "MSSL0017:00",
.properties = chuwi_hi10_plus_props,
};
static const u32 chuwi_hi10_pro_efi_min_max[] = { 8, 1911, 8, 1271 };
static const struct property_entry chuwi_hi10_pro_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 80),
PROPERTY_ENTRY_U32("touchscreen-min-y", 26),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1962),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1254),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10-pro.fw"),
PROPERTY_ENTRY_U32_ARRAY("silead,efi-fw-min-max", chuwi_hi10_pro_efi_min_max),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
PROPERTY_ENTRY_BOOL("silead,pen-supported"),
PROPERTY_ENTRY_U32("silead,pen-resolution-x", 8),
PROPERTY_ENTRY_U32("silead,pen-resolution-y", 8),
{ }
};
static const struct ts_dmi_data chuwi_hi10_pro_data = {
.embedded_fw = {
.name = "silead/gsl1680-chuwi-hi10-pro.fw",
.prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
.length = 42504,
.sha256 = { 0xdb, 0x92, 0x68, 0xa8, 0xdb, 0x81, 0x31, 0x00,
0x1f, 0x58, 0x89, 0xdb, 0x19, 0x1b, 0x15, 0x8c,
0x05, 0x14, 0xf4, 0x95, 0xba, 0x15, 0x45, 0x98,
0x42, 0xa3, 0xbb, 0x65, 0xe3, 0x30, 0xa5, 0x93 },
},
.acpi_name = "MSSL1680:00",
.properties = chuwi_hi10_pro_props,
};
static const struct property_entry chuwi_hibook_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 30),
PROPERTY_ENTRY_U32("touchscreen-min-y", 4),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1892),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1276),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hibook.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data chuwi_hibook_data = {
.embedded_fw = {
.name = "silead/gsl1680-chuwi-hibook.fw",
.prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
.length = 40392,
.sha256 = { 0xf7, 0xc0, 0xe8, 0x5a, 0x6c, 0xf2, 0xeb, 0x8d,
0x12, 0xc4, 0x45, 0xbf, 0x55, 0x13, 0x4c, 0x1a,
0x13, 0x04, 0x31, 0x08, 0x65, 0x73, 0xf7, 0xa8,
0x1b, 0x7d, 0x59, 0xc9, 0xe6, 0x97, 0xf7, 0x38 },
},
.acpi_name = "MSSL0017:00",
.properties = chuwi_hibook_props,
};
static const struct property_entry chuwi_vi8_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 4),
PROPERTY_ENTRY_U32("touchscreen-min-y", 6),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1724),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-chuwi-vi8.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data chuwi_vi8_data = {
.acpi_name = "MSSL1680:00",
.properties = chuwi_vi8_props,
};
static const struct ts_dmi_data chuwi_vi8_plus_data = {
.embedded_fw = {
.name = "chipone/icn8505-HAMP0002.fw",
.prefix = { 0xb0, 0x07, 0x00, 0x00, 0xe4, 0x07, 0x00, 0x00 },
.length = 35012,
.sha256 = { 0x93, 0xe5, 0x49, 0xe0, 0xb6, 0xa2, 0xb4, 0xb3,
0x88, 0x96, 0x34, 0x97, 0x5e, 0xa8, 0x13, 0x78,
0x72, 0x98, 0xb8, 0x29, 0xeb, 0x5c, 0xa7, 0xf1,
0x25, 0x13, 0x43, 0xf4, 0x30, 0x7c, 0xfc, 0x7c },
},
};
static const struct property_entry chuwi_vi10_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 0),
PROPERTY_ENTRY_U32("touchscreen-min-y", 4),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1858),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-chuwi-vi10.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data chuwi_vi10_data = {
.acpi_name = "MSSL0002:00",
.properties = chuwi_vi10_props,
};
static const struct property_entry chuwi_surbook_mini_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 88),
PROPERTY_ENTRY_U32("touchscreen-min-y", 13),
PROPERTY_ENTRY_U32("touchscreen-size-x", 2040),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1524),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-surbook-mini.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
{ }
};
static const struct ts_dmi_data chuwi_surbook_mini_data = {
.acpi_name = "MSSL1680:00",
.properties = chuwi_surbook_mini_props,
};
static const struct property_entry connect_tablet9_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 9),
PROPERTY_ENTRY_U32("touchscreen-min-y", 10),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1664),
PROPERTY_ENTRY_U32("touchscreen-size-y", 880),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-connect-tablet9.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
static const struct ts_dmi_data connect_tablet9_data = {
.acpi_name = "MSSL1680:00",
.properties = connect_tablet9_props,
};
static const struct property_entry csl_panther_tab_hd_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
PROPERTY_ENTRY_U32("touchscreen-min-y", 20),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1526),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-csl-panther-tab-hd.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
static const struct ts_dmi_data csl_panther_tab_hd_data = {
.acpi_name = "MSSL1680:00",
.properties = csl_panther_tab_hd_props,
};
static const struct property_entry cube_iwork8_air_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
PROPERTY_ENTRY_U32("touchscreen-min-y", 3),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1664),
PROPERTY_ENTRY_U32("touchscreen-size-y", 896),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-cube-iwork8-air.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
static const struct ts_dmi_data cube_iwork8_air_data = {
.embedded_fw = {
.name = "silead/gsl3670-cube-iwork8-air.fw",
.prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
.length = 38808,
.sha256 = { 0xff, 0x62, 0x2d, 0xd1, 0x8a, 0x78, 0x04, 0x7b,
0x33, 0x06, 0xb0, 0x4f, 0x7f, 0x02, 0x08, 0x9c,
0x96, 0xd4, 0x9f, 0x04, 0xe1, 0x47, 0x25, 0x25,
0x60, 0x77, 0x41, 0x33, 0xeb, 0x12, 0x82, 0xfc },
},
.acpi_name = "MSSL1680:00",
.properties = cube_iwork8_air_props,
};
static const struct property_entry cube_knote_i1101_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 20),
PROPERTY_ENTRY_U32("touchscreen-min-y", 22),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1961),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1513),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-cube-knote-i1101.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data cube_knote_i1101_data = {
.acpi_name = "MSSL1680:00",
.properties = cube_knote_i1101_props,
};
static const struct property_entry dexp_ursus_7w_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 890),
PROPERTY_ENTRY_U32("touchscreen-size-y", 630),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-dexp-ursus-7w.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data dexp_ursus_7w_data = {
.acpi_name = "MSSL1680:00",
.properties = dexp_ursus_7w_props,
};
static const struct property_entry dexp_ursus_kx210i_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 5),
PROPERTY_ENTRY_U32("touchscreen-min-y", 2),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1720),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1137),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-dexp-ursus-kx210i.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data dexp_ursus_kx210i_data = {
.acpi_name = "MSSL1680:00",
.properties = dexp_ursus_kx210i_props,
};
static const struct property_entry digma_citi_e200_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-digma_citi_e200.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data digma_citi_e200_data = {
.acpi_name = "MSSL1680:00",
.properties = digma_citi_e200_props,
};
static const struct property_entry estar_beauty_hd_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
{ }
};
static const struct ts_dmi_data estar_beauty_hd_data = {
.acpi_name = "GDIX1001:00",
.properties = estar_beauty_hd_props,
};
/* Generic props + data for upside-down mounted GDIX1001 touchscreens */
static const struct property_entry gdix1001_upside_down_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
{ }
};
static const struct ts_dmi_data gdix1001_00_upside_down_data = {
.acpi_name = "GDIX1001:00",
.properties = gdix1001_upside_down_props,
};
static const struct ts_dmi_data gdix1001_01_upside_down_data = {
.acpi_name = "GDIX1001:01",
.properties = gdix1001_upside_down_props,
};
static const struct ts_dmi_data gdix1002_00_upside_down_data = {
.acpi_name = "GDIX1002:00",
.properties = gdix1001_upside_down_props,
};
static const struct property_entry gp_electronic_t701_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-gp-electronic-t701.fw"),
{ }
};
static const struct ts_dmi_data gp_electronic_t701_data = {
.acpi_name = "MSSL1680:00",
.properties = gp_electronic_t701_props,
};
static const struct property_entry irbis_tw90_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1720),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1138),
PROPERTY_ENTRY_U32("touchscreen-min-x", 8),
PROPERTY_ENTRY_U32("touchscreen-min-y", 14),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-irbis_tw90.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data irbis_tw90_data = {
.acpi_name = "MSSL1680:00",
.properties = irbis_tw90_props,
};
static const struct property_entry irbis_tw118_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 20),
PROPERTY_ENTRY_U32("touchscreen-min-y", 30),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1960),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1510),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-irbis-tw118.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
static const struct ts_dmi_data irbis_tw118_data = {
.acpi_name = "MSSL1680:00",
.properties = irbis_tw118_props,
};
static const struct property_entry itworks_tw891_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
PROPERTY_ENTRY_U32("touchscreen-min-y", 5),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1600),
PROPERTY_ENTRY_U32("touchscreen-size-y", 896),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-itworks-tw891.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
static const struct ts_dmi_data itworks_tw891_data = {
.acpi_name = "MSSL1680:00",
.properties = itworks_tw891_props,
};
static const struct property_entry jumper_ezpad_6_pro_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-jumper-ezpad-6-pro.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data jumper_ezpad_6_pro_data = {
.acpi_name = "MSSL1680:00",
.properties = jumper_ezpad_6_pro_props,
};
static const struct property_entry jumper_ezpad_6_pro_b_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-jumper-ezpad-6-pro-b.fw"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data jumper_ezpad_6_pro_b_data = {
.acpi_name = "MSSL1680:00",
.properties = jumper_ezpad_6_pro_b_props,
};
static const struct property_entry jumper_ezpad_6_m4_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 35),
PROPERTY_ENTRY_U32("touchscreen-min-y", 15),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1950),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1525),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-jumper-ezpad-6-m4.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data jumper_ezpad_6_m4_data = {
.acpi_name = "MSSL1680:00",
.properties = jumper_ezpad_6_m4_props,
};
static const struct property_entry jumper_ezpad_7_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 4),
PROPERTY_ENTRY_U32("touchscreen-min-y", 10),
PROPERTY_ENTRY_U32("touchscreen-size-x", 2044),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1526),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-jumper-ezpad-7.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,stuck-controller-bug"),
{ }
};
static const struct ts_dmi_data jumper_ezpad_7_data = {
.acpi_name = "MSSL1680:00",
.properties = jumper_ezpad_7_props,
};
static const struct property_entry jumper_ezpad_mini3_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 23),
PROPERTY_ENTRY_U32("touchscreen-min-y", 16),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1700),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1138),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-jumper-ezpad-mini3.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
static const struct ts_dmi_data jumper_ezpad_mini3_data = {
.acpi_name = "MSSL1680:00",
.properties = jumper_ezpad_mini3_props,
};
static const struct property_entry mpman_converter9_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 8),
PROPERTY_ENTRY_U32("touchscreen-min-y", 8),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1664),
PROPERTY_ENTRY_U32("touchscreen-size-y", 880),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-mpman-converter9.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
static const struct ts_dmi_data mpman_converter9_data = {
.acpi_name = "MSSL1680:00",
.properties = mpman_converter9_props,
};
static const struct property_entry mpman_mpwin895cl_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 3),
PROPERTY_ENTRY_U32("touchscreen-min-y", 9),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1150),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-mpman-mpwin895cl.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data mpman_mpwin895cl_data = {
.acpi_name = "MSSL1680:00",
.properties = mpman_mpwin895cl_props,
};
static const struct property_entry myria_my8307_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1720),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-myria-my8307.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data myria_my8307_data = {
.acpi_name = "MSSL1680:00",
.properties = myria_my8307_props,
};
static const struct property_entry onda_obook_20_plus_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-obook-20-plus.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data onda_obook_20_plus_data = {
.acpi_name = "MSSL1680:00",
.properties = onda_obook_20_plus_props,
};
static const struct property_entry onda_v80_plus_v3_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 22),
PROPERTY_ENTRY_U32("touchscreen-min-y", 15),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1698),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v80-plus-v3.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data onda_v80_plus_v3_data = {
.embedded_fw = {
.name = "silead/gsl3676-onda-v80-plus-v3.fw",
.prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
.length = 37224,
.sha256 = { 0x8f, 0xbd, 0x8f, 0x0c, 0x6b, 0xba, 0x5b, 0xf5,
0xa3, 0xc7, 0xa3, 0xc0, 0x4f, 0xcd, 0xdf, 0x32,
0xcc, 0xe4, 0x70, 0xd6, 0x46, 0x9c, 0xd7, 0xa7,
0x4b, 0x82, 0x3f, 0xab, 0xc7, 0x90, 0xea, 0x23 },
},
.acpi_name = "MSSL1680:00",
.properties = onda_v80_plus_v3_props,
};
static const struct property_entry onda_v820w_32g_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1665),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-onda-v820w-32g.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data onda_v820w_32g_data = {
.acpi_name = "MSSL1680:00",
.properties = onda_v820w_32g_props,
};
static const struct property_entry onda_v891_v5_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1715),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name",
"gsl3676-onda-v891-v5.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data onda_v891_v5_data = {
.acpi_name = "MSSL1680:00",
.properties = onda_v891_v5_props,
};
static const struct property_entry onda_v891w_v1_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 46),
PROPERTY_ENTRY_U32("touchscreen-min-y", 8),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1676),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1130),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-onda-v891w-v1.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data onda_v891w_v1_data = {
.acpi_name = "MSSL1680:00",
.properties = onda_v891w_v1_props,
};
static const struct property_entry onda_v891w_v3_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 35),
PROPERTY_ENTRY_U32("touchscreen-min-y", 15),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1625),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1135),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v891w-v3.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data onda_v891w_v3_data = {
.acpi_name = "MSSL1680:00",
.properties = onda_v891w_v3_props,
};
static const struct property_entry pipo_w2s_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1660),
PROPERTY_ENTRY_U32("touchscreen-size-y", 880),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-pipo-w2s.fw"),
{ }
};
static const struct ts_dmi_data pipo_w2s_data = {
.embedded_fw = {
.name = "silead/gsl1680-pipo-w2s.fw",
.prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
.length = 39072,
.sha256 = { 0xd0, 0x58, 0xc4, 0x7d, 0x55, 0x2d, 0x62, 0x18,
0xd1, 0x6a, 0x71, 0x73, 0x0b, 0x3f, 0xbe, 0x60,
0xbb, 0x45, 0x8c, 0x52, 0x27, 0xb7, 0x18, 0xf4,
0x31, 0x00, 0x6a, 0x49, 0x76, 0xd8, 0x7c, 0xd3 },
},
.acpi_name = "MSSL1680:00",
.properties = pipo_w2s_props,
};
static const struct property_entry pipo_w11_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
PROPERTY_ENTRY_U32("touchscreen-min-y", 15),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1984),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1532),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-pipo-w11.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data pipo_w11_data = {
.acpi_name = "MSSL1680:00",
.properties = pipo_w11_props,
};
static const struct property_entry pov_mobii_wintab_p800w_v20_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 32),
PROPERTY_ENTRY_U32("touchscreen-min-y", 16),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1692),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1146),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-pov-mobii-wintab-p800w-v20.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data pov_mobii_wintab_p800w_v20_data = {
.acpi_name = "MSSL1680:00",
.properties = pov_mobii_wintab_p800w_v20_props,
};
static const struct property_entry pov_mobii_wintab_p800w_v21_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
PROPERTY_ENTRY_U32("touchscreen-min-y", 8),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1794),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p800w.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data pov_mobii_wintab_p800w_v21_data = {
.acpi_name = "MSSL1680:00",
.properties = pov_mobii_wintab_p800w_v21_props,
};
static const struct property_entry pov_mobii_wintab_p1006w_v10_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
PROPERTY_ENTRY_U32("touchscreen-min-y", 3),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1984),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1520),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p1006w-v10.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data pov_mobii_wintab_p1006w_v10_data = {
.acpi_name = "MSSL1680:00",
.properties = pov_mobii_wintab_p1006w_v10_props,
};
static const struct property_entry predia_basic_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 3),
PROPERTY_ENTRY_U32("touchscreen-min-y", 10),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1144),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-predia-basic.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data predia_basic_data = {
.acpi_name = "MSSL1680:00",
.properties = predia_basic_props,
};
static const struct property_entry rca_cambio_w101_v2_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 4),
PROPERTY_ENTRY_U32("touchscreen-min-y", 20),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1644),
PROPERTY_ENTRY_U32("touchscreen-size-y", 874),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rca-cambio-w101-v2.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
static const struct ts_dmi_data rca_cambio_w101_v2_data = {
.acpi_name = "MSSL1680:00",
.properties = rca_cambio_w101_v2_props,
};
static const struct property_entry rwc_nanote_p8_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-y", 46),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rwc-nanote-p8.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
static const struct ts_dmi_data rwc_nanote_p8_data = {
.acpi_name = "MSSL1680:00",
.properties = rwc_nanote_p8_props,
};
static const struct property_entry schneider_sct101ctm_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1715),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-schneider-sct101ctm.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data schneider_sct101ctm_data = {
.acpi_name = "MSSL1680:00",
.properties = schneider_sct101ctm_props,
};
static const struct property_entry techbite_arc_11_6_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 5),
PROPERTY_ENTRY_U32("touchscreen-min-y", 7),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1981),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1270),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-techbite-arc-11-6.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
static const struct ts_dmi_data techbite_arc_11_6_data = {
.acpi_name = "MSSL1680:00",
.properties = techbite_arc_11_6_props,
};
static const struct property_entry teclast_tbook11_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 8),
PROPERTY_ENTRY_U32("touchscreen-min-y", 14),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1916),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1264),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-teclast-tbook11.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data teclast_tbook11_data = {
.embedded_fw = {
.name = "silead/gsl3692-teclast-tbook11.fw",
.prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
.length = 43560,
.sha256 = { 0x9d, 0xb0, 0x3d, 0xf1, 0x00, 0x3c, 0xb5, 0x25,
0x62, 0x8a, 0xa0, 0x93, 0x4b, 0xe0, 0x4e, 0x75,
0xd1, 0x27, 0xb1, 0x65, 0x3c, 0xba, 0xa5, 0x0f,
0xcd, 0xb4, 0xbe, 0x00, 0xbb, 0xf6, 0x43, 0x29 },
},
.acpi_name = "MSSL1680:00",
.properties = teclast_tbook11_props,
};
static const struct property_entry teclast_x3_plus_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-teclast-x3-plus.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data teclast_x3_plus_data = {
.acpi_name = "MSSL1680:00",
.properties = teclast_x3_plus_props,
};
static const struct property_entry teclast_x98plus2_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 2048),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-teclast_x98plus2.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
{ }
};
static const struct ts_dmi_data teclast_x98plus2_data = {
.acpi_name = "MSSL1680:00",
.properties = teclast_x98plus2_props,
};
static const struct property_entry trekstor_primebook_c11_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1970),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1530),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c11.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data trekstor_primebook_c11_data = {
.acpi_name = "MSSL1680:00",
.properties = trekstor_primebook_c11_props,
};
static const struct property_entry trekstor_primebook_c13_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 2624),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1920),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c13.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data trekstor_primebook_c13_data = {
.acpi_name = "MSSL1680:00",
.properties = trekstor_primebook_c13_props,
};
static const struct property_entry trekstor_primetab_t13b_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 2500),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1900),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primetab-t13b.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
{ }
};
static const struct ts_dmi_data trekstor_primetab_t13b_data = {
.acpi_name = "MSSL1680:00",
.properties = trekstor_primetab_t13b_props,
};
static const struct property_entry trekstor_surftab_duo_w1_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
{ }
};
static const struct ts_dmi_data trekstor_surftab_duo_w1_data = {
.acpi_name = "GDIX1001:00",
.properties = trekstor_surftab_duo_w1_props,
};
static const struct property_entry trekstor_surftab_twin_10_1_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 20),
PROPERTY_ENTRY_U32("touchscreen-min-y", 0),
PROPERTY_ENTRY_U32("touchscreen-size-x", 1890),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
PROPERTY_ENTRY_U32("touchscreen-inverted-y", 1),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-surftab-twin-10-1-st10432-8.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data trekstor_surftab_twin_10_1_data = {
.acpi_name = "MSSL1680:00",
.properties = trekstor_surftab_twin_10_1_props,
};
static const struct property_entry trekstor_surftab_wintron70_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 12),
PROPERTY_ENTRY_U32("touchscreen-min-y", 8),
PROPERTY_ENTRY_U32("touchscreen-size-x", 884),
PROPERTY_ENTRY_U32("touchscreen-size-y", 632),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-surftab-wintron70-st70416-6.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data trekstor_surftab_wintron70_data = {
.acpi_name = "MSSL1680:00",
.properties = trekstor_surftab_wintron70_props,
};
static const struct property_entry viglen_connect_10_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1890),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
PROPERTY_ENTRY_U32("touchscreen-fuzz-x", 6),
PROPERTY_ENTRY_U32("touchscreen-fuzz-y", 6),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-viglen-connect-10.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data viglen_connect_10_data = {
.acpi_name = "MSSL1680:00",
.properties = viglen_connect_10_props,
};
static const struct property_entry vinga_twizzle_j116_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1920),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-vinga-twizzle_j116.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data vinga_twizzle_j116_data = {
.acpi_name = "MSSL1680:00",
.properties = vinga_twizzle_j116_props,
};
/* NOTE: Please keep this table sorted alphabetically */
const struct dmi_system_id touchscreen_dmi_table[] = {
{
/* Archos 101 Cesium Educ */
.driver_data = (void *)&archos_101_cesium_educ_data,
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "ARCHOS 101 Cesium Educ"),
},
},
{
/* Chuwi Hi8 */
.driver_data = (void *)&chuwi_hi8_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ilife"),
DMI_MATCH(DMI_PRODUCT_NAME, "S806"),
},
},
{
/* Chuwi Hi8 (H1D_S806_206) */
.driver_data = (void *)&chuwi_hi8_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
DMI_MATCH(DMI_PRODUCT_NAME, "BayTrail"),
DMI_MATCH(DMI_BIOS_VERSION, "H1D_S806_206"),
},
},
{
/* Chuwi Hi8 Air (CWI543) */
.driver_data = (void *)&chuwi_hi8_air_data,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Default string"),
DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
DMI_MATCH(DMI_PRODUCT_NAME, "Hi8 Air"),
},
},
{
/* Chuwi Hi8 Pro (CWI513) */
.driver_data = (void *)&chuwi_hi8_pro_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hampoo"),
DMI_MATCH(DMI_PRODUCT_NAME, "X1D3_C806N"),
},
},
{
/* Chuwi Hi10 Air */
.driver_data = (void *)&chuwi_hi10_air_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "CHUWI INNOVATION AND TECHNOLOGY(SHENZHEN)CO.LTD"),
DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
DMI_MATCH(DMI_PRODUCT_SKU, "P1W6_C109D_B"),
},
},
{
/* Chuwi Hi10 Plus (CWI527) */
.driver_data = (void *)&chuwi_hi10_plus_data,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
DMI_MATCH(DMI_PRODUCT_NAME, "Hi10 plus tablet"),
DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
},
},
{
/* Chuwi Hi10 Pro (CWI529) */
.driver_data = (void *)&chuwi_hi10_pro_data,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
DMI_MATCH(DMI_PRODUCT_NAME, "Hi10 pro tablet"),
DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
},
},
{
/* Chuwi HiBook (CWI514) */
.driver_data = (void *)&chuwi_hibook_data,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
/* Above matches are too generic, add bios-date match */
DMI_MATCH(DMI_BIOS_DATE, "05/07/2016"),
},
},
{
/* Chuwi Vi8 (CWI501) */
.driver_data = (void *)&chuwi_vi8_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
DMI_MATCH(DMI_PRODUCT_NAME, "i86"),
DMI_MATCH(DMI_BIOS_VERSION, "CHUWI.W86JLBNR01"),
},
},
{
/* Chuwi Vi8 (CWI506) */
.driver_data = (void *)&chuwi_vi8_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
DMI_MATCH(DMI_PRODUCT_NAME, "i86"),
DMI_MATCH(DMI_BIOS_VERSION, "CHUWI.D86JLBNR"),
},
},
{
/* Chuwi Vi8 Plus (CWI519) */
.driver_data = (void *)&chuwi_vi8_plus_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hampoo"),
DMI_MATCH(DMI_PRODUCT_NAME, "D2D3_Vi8A1"),
DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
},
},
{
/* Chuwi Vi10 (CWI505) */
.driver_data = (void *)&chuwi_vi10_data,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
DMI_MATCH(DMI_BOARD_NAME, "BYT-PF02"),
DMI_MATCH(DMI_SYS_VENDOR, "ilife"),
DMI_MATCH(DMI_PRODUCT_NAME, "S165"),
},
},
{
/* Chuwi Surbook Mini (CWI540) */
.driver_data = (void *)&chuwi_surbook_mini_data,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
DMI_MATCH(DMI_PRODUCT_NAME, "C3W6_AP108_4G"),
},
},
{
/* Connect Tablet 9 */
.driver_data = (void *)&connect_tablet9_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Connect"),
DMI_MATCH(DMI_PRODUCT_NAME, "Tablet 9"),
},
},
{
/* CSL Panther Tab HD */
.driver_data = (void *)&csl_panther_tab_hd_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "CSL Computer GmbH & Co. KG"),
DMI_MATCH(DMI_PRODUCT_NAME, "CSL Panther Tab HD"),
},
},
{
/* CUBE iwork8 Air */
.driver_data = (void *)&cube_iwork8_air_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "cube"),
DMI_MATCH(DMI_PRODUCT_NAME, "i1-TF"),
DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
},
},
{
/* Cube KNote i1101 */
.driver_data = (void *)&cube_knote_i1101_data,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
DMI_MATCH(DMI_BOARD_NAME, "L1W6_I1101"),
DMI_MATCH(DMI_SYS_VENDOR, "ALLDOCUBE"),
DMI_MATCH(DMI_PRODUCT_NAME, "i1101"),
},
},
{
/* DEXP Ursus 7W */
.driver_data = (void *)&dexp_ursus_7w_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
DMI_MATCH(DMI_PRODUCT_NAME, "7W"),
},
},
{
/* DEXP Ursus KX210i */
.driver_data = (void *)&dexp_ursus_kx210i_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "INSYDE Corp."),
DMI_MATCH(DMI_PRODUCT_NAME, "S107I"),
},
},
{
/* Digma Citi E200 */
.driver_data = (void *)&digma_citi_e200_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Digma"),
DMI_MATCH(DMI_PRODUCT_NAME, "CITI E200"),
DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
},
},
{
/* Estar Beauty HD (MID 7316R) */
.driver_data = (void *)&estar_beauty_hd_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Estar"),
DMI_MATCH(DMI_PRODUCT_NAME, "eSTAR BEAUTY HD Intel Quad core"),
},
},
{
/* GP-electronic T701 */
.driver_data = (void *)&gp_electronic_t701_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
DMI_MATCH(DMI_PRODUCT_NAME, "T701"),
DMI_MATCH(DMI_BIOS_VERSION, "BYT70A.YNCHENG.WIN.007"),
},
},
{
/* I.T.Works TW701 (same hardware as the Trekstor ST70416-6) */
.driver_data = (void *)&trekstor_surftab_wintron70_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
DMI_MATCH(DMI_PRODUCT_NAME, "i71c"),
DMI_MATCH(DMI_BIOS_VERSION, "itWORKS.G.WI71C.JGBMRB"),
},
},
{
/* Irbis TW90 */
.driver_data = (void *)&irbis_tw90_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "IRBIS"),
DMI_MATCH(DMI_PRODUCT_NAME, "TW90"),
},
},
{
/* Irbis TW118 */
.driver_data = (void *)&irbis_tw118_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "IRBIS"),
DMI_MATCH(DMI_PRODUCT_NAME, "TW118"),
},
},
{
/* I.T.Works TW891 */
.driver_data = (void *)&itworks_tw891_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
DMI_MATCH(DMI_PRODUCT_NAME, "TW891"),
},
},
{
/* Jumper EZpad 6 Pro */
.driver_data = (void *)&jumper_ezpad_6_pro_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Jumper"),
DMI_MATCH(DMI_PRODUCT_NAME, "EZpad"),
DMI_MATCH(DMI_BIOS_VERSION, "5.12"),
/* Above matches are too generic, add bios-date match */
DMI_MATCH(DMI_BIOS_DATE, "08/18/2017"),
},
},
{
/* Jumper EZpad 6 Pro B */
.driver_data = (void *)&jumper_ezpad_6_pro_b_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Jumper"),
DMI_MATCH(DMI_PRODUCT_NAME, "EZpad"),
DMI_MATCH(DMI_BIOS_VERSION, "5.12"),
/* Above matches are too generic, add bios-date match */
DMI_MATCH(DMI_BIOS_DATE, "04/24/2018"),
},
},
{
/* Jumper EZpad 6 m4 */
.driver_data = (void *)&jumper_ezpad_6_m4_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "jumper"),
DMI_MATCH(DMI_PRODUCT_NAME, "EZpad"),
/* Jumper8.S106x.A00C.1066 with the version dropped */
DMI_MATCH(DMI_BIOS_VERSION, "Jumper8.S106x"),
},
},
{
/* Jumper EZpad 7 */
.driver_data = (void *)&jumper_ezpad_7_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Jumper"),
DMI_MATCH(DMI_PRODUCT_NAME, "EZpad"),
/* Jumper12x.WJ2012.bsBKRCP05 with the version dropped */
DMI_MATCH(DMI_BIOS_VERSION, "Jumper12x.WJ2012.bsBKRCP"),
},
},
{
/* Jumper EZpad mini3 */
.driver_data = (void *)&jumper_ezpad_mini3_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
/* jumperx.T87.KFBNEEA02 with the version-nr dropped */
DMI_MATCH(DMI_BIOS_VERSION, "jumperx.T87.KFBNEEA"),
},
},
{
/* Juno Tablet */
.driver_data = (void *)&gdix1002_00_upside_down_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Default string"),
/* Both product- and board-name being "Default string" is somewhat rare */
DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
DMI_MATCH(DMI_BOARD_NAME, "Default string"),
/* Above matches are too generic, add partial bios-version match */
DMI_MATCH(DMI_BIOS_VERSION, "JP2V1."),
},
},
{
/* Mediacom Flexbook Edge 11 (same hw as TS Primebook C11) */
.driver_data = (void *)&trekstor_primebook_c11_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MEDIACOM"),
DMI_MATCH(DMI_PRODUCT_NAME, "FlexBook edge11 - M-FBE11"),
},
},
{
/* Mediacom WinPad 7.0 W700 (same hw as Wintron surftab 7") */
.driver_data = (void *)&trekstor_surftab_wintron70_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MEDIACOM"),
DMI_MATCH(DMI_PRODUCT_NAME, "WinPad 7 W10 - WPW700"),
},
},
{
/* MP Man Converter 9 */
.driver_data = (void *)&mpman_converter9_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MPMAN"),
DMI_MATCH(DMI_PRODUCT_NAME, "Converter9"),
},
},
{
/* MP Man MPWIN895CL */
.driver_data = (void *)&mpman_mpwin895cl_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MPMAN"),
DMI_MATCH(DMI_PRODUCT_NAME, "MPWIN8900CL"),
},
},
{
/* Myria MY8307 */
.driver_data = (void *)&myria_my8307_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Complet Electro Serv"),
DMI_MATCH(DMI_PRODUCT_NAME, "MY8307"),
},
},
{
/* Onda oBook 20 Plus */
.driver_data = (void *)&onda_obook_20_plus_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ONDA"),
DMI_MATCH(DMI_PRODUCT_NAME, "OBOOK 20 PLUS"),
},
},
{
/* ONDA V80 plus v3 (P80PSBG9V3A01501) */
.driver_data = (void *)&onda_v80_plus_v3_data,
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ONDA"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "V80 PLUS")
},
},
{
/* ONDA V820w DualOS */
.driver_data = (void *)&onda_v820w_32g_data,
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "ONDA"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "V820w DualOS")
},
},
{
/* ONDA V891 v5 */
.driver_data = (void *)&onda_v891_v5_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ONDA"),
DMI_MATCH(DMI_PRODUCT_NAME, "ONDA Tablet"),
DMI_MATCH(DMI_BIOS_VERSION, "ONDA.D869CJABNRBA06"),
},
},
{
/* ONDA V891w revision P891WBEBV1B00 aka v1 */
.driver_data = (void *)&onda_v891w_v1_data,
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "ONDA"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONDA Tablet"),
DMI_EXACT_MATCH(DMI_BOARD_VERSION, "V001"),
/* Exact match, different versions need different fw */
DMI_EXACT_MATCH(DMI_BIOS_VERSION, "ONDA.W89EBBN08"),
},
},
{
/* ONDA V891w Dual OS P891DCF2V1A01274 64GB */
.driver_data = (void *)&onda_v891w_v3_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
DMI_MATCH(DMI_PRODUCT_NAME, "ONDA Tablet"),
DMI_MATCH(DMI_BIOS_VERSION, "ONDA.D890HBBNR0A"),
},
},
{
/* Pipo W2S */
.driver_data = (void *)&pipo_w2s_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "PIPO"),
DMI_MATCH(DMI_PRODUCT_NAME, "W2S"),
},
},
{
/* Pipo W11 */
.driver_data = (void *)&pipo_w11_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "PIPO"),
DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
/* Above matches are too generic, add bios-ver match */
DMI_MATCH(DMI_BIOS_VERSION, "JS-BI-10.6-SF133GR300-GA55B-024-F"),
},
},
{
/* Ployer Momo7w (same hardware as the Trekstor ST70416-6) */
.driver_data = (void *)&trekstor_surftab_wintron70_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Shenzhen PLOYER"),
DMI_MATCH(DMI_PRODUCT_NAME, "MOMO7W"),
/* Exact match, different versions need different fw */
DMI_MATCH(DMI_BIOS_VERSION, "MOMO.G.WI71C.MABMRBA02"),
},
},
{
/* Point of View mobii wintab p800w (v2.0) */
.driver_data = (void *)&pov_mobii_wintab_p800w_v20_data,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
DMI_MATCH(DMI_BIOS_VERSION, "3BAIR1014"),
/* Above matches are too generic, add bios-date match */
DMI_MATCH(DMI_BIOS_DATE, "10/24/2014"),
},
},
{
/* Point of View mobii wintab p800w (v2.1) */
.driver_data = (void *)&pov_mobii_wintab_p800w_v21_data,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
DMI_MATCH(DMI_BIOS_VERSION, "3BAIR1013"),
/* Above matches are too generic, add bios-date match */
DMI_MATCH(DMI_BIOS_DATE, "08/22/2014"),
},
},
{
/* Point of View mobii wintab p1006w (v1.0) */
.driver_data = (void *)&pov_mobii_wintab_p1006w_v10_data,
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Insyde"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "BayTrail"),
/* Note 105b is Foxconn's USB/PCI vendor id */
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "105B"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "0E57"),
},
},
{
/* Predia Basic tablet */
.driver_data = (void *)&predia_basic_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
DMI_MATCH(DMI_PRODUCT_NAME, "CherryTrail"),
/* Above matches are too generic, add bios-version match */
DMI_MATCH(DMI_BIOS_VERSION, "Mx.WT107.KUBNGEA"),
},
},
{
/* RCA Cambio W101 v2 */
/* https://github.com/onitake/gsl-firmware/discussions/193 */
.driver_data = (void *)&rca_cambio_w101_v2_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "RCA"),
DMI_MATCH(DMI_PRODUCT_NAME, "W101SA23T1"),
},
},
{
/* RWC NANOTE P8 */
.driver_data = (void *)&rwc_nanote_p8_data,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Default string"),
DMI_MATCH(DMI_PRODUCT_NAME, "AY07J"),
DMI_MATCH(DMI_PRODUCT_SKU, "0001")
},
},
{
/* Schneider SCT101CTM */
.driver_data = (void *)&schneider_sct101ctm_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Default string"),
DMI_MATCH(DMI_PRODUCT_NAME, "SCT101CTM"),
},
},
{
/* Techbite Arc 11.6 */
.driver_data = (void *)&techbite_arc_11_6_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "mPTech"),
DMI_MATCH(DMI_PRODUCT_NAME, "techBite Arc 11.6"),
DMI_MATCH(DMI_BOARD_NAME, "G8316_272B"),
},
},
{
/* Teclast Tbook 11 */
.driver_data = (void *)&teclast_tbook11_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
DMI_MATCH(DMI_PRODUCT_NAME, "TbooK 11"),
DMI_MATCH(DMI_PRODUCT_SKU, "E5A6_A1"),
},
},
{
/* Teclast X3 Plus */
.driver_data = (void *)&teclast_x3_plus_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
DMI_MATCH(DMI_PRODUCT_NAME, "X3 Plus"),
DMI_MATCH(DMI_BOARD_NAME, "X3 Plus"),
},
},
{
/* Teclast X89 (Android version / BIOS) */
.driver_data = (void *)&gdix1001_00_upside_down_data,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "WISKY"),
DMI_MATCH(DMI_BOARD_NAME, "3G062i"),
},
},
{
/* Teclast X89 (Windows version / BIOS) */
.driver_data = (void *)&gdix1001_01_upside_down_data,
.matches = {
/* tPAD is too generic, also match on bios date */
DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
DMI_MATCH(DMI_BOARD_NAME, "tPAD"),
DMI_MATCH(DMI_BIOS_DATE, "12/19/2014"),
},
},
{
/* Teclast X98 Plus II */
.driver_data = (void *)&teclast_x98plus2_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
DMI_MATCH(DMI_PRODUCT_NAME, "X98 Plus II"),
},
},
{
/* Teclast X98 Pro */
.driver_data = (void *)&gdix1001_00_upside_down_data,
.matches = {
/*
 * Match only on the vendor and BIOS date, because the
 * manufacturer's BIOS (sometimes) does not report the
 * board name at all.
 */
DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
DMI_MATCH(DMI_BIOS_DATE, "10/28/2015"),
},
},
{
/* Trekstor Primebook C11 */
.driver_data = (void *)&trekstor_primebook_c11_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
DMI_MATCH(DMI_PRODUCT_NAME, "Primebook C11"),
},
},
{
/* Trekstor Primebook C11B (same touchscreen as the C11) */
.driver_data = (void *)&trekstor_primebook_c11_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
DMI_MATCH(DMI_PRODUCT_NAME, "PRIMEBOOK C11B"),
},
},
{
/* Trekstor Primebook C13 */
.driver_data = (void *)&trekstor_primebook_c13_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
DMI_MATCH(DMI_PRODUCT_NAME, "Primebook C13"),
},
},
{
/* Trekstor Primetab T13B */
.driver_data = (void *)&trekstor_primetab_t13b_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
DMI_MATCH(DMI_PRODUCT_NAME, "Primetab T13B"),
},
},
{
/* TrekStor SurfTab duo W1 10.1 ST10432-10b */
.driver_data = (void *)&trekstor_surftab_duo_w1_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"),
DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab duo W1 10.1 (VT4)"),
},
},
{
/* TrekStor SurfTab twin 10.1 ST10432-8 */
.driver_data = (void *)&trekstor_surftab_twin_10_1_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"),
DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab twin 10.1"),
},
},
{
/* Trekstor Surftab Wintron 7.0 ST70416-6 */
.driver_data = (void *)&trekstor_surftab_wintron70_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
DMI_MATCH(DMI_PRODUCT_NAME, "ST70416-6"),
/* Exact match, different versions need different fw */
DMI_MATCH(DMI_BIOS_VERSION, "TREK.G.WI71C.JGBMRBA04"),
},
},
{
/* Trekstor Surftab Wintron 7.0 ST70416-6, newer BIOS */
.driver_data = (void *)&trekstor_surftab_wintron70_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"),
DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab wintron 7.0 ST70416-6"),
/* Exact match, different versions need different fw */
DMI_MATCH(DMI_BIOS_VERSION, "TREK.G.WI71C.JGBMRBA05"),
},
},
{
/* Trekstor Yourbook C11B (same touchscreen as the Primebook C11) */
.driver_data = (void *)&trekstor_primebook_c11_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
DMI_MATCH(DMI_PRODUCT_NAME, "YOURBOOK C11B"),
},
},
{
/* Viglen Connect 10 */
.driver_data = (void *)&viglen_connect_10_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Viglen Ltd."),
DMI_MATCH(DMI_PRODUCT_NAME, "Connect 10'' Tablet PC"),
},
},
{
/* Vinga Twizzle J116 */
.driver_data = (void *)&vinga_twizzle_j116_data,
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "VINGA Twizzle J116"),
},
},
{
/* "WinBook TW100" */
.driver_data = (void *)&gdix1001_00_upside_down_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
DMI_MATCH(DMI_PRODUCT_NAME, "TW100")
}
},
{
/* WinBook TW700 */
.driver_data = (void *)&gdix1001_00_upside_down_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
DMI_MATCH(DMI_PRODUCT_NAME, "TW700")
},
},
{
/* Yours Y8W81, same case and touchscreen as Chuwi Vi8 */
.driver_data = (void *)&chuwi_vi8_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "YOURS"),
DMI_MATCH(DMI_PRODUCT_NAME, "Y8W81"),
},
},
{ }
};
static const struct ts_dmi_data *ts_data;
static void ts_dmi_add_props(struct i2c_client *client)
{
struct device *dev = &client->dev;
int error;
if (has_acpi_companion(dev) &&
!strncmp(ts_data->acpi_name, client->name, I2C_NAME_SIZE)) {
error = device_create_managed_software_node(dev, ts_data->properties, NULL);
if (error)
dev_err(dev, "failed to add properties: %d\n", error);
}
}
static int ts_dmi_notifier_call(struct notifier_block *nb,
unsigned long action, void *data)
{
struct device *dev = data;
struct i2c_client *client;
switch (action) {
case BUS_NOTIFY_ADD_DEVICE:
client = i2c_verify_client(dev);
if (client)
ts_dmi_add_props(client);
break;
default:
break;
}
return 0;
}
static struct notifier_block ts_dmi_notifier = {
.notifier_call = ts_dmi_notifier_call,
};
static int __init ts_dmi_init(void)
{
const struct dmi_system_id *dmi_id;
int error;
dmi_id = dmi_first_match(touchscreen_dmi_table);
if (!dmi_id)
return 0; /* Not an error */
ts_data = dmi_id->driver_data;
/* Some dmi table entries only provide an efi_embedded_fw_desc */
if (!ts_data->properties)
return 0;
error = bus_register_notifier(&i2c_bus_type, &ts_dmi_notifier);
if (error)
pr_err("%s: failed to register i2c bus notifier: %d\n",
__func__, error);
return error;
}
/*
 * We are registering our notifier after the i2c core is initialized and the
 * i2c bus
* itself is ready (which happens at postcore initcall level), but before
* ACPI starts enumerating devices (at subsys initcall level).
*/
arch_initcall(ts_dmi_init);
| linux-master | drivers/platform/x86/touchscreen_dmi.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Serial multi-instantiate driver, pseudo driver to instantiate multiple
* client devices from a single fwnode.
*
* Copyright 2018 Hans de Goede <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/bits.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/spi/spi.h>
#include <linux/types.h>
#define IRQ_RESOURCE_TYPE GENMASK(1, 0)
#define IRQ_RESOURCE_NONE 0
#define IRQ_RESOURCE_GPIO 1
#define IRQ_RESOURCE_APIC 2
#define IRQ_RESOURCE_AUTO 3
enum smi_bus_type {
SMI_I2C,
SMI_SPI,
SMI_AUTO_DETECT,
};
struct smi_instance {
const char *type;
unsigned int flags;
int irq_idx;
};
struct smi_node {
enum smi_bus_type bus_type;
struct smi_instance instances[];
};
struct smi {
int i2c_num;
int spi_num;
struct i2c_client **i2c_devs;
struct spi_device **spi_devs;
};
static int smi_get_irq(struct platform_device *pdev, struct acpi_device *adev,
const struct smi_instance *inst)
{
int ret;
switch (inst->flags & IRQ_RESOURCE_TYPE) {
case IRQ_RESOURCE_AUTO:
ret = acpi_dev_gpio_irq_get(adev, inst->irq_idx);
if (ret > 0) {
dev_dbg(&pdev->dev, "Using gpio irq\n");
break;
}
ret = platform_get_irq(pdev, inst->irq_idx);
if (ret > 0) {
dev_dbg(&pdev->dev, "Using platform irq\n");
break;
}
break;
case IRQ_RESOURCE_GPIO:
ret = acpi_dev_gpio_irq_get(adev, inst->irq_idx);
break;
case IRQ_RESOURCE_APIC:
ret = platform_get_irq(pdev, inst->irq_idx);
break;
default:
return 0;
}
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "Error requesting irq at index %d\n",
inst->irq_idx);
return ret;
}
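/*
 * Illustrative worked example (not from the original driver): for an
 * instance declared as { "bmc150_accel", IRQ_RESOURCE_GPIO, 0 } (see
 * bsg1160_data below), smi_get_irq() translates the ACPI companion's
 * GpioInt resource at index 0 into a Linux IRQ number; with
 * IRQ_RESOURCE_NONE it simply returns 0 and no IRQ is assigned.
 */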
static void smi_devs_unregister(struct smi *smi)
{
while (smi->i2c_num--)
i2c_unregister_device(smi->i2c_devs[smi->i2c_num]);
while (smi->spi_num--)
spi_unregister_device(smi->spi_devs[smi->spi_num]);
}
/**
* smi_spi_probe - Instantiate multiple SPI devices from inst array
* @pdev: Platform device
* @smi: Internal struct for Serial multi instantiate driver
* @inst_array: Array of instances to probe
*
 * Returns 0 if all SPI devices were instantiated, -ENOENT if none were found,
 * or a negative error code.
*/
static int smi_spi_probe(struct platform_device *pdev, struct smi *smi,
const struct smi_instance *inst_array)
{
struct device *dev = &pdev->dev;
struct acpi_device *adev = ACPI_COMPANION(dev);
struct spi_controller *ctlr;
struct spi_device *spi_dev;
char name[50];
int i, ret, count;
ret = acpi_spi_count_resources(adev);
if (ret < 0)
return ret;
if (!ret)
return -ENOENT;
count = ret;
smi->spi_devs = devm_kcalloc(dev, count, sizeof(*smi->spi_devs), GFP_KERNEL);
if (!smi->spi_devs)
return -ENOMEM;
for (i = 0; i < count && inst_array[i].type; i++) {
spi_dev = acpi_spi_device_alloc(NULL, adev, i);
if (IS_ERR(spi_dev)) {
ret = dev_err_probe(dev, PTR_ERR(spi_dev), "failed to allocate SPI device %s from ACPI\n",
dev_name(&adev->dev));
goto error;
}
ctlr = spi_dev->controller;
strscpy(spi_dev->modalias, inst_array[i].type, sizeof(spi_dev->modalias));
ret = smi_get_irq(pdev, adev, &inst_array[i]);
if (ret < 0) {
spi_dev_put(spi_dev);
goto error;
}
spi_dev->irq = ret;
snprintf(name, sizeof(name), "%s-%s-%s.%d", dev_name(&ctlr->dev), dev_name(dev),
inst_array[i].type, i);
spi_dev->dev.init_name = name;
ret = spi_add_device(spi_dev);
if (ret) {
dev_err_probe(&ctlr->dev, ret, "failed to add SPI device %s from ACPI\n",
dev_name(&adev->dev));
spi_dev_put(spi_dev);
goto error;
}
dev_dbg(dev, "SPI device %s using chip select %u", name,
spi_get_chipselect(spi_dev, 0));
smi->spi_devs[i] = spi_dev;
smi->spi_num++;
}
if (smi->spi_num < count) {
dev_dbg(dev, "Error finding driver, idx %d\n", i);
ret = -ENODEV;
goto error;
}
dev_info(dev, "Instantiated %d SPI devices.\n", smi->spi_num);
return 0;
error:
smi_devs_unregister(smi);
return ret;
}
/**
* smi_i2c_probe - Instantiate multiple I2C devices from inst array
* @pdev: Platform device
* @smi: Internal struct for Serial multi instantiate driver
* @inst_array: Array of instances to probe
*
 * Returns 0 if all I2C devices were instantiated, -ENOENT if none were found,
 * or a negative error code.
*/
static int smi_i2c_probe(struct platform_device *pdev, struct smi *smi,
const struct smi_instance *inst_array)
{
struct i2c_board_info board_info = {};
struct device *dev = &pdev->dev;
struct acpi_device *adev = ACPI_COMPANION(dev);
char name[32];
int i, ret, count;
ret = i2c_acpi_client_count(adev);
if (ret < 0)
return ret;
if (!ret)
return -ENOENT;
count = ret;
smi->i2c_devs = devm_kcalloc(dev, count, sizeof(*smi->i2c_devs), GFP_KERNEL);
if (!smi->i2c_devs)
return -ENOMEM;
for (i = 0; i < count && inst_array[i].type; i++) {
memset(&board_info, 0, sizeof(board_info));
strscpy(board_info.type, inst_array[i].type, I2C_NAME_SIZE);
snprintf(name, sizeof(name), "%s-%s.%d", dev_name(dev), inst_array[i].type, i);
board_info.dev_name = name;
ret = smi_get_irq(pdev, adev, &inst_array[i]);
if (ret < 0)
goto error;
board_info.irq = ret;
smi->i2c_devs[i] = i2c_acpi_new_device(dev, i, &board_info);
if (IS_ERR(smi->i2c_devs[i])) {
ret = dev_err_probe(dev, PTR_ERR(smi->i2c_devs[i]),
"Error creating i2c-client, idx %d\n", i);
goto error;
}
smi->i2c_num++;
}
if (smi->i2c_num < count) {
dev_dbg(dev, "Error finding driver, idx %d\n", i);
ret = -ENODEV;
goto error;
}
dev_info(dev, "Instantiated %d I2C devices.\n", smi->i2c_num);
return 0;
error:
smi_devs_unregister(smi);
return ret;
}
static int smi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct smi_node *node;
struct smi *smi;
int ret;
node = device_get_match_data(dev);
if (!node) {
dev_dbg(dev, "Error ACPI match data is missing\n");
return -ENODEV;
}
smi = devm_kzalloc(dev, sizeof(*smi), GFP_KERNEL);
if (!smi)
return -ENOMEM;
platform_set_drvdata(pdev, smi);
switch (node->bus_type) {
case SMI_I2C:
return smi_i2c_probe(pdev, smi, node->instances);
case SMI_SPI:
return smi_spi_probe(pdev, smi, node->instances);
case SMI_AUTO_DETECT:
/*
		 * For backwards-compatibility with the existing nodes, I2C
		 * is checked first and, if such entries are found, ONLY I2C
		 * devices are created. Some existing nodes that were already
		 * handled by this driver could also contain unrelated
		 * SpiSerialBus nodes that were previously ignored, and this
		 * preserves that behavior.
*/
ret = smi_i2c_probe(pdev, smi, node->instances);
if (ret != -ENOENT)
return ret;
return smi_spi_probe(pdev, smi, node->instances);
default:
return -EINVAL;
}
}
static void smi_remove(struct platform_device *pdev)
{
struct smi *smi = platform_get_drvdata(pdev);
smi_devs_unregister(smi);
}
static const struct smi_node bsg1160_data = {
.instances = {
{ "bmc150_accel", IRQ_RESOURCE_GPIO, 0 },
{ "bmc150_magn" },
{ "bmg160" },
{}
},
.bus_type = SMI_I2C,
};
static const struct smi_node bsg2150_data = {
.instances = {
{ "bmc150_accel", IRQ_RESOURCE_GPIO, 0 },
{ "bmc150_magn" },
		/* The resources describe a 3rd client, but it is not really there. */
{ "bsg2150_dummy_dev" },
{}
},
.bus_type = SMI_I2C,
};
static const struct smi_node int3515_data = {
.instances = {
{ "tps6598x", IRQ_RESOURCE_APIC, 0 },
{ "tps6598x", IRQ_RESOURCE_APIC, 1 },
{ "tps6598x", IRQ_RESOURCE_APIC, 2 },
{ "tps6598x", IRQ_RESOURCE_APIC, 3 },
{}
},
.bus_type = SMI_I2C,
};
static const struct smi_node cs35l41_hda = {
.instances = {
{ "cs35l41-hda", IRQ_RESOURCE_AUTO, 0 },
{ "cs35l41-hda", IRQ_RESOURCE_AUTO, 0 },
{ "cs35l41-hda", IRQ_RESOURCE_AUTO, 0 },
{ "cs35l41-hda", IRQ_RESOURCE_AUTO, 0 },
{}
},
.bus_type = SMI_AUTO_DETECT,
};
static const struct smi_node cs35l56_hda = {
.instances = {
{ "cs35l56-hda", IRQ_RESOURCE_AUTO, 0 },
{ "cs35l56-hda", IRQ_RESOURCE_AUTO, 0 },
{ "cs35l56-hda", IRQ_RESOURCE_AUTO, 0 },
{ "cs35l56-hda", IRQ_RESOURCE_AUTO, 0 },
/* a 5th entry is an alias address, not a real device */
{ "cs35l56-hda_dummy_dev" },
{}
},
.bus_type = SMI_AUTO_DETECT,
};
/*
* Note new device-ids must also be added to ignore_serial_bus_ids in
* drivers/acpi/scan.c: acpi_device_enumeration_by_parent().
*/
static const struct acpi_device_id smi_acpi_ids[] = {
{ "BSG1160", (unsigned long)&bsg1160_data },
{ "BSG2150", (unsigned long)&bsg2150_data },
{ "CSC3551", (unsigned long)&cs35l41_hda },
{ "CSC3556", (unsigned long)&cs35l56_hda },
{ "INT3515", (unsigned long)&int3515_data },
/* Non-conforming _HID for Cirrus Logic already released */
{ "CLSA0100", (unsigned long)&cs35l41_hda },
{ "CLSA0101", (unsigned long)&cs35l41_hda },
{ }
};
MODULE_DEVICE_TABLE(acpi, smi_acpi_ids);
static struct platform_driver smi_driver = {
.driver = {
.name = "Serial bus multi instantiate pseudo device driver",
.acpi_match_table = smi_acpi_ids,
},
.probe = smi_probe,
.remove_new = smi_remove,
};
module_platform_driver(smi_driver);
MODULE_DESCRIPTION("Serial multi instantiate pseudo device driver");
MODULE_AUTHOR("Hans de Goede <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/x86/serial-multi-instantiate.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Asus PC WMI hotkey driver
*
* Copyright(C) 2010 Intel Corporation.
* Copyright(C) 2010-2011 Corentin Chary <[email protected]>
*
* Portions based on wistron_btns.c:
* Copyright (C) 2005 Miloslav Trmac <[email protected]>
* Copyright (C) 2005 Bernhard Rosenkraenzer <[email protected]>
* Copyright (C) 2005 Dmitry Torokhov <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
#include <linux/backlight.h>
#include <linux/debugfs.h>
#include <linux/dmi.h>
#include <linux/fb.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/platform_data/x86/asus-wmi.h>
#include <linux/platform_device.h>
#include <linux/platform_profile.h>
#include <linux/power_supply.h>
#include <linux/rfkill.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/units.h>
#include <acpi/battery.h>
#include <acpi/video.h>
#include "asus-wmi.h"
MODULE_AUTHOR("Corentin Chary <[email protected]>");
MODULE_AUTHOR("Yong Wang <[email protected]>");
MODULE_DESCRIPTION("Asus Generic WMI Driver");
MODULE_LICENSE("GPL");
static bool fnlock_default = true;
module_param(fnlock_default, bool, 0444);
#define to_asus_wmi_driver(pdrv) \
(container_of((pdrv), struct asus_wmi_driver, platform_driver))
#define ASUS_WMI_MGMT_GUID "97845ED0-4E6D-11DE-8A39-0800200C9A66"
#define NOTIFY_BRNUP_MIN 0x11
#define NOTIFY_BRNUP_MAX 0x1f
#define NOTIFY_BRNDOWN_MIN 0x20
#define NOTIFY_BRNDOWN_MAX 0x2e
#define NOTIFY_FNLOCK_TOGGLE 0x4e
#define NOTIFY_KBD_DOCK_CHANGE 0x75
#define NOTIFY_KBD_BRTUP 0xc4
#define NOTIFY_KBD_BRTDWN 0xc5
#define NOTIFY_KBD_BRTTOGGLE 0xc7
#define NOTIFY_KBD_FBM 0x99
#define NOTIFY_KBD_TTP 0xae
#define NOTIFY_LID_FLIP 0xfa
#define NOTIFY_LID_FLIP_ROG 0xbd
#define ASUS_WMI_FNLOCK_BIOS_DISABLED BIT(0)
#define ASUS_MID_FAN_DESC "mid_fan"
#define ASUS_GPU_FAN_DESC "gpu_fan"
#define ASUS_FAN_DESC "cpu_fan"
#define ASUS_FAN_MFUN 0x13
#define ASUS_FAN_SFUN_READ 0x06
#define ASUS_FAN_SFUN_WRITE 0x07
/* Based on standard hwmon pwmX_enable values */
#define ASUS_FAN_CTRL_FULLSPEED 0
#define ASUS_FAN_CTRL_MANUAL 1
#define ASUS_FAN_CTRL_AUTO 2
#define ASUS_FAN_BOOST_MODE_NORMAL 0
#define ASUS_FAN_BOOST_MODE_OVERBOOST 1
#define ASUS_FAN_BOOST_MODE_OVERBOOST_MASK 0x01
#define ASUS_FAN_BOOST_MODE_SILENT 2
#define ASUS_FAN_BOOST_MODE_SILENT_MASK 0x02
#define ASUS_FAN_BOOST_MODES_MASK 0x03
#define ASUS_THROTTLE_THERMAL_POLICY_DEFAULT 0
#define ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST 1
#define ASUS_THROTTLE_THERMAL_POLICY_SILENT 2
#define USB_INTEL_XUSB2PR 0xD0
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
#define ASUS_ACPI_UID_ASUSWMI "ASUSWMI"
#define ASUS_ACPI_UID_ATK "ATK"
#define WMI_EVENT_QUEUE_SIZE 0x10
#define WMI_EVENT_QUEUE_END 0x1
#define WMI_EVENT_MASK 0xFFFF
/* The WMI hotkey event value is always the same. */
#define WMI_EVENT_VALUE_ATK 0xFF
#define FAN_CURVE_POINTS 8
#define FAN_CURVE_BUF_LEN 32
#define FAN_CURVE_DEV_CPU 0x00
#define FAN_CURVE_DEV_GPU 0x01
#define FAN_CURVE_DEV_MID 0x02
/* Mask to determine if setting temperature or percentage */
#define FAN_CURVE_PWM_MASK 0x04
/* Limits for tunables available on ASUS ROG laptops */
#define PPT_TOTAL_MIN 5
#define PPT_TOTAL_MAX 250
#define PPT_CPU_MIN 5
#define PPT_CPU_MAX 130
#define NVIDIA_BOOST_MIN 5
#define NVIDIA_BOOST_MAX 25
#define NVIDIA_TEMP_MIN 75
#define NVIDIA_TEMP_MAX 87
static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL };
static int throttle_thermal_policy_write(struct asus_wmi *);
static bool ashs_present(void)
{
int i = 0;
while (ashs_ids[i]) {
if (acpi_dev_found(ashs_ids[i++]))
return true;
}
return false;
}
struct bios_args {
u32 arg0;
u32 arg1;
	u32 arg2; /* At least the TUF Gaming series uses a 3-dword input buffer. */
u32 arg3;
u32 arg4; /* Some ROG laptops require a full 5 input args */
u32 arg5;
} __packed;
/*
 * Struct that's used for all methods called via AGFN. Naming is
 * identical to the AML code.
*/
struct agfn_args {
u16 mfun; /* probably "Multi-function" to be called */
u16 sfun; /* probably "Sub-function" to be called */
	u16 len;		/* size of the whole struct, including subfunction fields */
	u8 stas;		/* not used for now */
u8 err; /* zero on success */
} __packed;
/* struct used for calling fan read and write methods */
struct agfn_fan_args {
struct agfn_args agfn; /* common fields */
u8 fan; /* fan number: 0: set auto mode 1: 1st fan */
u32 speed; /* read: RPM/100 - write: 0-255 */
} __packed;
/*
* <platform>/ - debugfs root directory
* dev_id - current dev_id
* ctrl_param - current ctrl_param
* method_id - current method_id
* devs - call DEVS(dev_id, ctrl_param) and print result
* dsts - call DSTS(dev_id) and print result
* call - call method_id(dev_id, ctrl_param) and print result
*/
struct asus_wmi_debug {
struct dentry *root;
u32 method_id;
u32 dev_id;
u32 ctrl_param;
};
struct asus_rfkill {
struct asus_wmi *asus;
struct rfkill *rfkill;
u32 dev_id;
};
enum fan_type {
FAN_TYPE_NONE = 0,
FAN_TYPE_AGFN, /* deprecated on newer platforms */
FAN_TYPE_SPEC83, /* starting in Spec 8.3, use CPU_FAN_CTRL */
};
struct fan_curve_data {
bool enabled;
u32 device_id;
u8 temps[FAN_CURVE_POINTS];
u8 percents[FAN_CURVE_POINTS];
};
struct asus_wmi {
int dsts_id;
int spec;
int sfun;
bool wmi_event_queue;
struct input_dev *inputdev;
struct backlight_device *backlight_device;
struct platform_device *platform_device;
struct led_classdev wlan_led;
int wlan_led_wk;
struct led_classdev tpd_led;
int tpd_led_wk;
struct led_classdev kbd_led;
int kbd_led_wk;
struct led_classdev lightbar_led;
int lightbar_led_wk;
struct led_classdev micmute_led;
struct workqueue_struct *led_workqueue;
struct work_struct tpd_led_work;
struct work_struct wlan_led_work;
struct work_struct lightbar_led_work;
struct asus_rfkill wlan;
struct asus_rfkill bluetooth;
struct asus_rfkill wimax;
struct asus_rfkill wwan3g;
struct asus_rfkill gps;
struct asus_rfkill uwb;
int tablet_switch_event_code;
u32 tablet_switch_dev_id;
bool tablet_switch_inverted;
enum fan_type fan_type;
enum fan_type gpu_fan_type;
enum fan_type mid_fan_type;
int fan_pwm_mode;
int gpu_fan_pwm_mode;
int mid_fan_pwm_mode;
int agfn_pwm;
bool fan_boost_mode_available;
u8 fan_boost_mode_mask;
u8 fan_boost_mode;
bool charge_mode_available;
bool egpu_enable_available;
bool egpu_connect_available;
bool dgpu_disable_available;
bool gpu_mux_mode_available;
/* Tunables provided by ASUS for gaming laptops */
bool ppt_pl2_sppt_available;
bool ppt_pl1_spl_available;
bool ppt_apu_sppt_available;
bool ppt_plat_sppt_available;
bool ppt_fppt_available;
bool nv_dyn_boost_available;
bool nv_temp_tgt_available;
bool kbd_rgb_mode_available;
bool kbd_rgb_state_available;
bool throttle_thermal_policy_available;
u8 throttle_thermal_policy_mode;
bool cpu_fan_curve_available;
bool gpu_fan_curve_available;
bool mid_fan_curve_available;
struct fan_curve_data custom_fan_curves[3];
struct platform_profile_handler platform_profile_handler;
bool platform_profile_support;
// The RSOC controls the maximum charging percentage.
bool battery_rsoc_available;
bool panel_overdrive_available;
bool mini_led_mode_available;
struct hotplug_slot hotplug_slot;
struct mutex hotplug_lock;
struct mutex wmi_lock;
struct workqueue_struct *hotplug_workqueue;
struct work_struct hotplug_work;
bool fnlock_locked;
struct asus_wmi_debug debug;
struct asus_wmi_driver *driver;
};
/* WMI ************************************************************************/
static int asus_wmi_evaluate_method3(u32 method_id,
u32 arg0, u32 arg1, u32 arg2, u32 *retval)
{
struct bios_args args = {
.arg0 = arg0,
.arg1 = arg1,
.arg2 = arg2,
};
struct acpi_buffer input = { (acpi_size) sizeof(args), &args };
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_status status;
union acpi_object *obj;
u32 tmp = 0;
status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID, 0, method_id,
&input, &output);
if (ACPI_FAILURE(status))
return -EIO;
obj = (union acpi_object *)output.pointer;
if (obj && obj->type == ACPI_TYPE_INTEGER)
tmp = (u32) obj->integer.value;
if (retval)
*retval = tmp;
kfree(obj);
if (tmp == ASUS_WMI_UNSUPPORTED_METHOD)
return -ENODEV;
return 0;
}
int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, u32 *retval)
{
return asus_wmi_evaluate_method3(method_id, arg0, arg1, 0, retval);
}
EXPORT_SYMBOL_GPL(asus_wmi_evaluate_method);
static int asus_wmi_evaluate_method5(u32 method_id,
u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4, u32 *retval)
{
struct bios_args args = {
.arg0 = arg0,
.arg1 = arg1,
.arg2 = arg2,
.arg3 = arg3,
.arg4 = arg4,
};
struct acpi_buffer input = { (acpi_size) sizeof(args), &args };
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_status status;
union acpi_object *obj;
u32 tmp = 0;
status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID, 0, method_id,
&input, &output);
if (ACPI_FAILURE(status))
return -EIO;
obj = (union acpi_object *)output.pointer;
if (obj && obj->type == ACPI_TYPE_INTEGER)
tmp = (u32) obj->integer.value;
if (retval)
*retval = tmp;
kfree(obj);
if (tmp == ASUS_WMI_UNSUPPORTED_METHOD)
return -ENODEV;
return 0;
}
/*
* Returns as an error if the method output is not a buffer. Typically this
* means that the method called is unsupported.
*/
static int asus_wmi_evaluate_method_buf(u32 method_id,
u32 arg0, u32 arg1, u8 *ret_buffer, size_t size)
{
struct bios_args args = {
.arg0 = arg0,
.arg1 = arg1,
.arg2 = 0,
};
struct acpi_buffer input = { (acpi_size) sizeof(args), &args };
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_status status;
union acpi_object *obj;
int err = 0;
status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID, 0, method_id,
&input, &output);
if (ACPI_FAILURE(status))
return -EIO;
obj = (union acpi_object *)output.pointer;
switch (obj->type) {
case ACPI_TYPE_BUFFER:
if (obj->buffer.length > size) {
err = -ENOSPC;
break;
}
if (obj->buffer.length == 0) {
err = -ENODATA;
break;
}
memcpy(ret_buffer, obj->buffer.pointer, obj->buffer.length);
break;
case ACPI_TYPE_INTEGER:
err = (u32)obj->integer.value;
if (err == ASUS_WMI_UNSUPPORTED_METHOD)
err = -ENODEV;
/*
* At least one method returns a 0 with no buffer if no arg
* is provided, such as ASUS_WMI_DEVID_CPU_FAN_CURVE
*/
if (err == 0)
err = -ENODATA;
break;
default:
err = -ENODATA;
break;
}
kfree(obj);
if (err)
return err;
return 0;
}
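/*
 * Illustrative sketch, not part of the original driver: a typical caller of
 * asus_wmi_evaluate_method_buf() reading a fan curve into a fixed-size
 * buffer. It assumes the ASUS_WMI_DEVID_CPU_FAN_CURVE id from asus-wmi.h
 * and reuses the FAN_CURVE_BUF_LEN define above.
 */
static int __maybe_unused asus_wmi_example_read_fan_curve(struct asus_wmi *asus)
{
	u8 buf[FAN_CURVE_BUF_LEN];

	/* dev_id goes in arg0; the method's output buffer is copied into buf */
	return asus_wmi_evaluate_method_buf(asus->dsts_id,
					    ASUS_WMI_DEVID_CPU_FAN_CURVE, 0,
					    buf, FAN_CURVE_BUF_LEN);
}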
static int asus_wmi_evaluate_method_agfn(const struct acpi_buffer args)
{
struct acpi_buffer input;
u64 phys_addr;
u32 retval;
u32 status;
/*
	 * Copy to a DMA-capable address, otherwise memory corruption occurs
	 * as the BIOS has to be able to access it.
*/
input.pointer = kmemdup(args.pointer, args.length, GFP_DMA | GFP_KERNEL);
input.length = args.length;
if (!input.pointer)
return -ENOMEM;
phys_addr = virt_to_phys(input.pointer);
status = asus_wmi_evaluate_method(ASUS_WMI_METHODID_AGFN,
phys_addr, 0, &retval);
if (!status)
memcpy(args.pointer, input.pointer, args.length);
kfree(input.pointer);
if (status)
return -ENXIO;
return retval;
}
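/*
 * Illustrative sketch, not part of the original driver: how a fan speed
 * read over AGFN is typically assembled from the agfn_fan_args struct
 * defined above. agfn.len must be the size of the whole struct and the
 * result, in RPM/100, comes back in args.speed on success.
 */
static int __maybe_unused asus_example_agfn_fan_speed_read(int *speed)
{
	struct agfn_fan_args args = {
		.agfn.len = sizeof(args),
		.agfn.mfun = ASUS_FAN_MFUN,
		.agfn.sfun = ASUS_FAN_SFUN_READ,
		.fan = 1,			/* 1st fan */
		.speed = 0,
	};
	struct acpi_buffer input = { sizeof(args), &args };
	int status = asus_wmi_evaluate_method_agfn(input);

	if (status || args.agfn.err)
		return -ENXIO;

	*speed = args.speed;
	return 0;
}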
static int asus_wmi_get_devstate(struct asus_wmi *asus, u32 dev_id, u32 *retval)
{
return asus_wmi_evaluate_method(asus->dsts_id, dev_id, 0, retval);
}
static int asus_wmi_set_devstate(u32 dev_id, u32 ctrl_param,
u32 *retval)
{
return asus_wmi_evaluate_method(ASUS_WMI_METHODID_DEVS, dev_id,
ctrl_param, retval);
}
/* Helper for special devices with magic return codes */
static int asus_wmi_get_devstate_bits(struct asus_wmi *asus,
u32 dev_id, u32 mask)
{
u32 retval = 0;
int err;
err = asus_wmi_get_devstate(asus, dev_id, &retval);
if (err < 0)
return err;
if (!(retval & ASUS_WMI_DSTS_PRESENCE_BIT))
return -ENODEV;
if (mask == ASUS_WMI_DSTS_STATUS_BIT) {
if (retval & ASUS_WMI_DSTS_UNKNOWN_BIT)
return -ENODEV;
}
return retval & mask;
}
static int asus_wmi_get_devstate_simple(struct asus_wmi *asus, u32 dev_id)
{
return asus_wmi_get_devstate_bits(asus, dev_id,
ASUS_WMI_DSTS_STATUS_BIT);
}
static bool asus_wmi_dev_is_present(struct asus_wmi *asus, u32 dev_id)
{
u32 retval;
int status = asus_wmi_get_devstate(asus, dev_id, &retval);
return status == 0 && (retval & ASUS_WMI_DSTS_PRESENCE_BIT);
}
/* Input **********************************************************************/
static void asus_wmi_tablet_sw_report(struct asus_wmi *asus, bool value)
{
input_report_switch(asus->inputdev, SW_TABLET_MODE,
asus->tablet_switch_inverted ? !value : value);
input_sync(asus->inputdev);
}
static void asus_wmi_tablet_sw_init(struct asus_wmi *asus, u32 dev_id, int event_code)
{
struct device *dev = &asus->platform_device->dev;
int result;
result = asus_wmi_get_devstate_simple(asus, dev_id);
if (result >= 0) {
input_set_capability(asus->inputdev, EV_SW, SW_TABLET_MODE);
asus_wmi_tablet_sw_report(asus, result);
asus->tablet_switch_dev_id = dev_id;
asus->tablet_switch_event_code = event_code;
} else if (result == -ENODEV) {
dev_err(dev, "This device has tablet-mode-switch quirk but got ENODEV checking it. This is a bug.");
} else {
dev_err(dev, "Error checking for tablet-mode-switch: %d\n", result);
}
}
static int asus_wmi_input_init(struct asus_wmi *asus)
{
struct device *dev = &asus->platform_device->dev;
int err;
asus->inputdev = input_allocate_device();
if (!asus->inputdev)
return -ENOMEM;
asus->inputdev->name = asus->driver->input_name;
asus->inputdev->phys = asus->driver->input_phys;
asus->inputdev->id.bustype = BUS_HOST;
asus->inputdev->dev.parent = dev;
set_bit(EV_REP, asus->inputdev->evbit);
err = sparse_keymap_setup(asus->inputdev, asus->driver->keymap, NULL);
if (err)
goto err_free_dev;
switch (asus->driver->quirks->tablet_switch_mode) {
case asus_wmi_no_tablet_switch:
break;
case asus_wmi_kbd_dock_devid:
asus->tablet_switch_inverted = true;
asus_wmi_tablet_sw_init(asus, ASUS_WMI_DEVID_KBD_DOCK, NOTIFY_KBD_DOCK_CHANGE);
break;
case asus_wmi_lid_flip_devid:
asus_wmi_tablet_sw_init(asus, ASUS_WMI_DEVID_LID_FLIP, NOTIFY_LID_FLIP);
break;
case asus_wmi_lid_flip_rog_devid:
asus_wmi_tablet_sw_init(asus, ASUS_WMI_DEVID_LID_FLIP_ROG, NOTIFY_LID_FLIP_ROG);
break;
}
err = input_register_device(asus->inputdev);
if (err)
goto err_free_dev;
return 0;
err_free_dev:
input_free_device(asus->inputdev);
return err;
}
static void asus_wmi_input_exit(struct asus_wmi *asus)
{
if (asus->inputdev)
input_unregister_device(asus->inputdev);
asus->inputdev = NULL;
}
/* Tablet mode ****************************************************************/
static void asus_wmi_tablet_mode_get_state(struct asus_wmi *asus)
{
int result;
if (!asus->tablet_switch_dev_id)
return;
result = asus_wmi_get_devstate_simple(asus, asus->tablet_switch_dev_id);
if (result >= 0)
asus_wmi_tablet_sw_report(asus, result);
}
/* Charging mode, 1=Barrel, 2=USB ******************************************/
static ssize_t charge_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
int result, value;
result = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_CHARGE_MODE, &value);
if (result < 0)
return result;
return sysfs_emit(buf, "%d\n", value & 0xff);
}
static DEVICE_ATTR_RO(charge_mode);
/* dGPU ********************************************************************/
static ssize_t dgpu_disable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
int result;
result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_DGPU);
if (result < 0)
return result;
return sysfs_emit(buf, "%d\n", result);
}
/*
 * A user may be required to store the value twice: a typical sequence is
 * store first, then rescan the PCI bus to activate power, then store a
 * second time to save correctly.
* The reason for this is that an extra code path in the ACPI is enabled when
* the device and bus are powered.
*/
static ssize_t dgpu_disable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int result, err;
u32 disable;
struct asus_wmi *asus = dev_get_drvdata(dev);
result = kstrtou32(buf, 10, &disable);
if (result)
return result;
if (disable > 1)
return -EINVAL;
if (asus->gpu_mux_mode_available) {
result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPU_MUX);
if (result < 0)
/* An error here may signal greater failure of GPU handling */
return result;
if (!result && disable) {
err = -ENODEV;
pr_warn("Can not disable dGPU when the MUX is in dGPU mode: %d\n", err);
return err;
}
}
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_DGPU, disable, &result);
if (err) {
pr_warn("Failed to set dgpu disable: %d\n", err);
return err;
}
if (result > 1) {
pr_warn("Failed to set dgpu disable (result): 0x%x\n", result);
return -EIO;
}
sysfs_notify(&asus->platform_device->dev.kobj, NULL, "dgpu_disable");
return count;
}
static DEVICE_ATTR_RW(dgpu_disable);
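/*
 * Illustrative shell usage of the double-store sequence described above
 * (the sysfs path assumes the asus-nb-wmi platform device):
 *
 *   echo 0 > /sys/devices/platform/asus-nb-wmi/dgpu_disable
 *   echo 1 > /sys/bus/pci/rescan
 *   echo 0 > /sys/devices/platform/asus-nb-wmi/dgpu_disable
 */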
/* eGPU ********************************************************************/
static ssize_t egpu_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
int result;
result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_EGPU);
if (result < 0)
return result;
return sysfs_emit(buf, "%d\n", result);
}
/* The ACPI call to enable the eGPU also disables the internal dGPU */
static ssize_t egpu_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int result, err;
u32 enable;
struct asus_wmi *asus = dev_get_drvdata(dev);
err = kstrtou32(buf, 10, &enable);
if (err)
return err;
if (enable > 1)
return -EINVAL;
err = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_EGPU_CONNECTED);
if (err < 0) {
pr_warn("Failed to get egpu connection status: %d\n", err);
return err;
}
if (asus->gpu_mux_mode_available) {
result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPU_MUX);
if (result < 0) {
/* An error here may signal greater failure of GPU handling */
pr_warn("Failed to get gpu mux status: %d\n", result);
return result;
}
if (!result && enable) {
err = -ENODEV;
pr_warn("Can not enable eGPU when the MUX is in dGPU mode: %d\n", err);
return err;
}
}
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_EGPU, enable, &result);
if (err) {
pr_warn("Failed to set egpu state: %d\n", err);
return err;
}
if (result > 1) {
pr_warn("Failed to set egpu state (retval): 0x%x\n", result);
return -EIO;
}
sysfs_notify(&asus->platform_device->dev.kobj, NULL, "egpu_enable");
return count;
}
static DEVICE_ATTR_RW(egpu_enable);
/* Is eGPU connected? *********************************************************/
static ssize_t egpu_connected_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
int result;
result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_EGPU_CONNECTED);
if (result < 0)
return result;
return sysfs_emit(buf, "%d\n", result);
}
static DEVICE_ATTR_RO(egpu_connected);
/* gpu mux switch *************************************************************/
static ssize_t gpu_mux_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
int result;
result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPU_MUX);
if (result < 0)
return result;
return sysfs_emit(buf, "%d\n", result);
}
static ssize_t gpu_mux_mode_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
int result, err;
u32 optimus;
err = kstrtou32(buf, 10, &optimus);
if (err)
return err;
if (optimus > 1)
return -EINVAL;
if (asus->dgpu_disable_available) {
result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_DGPU);
if (result < 0)
/* An error here may signal greater failure of GPU handling */
return result;
if (result && !optimus) {
err = -ENODEV;
pr_warn("Can not switch MUX to dGPU mode when dGPU is disabled: %d\n", err);
return err;
}
}
if (asus->egpu_enable_available) {
result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_EGPU);
if (result < 0)
/* An error here may signal greater failure of GPU handling */
return result;
if (result && !optimus) {
err = -ENODEV;
pr_warn("Can not switch MUX to dGPU mode when eGPU is enabled: %d\n", err);
return err;
}
}
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_GPU_MUX, optimus, &result);
if (err) {
dev_err(dev, "Failed to set GPU MUX mode: %d\n", err);
return err;
}
/* !1 is considered a fail by ASUS */
if (result != 1) {
dev_warn(dev, "Failed to set GPU MUX mode (result): 0x%x\n", result);
return -EIO;
}
sysfs_notify(&asus->platform_device->dev.kobj, NULL, "gpu_mux_mode");
return count;
}
static DEVICE_ATTR_RW(gpu_mux_mode);
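/*
 * Usage note (illustrative): writing 0 switches the MUX to discrete/dGPU
 * mode and writing 1 to hybrid/Optimus mode. Per the checks above, the
 * dGPU must not be disabled and the eGPU must not be enabled before
 * selecting dGPU mode, and a reboot is usually required for the change
 * to take effect.
 */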
/* TUF Laptop Keyboard RGB Modes **********************************************/
static ssize_t kbd_rgb_mode_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
u32 cmd, mode, r, g, b, speed;
int err;
if (sscanf(buf, "%d %d %d %d %d %d", &cmd, &mode, &r, &g, &b, &speed) != 6)
return -EINVAL;
/* B3 is set and B4 is save to BIOS */
switch (cmd) {
case 0:
cmd = 0xb3;
break;
case 1:
cmd = 0xb4;
break;
default:
return -EINVAL;
}
/* These are the known usable modes across all TUF/ROG */
if (mode >= 12 || mode == 9)
mode = 10;
switch (speed) {
case 0:
speed = 0xe1;
break;
case 1:
speed = 0xeb;
break;
case 2:
speed = 0xf5;
break;
default:
speed = 0xeb;
}
err = asus_wmi_evaluate_method3(ASUS_WMI_METHODID_DEVS, ASUS_WMI_DEVID_TUF_RGB_MODE,
cmd | (mode << 8) | (r << 16) | (g << 24), b | (speed << 8), NULL);
if (err)
return err;
return count;
}
static DEVICE_ATTR_WO(kbd_rgb_mode);
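/*
 * Illustrative write, matching the "cmd mode red green blue speed" index
 * below: "1 0 255 0 0 1" saves (cmd 1 -> 0xb4) mode 0 (static on known
 * TUF models) in full red at medium speed (1 -> 0xeb) to the BIOS, while
 * cmd 0 (-> 0xb3) only applies the mode for the current session.
 */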
static ssize_t kbd_rgb_mode_index_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%s\n", "cmd mode red green blue speed");
}
static DEVICE_ATTR_RO(kbd_rgb_mode_index);
static struct attribute *kbd_rgb_mode_attrs[] = {
&dev_attr_kbd_rgb_mode.attr,
&dev_attr_kbd_rgb_mode_index.attr,
NULL,
};
static const struct attribute_group kbd_rgb_mode_group = {
.attrs = kbd_rgb_mode_attrs,
};
/* TUF Laptop Keyboard RGB State **********************************************/
static ssize_t kbd_rgb_state_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
u32 flags, cmd, boot, awake, sleep, keyboard;
int err;
if (sscanf(buf, "%d %d %d %d %d", &cmd, &boot, &awake, &sleep, &keyboard) != 5)
return -EINVAL;
if (cmd)
cmd = BIT(2);
flags = 0;
if (boot)
flags |= BIT(1);
if (awake)
flags |= BIT(3);
if (sleep)
flags |= BIT(5);
if (keyboard)
flags |= BIT(7);
/* 0xbd is the required default arg0 for the method. Nothing happens otherwise */
err = asus_wmi_evaluate_method3(ASUS_WMI_METHODID_DEVS,
ASUS_WMI_DEVID_TUF_RGB_STATE, 0xbd | cmd << 8 | (flags << 16), 0, NULL);
if (err)
return err;
return count;
}
static DEVICE_ATTR_WO(kbd_rgb_state);
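/*
 * Illustrative write, matching the "cmd boot awake sleep keyboard" index
 * below: "1 1 1 0 1" sets the cmd bit (BIT(2)) and the boot (BIT(1)),
 * awake (BIT(3)) and keyboard (BIT(7)) flags while leaving the sleep
 * flag (BIT(5)) clear, i.e. backlight on at boot and while awake but
 * off in sleep.
 */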
static ssize_t kbd_rgb_state_index_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%s\n", "cmd boot awake sleep keyboard");
}
static DEVICE_ATTR_RO(kbd_rgb_state_index);
static struct attribute *kbd_rgb_state_attrs[] = {
&dev_attr_kbd_rgb_state.attr,
&dev_attr_kbd_rgb_state_index.attr,
NULL,
};
static const struct attribute_group kbd_rgb_state_group = {
.attrs = kbd_rgb_state_attrs,
};
static const struct attribute_group *kbd_rgb_mode_groups[] = {
NULL,
NULL,
NULL,
};
/* Tunable: PPT: Intel=PL1, AMD=SPPT *****************************************/
static ssize_t ppt_pl2_sppt_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int result, err;
u32 value;
struct asus_wmi *asus = dev_get_drvdata(dev);
result = kstrtou32(buf, 10, &value);
if (result)
return result;
if (value < PPT_TOTAL_MIN || value > PPT_TOTAL_MAX)
return -EINVAL;
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_PPT_PL2_SPPT, value, &result);
if (err) {
pr_warn("Failed to set ppt_pl2_sppt: %d\n", err);
return err;
}
if (result > 1) {
pr_warn("Failed to set ppt_pl2_sppt (result): 0x%x\n", result);
return -EIO;
}
sysfs_notify(&asus->platform_device->dev.kobj, NULL, "ppt_pl2_sppt");
return count;
}
static DEVICE_ATTR_WO(ppt_pl2_sppt);
/* Tunable: PPT, Intel=PL1, AMD=SPL ******************************************/
static ssize_t ppt_pl1_spl_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int result, err;
u32 value;
struct asus_wmi *asus = dev_get_drvdata(dev);
result = kstrtou32(buf, 10, &value);
if (result)
return result;
if (value < PPT_TOTAL_MIN || value > PPT_TOTAL_MAX)
return -EINVAL;
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_PPT_PL1_SPL, value, &result);
if (err) {
pr_warn("Failed to set ppt_pl1_spl: %d\n", err);
return err;
}
if (result > 1) {
pr_warn("Failed to set ppt_pl1_spl (result): 0x%x\n", result);
return -EIO;
}
sysfs_notify(&asus->platform_device->dev.kobj, NULL, "ppt_pl1_spl");
return count;
}
static DEVICE_ATTR_WO(ppt_pl1_spl);
/* Tunable: PPT APU FPPT ******************************************************/
static ssize_t ppt_fppt_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int result, err;
u32 value;
struct asus_wmi *asus = dev_get_drvdata(dev);
result = kstrtou32(buf, 10, &value);
if (result)
return result;
if (value < PPT_TOTAL_MIN || value > PPT_TOTAL_MAX)
return -EINVAL;
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_PPT_FPPT, value, &result);
if (err) {
pr_warn("Failed to set ppt_fppt: %d\n", err);
return err;
}
if (result > 1) {
pr_warn("Failed to set ppt_fppt (result): 0x%x\n", result);
return -EIO;
}
sysfs_notify(&asus->platform_device->dev.kobj, NULL, "ppt_fpu_sppt");
return count;
}
static DEVICE_ATTR_WO(ppt_fppt);
/* Tunable: PPT APU SPPT *****************************************************/
static ssize_t ppt_apu_sppt_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int result, err;
u32 value;
struct asus_wmi *asus = dev_get_drvdata(dev);
result = kstrtou32(buf, 10, &value);
if (result)
return result;
if (value < PPT_CPU_MIN || value > PPT_CPU_MAX)
return -EINVAL;
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_PPT_APU_SPPT, value, &result);
if (err) {
pr_warn("Failed to set ppt_apu_sppt: %d\n", err);
return err;
}
if (result > 1) {
pr_warn("Failed to set ppt_apu_sppt (result): 0x%x\n", result);
return -EIO;
}
sysfs_notify(&asus->platform_device->dev.kobj, NULL, "ppt_apu_sppt");
return count;
}
static DEVICE_ATTR_WO(ppt_apu_sppt);
/* Tunable: PPT platform SPPT ************************************************/
static ssize_t ppt_platform_sppt_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int result, err;
u32 value;
struct asus_wmi *asus = dev_get_drvdata(dev);
result = kstrtou32(buf, 10, &value);
if (result)
return result;
if (value < PPT_CPU_MIN || value > PPT_CPU_MAX)
return -EINVAL;
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_PPT_PLAT_SPPT, value, &result);
if (err) {
pr_warn("Failed to set ppt_platform_sppt: %d\n", err);
return err;
}
if (result > 1) {
pr_warn("Failed to set ppt_platform_sppt (result): 0x%x\n", result);
return -EIO;
}
sysfs_notify(&asus->platform_device->dev.kobj, NULL, "ppt_platform_sppt");
return count;
}
static DEVICE_ATTR_WO(ppt_platform_sppt);
/* Tunable: NVIDIA dynamic boost *********************************************/
static ssize_t nv_dynamic_boost_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int result, err;
u32 value;
struct asus_wmi *asus = dev_get_drvdata(dev);
result = kstrtou32(buf, 10, &value);
if (result)
return result;
if (value < NVIDIA_BOOST_MIN || value > NVIDIA_BOOST_MAX)
return -EINVAL;
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_NV_DYN_BOOST, value, &result);
if (err) {
pr_warn("Failed to set nv_dynamic_boost: %d\n", err);
return err;
}
if (result > 1) {
pr_warn("Failed to set nv_dynamic_boost (result): 0x%x\n", result);
return -EIO;
}
sysfs_notify(&asus->platform_device->dev.kobj, NULL, "nv_dynamic_boost");
return count;
}
static DEVICE_ATTR_WO(nv_dynamic_boost);
/* Tunable: NVIDIA temperature target ****************************************/
static ssize_t nv_temp_target_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int result, err;
u32 value;
struct asus_wmi *asus = dev_get_drvdata(dev);
result = kstrtou32(buf, 10, &value);
if (result)
return result;
if (value < NVIDIA_TEMP_MIN || value > NVIDIA_TEMP_MAX)
return -EINVAL;
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_NV_THERM_TARGET, value, &result);
if (err) {
pr_warn("Failed to set nv_temp_target: %d\n", err);
return err;
}
if (result > 1) {
pr_warn("Failed to set nv_temp_target (result): 0x%x\n", result);
return -EIO;
}
sysfs_notify(&asus->platform_device->dev.kobj, NULL, "nv_temp_target");
return count;
}
static DEVICE_ATTR_WO(nv_temp_target);
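/*
 * Illustrative sysfs usage for the tunables above (the path assumes the
 * asus-nb-wmi platform device; accepted ranges are the PPT_ and NVIDIA_
 * limits defined at the top of this file):
 *
 *   echo 35 > /sys/devices/platform/asus-nb-wmi/ppt_pl1_spl
 *   echo 85 > /sys/devices/platform/asus-nb-wmi/nv_temp_target
 */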
/* Battery ********************************************************************/
/* The battery maximum charging percentage */
static int charge_end_threshold;
static ssize_t charge_control_end_threshold_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int value, ret, rv;
ret = kstrtouint(buf, 10, &value);
if (ret)
return ret;
if (value < 0 || value > 100)
return -EINVAL;
ret = asus_wmi_set_devstate(ASUS_WMI_DEVID_RSOC, value, &rv);
if (ret)
return ret;
if (rv != 1)
return -EIO;
/* There isn't any method in the DSDT to read the threshold, so we
* save the threshold.
*/
charge_end_threshold = value;
return count;
}
static ssize_t charge_control_end_threshold_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%d\n", charge_end_threshold);
}
static DEVICE_ATTR_RW(charge_control_end_threshold);
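/*
 * Illustrative usage: the attribute is attached to the ACPI battery by the
 * hook below, so limiting charging to 80% is typically:
 *
 *   echo 80 > /sys/class/power_supply/BAT0/charge_control_end_threshold
 *
 * (the battery may be named BAT1, BATC or BATT on some models, see
 * asus_wmi_battery_add()).
 */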
static int asus_wmi_battery_add(struct power_supply *battery, struct acpi_battery_hook *hook)
{
	/* The WMI method does not provide a way to specify a battery, so we
* just assume it is the first battery.
* Note: On some newer ASUS laptops (Zenbook UM431DA), the primary/first
* battery is named BATT.
*/
if (strcmp(battery->desc->name, "BAT0") != 0 &&
strcmp(battery->desc->name, "BAT1") != 0 &&
strcmp(battery->desc->name, "BATC") != 0 &&
strcmp(battery->desc->name, "BATT") != 0)
return -ENODEV;
if (device_create_file(&battery->dev,
&dev_attr_charge_control_end_threshold))
return -ENODEV;
/* The charge threshold is only reset when the system is power cycled,
	 * and we can't get the current threshold, so let's set it to 100% when
* a battery is added.
*/
asus_wmi_set_devstate(ASUS_WMI_DEVID_RSOC, 100, NULL);
charge_end_threshold = 100;
return 0;
}
static int asus_wmi_battery_remove(struct power_supply *battery, struct acpi_battery_hook *hook)
{
device_remove_file(&battery->dev,
&dev_attr_charge_control_end_threshold);
return 0;
}
static struct acpi_battery_hook battery_hook = {
.add_battery = asus_wmi_battery_add,
.remove_battery = asus_wmi_battery_remove,
.name = "ASUS Battery Extension",
};
static void asus_wmi_battery_init(struct asus_wmi *asus)
{
asus->battery_rsoc_available = false;
if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_RSOC)) {
asus->battery_rsoc_available = true;
battery_hook_register(&battery_hook);
}
}
static void asus_wmi_battery_exit(struct asus_wmi *asus)
{
if (asus->battery_rsoc_available)
battery_hook_unregister(&battery_hook);
}
/* LEDs ***********************************************************************/
/*
 * These functions actually update the LEDs, and are called from a
* workqueue. By doing this as separate work rather than when the LED
* subsystem asks, we avoid messing with the Asus ACPI stuff during a
* potentially bad time, such as a timer interrupt.
*/
static void tpd_led_update(struct work_struct *work)
{
int ctrl_param;
struct asus_wmi *asus;
asus = container_of(work, struct asus_wmi, tpd_led_work);
ctrl_param = asus->tpd_led_wk;
asus_wmi_set_devstate(ASUS_WMI_DEVID_TOUCHPAD_LED, ctrl_param, NULL);
}
static void tpd_led_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct asus_wmi *asus;
asus = container_of(led_cdev, struct asus_wmi, tpd_led);
asus->tpd_led_wk = !!value;
queue_work(asus->led_workqueue, &asus->tpd_led_work);
}
static int read_tpd_led_state(struct asus_wmi *asus)
{
return asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_TOUCHPAD_LED);
}
static enum led_brightness tpd_led_get(struct led_classdev *led_cdev)
{
struct asus_wmi *asus;
asus = container_of(led_cdev, struct asus_wmi, tpd_led);
return read_tpd_led_state(asus);
}
static void kbd_led_update(struct asus_wmi *asus)
{
int ctrl_param = 0;
ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F);
asus_wmi_set_devstate(ASUS_WMI_DEVID_KBD_BACKLIGHT, ctrl_param, NULL);
}
static int kbd_led_read(struct asus_wmi *asus, int *level, int *env)
{
int retval;
/*
* bits 0-2: level
* bit 7: light on/off
	 * bits 8-10: environment (0: dark, 1: normal, 2: light)
* bit 17: status unknown
*/
retval = asus_wmi_get_devstate_bits(asus, ASUS_WMI_DEVID_KBD_BACKLIGHT,
0xFFFF);
/* Unknown status is considered as off */
if (retval == 0x8000)
retval = 0;
if (retval < 0)
return retval;
if (level)
*level = retval & 0x7F;
if (env)
*env = (retval >> 8) & 0x7F;
return 0;
}
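/*
 * Worked example (illustrative): a raw value of 0x182 decodes as light on
 * (bit 7 set), brightness level 2 (bits 0-2) and environment 1/normal
 * (bits 8-10); the magic 0x8000 "unknown status" value is mapped to off
 * above before decoding.
 */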
static void do_kbd_led_set(struct led_classdev *led_cdev, int value)
{
struct asus_wmi *asus;
int max_level;
asus = container_of(led_cdev, struct asus_wmi, kbd_led);
max_level = asus->kbd_led.max_brightness;
asus->kbd_led_wk = clamp_val(value, 0, max_level);
kbd_led_update(asus);
}
static void kbd_led_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
/* Prevent disabling keyboard backlight on module unregister */
if (led_cdev->flags & LED_UNREGISTERING)
return;
do_kbd_led_set(led_cdev, value);
}
static void kbd_led_set_by_kbd(struct asus_wmi *asus, enum led_brightness value)
{
struct led_classdev *led_cdev = &asus->kbd_led;
do_kbd_led_set(led_cdev, value);
led_classdev_notify_brightness_hw_changed(led_cdev, asus->kbd_led_wk);
}
static enum led_brightness kbd_led_get(struct led_classdev *led_cdev)
{
struct asus_wmi *asus;
int retval, value;
asus = container_of(led_cdev, struct asus_wmi, kbd_led);
retval = kbd_led_read(asus, &value, NULL);
if (retval < 0)
return retval;
return value;
}
static int wlan_led_unknown_state(struct asus_wmi *asus)
{
u32 result;
asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WIRELESS_LED, &result);
return result & ASUS_WMI_DSTS_UNKNOWN_BIT;
}
static void wlan_led_update(struct work_struct *work)
{
int ctrl_param;
struct asus_wmi *asus;
asus = container_of(work, struct asus_wmi, wlan_led_work);
ctrl_param = asus->wlan_led_wk;
asus_wmi_set_devstate(ASUS_WMI_DEVID_WIRELESS_LED, ctrl_param, NULL);
}
static void wlan_led_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct asus_wmi *asus;
asus = container_of(led_cdev, struct asus_wmi, wlan_led);
asus->wlan_led_wk = !!value;
queue_work(asus->led_workqueue, &asus->wlan_led_work);
}
static enum led_brightness wlan_led_get(struct led_classdev *led_cdev)
{
struct asus_wmi *asus;
u32 result;
asus = container_of(led_cdev, struct asus_wmi, wlan_led);
asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WIRELESS_LED, &result);
return result & ASUS_WMI_DSTS_BRIGHTNESS_MASK;
}
static void lightbar_led_update(struct work_struct *work)
{
struct asus_wmi *asus;
int ctrl_param;
asus = container_of(work, struct asus_wmi, lightbar_led_work);
ctrl_param = asus->lightbar_led_wk;
asus_wmi_set_devstate(ASUS_WMI_DEVID_LIGHTBAR, ctrl_param, NULL);
}
static void lightbar_led_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct asus_wmi *asus;
asus = container_of(led_cdev, struct asus_wmi, lightbar_led);
asus->lightbar_led_wk = !!value;
queue_work(asus->led_workqueue, &asus->lightbar_led_work);
}
static enum led_brightness lightbar_led_get(struct led_classdev *led_cdev)
{
struct asus_wmi *asus;
u32 result;
asus = container_of(led_cdev, struct asus_wmi, lightbar_led);
asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_LIGHTBAR, &result);
return result & ASUS_WMI_DSTS_LIGHTBAR_MASK;
}
static int micmute_led_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
int state = brightness != LED_OFF;
int err;
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_MICMUTE_LED, state, NULL);
return err < 0 ? err : 0;
}
static void asus_wmi_led_exit(struct asus_wmi *asus)
{
led_classdev_unregister(&asus->kbd_led);
led_classdev_unregister(&asus->tpd_led);
led_classdev_unregister(&asus->wlan_led);
led_classdev_unregister(&asus->lightbar_led);
led_classdev_unregister(&asus->micmute_led);
if (asus->led_workqueue)
destroy_workqueue(asus->led_workqueue);
}
static int asus_wmi_led_init(struct asus_wmi *asus)
{
int rv = 0, num_rgb_groups = 0, led_val;
if (asus->kbd_rgb_mode_available)
kbd_rgb_mode_groups[num_rgb_groups++] = &kbd_rgb_mode_group;
if (asus->kbd_rgb_state_available)
kbd_rgb_mode_groups[num_rgb_groups++] = &kbd_rgb_state_group;
asus->led_workqueue = create_singlethread_workqueue("led_workqueue");
if (!asus->led_workqueue)
return -ENOMEM;
if (read_tpd_led_state(asus) >= 0) {
INIT_WORK(&asus->tpd_led_work, tpd_led_update);
asus->tpd_led.name = "asus::touchpad";
asus->tpd_led.brightness_set = tpd_led_set;
asus->tpd_led.brightness_get = tpd_led_get;
asus->tpd_led.max_brightness = 1;
rv = led_classdev_register(&asus->platform_device->dev,
&asus->tpd_led);
if (rv)
goto error;
}
if (!kbd_led_read(asus, &led_val, NULL)) {
asus->kbd_led_wk = led_val;
asus->kbd_led.name = "asus::kbd_backlight";
asus->kbd_led.flags = LED_BRIGHT_HW_CHANGED;
asus->kbd_led.brightness_set = kbd_led_set;
asus->kbd_led.brightness_get = kbd_led_get;
asus->kbd_led.max_brightness = 3;
if (num_rgb_groups != 0)
asus->kbd_led.groups = kbd_rgb_mode_groups;
rv = led_classdev_register(&asus->platform_device->dev,
&asus->kbd_led);
if (rv)
goto error;
}
if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_WIRELESS_LED)
&& (asus->driver->quirks->wapf > 0)) {
INIT_WORK(&asus->wlan_led_work, wlan_led_update);
asus->wlan_led.name = "asus::wlan";
asus->wlan_led.brightness_set = wlan_led_set;
if (!wlan_led_unknown_state(asus))
asus->wlan_led.brightness_get = wlan_led_get;
asus->wlan_led.flags = LED_CORE_SUSPENDRESUME;
asus->wlan_led.max_brightness = 1;
asus->wlan_led.default_trigger = "asus-wlan";
rv = led_classdev_register(&asus->platform_device->dev,
&asus->wlan_led);
if (rv)
goto error;
}
if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_LIGHTBAR)) {
INIT_WORK(&asus->lightbar_led_work, lightbar_led_update);
asus->lightbar_led.name = "asus::lightbar";
asus->lightbar_led.brightness_set = lightbar_led_set;
asus->lightbar_led.brightness_get = lightbar_led_get;
asus->lightbar_led.max_brightness = 1;
rv = led_classdev_register(&asus->platform_device->dev,
&asus->lightbar_led);
}
if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_MICMUTE_LED)) {
asus->micmute_led.name = "platform::micmute";
asus->micmute_led.max_brightness = 1;
asus->micmute_led.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
asus->micmute_led.brightness_set_blocking = micmute_led_set;
asus->micmute_led.default_trigger = "audio-micmute";
rv = led_classdev_register(&asus->platform_device->dev,
&asus->micmute_led);
if (rv)
goto error;
}
error:
if (rv)
asus_wmi_led_exit(asus);
return rv;
}
/* RF *************************************************************************/
/*
* PCI hotplug (for wlan rfkill)
*/
static bool asus_wlan_rfkill_blocked(struct asus_wmi *asus)
{
int result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WLAN);
if (result < 0)
return false;
return !result;
}
static void asus_rfkill_hotplug(struct asus_wmi *asus)
{
struct pci_dev *dev;
struct pci_bus *bus;
bool blocked;
bool absent;
u32 l;
mutex_lock(&asus->wmi_lock);
blocked = asus_wlan_rfkill_blocked(asus);
mutex_unlock(&asus->wmi_lock);
mutex_lock(&asus->hotplug_lock);
pci_lock_rescan_remove();
if (asus->wlan.rfkill)
rfkill_set_sw_state(asus->wlan.rfkill, blocked);
if (asus->hotplug_slot.ops) {
bus = pci_find_bus(0, 1);
if (!bus) {
pr_warn("Unable to find PCI bus 1?\n");
goto out_unlock;
}
if (pci_bus_read_config_dword(bus, 0, PCI_VENDOR_ID, &l)) {
pr_err("Unable to read PCI config space?\n");
goto out_unlock;
}
absent = (l == 0xffffffff);
if (blocked != absent) {
pr_warn("BIOS says wireless lan is %s, but the pci device is %s\n",
blocked ? "blocked" : "unblocked",
absent ? "absent" : "present");
pr_warn("skipped wireless hotplug as probably inappropriate for this model\n");
goto out_unlock;
}
if (!blocked) {
dev = pci_get_slot(bus, 0);
if (dev) {
/* Device already present */
pci_dev_put(dev);
goto out_unlock;
}
dev = pci_scan_single_device(bus, 0);
if (dev) {
pci_bus_assign_resources(bus);
pci_bus_add_device(dev);
}
} else {
dev = pci_get_slot(bus, 0);
if (dev) {
pci_stop_and_remove_bus_device(dev);
pci_dev_put(dev);
}
}
}
out_unlock:
pci_unlock_rescan_remove();
mutex_unlock(&asus->hotplug_lock);
}
static void asus_rfkill_notify(acpi_handle handle, u32 event, void *data)
{
struct asus_wmi *asus = data;
if (event != ACPI_NOTIFY_BUS_CHECK)
return;
/*
	 * We can't call asus_rfkill_hotplug directly because most
	 * of the time WMBC is still being executed and is not reentrant.
	 * There is currently no way to tell ACPICA that we want this
	 * method to be serialized, so we schedule an asus_rfkill_hotplug
	 * call later, in a safer context.
*/
queue_work(asus->hotplug_workqueue, &asus->hotplug_work);
}
static int asus_register_rfkill_notifier(struct asus_wmi *asus, char *node)
{
acpi_status status;
acpi_handle handle;
status = acpi_get_handle(NULL, node, &handle);
if (ACPI_FAILURE(status))
return -ENODEV;
status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
asus_rfkill_notify, asus);
if (ACPI_FAILURE(status))
pr_warn("Failed to register notify on %s\n", node);
return 0;
}
static void asus_unregister_rfkill_notifier(struct asus_wmi *asus, char *node)
{
acpi_status status = AE_OK;
acpi_handle handle;
status = acpi_get_handle(NULL, node, &handle);
if (ACPI_FAILURE(status))
return;
status = acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
asus_rfkill_notify);
if (ACPI_FAILURE(status))
pr_err("Error removing rfkill notify handler %s\n", node);
}
static int asus_get_adapter_status(struct hotplug_slot *hotplug_slot,
u8 *value)
{
struct asus_wmi *asus = container_of(hotplug_slot,
struct asus_wmi, hotplug_slot);
int result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WLAN);
if (result < 0)
return result;
*value = !!result;
return 0;
}
static const struct hotplug_slot_ops asus_hotplug_slot_ops = {
.get_adapter_status = asus_get_adapter_status,
.get_power_status = asus_get_adapter_status,
};
static void asus_hotplug_work(struct work_struct *work)
{
struct asus_wmi *asus;
asus = container_of(work, struct asus_wmi, hotplug_work);
asus_rfkill_hotplug(asus);
}
static int asus_setup_pci_hotplug(struct asus_wmi *asus)
{
int ret = -ENOMEM;
struct pci_bus *bus = pci_find_bus(0, 1);
if (!bus) {
pr_err("Unable to find wifi PCI bus\n");
return -ENODEV;
}
asus->hotplug_workqueue =
create_singlethread_workqueue("hotplug_workqueue");
if (!asus->hotplug_workqueue)
goto error_workqueue;
INIT_WORK(&asus->hotplug_work, asus_hotplug_work);
asus->hotplug_slot.ops = &asus_hotplug_slot_ops;
ret = pci_hp_register(&asus->hotplug_slot, bus, 0, "asus-wifi");
if (ret) {
pr_err("Unable to register hotplug slot - %d\n", ret);
goto error_register;
}
return 0;
error_register:
asus->hotplug_slot.ops = NULL;
destroy_workqueue(asus->hotplug_workqueue);
error_workqueue:
return ret;
}
/*
* Rfkill devices
*/
static int asus_rfkill_set(void *data, bool blocked)
{
struct asus_rfkill *priv = data;
u32 ctrl_param = !blocked;
u32 dev_id = priv->dev_id;
/*
	 * If the user bit is set, the BIOS can't set and record the wlan
	 * status: it will report the value read from id ASUS_WMI_DEVID_WLAN_LED
	 * while we query the wlan status through WMI (ASUS_WMI_DEVID_WLAN).
	 * So we have to record the wlan status in id ASUS_WMI_DEVID_WLAN_LED
	 * while setting the wlan status through WMI.
	 * This is also the behavior of the Windows app.
*/
if ((dev_id == ASUS_WMI_DEVID_WLAN) &&
priv->asus->driver->wlan_ctrl_by_user)
dev_id = ASUS_WMI_DEVID_WLAN_LED;
return asus_wmi_set_devstate(dev_id, ctrl_param, NULL);
}
static void asus_rfkill_query(struct rfkill *rfkill, void *data)
{
struct asus_rfkill *priv = data;
int result;
result = asus_wmi_get_devstate_simple(priv->asus, priv->dev_id);
if (result < 0)
return;
rfkill_set_sw_state(priv->rfkill, !result);
}
static int asus_rfkill_wlan_set(void *data, bool blocked)
{
struct asus_rfkill *priv = data;
struct asus_wmi *asus = priv->asus;
int ret;
/*
* This handler is enabled only if hotplug is enabled.
* In this case, asus_wmi_set_devstate() will trigger a
* WMI notification, and we need to wait for that call to
* finish before being able to call any other WMI method.
*/
mutex_lock(&asus->wmi_lock);
ret = asus_rfkill_set(data, blocked);
mutex_unlock(&asus->wmi_lock);
return ret;
}
static const struct rfkill_ops asus_rfkill_wlan_ops = {
.set_block = asus_rfkill_wlan_set,
.query = asus_rfkill_query,
};
static const struct rfkill_ops asus_rfkill_ops = {
.set_block = asus_rfkill_set,
.query = asus_rfkill_query,
};
static int asus_new_rfkill(struct asus_wmi *asus,
struct asus_rfkill *arfkill,
const char *name, enum rfkill_type type, int dev_id)
{
int result = asus_wmi_get_devstate_simple(asus, dev_id);
struct rfkill **rfkill = &arfkill->rfkill;
if (result < 0)
return result;
arfkill->dev_id = dev_id;
arfkill->asus = asus;
if (dev_id == ASUS_WMI_DEVID_WLAN &&
asus->driver->quirks->hotplug_wireless)
*rfkill = rfkill_alloc(name, &asus->platform_device->dev, type,
&asus_rfkill_wlan_ops, arfkill);
else
*rfkill = rfkill_alloc(name, &asus->platform_device->dev, type,
&asus_rfkill_ops, arfkill);
if (!*rfkill)
return -EINVAL;
if ((dev_id == ASUS_WMI_DEVID_WLAN) &&
(asus->driver->quirks->wapf > 0))
rfkill_set_led_trigger_name(*rfkill, "asus-wlan");
rfkill_init_sw_state(*rfkill, !result);
result = rfkill_register(*rfkill);
if (result) {
rfkill_destroy(*rfkill);
*rfkill = NULL;
return result;
}
return 0;
}
static void asus_wmi_rfkill_exit(struct asus_wmi *asus)
{
if (asus->driver->wlan_ctrl_by_user && ashs_present())
return;
asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P5");
asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P6");
asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P7");
if (asus->wlan.rfkill) {
rfkill_unregister(asus->wlan.rfkill);
rfkill_destroy(asus->wlan.rfkill);
asus->wlan.rfkill = NULL;
}
/*
* Refresh pci hotplug in case the rfkill state was changed after
* asus_unregister_rfkill_notifier()
*/
asus_rfkill_hotplug(asus);
if (asus->hotplug_slot.ops)
pci_hp_deregister(&asus->hotplug_slot);
if (asus->hotplug_workqueue)
destroy_workqueue(asus->hotplug_workqueue);
if (asus->bluetooth.rfkill) {
rfkill_unregister(asus->bluetooth.rfkill);
rfkill_destroy(asus->bluetooth.rfkill);
asus->bluetooth.rfkill = NULL;
}
if (asus->wimax.rfkill) {
rfkill_unregister(asus->wimax.rfkill);
rfkill_destroy(asus->wimax.rfkill);
asus->wimax.rfkill = NULL;
}
if (asus->wwan3g.rfkill) {
rfkill_unregister(asus->wwan3g.rfkill);
rfkill_destroy(asus->wwan3g.rfkill);
asus->wwan3g.rfkill = NULL;
}
if (asus->gps.rfkill) {
rfkill_unregister(asus->gps.rfkill);
rfkill_destroy(asus->gps.rfkill);
asus->gps.rfkill = NULL;
}
if (asus->uwb.rfkill) {
rfkill_unregister(asus->uwb.rfkill);
rfkill_destroy(asus->uwb.rfkill);
asus->uwb.rfkill = NULL;
}
}
static int asus_wmi_rfkill_init(struct asus_wmi *asus)
{
int result = 0;
mutex_init(&asus->hotplug_lock);
mutex_init(&asus->wmi_lock);
result = asus_new_rfkill(asus, &asus->wlan, "asus-wlan",
RFKILL_TYPE_WLAN, ASUS_WMI_DEVID_WLAN);
if (result && result != -ENODEV)
goto exit;
result = asus_new_rfkill(asus, &asus->bluetooth,
"asus-bluetooth", RFKILL_TYPE_BLUETOOTH,
ASUS_WMI_DEVID_BLUETOOTH);
if (result && result != -ENODEV)
goto exit;
result = asus_new_rfkill(asus, &asus->wimax, "asus-wimax",
RFKILL_TYPE_WIMAX, ASUS_WMI_DEVID_WIMAX);
if (result && result != -ENODEV)
goto exit;
result = asus_new_rfkill(asus, &asus->wwan3g, "asus-wwan3g",
RFKILL_TYPE_WWAN, ASUS_WMI_DEVID_WWAN3G);
if (result && result != -ENODEV)
goto exit;
result = asus_new_rfkill(asus, &asus->gps, "asus-gps",
RFKILL_TYPE_GPS, ASUS_WMI_DEVID_GPS);
if (result && result != -ENODEV)
goto exit;
result = asus_new_rfkill(asus, &asus->uwb, "asus-uwb",
RFKILL_TYPE_UWB, ASUS_WMI_DEVID_UWB);
if (result && result != -ENODEV)
goto exit;
if (!asus->driver->quirks->hotplug_wireless)
goto exit;
result = asus_setup_pci_hotplug(asus);
/*
* If we get -EBUSY then something else is handling the PCI hotplug -
* don't fail in this case
*/
if (result == -EBUSY)
result = 0;
asus_register_rfkill_notifier(asus, "\\_SB.PCI0.P0P5");
asus_register_rfkill_notifier(asus, "\\_SB.PCI0.P0P6");
asus_register_rfkill_notifier(asus, "\\_SB.PCI0.P0P7");
/*
* Refresh pci hotplug in case the rfkill state was changed during
* setup.
*/
asus_rfkill_hotplug(asus);
exit:
if (result && result != -ENODEV)
asus_wmi_rfkill_exit(asus);
if (result == -ENODEV)
result = 0;
return result;
}
/* Panel Overdrive ************************************************************/
static ssize_t panel_od_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
int result;
result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_PANEL_OD);
if (result < 0)
return result;
return sysfs_emit(buf, "%d\n", result);
}
static ssize_t panel_od_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int result, err;
u32 overdrive;
struct asus_wmi *asus = dev_get_drvdata(dev);
result = kstrtou32(buf, 10, &overdrive);
if (result)
return result;
if (overdrive > 1)
return -EINVAL;
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_PANEL_OD, overdrive, &result);
if (err) {
pr_warn("Failed to set panel overdrive: %d\n", err);
return err;
}
if (result > 1) {
pr_warn("Failed to set panel overdrive (result): 0x%x\n", result);
return -EIO;
}
sysfs_notify(&asus->platform_device->dev.kobj, NULL, "panel_od");
return count;
}
static DEVICE_ATTR_RW(panel_od);
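/*
* Example usage (sysfs path assumed; the directory name depends on the
* registering platform driver, e.g. asus-nb-wmi):
*   echo 1 > /sys/devices/platform/asus-nb-wmi/panel_od
* Valid values are 0 (overdrive off) and 1 (overdrive on).
*/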
/* Mini-LED mode **************************************************************/
static ssize_t mini_led_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
int result;
result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_MINI_LED_MODE);
if (result < 0)
return result;
return sysfs_emit(buf, "%d\n", result);
}
static ssize_t mini_led_mode_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int result, err;
u32 mode;
struct asus_wmi *asus = dev_get_drvdata(dev);
result = kstrtou32(buf, 10, &mode);
if (result)
return result;
if (mode > 1)
return -EINVAL;
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_MINI_LED_MODE, mode, &result);
if (err) {
pr_warn("Failed to set mini-LED: %d\n", err);
return err;
}
if (result > 1) {
pr_warn("Failed to set mini-LED mode (result): 0x%x\n", result);
return -EIO;
}
sysfs_notify(&asus->platform_device->dev.kobj, NULL, "mini_led_mode");
return count;
}
static DEVICE_ATTR_RW(mini_led_mode);
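/*
* Example usage (sysfs path assumed, as for panel_od above):
*   echo 1 > /sys/devices/platform/asus-nb-wmi/mini_led_mode
* Valid values are 0 (mini-LED off) and 1 (mini-LED on).
*/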
/* Quirks *********************************************************************/
static void asus_wmi_set_xusb2pr(struct asus_wmi *asus)
{
struct pci_dev *xhci_pdev;
u32 orig_ports_available;
u32 ports_available = asus->driver->quirks->xusb2pr;
xhci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI,
NULL);
if (!xhci_pdev)
return;
pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
&orig_ports_available);
pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
cpu_to_le32(ports_available));
pci_dev_put(xhci_pdev);
pr_info("set USB_INTEL_XUSB2PR old: 0x%04x, new: 0x%04x\n",
orig_ports_available, ports_available);
}
/*
* Some devices don't support the get_als method, or have a broken one,
* but still support the set method.
*/
static void asus_wmi_set_als(void)
{
asus_wmi_set_devstate(ASUS_WMI_DEVID_ALS_ENABLE, 1, NULL);
}
/* Hwmon device ***************************************************************/
static int asus_agfn_fan_speed_read(struct asus_wmi *asus, int fan,
int *speed)
{
struct agfn_fan_args args = {
.agfn.len = sizeof(args),
.agfn.mfun = ASUS_FAN_MFUN,
.agfn.sfun = ASUS_FAN_SFUN_READ,
.fan = fan,
.speed = 0,
};
struct acpi_buffer input = { (acpi_size) sizeof(args), &args };
int status;
if (fan != 1)
return -EINVAL;
status = asus_wmi_evaluate_method_agfn(input);
if (status || args.agfn.err)
return -ENXIO;
if (speed)
*speed = args.speed;
return 0;
}
static int asus_agfn_fan_speed_write(struct asus_wmi *asus, int fan,
int *speed)
{
struct agfn_fan_args args = {
.agfn.len = sizeof(args),
.agfn.mfun = ASUS_FAN_MFUN,
.agfn.sfun = ASUS_FAN_SFUN_WRITE,
.fan = fan,
.speed = speed ? *speed : 0,
};
struct acpi_buffer input = { (acpi_size) sizeof(args), &args };
int status;
/* fan == 1: set the 1st fan's speed; fan == 0: set auto mode */
if (fan != 1 && fan != 0)
return -EINVAL;
status = asus_wmi_evaluate_method_agfn(input);
if (status || args.agfn.err)
return -ENXIO;
if (speed && fan == 1)
asus->agfn_pwm = *speed;
return 0;
}
/*
* Check if we can read the speed of one fan. If true we assume we can also
* control it.
*/
static bool asus_wmi_has_agfn_fan(struct asus_wmi *asus)
{
int status;
int speed;
u32 value;
status = asus_agfn_fan_speed_read(asus, 1, &speed);
if (status != 0)
return false;
status = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_FAN_CTRL, &value);
if (status != 0)
return false;
/*
* We need to find a better way, probably using sfun,
* bits or spec ...
* Currently we disable it if:
* - ASUS_WMI_UNSUPPORTED_METHOD is returned
* - reserved bits are non-zero
* - sfun and presence bit are not set
*/
return !(value == ASUS_WMI_UNSUPPORTED_METHOD || value & 0xFFF80000
|| (!asus->sfun && !(value & ASUS_WMI_DSTS_PRESENCE_BIT)));
}
static int asus_fan_set_auto(struct asus_wmi *asus)
{
int status;
u32 retval;
switch (asus->fan_type) {
case FAN_TYPE_SPEC83:
status = asus_wmi_set_devstate(ASUS_WMI_DEVID_CPU_FAN_CTRL,
0, &retval);
if (status)
return status;
if (retval != 1)
return -EIO;
break;
case FAN_TYPE_AGFN:
status = asus_agfn_fan_speed_write(asus, 0, NULL);
if (status)
return -ENXIO;
break;
default:
return -ENXIO;
}
/*
* Modern models like the G713 also have GPU fan control (this is not AGFN)
*/
if (asus->gpu_fan_type == FAN_TYPE_SPEC83) {
status = asus_wmi_set_devstate(ASUS_WMI_DEVID_GPU_FAN_CTRL,
0, &retval);
if (status)
return status;
if (retval != 1)
return -EIO;
}
return 0;
}
static ssize_t pwm1_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
int err;
int value;
/* If we already set a value then just return it */
if (asus->agfn_pwm >= 0)
return sprintf(buf, "%d\n", asus->agfn_pwm);
/*
* If we haven't already set a value through the AGFN interface,
* read the current value through the (now-deprecated) FAN_CTRL device.
*/
err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_FAN_CTRL, &value);
if (err < 0)
return err;
value &= 0xFF;
if (value == 1) /* Low Speed */
value = 85;
else if (value == 2)
value = 170;
else if (value == 3)
value = 255;
else if (value) {
pr_err("Unknown fan speed %#x\n", value);
value = -1;
}
return sysfs_emit(buf, "%d\n", value);
}
static ssize_t pwm1_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count) {
struct asus_wmi *asus = dev_get_drvdata(dev);
int value;
int state;
int ret;
ret = kstrtouint(buf, 10, &value);
if (ret)
return ret;
value = clamp(value, 0, 255);
state = asus_agfn_fan_speed_write(asus, 1, &value);
if (state)
pr_warn("Setting fan speed failed: %d\n", state);
else
asus->fan_pwm_mode = ASUS_FAN_CTRL_MANUAL;
return count;
}
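/*
* Example usage of the pwm1 attribute above (hwmon path assumed; the
* index is assigned at registration time):
*   echo 128 > /sys/class/hwmon/hwmonX/pwm1   # roughly 50% duty
* Writing any value switches the AGFN fan to manual control until
* pwm1_enable is set back to automatic.
*/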
static ssize_t fan1_input_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
int value;
int ret;
switch (asus->fan_type) {
case FAN_TYPE_SPEC83:
ret = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_CPU_FAN_CTRL,
&value);
if (ret < 0)
return ret;
value &= 0xffff;
break;
case FAN_TYPE_AGFN:
/* no speed readable in manual mode */
if (asus->fan_pwm_mode == ASUS_FAN_CTRL_MANUAL)
return -ENXIO;
ret = asus_agfn_fan_speed_read(asus, 1, &value);
if (ret) {
pr_warn("reading fan speed failed: %d\n", ret);
return -ENXIO;
}
break;
default:
return -ENXIO;
}
return sysfs_emit(buf, "%d\n", value < 0 ? -1 : value * 100);
}
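/*
* fan1_input above reports the speed in RPM; the raw device value
* appears to be in hundreds of RPM, hence the multiplication by 100.
* pwm1_enable below follows the standard hwmon sysfs ABI, and the
* ASUS_FAN_CTRL_* constants are assumed to map onto it as
* 0 = full speed, 1 = manual, 2 = automatic.
*/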
static ssize_t pwm1_enable_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
/*
* Just read back the cached pwm mode.
*
* For the CPU_FAN device, the spec indicates that we should be
* able to read the device status and consult bit 19 to see if we
* are in Full On or Automatic mode. However, this does not work
* in practice on X532FL at least (the bit is always 0) and there's
* also nothing in the DSDT to indicate that this behaviour exists.
*/
return sysfs_emit(buf, "%d\n", asus->fan_pwm_mode);
}
static ssize_t pwm1_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
int status = 0;
int state;
int value;
int ret;
u32 retval;
ret = kstrtouint(buf, 10, &state);
if (ret)
return ret;
if (asus->fan_type == FAN_TYPE_SPEC83) {
switch (state) { /* standard documented hwmon values */
case ASUS_FAN_CTRL_FULLSPEED:
value = 1;
break;
case ASUS_FAN_CTRL_AUTO:
value = 0;
break;
default:
return -EINVAL;
}
ret = asus_wmi_set_devstate(ASUS_WMI_DEVID_CPU_FAN_CTRL,
value, &retval);
if (ret)
return ret;
if (retval != 1)
return -EIO;
} else if (asus->fan_type == FAN_TYPE_AGFN) {
switch (state) {
case ASUS_FAN_CTRL_MANUAL:
break;
case ASUS_FAN_CTRL_AUTO:
status = asus_fan_set_auto(asus);
if (status)
return status;
break;
default:
return -EINVAL;
}
}
asus->fan_pwm_mode = state;
/* Custom fan curves must be disabled when the mode is toggled */
if (asus->cpu_fan_curve_available)
asus->custom_fan_curves[FAN_CURVE_DEV_CPU].enabled = false;
if (asus->gpu_fan_curve_available)
asus->custom_fan_curves[FAN_CURVE_DEV_GPU].enabled = false;
if (asus->mid_fan_curve_available)
asus->custom_fan_curves[FAN_CURVE_DEV_MID].enabled = false;
return count;
}
static ssize_t fan1_label_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%s\n", ASUS_FAN_DESC);
}
static ssize_t asus_hwmon_temp1(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
u32 value;
int err;
err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_THERMAL_CTRL, &value);
if (err < 0)
return err;
return sprintf(buf, "%ld\n",
deci_kelvin_to_millicelsius(value & 0xFFFF));
}
/* GPU fan on modern ROG laptops */
static ssize_t fan2_input_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
int value;
int ret;
ret = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_GPU_FAN_CTRL, &value);
if (ret < 0)
return ret;
value &= 0xffff;
return sysfs_emit(buf, "%d\n", value * 100);
}
static ssize_t fan2_label_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%s\n", ASUS_GPU_FAN_DESC);
}
/* Middle/Center fan on modern ROG laptops */
static ssize_t fan3_input_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
int value;
int ret;
ret = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_MID_FAN_CTRL, &value);
if (ret < 0)
return ret;
value &= 0xffff;
return sysfs_emit(buf, "%d\n", value * 100);
}
static ssize_t fan3_label_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%s\n", ASUS_MID_FAN_DESC);
}
static ssize_t pwm2_enable_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
return sysfs_emit(buf, "%d\n", asus->gpu_fan_pwm_mode);
}
static ssize_t pwm2_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
int state;
int value;
int ret;
u32 retval;
ret = kstrtouint(buf, 10, &state);
if (ret)
return ret;
switch (state) { /* standard documented hwmon values */
case ASUS_FAN_CTRL_FULLSPEED:
value = 1;
break;
case ASUS_FAN_CTRL_AUTO:
value = 0;
break;
default:
return -EINVAL;
}
ret = asus_wmi_set_devstate(ASUS_WMI_DEVID_GPU_FAN_CTRL,
value, &retval);
if (ret)
return ret;
if (retval != 1)
return -EIO;
asus->gpu_fan_pwm_mode = state;
return count;
}
static ssize_t pwm3_enable_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
return sysfs_emit(buf, "%d\n", asus->mid_fan_pwm_mode);
}
static ssize_t pwm3_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
int state;
int value;
int ret;
u32 retval;
ret = kstrtouint(buf, 10, &state);
if (ret)
return ret;
switch (state) { /* standard documented hwmon values */
case ASUS_FAN_CTRL_FULLSPEED:
value = 1;
break;
case ASUS_FAN_CTRL_AUTO:
value = 0;
break;
default:
return -EINVAL;
}
ret = asus_wmi_set_devstate(ASUS_WMI_DEVID_MID_FAN_CTRL,
value, &retval);
if (ret)
return ret;
if (retval != 1)
return -EIO;
asus->mid_fan_pwm_mode = state;
return count;
}
/* Fan1 */
static DEVICE_ATTR_RW(pwm1);
static DEVICE_ATTR_RW(pwm1_enable);
static DEVICE_ATTR_RO(fan1_input);
static DEVICE_ATTR_RO(fan1_label);
/* Fan2 - GPU fan */
static DEVICE_ATTR_RW(pwm2_enable);
static DEVICE_ATTR_RO(fan2_input);
static DEVICE_ATTR_RO(fan2_label);
/* Fan3 - Middle/center fan */
static DEVICE_ATTR_RW(pwm3_enable);
static DEVICE_ATTR_RO(fan3_input);
static DEVICE_ATTR_RO(fan3_label);
/* Temperature */
static DEVICE_ATTR(temp1_input, S_IRUGO, asus_hwmon_temp1, NULL);
static struct attribute *hwmon_attributes[] = {
&dev_attr_pwm1.attr,
&dev_attr_pwm1_enable.attr,
&dev_attr_pwm2_enable.attr,
&dev_attr_pwm3_enable.attr,
&dev_attr_fan1_input.attr,
&dev_attr_fan1_label.attr,
&dev_attr_fan2_input.attr,
&dev_attr_fan2_label.attr,
&dev_attr_fan3_input.attr,
&dev_attr_fan3_label.attr,
&dev_attr_temp1_input.attr,
NULL
};
static umode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
struct attribute *attr, int idx)
{
struct device *dev = kobj_to_dev(kobj);
struct asus_wmi *asus = dev_get_drvdata(dev->parent);
u32 value = ASUS_WMI_UNSUPPORTED_METHOD;
if (attr == &dev_attr_pwm1.attr) {
if (asus->fan_type != FAN_TYPE_AGFN)
return 0;
} else if (attr == &dev_attr_fan1_input.attr
|| attr == &dev_attr_fan1_label.attr
|| attr == &dev_attr_pwm1_enable.attr) {
if (asus->fan_type == FAN_TYPE_NONE)
return 0;
} else if (attr == &dev_attr_fan2_input.attr
|| attr == &dev_attr_fan2_label.attr
|| attr == &dev_attr_pwm2_enable.attr) {
if (asus->gpu_fan_type == FAN_TYPE_NONE)
return 0;
} else if (attr == &dev_attr_fan3_input.attr
|| attr == &dev_attr_fan3_label.attr
|| attr == &dev_attr_pwm3_enable.attr) {
if (asus->mid_fan_type == FAN_TYPE_NONE)
return 0;
} else if (attr == &dev_attr_temp1_input.attr) {
int err = asus_wmi_get_devstate(asus,
ASUS_WMI_DEVID_THERMAL_CTRL,
&value);
if (err < 0)
return 0; /* can't return negative here */
/*
* If the temperature value in deci-Kelvin is near the absolute
* zero temperature, something is clearly wrong
*/
if (value == 0 || value == 1)
return 0;
}
return attr->mode;
}
static const struct attribute_group hwmon_attribute_group = {
.is_visible = asus_hwmon_sysfs_is_visible,
.attrs = hwmon_attributes
};
__ATTRIBUTE_GROUPS(hwmon_attribute);
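/*
* __ATTRIBUTE_GROUPS(hwmon_attribute) generates the NULL-terminated
* hwmon_attribute_groups[] array passed to
* devm_hwmon_device_register_with_groups() below.
*/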
static int asus_wmi_hwmon_init(struct asus_wmi *asus)
{
struct device *dev = &asus->platform_device->dev;
struct device *hwmon;
hwmon = devm_hwmon_device_register_with_groups(dev, "asus", asus,
hwmon_attribute_groups);
if (IS_ERR(hwmon)) {
pr_err("Could not register asus hwmon device\n");
return PTR_ERR(hwmon);
}
return 0;
}
static int asus_wmi_fan_init(struct asus_wmi *asus)
{
asus->gpu_fan_type = FAN_TYPE_NONE;
asus->mid_fan_type = FAN_TYPE_NONE;
asus->fan_type = FAN_TYPE_NONE;
asus->agfn_pwm = -1;
if (asus->driver->quirks->wmi_ignore_fan)
asus->fan_type = FAN_TYPE_NONE;
else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_CPU_FAN_CTRL))
asus->fan_type = FAN_TYPE_SPEC83;
else if (asus_wmi_has_agfn_fan(asus))
asus->fan_type = FAN_TYPE_AGFN;
/* Modern models like G713 also have GPU fan control */
if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_GPU_FAN_CTRL))
asus->gpu_fan_type = FAN_TYPE_SPEC83;
/* Some models also have a center/middle fan */
if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_MID_FAN_CTRL))
asus->mid_fan_type = FAN_TYPE_SPEC83;
if (asus->fan_type == FAN_TYPE_NONE)
return -ENODEV;
asus_fan_set_auto(asus);
asus->fan_pwm_mode = ASUS_FAN_CTRL_AUTO;
return 0;
}
/* Fan mode *******************************************************************/
static int fan_boost_mode_check_present(struct asus_wmi *asus)
{
u32 result;
int err;
asus->fan_boost_mode_available = false;
err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_FAN_BOOST_MODE,
&result);
if (err) {
if (err == -ENODEV)
return 0;
else
return err;
}
if ((result & ASUS_WMI_DSTS_PRESENCE_BIT) &&
(result & ASUS_FAN_BOOST_MODES_MASK)) {
asus->fan_boost_mode_available = true;
asus->fan_boost_mode_mask = result & ASUS_FAN_BOOST_MODES_MASK;
}
return 0;
}
static int fan_boost_mode_write(struct asus_wmi *asus)
{
u32 retval;
u8 value;
int err;
value = asus->fan_boost_mode;
pr_info("Set fan boost mode: %u\n", value);
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_FAN_BOOST_MODE, value,
&retval);
sysfs_notify(&asus->platform_device->dev.kobj, NULL,
"fan_boost_mode");
if (err) {
pr_warn("Failed to set fan boost mode: %d\n", err);
return err;
}
if (retval != 1) {
pr_warn("Failed to set fan boost mode (retval): 0x%x\n",
retval);
return -EIO;
}
return 0;
}
static int fan_boost_mode_switch_next(struct asus_wmi *asus)
{
u8 mask = asus->fan_boost_mode_mask;
if (asus->fan_boost_mode == ASUS_FAN_BOOST_MODE_NORMAL) {
if (mask & ASUS_FAN_BOOST_MODE_OVERBOOST_MASK)
asus->fan_boost_mode = ASUS_FAN_BOOST_MODE_OVERBOOST;
else if (mask & ASUS_FAN_BOOST_MODE_SILENT_MASK)
asus->fan_boost_mode = ASUS_FAN_BOOST_MODE_SILENT;
} else if (asus->fan_boost_mode == ASUS_FAN_BOOST_MODE_OVERBOOST) {
if (mask & ASUS_FAN_BOOST_MODE_SILENT_MASK)
asus->fan_boost_mode = ASUS_FAN_BOOST_MODE_SILENT;
else
asus->fan_boost_mode = ASUS_FAN_BOOST_MODE_NORMAL;
} else {
asus->fan_boost_mode = ASUS_FAN_BOOST_MODE_NORMAL;
}
return fan_boost_mode_write(asus);
}
static ssize_t fan_boost_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
return sysfs_emit(buf, "%d\n", asus->fan_boost_mode);
}
static ssize_t fan_boost_mode_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
u8 mask = asus->fan_boost_mode_mask;
u8 new_mode;
int result;
result = kstrtou8(buf, 10, &new_mode);
if (result < 0) {
pr_warn("Trying to store invalid value\n");
return result;
}
if (new_mode == ASUS_FAN_BOOST_MODE_OVERBOOST) {
if (!(mask & ASUS_FAN_BOOST_MODE_OVERBOOST_MASK))
return -EINVAL;
} else if (new_mode == ASUS_FAN_BOOST_MODE_SILENT) {
if (!(mask & ASUS_FAN_BOOST_MODE_SILENT_MASK))
return -EINVAL;
} else if (new_mode != ASUS_FAN_BOOST_MODE_NORMAL) {
return -EINVAL;
}
asus->fan_boost_mode = new_mode;
fan_boost_mode_write(asus);
return count;
}
// Fan boost mode: 0 - normal, 1 - overboost, 2 - silent
static DEVICE_ATTR_RW(fan_boost_mode);
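/*
* Example usage (sysfs path assumed): switch to silent mode with
*   echo 2 > /sys/devices/platform/asus-nb-wmi/fan_boost_mode
* Only the modes advertised in fan_boost_mode_mask are accepted.
*/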
/* Custom fan curves **********************************************************/
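/*
* A fan curve consists of eight temperature points followed by eight
* PWM points. The firmware reports PWM values as percentages, which
* are scaled to hwmon's 0-255 range below, e.g. 50% becomes
* 255 * 50 / 100 = 127.
*/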
static void fan_curve_copy_from_buf(struct fan_curve_data *data, u8 *buf)
{
int i;
for (i = 0; i < FAN_CURVE_POINTS; i++) {
data->temps[i] = buf[i];
}
for (i = 0; i < FAN_CURVE_POINTS; i++) {
data->percents[i] =
255 * buf[i + FAN_CURVE_POINTS] / 100;
}
}
static int fan_curve_get_factory_default(struct asus_wmi *asus, u32 fan_dev)
{
struct fan_curve_data *curves;
u8 buf[FAN_CURVE_BUF_LEN];
int err, fan_idx;
u8 mode = 0;
if (asus->throttle_thermal_policy_available)
mode = asus->throttle_thermal_policy_mode;
/* DEVID_<C/G>PU_FAN_CURVE is switched for OVERBOOST vs SILENT */
if (mode == 2)
mode = 1;
else if (mode == 1)
mode = 2;
err = asus_wmi_evaluate_method_buf(asus->dsts_id, fan_dev, mode, buf,
FAN_CURVE_BUF_LEN);
if (err) {
pr_warn("%s (0x%08x) failed: %d\n", __func__, fan_dev, err);
return err;
}
fan_idx = FAN_CURVE_DEV_CPU;
if (fan_dev == ASUS_WMI_DEVID_GPU_FAN_CURVE)
fan_idx = FAN_CURVE_DEV_GPU;
if (fan_dev == ASUS_WMI_DEVID_MID_FAN_CURVE)
fan_idx = FAN_CURVE_DEV_MID;
curves = &asus->custom_fan_curves[fan_idx];
curves->device_id = fan_dev;
fan_curve_copy_from_buf(curves, buf);
return 0;
}
/* Check if capability exists, and populate defaults */
static int fan_curve_check_present(struct asus_wmi *asus, bool *available,
u32 fan_dev)
{
int err;
*available = false;
if (asus->fan_type == FAN_TYPE_NONE)
return 0;
err = fan_curve_get_factory_default(asus, fan_dev);
if (err)
return 0;
*available = true;
return 0;
}
/* Determine which fan the attribute is for if SENSOR_ATTR */
static struct fan_curve_data *fan_curve_attr_select(struct asus_wmi *asus,
struct device_attribute *attr)
{
int index = to_sensor_dev_attr(attr)->index;
return &asus->custom_fan_curves[index];
}
/* Determine which fan the attribute is for if SENSOR_ATTR_2 */
static struct fan_curve_data *fan_curve_attr_2_select(struct asus_wmi *asus,
struct device_attribute *attr)
{
int nr = to_sensor_dev_attr_2(attr)->nr;
return &asus->custom_fan_curves[nr & ~FAN_CURVE_PWM_MASK];
}
static ssize_t fan_curve_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute_2 *dev_attr = to_sensor_dev_attr_2(attr);
struct asus_wmi *asus = dev_get_drvdata(dev);
struct fan_curve_data *data;
int value, pwm, index;
data = fan_curve_attr_2_select(asus, attr);
pwm = dev_attr->nr & FAN_CURVE_PWM_MASK;
index = dev_attr->index;
if (pwm)
value = data->percents[index];
else
value = data->temps[index];
return sysfs_emit(buf, "%d\n", value);
}
/*
* "fan_dev" is the related WMI method such as ASUS_WMI_DEVID_CPU_FAN_CURVE.
*/
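/*
* Worked example of the packing below, assuming temperature points
* {20, 30, 40, 50} for the first four curve points:
* arg1 = 0x14 | 0x1E << 8 | 0x28 << 16 | 0x32 << 24 = 0x32281E14.
*/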
static int fan_curve_write(struct asus_wmi *asus,
struct fan_curve_data *data)
{
u32 arg1 = 0, arg2 = 0, arg3 = 0, arg4 = 0;
u8 *percents = data->percents;
u8 *temps = data->temps;
int ret, i, shift = 0;
if (!data->enabled)
return 0;
for (i = 0; i < FAN_CURVE_POINTS / 2; i++) {
arg1 += (temps[i]) << shift;
arg2 += (temps[i + 4]) << shift;
/* Scale to percentage for device */
arg3 += (100 * percents[i] / 255) << shift;
arg4 += (100 * percents[i + 4] / 255) << shift;
shift += 8;
}
return asus_wmi_evaluate_method5(ASUS_WMI_METHODID_DEVS,
data->device_id,
arg1, arg2, arg3, arg4, &ret);
}
static ssize_t fan_curve_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
struct sensor_device_attribute_2 *dev_attr = to_sensor_dev_attr_2(attr);
struct asus_wmi *asus = dev_get_drvdata(dev);
struct fan_curve_data *data;
int err, pwm, index;
u8 value;
data = fan_curve_attr_2_select(asus, attr);
pwm = dev_attr->nr & FAN_CURVE_PWM_MASK;
index = dev_attr->index;
err = kstrtou8(buf, 10, &value);
if (err < 0)
return err;
if (pwm)
data->percents[index] = value;
else
data->temps[index] = value;
/*
* Mark as disabled so the user has to explicitly enable to apply a
* changed fan curve. This prevents potential lockups from writing out
* many changes as one-write-per-change.
*/
data->enabled = false;
return count;
}
static ssize_t fan_curve_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
struct fan_curve_data *data;
int out = 2;
data = fan_curve_attr_select(asus, attr);
if (data->enabled)
out = 1;
return sysfs_emit(buf, "%d\n", out);
}
static ssize_t fan_curve_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
struct fan_curve_data *data;
int value, err;
data = fan_curve_attr_select(asus, attr);
err = kstrtoint(buf, 10, &value);
if (err < 0)
return err;
switch (value) {
case 1:
data->enabled = true;
break;
case 2:
data->enabled = false;
break;
/*
* Auto + reset the fan curve data to defaults. Make it an explicit
* option so that users don't accidentally overwrite a set fan curve.
*/
case 3:
err = fan_curve_get_factory_default(asus, data->device_id);
if (err)
return err;
data->enabled = false;
break;
default:
return -EINVAL;
}
if (data->enabled) {
err = fan_curve_write(asus, data);
if (err)
return err;
} else {
/*
* For machines with a throttle thermal policy this is the only way
* to reset the fans to their default mode of operation (it does not
* erase curve data).
*/
if (asus->throttle_thermal_policy_available) {
err = throttle_thermal_policy_write(asus);
if (err)
return err;
/* Similar is true for laptops with this fan */
} else if (asus->fan_type == FAN_TYPE_SPEC83) {
err = asus_fan_set_auto(asus);
if (err)
return err;
} else {
/* Safeguard against faulty ACPI tables */
err = fan_curve_get_factory_default(asus, data->device_id);
if (err)
return err;
err = fan_curve_write(asus, data);
if (err)
return err;
}
}
return count;
}
/* CPU */
static SENSOR_DEVICE_ATTR_RW(pwm1_enable, fan_curve_enable, FAN_CURVE_DEV_CPU);
static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point1_temp, fan_curve,
FAN_CURVE_DEV_CPU, 0);
static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point2_temp, fan_curve,
FAN_CURVE_DEV_CPU, 1);
static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point3_temp, fan_curve,
FAN_CURVE_DEV_CPU, 2);
static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point4_temp, fan_curve,
FAN_CURVE_DEV_CPU, 3);
static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point5_temp, fan_curve,
FAN_CURVE_DEV_CPU, 4);
static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point6_temp, fan_curve,
FAN_CURVE_DEV_CPU, 5);
static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point7_temp, fan_curve,
FAN_CURVE_DEV_CPU, 6);
static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point8_temp, fan_curve,
FAN_CURVE_DEV_CPU, 7);
static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point1_pwm, fan_curve,
FAN_CURVE_DEV_CPU | FAN_CURVE_PWM_MASK, 0);
static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point2_pwm, fan_curve,
FAN_CURVE_DEV_CPU | FAN_CURVE_PWM_MASK, 1);
static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point3_pwm, fan_curve,
FAN_CURVE_DEV_CPU | FAN_CURVE_PWM_MASK, 2);
static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point4_pwm, fan_curve,
FAN_CURVE_DEV_CPU | FAN_CURVE_PWM_MASK, 3);
static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point5_pwm, fan_curve,
FAN_CURVE_DEV_CPU | FAN_CURVE_PWM_MASK, 4);
static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point6_pwm, fan_curve,
FAN_CURVE_DEV_CPU | FAN_CURVE_PWM_MASK, 5);
static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point7_pwm, fan_curve,
FAN_CURVE_DEV_CPU | FAN_CURVE_PWM_MASK, 6);
static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point8_pwm, fan_curve,
FAN_CURVE_DEV_CPU | FAN_CURVE_PWM_MASK, 7);
/* GPU */
static SENSOR_DEVICE_ATTR_RW(pwm2_enable, fan_curve_enable, FAN_CURVE_DEV_GPU);
static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point1_temp, fan_curve,
FAN_CURVE_DEV_GPU, 0);
static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point2_temp, fan_curve,
FAN_CURVE_DEV_GPU, 1);
static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point3_temp, fan_curve,
FAN_CURVE_DEV_GPU, 2);
static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point4_temp, fan_curve,
FAN_CURVE_DEV_GPU, 3);
static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point5_temp, fan_curve,
FAN_CURVE_DEV_GPU, 4);
static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point6_temp, fan_curve,
FAN_CURVE_DEV_GPU, 5);
static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point7_temp, fan_curve,
FAN_CURVE_DEV_GPU, 6);
static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point8_temp, fan_curve,
FAN_CURVE_DEV_GPU, 7);
static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point1_pwm, fan_curve,
FAN_CURVE_DEV_GPU | FAN_CURVE_PWM_MASK, 0);
static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point2_pwm, fan_curve,
FAN_CURVE_DEV_GPU | FAN_CURVE_PWM_MASK, 1);
static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point3_pwm, fan_curve,
FAN_CURVE_DEV_GPU | FAN_CURVE_PWM_MASK, 2);
static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point4_pwm, fan_curve,
FAN_CURVE_DEV_GPU | FAN_CURVE_PWM_MASK, 3);
static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point5_pwm, fan_curve,
FAN_CURVE_DEV_GPU | FAN_CURVE_PWM_MASK, 4);
static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point6_pwm, fan_curve,
FAN_CURVE_DEV_GPU | FAN_CURVE_PWM_MASK, 5);
static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point7_pwm, fan_curve,
FAN_CURVE_DEV_GPU | FAN_CURVE_PWM_MASK, 6);
static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point8_pwm, fan_curve,
FAN_CURVE_DEV_GPU | FAN_CURVE_PWM_MASK, 7);
/* MID */
static SENSOR_DEVICE_ATTR_RW(pwm3_enable, fan_curve_enable, FAN_CURVE_DEV_MID);
static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point1_temp, fan_curve,
FAN_CURVE_DEV_MID, 0);
static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point2_temp, fan_curve,
FAN_CURVE_DEV_MID, 1);
static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point3_temp, fan_curve,
FAN_CURVE_DEV_MID, 2);
static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point4_temp, fan_curve,
FAN_CURVE_DEV_MID, 3);
static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point5_temp, fan_curve,
FAN_CURVE_DEV_MID, 4);
static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point6_temp, fan_curve,
FAN_CURVE_DEV_MID, 5);
static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point7_temp, fan_curve,
FAN_CURVE_DEV_MID, 6);
static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point8_temp, fan_curve,
FAN_CURVE_DEV_MID, 7);
static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point1_pwm, fan_curve,
FAN_CURVE_DEV_MID | FAN_CURVE_PWM_MASK, 0);
static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point2_pwm, fan_curve,
FAN_CURVE_DEV_MID | FAN_CURVE_PWM_MASK, 1);
static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point3_pwm, fan_curve,
FAN_CURVE_DEV_MID | FAN_CURVE_PWM_MASK, 2);
static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point4_pwm, fan_curve,
FAN_CURVE_DEV_MID | FAN_CURVE_PWM_MASK, 3);
static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point5_pwm, fan_curve,
FAN_CURVE_DEV_MID | FAN_CURVE_PWM_MASK, 4);
static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point6_pwm, fan_curve,
FAN_CURVE_DEV_MID | FAN_CURVE_PWM_MASK, 5);
static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point7_pwm, fan_curve,
FAN_CURVE_DEV_MID | FAN_CURVE_PWM_MASK, 6);
static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point8_pwm, fan_curve,
FAN_CURVE_DEV_MID | FAN_CURVE_PWM_MASK, 7);
static struct attribute *asus_fan_curve_attr[] = {
/* CPU */
&sensor_dev_attr_pwm1_enable.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point1_temp.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point2_temp.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point3_temp.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point4_temp.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point5_temp.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point6_temp.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point7_temp.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point8_temp.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point3_pwm.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point4_pwm.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point5_pwm.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point6_pwm.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point7_pwm.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point8_pwm.dev_attr.attr,
/* GPU */
&sensor_dev_attr_pwm2_enable.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_point1_temp.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_point2_temp.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_point3_temp.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_point4_temp.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_point5_temp.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_point6_temp.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_point7_temp.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_point8_temp.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_point2_pwm.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_point3_pwm.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_point4_pwm.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_point5_pwm.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_point6_pwm.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_point7_pwm.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_point8_pwm.dev_attr.attr,
/* MID */
&sensor_dev_attr_pwm3_enable.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_point1_temp.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_point2_temp.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_point3_temp.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_point4_temp.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_point5_temp.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_point6_temp.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_point7_temp.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_point8_temp.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_point1_pwm.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_point2_pwm.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_point3_pwm.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_point4_pwm.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_point5_pwm.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_point6_pwm.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_point7_pwm.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_point8_pwm.dev_attr.attr,
NULL
};
static umode_t asus_fan_curve_is_visible(struct kobject *kobj,
struct attribute *attr, int idx)
{
struct device *dev = kobj_to_dev(kobj);
struct asus_wmi *asus = dev_get_drvdata(dev->parent);
/*
* Check the name character instead of casting attr, as there are two
* attribute types involved here (sensor_device_attribute and
* sensor_device_attribute_2).
*/
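/* e.g. "pwm2_auto_point1_temp"[3] == '2' selects the GPU fan. */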
if (asus->cpu_fan_curve_available && attr->name[3] == '1')
return 0644;
if (asus->gpu_fan_curve_available && attr->name[3] == '2')
return 0644;
if (asus->mid_fan_curve_available && attr->name[3] == '3')
return 0644;
return 0;
}
static const struct attribute_group asus_fan_curve_attr_group = {
.is_visible = asus_fan_curve_is_visible,
.attrs = asus_fan_curve_attr,
};
__ATTRIBUTE_GROUPS(asus_fan_curve_attr);
/*
* Must be initialised after throttle_thermal_policy_check_present() as
* we check the status of throttle_thermal_policy_available during init.
*/
static int asus_wmi_custom_fan_curve_init(struct asus_wmi *asus)
{
struct device *dev = &asus->platform_device->dev;
struct device *hwmon;
int err;
err = fan_curve_check_present(asus, &asus->cpu_fan_curve_available,
ASUS_WMI_DEVID_CPU_FAN_CURVE);
if (err)
return err;
err = fan_curve_check_present(asus, &asus->gpu_fan_curve_available,
ASUS_WMI_DEVID_GPU_FAN_CURVE);
if (err)
return err;
err = fan_curve_check_present(asus, &asus->mid_fan_curve_available,
ASUS_WMI_DEVID_MID_FAN_CURVE);
if (err)
return err;
if (!asus->cpu_fan_curve_available
&& !asus->gpu_fan_curve_available
&& !asus->mid_fan_curve_available)
return 0;
hwmon = devm_hwmon_device_register_with_groups(
dev, "asus_custom_fan_curve", asus, asus_fan_curve_attr_groups);
if (IS_ERR(hwmon)) {
dev_err(dev,
"Could not register asus_custom_fan_curve device\n");
return PTR_ERR(hwmon);
}
return 0;
}
/* Throttle thermal policy ****************************************************/
static int throttle_thermal_policy_check_present(struct asus_wmi *asus)
{
u32 result;
int err;
asus->throttle_thermal_policy_available = false;
err = asus_wmi_get_devstate(asus,
ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY,
&result);
if (err) {
if (err == -ENODEV)
return 0;
return err;
}
if (result & ASUS_WMI_DSTS_PRESENCE_BIT)
asus->throttle_thermal_policy_available = true;
return 0;
}
static int throttle_thermal_policy_write(struct asus_wmi *asus)
{
int err;
u8 value;
u32 retval;
value = asus->throttle_thermal_policy_mode;
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY,
value, &retval);
sysfs_notify(&asus->platform_device->dev.kobj, NULL,
"throttle_thermal_policy");
if (err) {
pr_warn("Failed to set throttle thermal policy: %d\n", err);
return err;
}
if (retval != 1) {
pr_warn("Failed to set throttle thermal policy (retval): 0x%x\n",
retval);
return -EIO;
}
/* Custom fan curves must be disabled when the mode is toggled */
if (asus->cpu_fan_curve_available)
asus->custom_fan_curves[FAN_CURVE_DEV_CPU].enabled = false;
if (asus->gpu_fan_curve_available)
asus->custom_fan_curves[FAN_CURVE_DEV_GPU].enabled = false;
if (asus->mid_fan_curve_available)
asus->custom_fan_curves[FAN_CURVE_DEV_MID].enabled = false;
return 0;
}
static int throttle_thermal_policy_set_default(struct asus_wmi *asus)
{
if (!asus->throttle_thermal_policy_available)
return 0;
asus->throttle_thermal_policy_mode = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT;
return throttle_thermal_policy_write(asus);
}
static int throttle_thermal_policy_switch_next(struct asus_wmi *asus)
{
u8 new_mode = asus->throttle_thermal_policy_mode + 1;
int err;
if (new_mode > ASUS_THROTTLE_THERMAL_POLICY_SILENT)
new_mode = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT;
asus->throttle_thermal_policy_mode = new_mode;
err = throttle_thermal_policy_write(asus);
if (err)
return err;
/*
* Notify platform_profile of the change so that it and
* throttle_thermal_policy_mode stay in sync in userspace.
*/
platform_profile_notify();
return 0;
}
static ssize_t throttle_thermal_policy_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
u8 mode = asus->throttle_thermal_policy_mode;
return sysfs_emit(buf, "%d\n", mode);
}
static ssize_t throttle_thermal_policy_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
u8 new_mode;
int result;
int err;
result = kstrtou8(buf, 10, &new_mode);
if (result < 0)
return result;
if (new_mode > ASUS_THROTTLE_THERMAL_POLICY_SILENT)
return -EINVAL;
asus->throttle_thermal_policy_mode = new_mode;
err = throttle_thermal_policy_write(asus);
if (err)
return err;
/*
* Notify platform_profile of the change so that it and
* throttle_thermal_policy_mode stay in sync in userspace.
*/
platform_profile_notify();
return count;
}
// Throttle thermal policy: 0 - default, 1 - overboost, 2 - silent
static DEVICE_ATTR_RW(throttle_thermal_policy);
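/*
* Example usage (sysfs path assumed): select overboost with
*   echo 1 > /sys/devices/platform/asus-nb-wmi/throttle_thermal_policy
*/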
/* Platform profile ***********************************************************/
static int asus_wmi_platform_profile_get(struct platform_profile_handler *pprof,
enum platform_profile_option *profile)
{
struct asus_wmi *asus;
int tp;
asus = container_of(pprof, struct asus_wmi, platform_profile_handler);
tp = asus->throttle_thermal_policy_mode;
switch (tp) {
case ASUS_THROTTLE_THERMAL_POLICY_DEFAULT:
*profile = PLATFORM_PROFILE_BALANCED;
break;
case ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST:
*profile = PLATFORM_PROFILE_PERFORMANCE;
break;
case ASUS_THROTTLE_THERMAL_POLICY_SILENT:
*profile = PLATFORM_PROFILE_QUIET;
break;
default:
return -EINVAL;
}
return 0;
}
static int asus_wmi_platform_profile_set(struct platform_profile_handler *pprof,
enum platform_profile_option profile)
{
struct asus_wmi *asus;
int tp;
asus = container_of(pprof, struct asus_wmi, platform_profile_handler);
switch (profile) {
case PLATFORM_PROFILE_PERFORMANCE:
tp = ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST;
break;
case PLATFORM_PROFILE_BALANCED:
tp = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT;
break;
case PLATFORM_PROFILE_QUIET:
tp = ASUS_THROTTLE_THERMAL_POLICY_SILENT;
break;
default:
return -EOPNOTSUPP;
}
asus->throttle_thermal_policy_mode = tp;
return throttle_thermal_policy_write(asus);
}
static int platform_profile_setup(struct asus_wmi *asus)
{
struct device *dev = &asus->platform_device->dev;
int err;
/*
* It is not an error if a component platform_profile relies on is
* unavailable, so return early and skip the platform_profile setup.
*/
if (!asus->throttle_thermal_policy_available)
return 0;
dev_info(dev, "Using throttle_thermal_policy for platform_profile support\n");
asus->platform_profile_handler.profile_get = asus_wmi_platform_profile_get;
asus->platform_profile_handler.profile_set = asus_wmi_platform_profile_set;
set_bit(PLATFORM_PROFILE_QUIET, asus->platform_profile_handler.choices);
set_bit(PLATFORM_PROFILE_BALANCED,
asus->platform_profile_handler.choices);
set_bit(PLATFORM_PROFILE_PERFORMANCE,
asus->platform_profile_handler.choices);
err = platform_profile_register(&asus->platform_profile_handler);
if (err)
return err;
asus->platform_profile_support = true;
return 0;
}
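/*
* Once registered, the profile is exposed through the generic
* /sys/firmware/acpi/platform_profile interface rather than a
* driver-specific attribute.
*/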
/* Backlight ******************************************************************/
static int read_backlight_power(struct asus_wmi *asus)
{
int ret;
if (asus->driver->quirks->store_backlight_power)
ret = !asus->driver->panel_power;
else
ret = asus_wmi_get_devstate_simple(asus,
ASUS_WMI_DEVID_BACKLIGHT);
if (ret < 0)
return ret;
return ret ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
}
static int read_brightness_max(struct asus_wmi *asus)
{
u32 retval;
int err;
err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_BRIGHTNESS, &retval);
if (err < 0)
return err;
retval = retval & ASUS_WMI_DSTS_MAX_BRIGTH_MASK;
retval >>= 8;
if (!retval)
return -ENODEV;
return retval;
}
static int read_brightness(struct backlight_device *bd)
{
struct asus_wmi *asus = bl_get_data(bd);
u32 retval;
int err;
err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_BRIGHTNESS, &retval);
if (err < 0)
return err;
return retval & ASUS_WMI_DSTS_BRIGHTNESS_MASK;
}
static u32 get_scalar_command(struct backlight_device *bd)
{
struct asus_wmi *asus = bl_get_data(bd);
u32 ctrl_param = 0;
if ((asus->driver->brightness < bd->props.brightness) ||
bd->props.brightness == bd->props.max_brightness)
ctrl_param = 0x00008001;
else if ((asus->driver->brightness > bd->props.brightness) ||
bd->props.brightness == 0)
ctrl_param = 0x00008000;
asus->driver->brightness = bd->props.brightness;
return ctrl_param;
}
static int update_bl_status(struct backlight_device *bd)
{
struct asus_wmi *asus = bl_get_data(bd);
u32 ctrl_param;
int power, err = 0;
power = read_backlight_power(asus);
if (power != -ENODEV && bd->props.power != power) {
ctrl_param = !!(bd->props.power == FB_BLANK_UNBLANK);
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT,
ctrl_param, NULL);
if (asus->driver->quirks->store_backlight_power)
asus->driver->panel_power = bd->props.power;
/*
* When using scalar brightness, updating the brightness
* will mess with the backlight power.
*/
if (asus->driver->quirks->scalar_panel_brightness)
return err;
}
if (asus->driver->quirks->scalar_panel_brightness)
ctrl_param = get_scalar_command(bd);
else
ctrl_param = bd->props.brightness;
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BRIGHTNESS,
ctrl_param, NULL);
return err;
}
static const struct backlight_ops asus_wmi_bl_ops = {
.get_brightness = read_brightness,
.update_status = update_bl_status,
};
static int asus_wmi_backlight_notify(struct asus_wmi *asus, int code)
{
struct backlight_device *bd = asus->backlight_device;
int old = bd->props.brightness;
int new = old;
if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
new = code - NOTIFY_BRNUP_MIN + 1;
else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX)
new = code - NOTIFY_BRNDOWN_MIN;
bd->props.brightness = new;
backlight_update_status(bd);
backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY);
return old;
}
static int asus_wmi_backlight_init(struct asus_wmi *asus)
{
struct backlight_device *bd;
struct backlight_properties props;
int max;
int power;
max = read_brightness_max(asus);
if (max < 0)
return max;
power = read_backlight_power(asus);
if (power == -ENODEV)
power = FB_BLANK_UNBLANK;
else if (power < 0)
return power;
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_PLATFORM;
props.max_brightness = max;
bd = backlight_device_register(asus->driver->name,
&asus->platform_device->dev, asus,
&asus_wmi_bl_ops, &props);
if (IS_ERR(bd)) {
pr_err("Could not register backlight device\n");
return PTR_ERR(bd);
}
asus->backlight_device = bd;
if (asus->driver->quirks->store_backlight_power)
asus->driver->panel_power = power;
bd->props.brightness = read_brightness(bd);
bd->props.power = power;
backlight_update_status(bd);
asus->driver->brightness = bd->props.brightness;
return 0;
}
static void asus_wmi_backlight_exit(struct asus_wmi *asus)
{
backlight_device_unregister(asus->backlight_device);
asus->backlight_device = NULL;
}
static int is_display_toggle(int code)
{
/* display toggle keys */
if ((code >= 0x61 && code <= 0x67) ||
(code >= 0x8c && code <= 0x93) ||
(code >= 0xa0 && code <= 0xa7) ||
(code >= 0xd0 && code <= 0xd5))
return 1;
return 0;
}
/* Fn-lock ********************************************************************/
static bool asus_wmi_has_fnlock_key(struct asus_wmi *asus)
{
u32 result;
asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_FNLOCK, &result);
return (result & ASUS_WMI_DSTS_PRESENCE_BIT) &&
!(result & ASUS_WMI_FNLOCK_BIOS_DISABLED);
}
static void asus_wmi_fnlock_update(struct asus_wmi *asus)
{
int mode = asus->fnlock_locked;
asus_wmi_set_devstate(ASUS_WMI_DEVID_FNLOCK, mode, NULL);
}
/* WMI events *****************************************************************/
static int asus_wmi_get_event_code(u32 value)
{
struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
acpi_status status;
int code;
status = wmi_get_event_data(value, &response);
if (ACPI_FAILURE(status)) {
pr_warn("Failed to get WMI notify code: %s\n",
acpi_format_exception(status));
return -EIO;
}
obj = (union acpi_object *)response.pointer;
if (obj && obj->type == ACPI_TYPE_INTEGER)
code = (int)(obj->integer.value & WMI_EVENT_MASK);
else
code = -EIO;
kfree(obj);
return code;
}
static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus)
{
unsigned int key_value = 1;
bool autorelease = true;
int orig_code = code;
if (asus->driver->key_filter) {
asus->driver->key_filter(asus->driver, &code, &key_value,
&autorelease);
if (code == ASUS_WMI_KEY_IGNORE)
return;
}
if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
code = ASUS_WMI_BRN_UP;
else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX)
code = ASUS_WMI_BRN_DOWN;
if (code == ASUS_WMI_BRN_DOWN || code == ASUS_WMI_BRN_UP) {
if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
asus_wmi_backlight_notify(asus, orig_code);
return;
}
}
if (code == NOTIFY_KBD_BRTUP) {
kbd_led_set_by_kbd(asus, asus->kbd_led_wk + 1);
return;
}
if (code == NOTIFY_KBD_BRTDWN) {
kbd_led_set_by_kbd(asus, asus->kbd_led_wk - 1);
return;
}
if (code == NOTIFY_KBD_BRTTOGGLE) {
if (asus->kbd_led_wk == asus->kbd_led.max_brightness)
kbd_led_set_by_kbd(asus, 0);
else
kbd_led_set_by_kbd(asus, asus->kbd_led_wk + 1);
return;
}
if (code == NOTIFY_FNLOCK_TOGGLE) {
asus->fnlock_locked = !asus->fnlock_locked;
asus_wmi_fnlock_update(asus);
return;
}
if (code == asus->tablet_switch_event_code) {
asus_wmi_tablet_mode_get_state(asus);
return;
}
if (code == NOTIFY_KBD_FBM || code == NOTIFY_KBD_TTP) {
if (asus->fan_boost_mode_available)
fan_boost_mode_switch_next(asus);
if (asus->throttle_thermal_policy_available)
throttle_thermal_policy_switch_next(asus);
return;
}
if (is_display_toggle(code) && asus->driver->quirks->no_display_toggle)
return;
if (!sparse_keymap_report_event(asus->inputdev, code,
key_value, autorelease))
pr_info("Unknown key code 0x%x\n", code);
}
static void asus_wmi_notify(u32 value, void *context)
{
struct asus_wmi *asus = context;
int code;
int i;
for (i = 0; i < WMI_EVENT_QUEUE_SIZE + 1; i++) {
code = asus_wmi_get_event_code(value);
if (code < 0) {
pr_warn("Failed to get notify code: %d\n", code);
return;
}
if (code == WMI_EVENT_QUEUE_END || code == WMI_EVENT_MASK)
return;
asus_wmi_handle_event_code(code, asus);
/*
* Double check that queue is present:
* ATK (with queue) uses 0xff, ASUSWMI (without) 0xd2.
*/
if (!asus->wmi_event_queue || value != WMI_EVENT_VALUE_ATK)
return;
}
pr_warn("Failed to process event queue, last code: 0x%x\n", code);
}
static int asus_wmi_notify_queue_flush(struct asus_wmi *asus)
{
int code;
int i;
for (i = 0; i < WMI_EVENT_QUEUE_SIZE + 1; i++) {
code = asus_wmi_get_event_code(WMI_EVENT_VALUE_ATK);
if (code < 0) {
pr_warn("Failed to get event during flush: %d\n", code);
return code;
}
if (code == WMI_EVENT_QUEUE_END || code == WMI_EVENT_MASK)
return 0;
}
pr_warn("Failed to flush event queue\n");
return -EIO;
}
/* Sysfs **********************************************************************/
static ssize_t store_sys_wmi(struct asus_wmi *asus, int devid,
const char *buf, size_t count)
{
u32 retval;
int err, value;
value = asus_wmi_get_devstate_simple(asus, devid);
if (value < 0)
return value;
err = kstrtoint(buf, 0, &value);
if (err)
return err;
err = asus_wmi_set_devstate(devid, value, &retval);
if (err < 0)
return err;
return count;
}
static ssize_t show_sys_wmi(struct asus_wmi *asus, int devid, char *buf)
{
int value = asus_wmi_get_devstate_simple(asus, devid);
if (value < 0)
return value;
return sprintf(buf, "%d\n", value);
}
#define ASUS_WMI_CREATE_DEVICE_ATTR(_name, _mode, _cm) \
static ssize_t show_##_name(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
struct asus_wmi *asus = dev_get_drvdata(dev); \
\
return show_sys_wmi(asus, _cm, buf); \
} \
static ssize_t store_##_name(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
struct asus_wmi *asus = dev_get_drvdata(dev); \
\
return store_sys_wmi(asus, _cm, buf, count); \
} \
static struct device_attribute dev_attr_##_name = { \
.attr = { \
.name = __stringify(_name), \
.mode = _mode }, \
.show = show_##_name, \
.store = store_##_name, \
}
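/*
* For example, ASUS_WMI_CREATE_DEVICE_ATTR(camera, 0644, ASUS_WMI_DEVID_CAMERA)
* expands to show_camera()/store_camera() wrappers and a dev_attr_camera
* attribute named "camera" with mode 0644.
*/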
ASUS_WMI_CREATE_DEVICE_ATTR(touchpad, 0644, ASUS_WMI_DEVID_TOUCHPAD);
ASUS_WMI_CREATE_DEVICE_ATTR(camera, 0644, ASUS_WMI_DEVID_CAMERA);
ASUS_WMI_CREATE_DEVICE_ATTR(cardr, 0644, ASUS_WMI_DEVID_CARDREADER);
ASUS_WMI_CREATE_DEVICE_ATTR(lid_resume, 0644, ASUS_WMI_DEVID_LID_RESUME);
ASUS_WMI_CREATE_DEVICE_ATTR(als_enable, 0644, ASUS_WMI_DEVID_ALS_ENABLE);
static ssize_t cpufv_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int value, rv;
rv = kstrtoint(buf, 0, &value);
if (rv)
return rv;
if (value < 0 || value > 2)
return -EINVAL;
rv = asus_wmi_evaluate_method(ASUS_WMI_METHODID_CFVS, value, 0, NULL);
if (rv < 0)
return rv;
return count;
}
static DEVICE_ATTR_WO(cpufv);
static struct attribute *platform_attributes[] = {
&dev_attr_cpufv.attr,
&dev_attr_camera.attr,
&dev_attr_cardr.attr,
&dev_attr_touchpad.attr,
&dev_attr_charge_mode.attr,
&dev_attr_egpu_enable.attr,
&dev_attr_egpu_connected.attr,
&dev_attr_dgpu_disable.attr,
&dev_attr_gpu_mux_mode.attr,
&dev_attr_lid_resume.attr,
&dev_attr_als_enable.attr,
&dev_attr_fan_boost_mode.attr,
&dev_attr_throttle_thermal_policy.attr,
&dev_attr_ppt_pl2_sppt.attr,
&dev_attr_ppt_pl1_spl.attr,
&dev_attr_ppt_fppt.attr,
&dev_attr_ppt_apu_sppt.attr,
&dev_attr_ppt_platform_sppt.attr,
&dev_attr_nv_dynamic_boost.attr,
&dev_attr_nv_temp_target.attr,
&dev_attr_panel_od.attr,
&dev_attr_mini_led_mode.attr,
NULL
};
static umode_t asus_sysfs_is_visible(struct kobject *kobj,
struct attribute *attr, int idx)
{
struct device *dev = kobj_to_dev(kobj);
struct asus_wmi *asus = dev_get_drvdata(dev);
bool ok = true;
int devid = -1;
if (attr == &dev_attr_camera.attr)
devid = ASUS_WMI_DEVID_CAMERA;
else if (attr == &dev_attr_cardr.attr)
devid = ASUS_WMI_DEVID_CARDREADER;
else if (attr == &dev_attr_touchpad.attr)
devid = ASUS_WMI_DEVID_TOUCHPAD;
else if (attr == &dev_attr_lid_resume.attr)
devid = ASUS_WMI_DEVID_LID_RESUME;
else if (attr == &dev_attr_als_enable.attr)
devid = ASUS_WMI_DEVID_ALS_ENABLE;
else if (attr == &dev_attr_charge_mode.attr)
ok = asus->charge_mode_available;
else if (attr == &dev_attr_egpu_enable.attr)
ok = asus->egpu_enable_available;
else if (attr == &dev_attr_egpu_connected.attr)
ok = asus->egpu_connect_available;
else if (attr == &dev_attr_dgpu_disable.attr)
ok = asus->dgpu_disable_available;
else if (attr == &dev_attr_gpu_mux_mode.attr)
ok = asus->gpu_mux_mode_available;
else if (attr == &dev_attr_fan_boost_mode.attr)
ok = asus->fan_boost_mode_available;
else if (attr == &dev_attr_throttle_thermal_policy.attr)
ok = asus->throttle_thermal_policy_available;
else if (attr == &dev_attr_ppt_pl2_sppt.attr)
ok = asus->ppt_pl2_sppt_available;
else if (attr == &dev_attr_ppt_pl1_spl.attr)
ok = asus->ppt_pl1_spl_available;
else if (attr == &dev_attr_ppt_fppt.attr)
ok = asus->ppt_fppt_available;
else if (attr == &dev_attr_ppt_apu_sppt.attr)
ok = asus->ppt_apu_sppt_available;
else if (attr == &dev_attr_ppt_platform_sppt.attr)
ok = asus->ppt_plat_sppt_available;
else if (attr == &dev_attr_nv_dynamic_boost.attr)
ok = asus->nv_dyn_boost_available;
else if (attr == &dev_attr_nv_temp_target.attr)
ok = asus->nv_temp_tgt_available;
else if (attr == &dev_attr_panel_od.attr)
ok = asus->panel_overdrive_available;
else if (attr == &dev_attr_mini_led_mode.attr)
ok = asus->mini_led_mode_available;
if (devid != -1)
ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0);
return ok ? attr->mode : 0;
}
static const struct attribute_group platform_attribute_group = {
.is_visible = asus_sysfs_is_visible,
.attrs = platform_attributes
};
static void asus_wmi_sysfs_exit(struct platform_device *device)
{
sysfs_remove_group(&device->dev.kobj, &platform_attribute_group);
}
static int asus_wmi_sysfs_init(struct platform_device *device)
{
return sysfs_create_group(&device->dev.kobj, &platform_attribute_group);
}
/* Platform device ************************************************************/
static int asus_wmi_platform_init(struct asus_wmi *asus)
{
struct device *dev = &asus->platform_device->dev;
char *wmi_uid;
int rv;
/* INIT enables hotkeys on some models */
if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_INIT, 0, 0, &rv))
pr_info("Initialization: %#x\n", rv);
/* We don't know yet what to do with this version... */
if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_SPEC, 0, 0x9, &rv)) {
pr_info("BIOS WMI version: %d.%d\n", rv >> 16, rv & 0xFF);
asus->spec = rv;
}
	/*
	 * The SFUN method probably allows the original driver to get the list
	 * of features supported by a given model. For now, the 0x0100 or
	 * 0x0800 bit signifies that the laptop is equipped with a Wi-Fi
	 * MiniPCI card. The significance of the other bits is yet to be
	 * determined.
	 */
if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_SFUN, 0, 0, &rv)) {
pr_info("SFUN value: %#x\n", rv);
asus->sfun = rv;
}
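	/*
	 * Illustrative (hypothetical) use of the SFUN bits described above;
	 * this driver itself does not act on them:
	 *
	 *	if (asus->sfun & 0x0100)
	 *		pr_info("Wi-Fi MiniPCI card reported present\n");
	 */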
	/*
	 * Eee PCs and Notebooks seem to have different method IDs for DSTS,
	 * but this may also be related to the BIOS's SPEC.
	 * Note that on most Eee PCs there is no way to check whether a method
	 * exists or not, while notebooks return 0xFFFFFFFE on failure; once
	 * again, SPEC can probably be used to tell these cases apart.
	 *
	 * Additionally, at least the TUF Gaming series laptops return nothing
	 * for unknown methods, so detection this way is not possible.
	 *
	 * There is a strong indication that only ACPI WMI devices with a _UID
	 * equal to "ASUSWMI" use DCTS, whereas those with "ATK" use DSTS.
	 */
wmi_uid = wmi_get_acpi_device_uid(ASUS_WMI_MGMT_GUID);
if (!wmi_uid)
return -ENODEV;
if (!strcmp(wmi_uid, ASUS_ACPI_UID_ASUSWMI)) {
dev_info(dev, "Detected ASUSWMI, use DCTS\n");
asus->dsts_id = ASUS_WMI_METHODID_DCTS;
} else {
dev_info(dev, "Detected %s, not ASUSWMI, use DSTS\n", wmi_uid);
asus->dsts_id = ASUS_WMI_METHODID_DSTS;
}
	/*
	 * Some devices can have multiple event codes stored in a queue before
	 * the module load if it was unloaded intermittently after calling the
	 * INIT method (which enables event handling). The WMI notify handler
	 * is expected to retrieve all event codes until a retrieved code
	 * equals the queue end marker (a value of 1 or all ones). Old codes
	 * are flushed from the queue upon module load. Not enabling this when
	 * it should be has minimal visible impact, so fall back if anything
	 * goes wrong.
	 */
wmi_uid = wmi_get_acpi_device_uid(asus->driver->event_guid);
if (wmi_uid && !strcmp(wmi_uid, ASUS_ACPI_UID_ATK)) {
dev_info(dev, "Detected ATK, enable event queue\n");
if (!asus_wmi_notify_queue_flush(asus))
asus->wmi_event_queue = true;
}
	/*
	 * CWAP defines the behavior of the Fn+F2 key; this method doesn't
	 * seem to be present on Eee PCs.
	 */
if (asus->driver->quirks->wapf >= 0)
asus_wmi_set_devstate(ASUS_WMI_DEVID_CWAP,
asus->driver->quirks->wapf, NULL);
return 0;
}
/* debugfs ********************************************************************/
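/*
 * Illustrative usage from userspace, assuming debugfs is mounted at
 * /sys/kernel/debug and the "asus-nb-wmi" driver is bound (the directory
 * name follows asus->driver->name):
 *
 *	echo 0x00010011 > /sys/kernel/debug/asus-nb-wmi/dev_id   # WLAN devid
 *	cat /sys/kernel/debug/asus-nb-wmi/dsts
 */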
struct asus_wmi_debugfs_node {
struct asus_wmi *asus;
char *name;
int (*show) (struct seq_file *m, void *data);
};
static int show_dsts(struct seq_file *m, void *data)
{
struct asus_wmi *asus = m->private;
int err;
u32 retval = -1;
err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
if (err < 0)
return err;
seq_printf(m, "DSTS(%#x) = %#x\n", asus->debug.dev_id, retval);
return 0;
}
static int show_devs(struct seq_file *m, void *data)
{
struct asus_wmi *asus = m->private;
int err;
u32 retval = -1;
err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
&retval);
if (err < 0)
return err;
seq_printf(m, "DEVS(%#x, %#x) = %#x\n", asus->debug.dev_id,
asus->debug.ctrl_param, retval);
return 0;
}
static int show_call(struct seq_file *m, void *data)
{
struct asus_wmi *asus = m->private;
struct bios_args args = {
.arg0 = asus->debug.dev_id,
.arg1 = asus->debug.ctrl_param,
};
struct acpi_buffer input = { (acpi_size) sizeof(args), &args };
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
acpi_status status;
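	/* Evaluate the raw WMI method selected via debugfs (method_id, dev_id, ctrl_param). */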
status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
0, asus->debug.method_id,
&input, &output);
if (ACPI_FAILURE(status))
return -EIO;
obj = (union acpi_object *)output.pointer;
if (obj && obj->type == ACPI_TYPE_INTEGER)
seq_printf(m, "%#x(%#x, %#x) = %#x\n", asus->debug.method_id,
asus->debug.dev_id, asus->debug.ctrl_param,
(u32) obj->integer.value);
else
seq_printf(m, "%#x(%#x, %#x) = t:%d\n", asus->debug.method_id,
asus->debug.dev_id, asus->debug.ctrl_param,
obj ? obj->type : -1);
kfree(obj);
return 0;
}
static struct asus_wmi_debugfs_node asus_wmi_debug_files[] = {
{NULL, "devs", show_devs},
{NULL, "dsts", show_dsts},
{NULL, "call", show_call},
};
static int asus_wmi_debugfs_open(struct inode *inode, struct file *file)
{
struct asus_wmi_debugfs_node *node = inode->i_private;
return single_open(file, node->show, node->asus);
}
static const struct file_operations asus_wmi_debugfs_io_ops = {
.owner = THIS_MODULE,
.open = asus_wmi_debugfs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static void asus_wmi_debugfs_exit(struct asus_wmi *asus)
{
debugfs_remove_recursive(asus->debug.root);
}
static void asus_wmi_debugfs_init(struct asus_wmi *asus)
{
int i;
asus->debug.root = debugfs_create_dir(asus->driver->name, NULL);
debugfs_create_x32("method_id", S_IRUGO | S_IWUSR, asus->debug.root,
&asus->debug.method_id);
debugfs_create_x32("dev_id", S_IRUGO | S_IWUSR, asus->debug.root,
&asus->debug.dev_id);
debugfs_create_x32("ctrl_param", S_IRUGO | S_IWUSR, asus->debug.root,
&asus->debug.ctrl_param);
for (i = 0; i < ARRAY_SIZE(asus_wmi_debug_files); i++) {
struct asus_wmi_debugfs_node *node = &asus_wmi_debug_files[i];
node->asus = asus;
debugfs_create_file(node->name, S_IFREG | S_IRUGO,
asus->debug.root, node,
&asus_wmi_debugfs_io_ops);
}
}
/* Init / exit ****************************************************************/
static int asus_wmi_add(struct platform_device *pdev)
{
struct platform_driver *pdrv = to_platform_driver(pdev->dev.driver);
struct asus_wmi_driver *wdrv = to_asus_wmi_driver(pdrv);
struct asus_wmi *asus;
acpi_status status;
int err;
u32 result;
asus = kzalloc(sizeof(struct asus_wmi), GFP_KERNEL);
if (!asus)
return -ENOMEM;
asus->driver = wdrv;
asus->platform_device = pdev;
wdrv->platform_device = pdev;
platform_set_drvdata(asus->platform_device, asus);
if (wdrv->detect_quirks)
wdrv->detect_quirks(asus->driver);
err = asus_wmi_platform_init(asus);
if (err)
goto fail_platform;
asus->charge_mode_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_CHARGE_MODE);
asus->egpu_enable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_EGPU);
asus->egpu_connect_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_EGPU_CONNECTED);
asus->dgpu_disable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_DGPU);
asus->gpu_mux_mode_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_GPU_MUX);
asus->kbd_rgb_mode_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE);
asus->kbd_rgb_state_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_STATE);
asus->ppt_pl2_sppt_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PPT_PL2_SPPT);
asus->ppt_pl1_spl_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PPT_PL1_SPL);
asus->ppt_fppt_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PPT_FPPT);
asus->ppt_apu_sppt_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PPT_APU_SPPT);
asus->ppt_plat_sppt_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PPT_PLAT_SPPT);
asus->nv_dyn_boost_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_NV_DYN_BOOST);
asus->nv_temp_tgt_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_NV_THERM_TARGET);
asus->panel_overdrive_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PANEL_OD);
asus->mini_led_mode_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_MINI_LED_MODE);
err = fan_boost_mode_check_present(asus);
if (err)
goto fail_fan_boost_mode;
err = throttle_thermal_policy_check_present(asus);
if (err)
goto fail_throttle_thermal_policy;
else
throttle_thermal_policy_set_default(asus);
err = platform_profile_setup(asus);
if (err)
goto fail_platform_profile_setup;
err = asus_wmi_sysfs_init(asus->platform_device);
if (err)
goto fail_sysfs;
err = asus_wmi_input_init(asus);
if (err)
goto fail_input;
err = asus_wmi_fan_init(asus); /* probably no problems on error */
err = asus_wmi_hwmon_init(asus);
if (err)
goto fail_hwmon;
err = asus_wmi_custom_fan_curve_init(asus);
if (err)
goto fail_custom_fan_curve;
err = asus_wmi_led_init(asus);
if (err)
goto fail_leds;
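	/*
	 * Check whether the WLAN state is under user control; if so and an
	 * ASHS ACPI device is present, firmware manages the radios and the
	 * rfkill setup below is skipped.
	 */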
asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result);
if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT))
asus->driver->wlan_ctrl_by_user = 1;
if (!(asus->driver->wlan_ctrl_by_user && ashs_present())) {
err = asus_wmi_rfkill_init(asus);
if (err)
goto fail_rfkill;
}
if (asus->driver->quirks->wmi_force_als_set)
asus_wmi_set_als();
if (asus->driver->quirks->xusb2pr)
asus_wmi_set_xusb2pr(asus);
if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
err = asus_wmi_backlight_init(asus);
if (err && err != -ENODEV)
goto fail_backlight;
} else if (asus->driver->quirks->wmi_backlight_set_devstate)
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL);
if (asus_wmi_has_fnlock_key(asus)) {
asus->fnlock_locked = fnlock_default;
asus_wmi_fnlock_update(asus);
}
status = wmi_install_notify_handler(asus->driver->event_guid,
asus_wmi_notify, asus);
if (ACPI_FAILURE(status)) {
pr_err("Unable to register notify handler - %d\n", status);
err = -ENODEV;
goto fail_wmi_handler;
}
asus_wmi_battery_init(asus);
asus_wmi_debugfs_init(asus);
return 0;
fail_wmi_handler:
asus_wmi_backlight_exit(asus);
fail_backlight:
asus_wmi_rfkill_exit(asus);
fail_rfkill:
asus_wmi_led_exit(asus);
fail_leds:
fail_hwmon:
asus_wmi_input_exit(asus);
fail_input:
asus_wmi_sysfs_exit(asus->platform_device);
fail_sysfs:
fail_throttle_thermal_policy:
fail_custom_fan_curve:
fail_platform_profile_setup:
if (asus->platform_profile_support)
platform_profile_remove();
fail_fan_boost_mode:
fail_platform:
kfree(asus);
return err;
}
static int asus_wmi_remove(struct platform_device *device)
{
struct asus_wmi *asus;
asus = platform_get_drvdata(device);
wmi_remove_notify_handler(asus->driver->event_guid);
asus_wmi_backlight_exit(asus);
asus_wmi_input_exit(asus);
asus_wmi_led_exit(asus);
asus_wmi_rfkill_exit(asus);
asus_wmi_debugfs_exit(asus);
asus_wmi_sysfs_exit(asus->platform_device);
asus_fan_set_auto(asus);
throttle_thermal_policy_set_default(asus);
asus_wmi_battery_exit(asus);
if (asus->platform_profile_support)
platform_profile_remove();
kfree(asus);
return 0;
}
/* Platform driver - hibernate/resume callbacks *******************************/
static int asus_hotk_thaw(struct device *device)
{
struct asus_wmi *asus = dev_get_drvdata(device);
if (asus->wlan.rfkill) {
bool wlan;
/*
* Work around bios bug - acpi _PTS turns off the wireless led
* during suspend. Normally it restores it on resume, but
* we should kick it ourselves in case hibernation is aborted.
*/
wlan = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WLAN);
asus_wmi_set_devstate(ASUS_WMI_DEVID_WLAN, wlan, NULL);
}
return 0;
}
static int asus_hotk_resume(struct device *device)
{
struct asus_wmi *asus = dev_get_drvdata(device);
if (!IS_ERR_OR_NULL(asus->kbd_led.dev))
kbd_led_update(asus);
if (asus_wmi_has_fnlock_key(asus))
asus_wmi_fnlock_update(asus);
asus_wmi_tablet_mode_get_state(asus);
return 0;
}
static int asus_hotk_restore(struct device *device)
{
struct asus_wmi *asus = dev_get_drvdata(device);
int bl;
/* Refresh both wlan rfkill state and pci hotplug */
if (asus->wlan.rfkill)
asus_rfkill_hotplug(asus);
if (asus->bluetooth.rfkill) {
bl = !asus_wmi_get_devstate_simple(asus,
ASUS_WMI_DEVID_BLUETOOTH);
rfkill_set_sw_state(asus->bluetooth.rfkill, bl);
}
if (asus->wimax.rfkill) {
bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WIMAX);
rfkill_set_sw_state(asus->wimax.rfkill, bl);
}
if (asus->wwan3g.rfkill) {
bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WWAN3G);
rfkill_set_sw_state(asus->wwan3g.rfkill, bl);
}
if (asus->gps.rfkill) {
bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPS);
rfkill_set_sw_state(asus->gps.rfkill, bl);
}
if (asus->uwb.rfkill) {
bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_UWB);
rfkill_set_sw_state(asus->uwb.rfkill, bl);
}
if (!IS_ERR_OR_NULL(asus->kbd_led.dev))
kbd_led_update(asus);
if (asus_wmi_has_fnlock_key(asus))
asus_wmi_fnlock_update(asus);
asus_wmi_tablet_mode_get_state(asus);
return 0;
}
static const struct dev_pm_ops asus_pm_ops = {
.thaw = asus_hotk_thaw,
.restore = asus_hotk_restore,
.resume = asus_hotk_resume,
};
/* Registration ***************************************************************/
static int asus_wmi_probe(struct platform_device *pdev)
{
struct platform_driver *pdrv = to_platform_driver(pdev->dev.driver);
struct asus_wmi_driver *wdrv = to_asus_wmi_driver(pdrv);
int ret;
if (!wmi_has_guid(ASUS_WMI_MGMT_GUID)) {
pr_warn("ASUS Management GUID not found\n");
return -ENODEV;
}
if (wdrv->event_guid && !wmi_has_guid(wdrv->event_guid)) {
pr_warn("ASUS Event GUID not found\n");
return -ENODEV;
}
if (wdrv->probe) {
ret = wdrv->probe(pdev);
if (ret)
return ret;
}
return asus_wmi_add(pdev);
}
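/* Only one ASUS WMI driver may be registered at a time; track that here. */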
static bool used;
int __init_or_module asus_wmi_register_driver(struct asus_wmi_driver *driver)
{
struct platform_driver *platform_driver;
struct platform_device *platform_device;
if (used)
return -EBUSY;
platform_driver = &driver->platform_driver;
platform_driver->remove = asus_wmi_remove;
platform_driver->driver.owner = driver->owner;
platform_driver->driver.name = driver->name;
platform_driver->driver.pm = &asus_pm_ops;
platform_device = platform_create_bundle(platform_driver,
asus_wmi_probe,
NULL, 0, NULL, 0);
if (IS_ERR(platform_device))
return PTR_ERR(platform_device);
used = true;
return 0;
}
EXPORT_SYMBOL_GPL(asus_wmi_register_driver);
void asus_wmi_unregister_driver(struct asus_wmi_driver *driver)
{
platform_device_unregister(driver->platform_device);
platform_driver_unregister(&driver->platform_driver);
used = false;
}
EXPORT_SYMBOL_GPL(asus_wmi_unregister_driver);
static int __init asus_wmi_init(void)
{
pr_info("ASUS WMI generic driver loaded\n");
return 0;
}
static void __exit asus_wmi_exit(void)
{
pr_info("ASUS WMI generic driver unloaded\n");
}
module_init(asus_wmi_init);
module_exit(asus_wmi_exit);
| linux-master | drivers/platform/x86/asus-wmi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* toshiba_wmi.c - Toshiba WMI Hotkey Driver
*
* Copyright (C) 2015 Azael Avalos <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/acpi.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/dmi.h>
MODULE_AUTHOR("Azael Avalos");
MODULE_DESCRIPTION("Toshiba WMI Hotkey Driver");
MODULE_LICENSE("GPL");
#define WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100"
MODULE_ALIAS("wmi:"WMI_EVENT_GUID);
static struct input_dev *toshiba_wmi_input_dev;
static const struct key_entry toshiba_wmi_keymap[] __initconst = {
/* TODO: Add keymap values once found... */
/*{ KE_KEY, 0x00, { KEY_ } },*/
{ KE_END, 0 }
};
static void toshiba_wmi_notify(u32 value, void *context)
{
struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
acpi_status status;
status = wmi_get_event_data(value, &response);
if (ACPI_FAILURE(status)) {
pr_err("Bad event status 0x%x\n", status);
return;
}
obj = (union acpi_object *)response.pointer;
if (!obj)
return;
/* TODO: Add proper checks once we have data */
pr_debug("Unknown event received, obj type %x\n", obj->type);
kfree(response.pointer);
}
static const struct dmi_system_id toshiba_wmi_dmi_table[] __initconst = {
{
.ident = "Toshiba laptop",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
},
},
{}
};
static int __init toshiba_wmi_input_setup(void)
{
acpi_status status;
int err;
toshiba_wmi_input_dev = input_allocate_device();
if (!toshiba_wmi_input_dev)
return -ENOMEM;
toshiba_wmi_input_dev->name = "Toshiba WMI hotkeys";
toshiba_wmi_input_dev->phys = "wmi/input0";
toshiba_wmi_input_dev->id.bustype = BUS_HOST;
err = sparse_keymap_setup(toshiba_wmi_input_dev,
toshiba_wmi_keymap, NULL);
if (err)
goto err_free_dev;
status = wmi_install_notify_handler(WMI_EVENT_GUID,
toshiba_wmi_notify, NULL);
if (ACPI_FAILURE(status)) {
err = -EIO;
goto err_free_dev;
}
err = input_register_device(toshiba_wmi_input_dev);
if (err)
goto err_remove_notifier;
return 0;
err_remove_notifier:
wmi_remove_notify_handler(WMI_EVENT_GUID);
err_free_dev:
input_free_device(toshiba_wmi_input_dev);
return err;
}
static void toshiba_wmi_input_destroy(void)
{
wmi_remove_notify_handler(WMI_EVENT_GUID);
input_unregister_device(toshiba_wmi_input_dev);
}
static int __init toshiba_wmi_init(void)
{
int ret;
if (!wmi_has_guid(WMI_EVENT_GUID) ||
!dmi_check_system(toshiba_wmi_dmi_table))
return -ENODEV;
ret = toshiba_wmi_input_setup();
if (ret) {
pr_err("Failed to setup input device\n");
return ret;
}
pr_info("Toshiba WMI Hotkey Driver\n");
return 0;
}
static void __exit toshiba_wmi_exit(void)
{
if (wmi_has_guid(WMI_EVENT_GUID))
toshiba_wmi_input_destroy();
}
module_init(toshiba_wmi_init);
module_exit(toshiba_wmi_exit);
| linux-master | drivers/platform/x86/toshiba-wmi.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* System76 ACPI Driver
*
* Copyright (C) 2023 System76
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/acpi.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/pci_ids.h>
#include <linux/power_supply.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <acpi/battery.h>
enum kbled_type {
KBLED_NONE,
KBLED_WHITE,
KBLED_RGB,
};
struct system76_data {
struct acpi_device *acpi_dev;
struct led_classdev ap_led;
struct led_classdev kb_led;
enum led_brightness kb_brightness;
enum led_brightness kb_toggle_brightness;
int kb_color;
struct device *therm;
union acpi_object *nfan;
union acpi_object *ntmp;
struct input_dev *input;
bool has_open_ec;
enum kbled_type kbled_type;
};
static const struct acpi_device_id device_ids[] = {
{"17761776", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, device_ids);
// Array of keyboard LED brightness levels
static const enum led_brightness kb_levels[] = {
48,
72,
96,
144,
192,
255
};
// Array of keyboard LED colors in 24-bit RGB format
static const int kb_colors[] = {
0xFFFFFF,
0x0000FF,
0xFF0000,
0xFF00FF,
0x00FF00,
0x00FFFF,
0xFFFF00
};
// Get a System76 ACPI device value by name
static int system76_get(struct system76_data *data, char *method)
{
acpi_handle handle;
acpi_status status;
unsigned long long ret = 0;
handle = acpi_device_handle(data->acpi_dev);
status = acpi_evaluate_integer(handle, method, NULL, &ret);
if (ACPI_SUCCESS(status))
return ret;
return -ENODEV;
}
// Get a System76 ACPI device value by name with index
static int system76_get_index(struct system76_data *data, char *method, int index)
{
union acpi_object obj;
struct acpi_object_list obj_list;
acpi_handle handle;
acpi_status status;
unsigned long long ret = 0;
obj.type = ACPI_TYPE_INTEGER;
obj.integer.value = index;
obj_list.count = 1;
obj_list.pointer = &obj;
handle = acpi_device_handle(data->acpi_dev);
status = acpi_evaluate_integer(handle, method, &obj_list, &ret);
if (ACPI_SUCCESS(status))
return ret;
return -ENODEV;
}
// Get a System76 ACPI device object by name
static int system76_get_object(struct system76_data *data, char *method, union acpi_object **obj)
{
acpi_handle handle;
acpi_status status;
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
handle = acpi_device_handle(data->acpi_dev);
status = acpi_evaluate_object(handle, method, NULL, &buf);
if (ACPI_SUCCESS(status)) {
*obj = buf.pointer;
return 0;
}
return -ENODEV;
}
// Get a name from a System76 ACPI device object
static char *system76_name(union acpi_object *obj, int index)
{
	if (obj && obj->type == ACPI_TYPE_PACKAGE && index < obj->package.count) {
if (obj->package.elements[index].type == ACPI_TYPE_STRING)
return obj->package.elements[index].string.pointer;
}
return NULL;
}
// Set a System76 ACPI device value by name
static int system76_set(struct system76_data *data, char *method, int value)
{
union acpi_object obj;
struct acpi_object_list obj_list;
acpi_handle handle;
acpi_status status;
obj.type = ACPI_TYPE_INTEGER;
obj.integer.value = value;
obj_list.count = 1;
obj_list.pointer = &obj;
handle = acpi_device_handle(data->acpi_dev);
status = acpi_evaluate_object(handle, method, &obj_list, NULL);
if (ACPI_SUCCESS(status))
return 0;
else
return -1;
}
#define BATTERY_THRESHOLD_INVALID 0xFF
enum {
THRESHOLD_START,
THRESHOLD_END,
};
static ssize_t battery_get_threshold(int which, char *buf)
{
struct acpi_object_list input;
union acpi_object param;
acpi_handle handle;
acpi_status status;
unsigned long long ret = BATTERY_THRESHOLD_INVALID;
handle = ec_get_handle();
if (!handle)
return -ENODEV;
input.count = 1;
	input.pointer = &param;
// Start/stop selection
param.type = ACPI_TYPE_INTEGER;
param.integer.value = which;
status = acpi_evaluate_integer(handle, "GBCT", &input, &ret);
if (ACPI_FAILURE(status))
return -EIO;
if (ret == BATTERY_THRESHOLD_INVALID)
return -EINVAL;
return sysfs_emit(buf, "%d\n", (int)ret);
}
static ssize_t battery_set_threshold(int which, const char *buf, size_t count)
{
struct acpi_object_list input;
union acpi_object params[2];
acpi_handle handle;
acpi_status status;
unsigned int value;
int ret;
handle = ec_get_handle();
if (!handle)
return -ENODEV;
ret = kstrtouint(buf, 10, &value);
if (ret)
return ret;
if (value > 100)
return -EINVAL;
input.count = 2;
input.pointer = params;
// Start/stop selection
params[0].type = ACPI_TYPE_INTEGER;
params[0].integer.value = which;
// Threshold value
params[1].type = ACPI_TYPE_INTEGER;
params[1].integer.value = value;
status = acpi_evaluate_object(handle, "SBCT", &input, NULL);
if (ACPI_FAILURE(status))
return -EIO;
return count;
}
static ssize_t charge_control_start_threshold_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return battery_get_threshold(THRESHOLD_START, buf);
}
static ssize_t charge_control_start_threshold_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
return battery_set_threshold(THRESHOLD_START, buf, count);
}
static DEVICE_ATTR_RW(charge_control_start_threshold);
static ssize_t charge_control_end_threshold_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return battery_get_threshold(THRESHOLD_END, buf);
}
static ssize_t charge_control_end_threshold_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
return battery_set_threshold(THRESHOLD_END, buf, count);
}
static DEVICE_ATTR_RW(charge_control_end_threshold);
static struct attribute *system76_battery_attrs[] = {
&dev_attr_charge_control_start_threshold.attr,
&dev_attr_charge_control_end_threshold.attr,
NULL,
};
ATTRIBUTE_GROUPS(system76_battery);
static int system76_battery_add(struct power_supply *battery, struct acpi_battery_hook *hook)
{
// System76 EC only supports 1 battery
if (strcmp(battery->desc->name, "BAT0") != 0)
return -ENODEV;
if (device_add_groups(&battery->dev, system76_battery_groups))
return -ENODEV;
return 0;
}
static int system76_battery_remove(struct power_supply *battery, struct acpi_battery_hook *hook)
{
device_remove_groups(&battery->dev, system76_battery_groups);
return 0;
}
static struct acpi_battery_hook system76_battery_hook = {
.add_battery = system76_battery_add,
.remove_battery = system76_battery_remove,
.name = "System76 Battery Extension",
};
static void system76_battery_init(void)
{
battery_hook_register(&system76_battery_hook);
}
static void system76_battery_exit(void)
{
battery_hook_unregister(&system76_battery_hook);
}
// Get the airplane mode LED brightness
static enum led_brightness ap_led_get(struct led_classdev *led)
{
struct system76_data *data;
int value;
data = container_of(led, struct system76_data, ap_led);
value = system76_get(data, "GAPL");
if (value > 0)
return (enum led_brightness)value;
else
return LED_OFF;
}
// Set the airplane mode LED brightness
static int ap_led_set(struct led_classdev *led, enum led_brightness value)
{
struct system76_data *data;
data = container_of(led, struct system76_data, ap_led);
return system76_set(data, "SAPL", value == LED_OFF ? 0 : 1);
}
// Get the last set keyboard LED brightness
static enum led_brightness kb_led_get(struct led_classdev *led)
{
struct system76_data *data;
data = container_of(led, struct system76_data, kb_led);
return data->kb_brightness;
}
// Set the keyboard LED brightness
static int kb_led_set(struct led_classdev *led, enum led_brightness value)
{
struct system76_data *data;
data = container_of(led, struct system76_data, kb_led);
data->kb_brightness = value;
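	// Firmware with GKBK exposes SKBB for brightness; older firmware uses SKBL.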
if (acpi_has_method(acpi_device_handle(data->acpi_dev), "GKBK")) {
return system76_set(data, "SKBB", (int)data->kb_brightness);
} else {
return system76_set(data, "SKBL", (int)data->kb_brightness);
}
}
// Get the last set keyboard LED color
static ssize_t kb_led_color_show(
struct device *dev,
struct device_attribute *dev_attr,
char *buf)
{
struct led_classdev *led;
struct system76_data *data;
led = dev_get_drvdata(dev);
data = container_of(led, struct system76_data, kb_led);
return sysfs_emit(buf, "%06X\n", data->kb_color);
}
// Set the keyboard LED color
static ssize_t kb_led_color_store(
struct device *dev,
struct device_attribute *dev_attr,
const char *buf,
size_t size)
{
struct led_classdev *led;
struct system76_data *data;
unsigned int val;
int ret;
led = dev_get_drvdata(dev);
data = container_of(led, struct system76_data, kb_led);
ret = kstrtouint(buf, 16, &val);
if (ret)
return ret;
if (val > 0xFFFFFF)
return -EINVAL;
data->kb_color = (int)val;
system76_set(data, "SKBC", data->kb_color);
return size;
}
static struct device_attribute dev_attr_kb_led_color = {
.attr = {
.name = "color",
.mode = 0644,
},
.show = kb_led_color_show,
.store = kb_led_color_store,
};
static struct attribute *system76_kb_led_color_attrs[] = {
&dev_attr_kb_led_color.attr,
NULL,
};
ATTRIBUTE_GROUPS(system76_kb_led_color);
// Notify that the keyboard LED was changed by hardware
static void kb_led_notify(struct system76_data *data)
{
led_classdev_notify_brightness_hw_changed(
&data->kb_led,
data->kb_brightness
);
}
// Read keyboard LED brightness as set by hardware
static void kb_led_hotkey_hardware(struct system76_data *data)
{
int value;
if (acpi_has_method(acpi_device_handle(data->acpi_dev), "GKBK")) {
value = system76_get(data, "GKBB");
} else {
value = system76_get(data, "GKBL");
}
if (value < 0)
return;
data->kb_brightness = value;
kb_led_notify(data);
}
// Toggle the keyboard LED
static void kb_led_hotkey_toggle(struct system76_data *data)
{
if (data->kb_brightness > 0) {
data->kb_toggle_brightness = data->kb_brightness;
kb_led_set(&data->kb_led, 0);
} else {
kb_led_set(&data->kb_led, data->kb_toggle_brightness);
}
kb_led_notify(data);
}
// Decrease the keyboard LED brightness
static void kb_led_hotkey_down(struct system76_data *data)
{
int i;
if (data->kb_brightness > 0) {
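		// Step down to the highest predefined level below the current brightness.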
for (i = ARRAY_SIZE(kb_levels); i > 0; i--) {
if (kb_levels[i - 1] < data->kb_brightness) {
kb_led_set(&data->kb_led, kb_levels[i - 1]);
break;
}
}
} else {
kb_led_set(&data->kb_led, data->kb_toggle_brightness);
}
kb_led_notify(data);
}
// Increase the keyboard LED brightness
static void kb_led_hotkey_up(struct system76_data *data)
{
int i;
if (data->kb_brightness > 0) {
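		// Step up to the lowest predefined level above the current brightness.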
for (i = 0; i < ARRAY_SIZE(kb_levels); i++) {
if (kb_levels[i] > data->kb_brightness) {
kb_led_set(&data->kb_led, kb_levels[i]);
break;
}
}
} else {
kb_led_set(&data->kb_led, data->kb_toggle_brightness);
}
kb_led_notify(data);
}
// Cycle the keyboard LED color
static void kb_led_hotkey_color(struct system76_data *data)
{
int i;
if (data->kbled_type != KBLED_RGB)
return;
if (data->kb_brightness > 0) {
for (i = 0; i < ARRAY_SIZE(kb_colors); i++) {
if (kb_colors[i] == data->kb_color)
break;
}
i += 1;
if (i >= ARRAY_SIZE(kb_colors))
i = 0;
data->kb_color = kb_colors[i];
system76_set(data, "SKBC", data->kb_color);
} else {
kb_led_set(&data->kb_led, data->kb_toggle_brightness);
}
kb_led_notify(data);
}
static umode_t thermal_is_visible(const void *drvdata, enum hwmon_sensor_types type,
u32 attr, int channel)
{
const struct system76_data *data = drvdata;
switch (type) {
case hwmon_fan:
case hwmon_pwm:
if (system76_name(data->nfan, channel))
return 0444;
break;
case hwmon_temp:
if (system76_name(data->ntmp, channel))
return 0444;
break;
default:
return 0;
}
return 0;
}
static int thermal_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
int channel, long *val)
{
struct system76_data *data = dev_get_drvdata(dev);
int raw;
switch (type) {
case hwmon_fan:
if (attr == hwmon_fan_input) {
raw = system76_get_index(data, "GFAN", channel);
if (raw < 0)
return raw;
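			// GFAN packs the PWM duty in the low byte and the fan speed (RPM) in bits 8..23.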
*val = (raw >> 8) & 0xFFFF;
return 0;
}
break;
case hwmon_pwm:
if (attr == hwmon_pwm_input) {
raw = system76_get_index(data, "GFAN", channel);
if (raw < 0)
return raw;
*val = raw & 0xFF;
return 0;
}
break;
case hwmon_temp:
if (attr == hwmon_temp_input) {
raw = system76_get_index(data, "GTMP", channel);
if (raw < 0)
return raw;
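			// GTMP reports whole degrees Celsius; hwmon expects millidegrees.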
*val = raw * 1000;
return 0;
}
break;
default:
return -EOPNOTSUPP;
}
return -EOPNOTSUPP;
}
static int thermal_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr,
int channel, const char **str)
{
struct system76_data *data = dev_get_drvdata(dev);
switch (type) {
case hwmon_fan:
if (attr == hwmon_fan_label) {
*str = system76_name(data->nfan, channel);
if (*str)
return 0;
}
break;
case hwmon_temp:
if (attr == hwmon_temp_label) {
*str = system76_name(data->ntmp, channel);
if (*str)
return 0;
}
break;
default:
return -EOPNOTSUPP;
}
return -EOPNOTSUPP;
}
static const struct hwmon_ops thermal_ops = {
.is_visible = thermal_is_visible,
.read = thermal_read,
.read_string = thermal_read_string,
};
// Allocate up to 8 fans and temperatures
static const struct hwmon_channel_info * const thermal_channel_info[] = {
HWMON_CHANNEL_INFO(fan,
HWMON_F_INPUT | HWMON_F_LABEL,
HWMON_F_INPUT | HWMON_F_LABEL,
HWMON_F_INPUT | HWMON_F_LABEL,
HWMON_F_INPUT | HWMON_F_LABEL,
HWMON_F_INPUT | HWMON_F_LABEL,
HWMON_F_INPUT | HWMON_F_LABEL,
HWMON_F_INPUT | HWMON_F_LABEL,
HWMON_F_INPUT | HWMON_F_LABEL),
HWMON_CHANNEL_INFO(pwm,
HWMON_PWM_INPUT,
HWMON_PWM_INPUT,
HWMON_PWM_INPUT,
HWMON_PWM_INPUT,
HWMON_PWM_INPUT,
HWMON_PWM_INPUT,
HWMON_PWM_INPUT,
HWMON_PWM_INPUT),
HWMON_CHANNEL_INFO(temp,
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL),
NULL
};
static const struct hwmon_chip_info thermal_chip_info = {
.ops = &thermal_ops,
.info = thermal_channel_info,
};
static void input_key(struct system76_data *data, unsigned int code)
{
input_report_key(data->input, code, 1);
input_sync(data->input);
input_report_key(data->input, code, 0);
input_sync(data->input);
}
// Handle ACPI notification
static void system76_notify(struct acpi_device *acpi_dev, u32 event)
{
struct system76_data *data;
data = acpi_driver_data(acpi_dev);
switch (event) {
case 0x80:
kb_led_hotkey_hardware(data);
break;
case 0x81:
kb_led_hotkey_toggle(data);
break;
case 0x82:
kb_led_hotkey_down(data);
break;
case 0x83:
kb_led_hotkey_up(data);
break;
case 0x84:
kb_led_hotkey_color(data);
break;
case 0x85:
input_key(data, KEY_SCREENLOCK);
break;
}
}
// Add a System76 ACPI device
static int system76_add(struct acpi_device *acpi_dev)
{
struct system76_data *data;
int err;
data = devm_kzalloc(&acpi_dev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
acpi_dev->driver_data = data;
data->acpi_dev = acpi_dev;
// Some models do not run open EC firmware. Check for an ACPI method
// that only exists on open EC to guard functionality specific to it.
data->has_open_ec = acpi_has_method(acpi_device_handle(data->acpi_dev), "NFAN");
err = system76_get(data, "INIT");
if (err)
return err;
data->ap_led.name = "system76_acpi::airplane";
data->ap_led.flags = LED_CORE_SUSPENDRESUME;
data->ap_led.brightness_get = ap_led_get;
data->ap_led.brightness_set_blocking = ap_led_set;
data->ap_led.max_brightness = 1;
data->ap_led.default_trigger = "rfkill-none";
err = devm_led_classdev_register(&acpi_dev->dev, &data->ap_led);
if (err)
return err;
data->kb_led.name = "system76_acpi::kbd_backlight";
data->kb_led.flags = LED_BRIGHT_HW_CHANGED | LED_CORE_SUSPENDRESUME;
data->kb_led.brightness_get = kb_led_get;
data->kb_led.brightness_set_blocking = kb_led_set;
if (acpi_has_method(acpi_device_handle(data->acpi_dev), "GKBK")) {
// Use the new ACPI methods
data->kbled_type = system76_get(data, "GKBK");
switch (data->kbled_type) {
case KBLED_NONE:
// Nothing to do: Device will not be registered.
break;
case KBLED_WHITE:
data->kb_led.max_brightness = 255;
data->kb_toggle_brightness = 72;
break;
case KBLED_RGB:
data->kb_led.max_brightness = 255;
data->kb_led.groups = system76_kb_led_color_groups;
data->kb_toggle_brightness = 72;
data->kb_color = 0xffffff;
system76_set(data, "SKBC", data->kb_color);
break;
}
} else {
// Use the old ACPI methods
if (acpi_has_method(acpi_device_handle(data->acpi_dev), "SKBC")) {
data->kbled_type = KBLED_RGB;
data->kb_led.max_brightness = 255;
data->kb_led.groups = system76_kb_led_color_groups;
data->kb_toggle_brightness = 72;
data->kb_color = 0xffffff;
system76_set(data, "SKBC", data->kb_color);
} else {
data->kbled_type = KBLED_WHITE;
data->kb_led.max_brightness = 5;
}
}
if (data->kbled_type != KBLED_NONE) {
err = devm_led_classdev_register(&acpi_dev->dev, &data->kb_led);
if (err)
return err;
}
data->input = devm_input_allocate_device(&acpi_dev->dev);
if (!data->input)
return -ENOMEM;
data->input->name = "System76 ACPI Hotkeys";
data->input->phys = "system76_acpi/input0";
data->input->id.bustype = BUS_HOST;
data->input->dev.parent = &acpi_dev->dev;
input_set_capability(data->input, EV_KEY, KEY_SCREENLOCK);
err = input_register_device(data->input);
if (err)
goto error;
if (data->has_open_ec) {
err = system76_get_object(data, "NFAN", &data->nfan);
if (err)
goto error;
err = system76_get_object(data, "NTMP", &data->ntmp);
if (err)
goto error;
data->therm = devm_hwmon_device_register_with_info(&acpi_dev->dev,
"system76_acpi", data, &thermal_chip_info, NULL);
err = PTR_ERR_OR_ZERO(data->therm);
if (err)
goto error;
system76_battery_init();
}
return 0;
error:
if (data->has_open_ec) {
kfree(data->ntmp);
kfree(data->nfan);
}
return err;
}
// Remove a System76 ACPI device
static void system76_remove(struct acpi_device *acpi_dev)
{
struct system76_data *data;
data = acpi_driver_data(acpi_dev);
if (data->has_open_ec) {
system76_battery_exit();
kfree(data->nfan);
kfree(data->ntmp);
}
devm_led_classdev_unregister(&acpi_dev->dev, &data->ap_led);
devm_led_classdev_unregister(&acpi_dev->dev, &data->kb_led);
system76_get(data, "FINI");
}
static struct acpi_driver system76_driver = {
.name = "System76 ACPI Driver",
.class = "hotkey",
.ids = device_ids,
.ops = {
.add = system76_add,
.remove = system76_remove,
.notify = system76_notify,
},
};
module_acpi_driver(system76_driver);
MODULE_DESCRIPTION("System76 ACPI Driver");
MODULE_AUTHOR("Jeremy Soller <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/x86/system76_acpi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Platform driver for Lenovo Yoga Book YB1-X90F/L tablets (Android model)
* WMI driver for Lenovo Yoga Book YB1-X91F/L tablets (Windows model)
*
* The keyboard half of the YB1 models can function as both a capacitive
* touch keyboard or as a Wacom digitizer, but not at the same time.
*
* This driver takes care of switching between the 2 functions.
*
* Copyright 2023 Hans de Goede <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/machine.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/wmi.h>
#include <linux/workqueue.h>
#define YB_MBTN_EVENT_GUID "243FEC1D-1963-41C1-8100-06A9D82A94B4"
#define YB_KBD_BL_DEFAULT 128
#define YB_KBD_BL_MAX 255
#define YB_KBD_BL_PWM_PERIOD 13333
#define YB_PDEV_NAME "yogabook-touch-kbd-digitizer-switch"
/* flags */
enum {
YB_KBD_IS_ON,
YB_DIGITIZER_IS_ON,
YB_DIGITIZER_MODE,
YB_TABLET_MODE,
YB_SUSPENDED,
};
struct yogabook_data {
struct device *dev;
struct acpi_device *kbd_adev;
struct acpi_device *dig_adev;
struct device *kbd_dev;
struct device *dig_dev;
struct led_classdev *pen_led;
struct gpio_desc *pen_touch_event;
struct gpio_desc *kbd_bl_led_enable;
struct gpio_desc *backside_hall_gpio;
struct pwm_device *kbd_bl_pwm;
int (*set_kbd_backlight)(struct yogabook_data *data, uint8_t level);
int pen_touch_irq;
int backside_hall_irq;
struct work_struct work;
struct led_classdev kbd_bl_led;
unsigned long flags;
uint8_t brightness;
};
static void yogabook_work(struct work_struct *work)
{
struct yogabook_data *data = container_of(work, struct yogabook_data, work);
bool kbd_on, digitizer_on;
int r;
if (test_bit(YB_SUSPENDED, &data->flags))
return;
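	/*
	 * Resolve the desired state: tablet mode turns both halves off;
	 * otherwise exactly one of keyboard or digitizer is active.
	 */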
if (test_bit(YB_TABLET_MODE, &data->flags)) {
kbd_on = false;
digitizer_on = false;
} else if (test_bit(YB_DIGITIZER_MODE, &data->flags)) {
digitizer_on = true;
kbd_on = false;
} else {
kbd_on = true;
digitizer_on = false;
}
if (!kbd_on && test_bit(YB_KBD_IS_ON, &data->flags)) {
/*
* Must be done before releasing the keyboard touchscreen driver,
* so that the keyboard touchscreen dev is still in D0.
*/
data->set_kbd_backlight(data, 0);
device_release_driver(data->kbd_dev);
clear_bit(YB_KBD_IS_ON, &data->flags);
}
if (!digitizer_on && test_bit(YB_DIGITIZER_IS_ON, &data->flags)) {
led_set_brightness(data->pen_led, LED_OFF);
device_release_driver(data->dig_dev);
clear_bit(YB_DIGITIZER_IS_ON, &data->flags);
}
if (kbd_on && !test_bit(YB_KBD_IS_ON, &data->flags)) {
r = device_reprobe(data->kbd_dev);
if (r)
dev_warn(data->dev, "Reprobe of keyboard touchscreen failed: %d\n", r);
data->set_kbd_backlight(data, data->brightness);
set_bit(YB_KBD_IS_ON, &data->flags);
}
if (digitizer_on && !test_bit(YB_DIGITIZER_IS_ON, &data->flags)) {
r = device_reprobe(data->dig_dev);
if (r)
dev_warn(data->dev, "Reprobe of digitizer failed: %d\n", r);
led_set_brightness(data->pen_led, LED_FULL);
set_bit(YB_DIGITIZER_IS_ON, &data->flags);
}
}
static void yogabook_toggle_digitizer_mode(struct yogabook_data *data)
{
if (test_bit(YB_SUSPENDED, &data->flags))
return;
if (test_bit(YB_DIGITIZER_MODE, &data->flags))
clear_bit(YB_DIGITIZER_MODE, &data->flags);
else
set_bit(YB_DIGITIZER_MODE, &data->flags);
	/*
	 * We are called from the ACPI core, and the driver [un]binding done
	 * here also needs ACPI functions; use a workqueue to avoid
	 * deadlocking.
	 */
schedule_work(&data->work);
}
static irqreturn_t yogabook_backside_hall_irq(int irq, void *_data)
{
struct yogabook_data *data = _data;
if (gpiod_get_value(data->backside_hall_gpio))
set_bit(YB_TABLET_MODE, &data->flags);
else
clear_bit(YB_TABLET_MODE, &data->flags);
schedule_work(&data->work);
return IRQ_HANDLED;
}
#define kbd_led_to_yogabook(cdev) container_of(cdev, struct yogabook_data, kbd_bl_led)
static enum led_brightness kbd_brightness_get(struct led_classdev *cdev)
{
struct yogabook_data *data = kbd_led_to_yogabook(cdev);
return data->brightness;
}
static int kbd_brightness_set(struct led_classdev *cdev,
enum led_brightness value)
{
struct yogabook_data *data = kbd_led_to_yogabook(cdev);
if ((value < 0) || (value > YB_KBD_BL_MAX))
return -EINVAL;
data->brightness = value;
if (!test_bit(YB_KBD_IS_ON, &data->flags))
return 0;
return data->set_kbd_backlight(data, data->brightness);
}
static struct gpiod_lookup_table yogabook_gpios = {
.table = {
GPIO_LOOKUP("INT33FF:02", 18, "backside_hall_sw", GPIO_ACTIVE_LOW),
{}
},
};
static struct led_lookup_data yogabook_pen_led = {
.provider = "platform::indicator",
.con_id = "pen-icon-led",
};
static int yogabook_probe(struct device *dev, struct yogabook_data *data,
const char *kbd_bl_led_name)
{
int r;
data->dev = dev;
data->brightness = YB_KBD_BL_DEFAULT;
set_bit(YB_KBD_IS_ON, &data->flags);
set_bit(YB_DIGITIZER_IS_ON, &data->flags);
INIT_WORK(&data->work, yogabook_work);
yogabook_pen_led.dev_id = dev_name(dev);
led_add_lookup(&yogabook_pen_led);
data->pen_led = devm_led_get(dev, "pen-icon-led");
led_remove_lookup(&yogabook_pen_led);
if (IS_ERR(data->pen_led))
return dev_err_probe(dev, PTR_ERR(data->pen_led), "Getting pen icon LED\n");
yogabook_gpios.dev_id = dev_name(dev);
gpiod_add_lookup_table(&yogabook_gpios);
data->backside_hall_gpio = devm_gpiod_get(dev, "backside_hall_sw", GPIOD_IN);
gpiod_remove_lookup_table(&yogabook_gpios);
if (IS_ERR(data->backside_hall_gpio))
return dev_err_probe(dev, PTR_ERR(data->backside_hall_gpio),
"Getting backside_hall_sw GPIO\n");
r = gpiod_to_irq(data->backside_hall_gpio);
if (r < 0)
return dev_err_probe(dev, r, "Getting backside_hall_sw IRQ\n");
data->backside_hall_irq = r;
/* Set default brightness before enabling the IRQ */
data->set_kbd_backlight(data, YB_KBD_BL_DEFAULT);
r = request_irq(data->backside_hall_irq, yogabook_backside_hall_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
"backside_hall_sw", data);
if (r)
return dev_err_probe(dev, r, "Requesting backside_hall_sw IRQ\n");
schedule_work(&data->work);
data->kbd_bl_led.name = kbd_bl_led_name;
data->kbd_bl_led.brightness_set_blocking = kbd_brightness_set;
data->kbd_bl_led.brightness_get = kbd_brightness_get;
data->kbd_bl_led.max_brightness = YB_KBD_BL_MAX;
r = devm_led_classdev_register(dev, &data->kbd_bl_led);
if (r < 0) {
dev_err_probe(dev, r, "Registering backlight LED device\n");
goto error_free_irq;
}
dev_set_drvdata(dev, data);
return 0;
error_free_irq:
free_irq(data->backside_hall_irq, data);
cancel_work_sync(&data->work);
return r;
}
static void yogabook_remove(struct yogabook_data *data)
{
int r = 0;
free_irq(data->backside_hall_irq, data);
cancel_work_sync(&data->work);
if (!test_bit(YB_KBD_IS_ON, &data->flags))
r |= device_reprobe(data->kbd_dev);
if (!test_bit(YB_DIGITIZER_IS_ON, &data->flags))
r |= device_reprobe(data->dig_dev);
if (r)
dev_warn(data->dev, "Reprobe of devices failed\n");
}
static int yogabook_suspend(struct device *dev)
{
struct yogabook_data *data = dev_get_drvdata(dev);
set_bit(YB_SUSPENDED, &data->flags);
flush_work(&data->work);
if (test_bit(YB_KBD_IS_ON, &data->flags))
data->set_kbd_backlight(data, 0);
return 0;
}
static int yogabook_resume(struct device *dev)
{
struct yogabook_data *data = dev_get_drvdata(dev);
if (test_bit(YB_KBD_IS_ON, &data->flags))
data->set_kbd_backlight(data, data->brightness);
clear_bit(YB_SUSPENDED, &data->flags);
/* Check for YB_TABLET_MODE changes made during suspend */
schedule_work(&data->work);
return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(yogabook_pm_ops, yogabook_suspend, yogabook_resume);
/********** WMI driver code **********/
/*
 * To control the keyboard backlight, call the KBLC() method of the TCS1
 * ACPI device (the Goodix touchpad acts as a virtual sensor keyboard).
 */
static int yogabook_wmi_set_kbd_backlight(struct yogabook_data *data,
uint8_t level)
{
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_object_list input;
union acpi_object param;
acpi_status status;
dev_dbg(data->dev, "Set KBLC level to %u\n", level);
/* Ensure keyboard touchpad is on before we call KBLC() */
acpi_device_set_power(data->kbd_adev, ACPI_STATE_D0);
input.count = 1;
	input.pointer = &param;
param.type = ACPI_TYPE_INTEGER;
param.integer.value = YB_KBD_BL_MAX - level;
status = acpi_evaluate_object(acpi_device_handle(data->kbd_adev), "KBLC",
&input, &output);
if (ACPI_FAILURE(status)) {
dev_err(data->dev, "Failed to call KBLC method: 0x%x\n", status);
		/* Translate the ACPI status into a proper errno for the callers. */
		return -EIO;
}
kfree(output.pointer);
return 0;
}
static int yogabook_wmi_probe(struct wmi_device *wdev, const void *context)
{
struct device *dev = &wdev->dev;
struct yogabook_data *data;
int r;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
data->kbd_adev = acpi_dev_get_first_match_dev("GDIX1001", NULL, -1);
if (!data->kbd_adev)
return dev_err_probe(dev, -ENODEV, "Cannot find the touchpad device in ACPI tables\n");
data->dig_adev = acpi_dev_get_first_match_dev("WCOM0019", NULL, -1);
if (!data->dig_adev) {
r = dev_err_probe(dev, -ENODEV, "Cannot find the digitizer device in ACPI tables\n");
goto error_put_devs;
}
data->kbd_dev = get_device(acpi_get_first_physical_node(data->kbd_adev));
if (!data->kbd_dev || !data->kbd_dev->driver) {
r = -EPROBE_DEFER;
goto error_put_devs;
}
data->dig_dev = get_device(acpi_get_first_physical_node(data->dig_adev));
if (!data->dig_dev || !data->dig_dev->driver) {
r = -EPROBE_DEFER;
goto error_put_devs;
}
data->set_kbd_backlight = yogabook_wmi_set_kbd_backlight;
r = yogabook_probe(dev, data, "ybwmi::kbd_backlight");
if (r)
goto error_put_devs;
return 0;
error_put_devs:
put_device(data->dig_dev);
put_device(data->kbd_dev);
acpi_dev_put(data->dig_adev);
acpi_dev_put(data->kbd_adev);
return r;
}
static void yogabook_wmi_remove(struct wmi_device *wdev)
{
struct yogabook_data *data = dev_get_drvdata(&wdev->dev);
yogabook_remove(data);
put_device(data->dig_dev);
put_device(data->kbd_dev);
acpi_dev_put(data->dig_adev);
acpi_dev_put(data->kbd_adev);
}
static void yogabook_wmi_notify(struct wmi_device *wdev, union acpi_object *dummy)
{
yogabook_toggle_digitizer_mode(dev_get_drvdata(&wdev->dev));
}
static const struct wmi_device_id yogabook_wmi_id_table[] = {
{
.guid_string = YB_MBTN_EVENT_GUID,
},
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(wmi, yogabook_wmi_id_table);
static struct wmi_driver yogabook_wmi_driver = {
.driver = {
.name = "yogabook-wmi",
.pm = pm_sleep_ptr(&yogabook_pm_ops),
},
.no_notify_data = true,
.id_table = yogabook_wmi_id_table,
.probe = yogabook_wmi_probe,
.remove = yogabook_wmi_remove,
.notify = yogabook_wmi_notify,
};
/********** platform driver code **********/
static struct gpiod_lookup_table yogabook_pdev_gpios = {
.dev_id = YB_PDEV_NAME,
.table = {
GPIO_LOOKUP("INT33FF:00", 95, "pen_touch_event", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("INT33FF:03", 52, "enable_keyboard_led", GPIO_ACTIVE_HIGH),
{}
},
};
static int yogabook_pdev_set_kbd_backlight(struct yogabook_data *data, u8 level)
{
struct pwm_state state = {
.period = YB_KBD_BL_PWM_PERIOD,
.duty_cycle = YB_KBD_BL_PWM_PERIOD * level / YB_KBD_BL_MAX,
.enabled = level,
};
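	/* Duty scales linearly with level: 0 disables the PWM, 255 drives the full period. */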
pwm_apply_state(data->kbd_bl_pwm, &state);
gpiod_set_value(data->kbd_bl_led_enable, level ? 1 : 0);
return 0;
}
static irqreturn_t yogabook_pen_touch_irq(int irq, void *data)
{
yogabook_toggle_digitizer_mode(data);
return IRQ_HANDLED;
}
static int yogabook_pdev_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct yogabook_data *data;
int r;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
data->kbd_dev = bus_find_device_by_name(&i2c_bus_type, NULL, "i2c-goodix_ts");
if (!data->kbd_dev || !data->kbd_dev->driver) {
r = -EPROBE_DEFER;
goto error_put_devs;
}
data->dig_dev = bus_find_device_by_name(&i2c_bus_type, NULL, "i2c-wacom");
if (!data->dig_dev || !data->dig_dev->driver) {
r = -EPROBE_DEFER;
goto error_put_devs;
}
gpiod_add_lookup_table(&yogabook_pdev_gpios);
data->pen_touch_event = devm_gpiod_get(dev, "pen_touch_event", GPIOD_IN);
data->kbd_bl_led_enable = devm_gpiod_get(dev, "enable_keyboard_led", GPIOD_OUT_HIGH);
gpiod_remove_lookup_table(&yogabook_pdev_gpios);
if (IS_ERR(data->pen_touch_event)) {
r = dev_err_probe(dev, PTR_ERR(data->pen_touch_event),
"Getting pen_touch_event GPIO\n");
goto error_put_devs;
}
if (IS_ERR(data->kbd_bl_led_enable)) {
r = dev_err_probe(dev, PTR_ERR(data->kbd_bl_led_enable),
"Getting enable_keyboard_led GPIO\n");
goto error_put_devs;
}
data->kbd_bl_pwm = devm_pwm_get(dev, "pwm_soc_lpss_2");
if (IS_ERR(data->kbd_bl_pwm)) {
r = dev_err_probe(dev, PTR_ERR(data->kbd_bl_pwm),
"Getting keyboard backlight PWM\n");
goto error_put_devs;
}
r = gpiod_to_irq(data->pen_touch_event);
if (r < 0) {
dev_err_probe(dev, r, "Getting pen_touch_event IRQ\n");
goto error_put_devs;
}
data->pen_touch_irq = r;
r = request_irq(data->pen_touch_irq, yogabook_pen_touch_irq, IRQF_TRIGGER_FALLING,
"pen_touch_event", data);
if (r) {
dev_err_probe(dev, r, "Requesting pen_touch_event IRQ\n");
goto error_put_devs;
}
data->set_kbd_backlight = yogabook_pdev_set_kbd_backlight;
r = yogabook_probe(dev, data, "yogabook::kbd_backlight");
if (r)
goto error_free_irq;
return 0;
error_free_irq:
free_irq(data->pen_touch_irq, data);
cancel_work_sync(&data->work);
error_put_devs:
put_device(data->dig_dev);
put_device(data->kbd_dev);
return r;
}
static void yogabook_pdev_remove(struct platform_device *pdev)
{
struct yogabook_data *data = platform_get_drvdata(pdev);
yogabook_remove(data);
free_irq(data->pen_touch_irq, data);
cancel_work_sync(&data->work);
put_device(data->dig_dev);
put_device(data->kbd_dev);
}
static struct platform_driver yogabook_pdev_driver = {
.probe = yogabook_pdev_probe,
.remove_new = yogabook_pdev_remove,
.driver = {
.name = YB_PDEV_NAME,
.pm = pm_sleep_ptr(&yogabook_pm_ops),
},
};
static int __init yogabook_module_init(void)
{
int r;
r = wmi_driver_register(&yogabook_wmi_driver);
if (r)
return r;
r = platform_driver_register(&yogabook_pdev_driver);
if (r)
wmi_driver_unregister(&yogabook_wmi_driver);
return r;
}
static void __exit yogabook_module_exit(void)
{
platform_driver_unregister(&yogabook_pdev_driver);
wmi_driver_unregister(&yogabook_wmi_driver);
}
module_init(yogabook_module_init);
module_exit(yogabook_module_exit);
MODULE_ALIAS("platform:" YB_PDEV_NAME);
MODULE_AUTHOR("Yauhen Kharuzhy");
MODULE_DESCRIPTION("Lenovo Yoga Book driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/platform/x86/lenovo-yogabook.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Acer Wireless Radio Control Driver
*
* Copyright (C) 2017 Endless Mobile, Inc.
*/
#include <linux/acpi.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci_ids.h>
#include <linux/types.h>
static const struct acpi_device_id acer_wireless_acpi_ids[] = {
{"10251229", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, acer_wireless_acpi_ids);
static void acer_wireless_notify(struct acpi_device *adev, u32 event)
{
struct input_dev *idev = acpi_driver_data(adev);
dev_dbg(&adev->dev, "event=%#x\n", event);
if (event != 0x80) {
dev_notice(&adev->dev, "Unknown SMKB event: %#x\n", event);
return;
}
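	/* Emit a momentary KEY_RFKILL press so userspace toggles the radios. */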
input_report_key(idev, KEY_RFKILL, 1);
input_sync(idev);
input_report_key(idev, KEY_RFKILL, 0);
input_sync(idev);
}
static int acer_wireless_add(struct acpi_device *adev)
{
struct input_dev *idev;
idev = devm_input_allocate_device(&adev->dev);
if (!idev)
return -ENOMEM;
adev->driver_data = idev;
idev->name = "Acer Wireless Radio Control";
idev->phys = "acer-wireless/input0";
idev->id.bustype = BUS_HOST;
idev->id.vendor = PCI_VENDOR_ID_AI;
idev->id.product = 0x1229;
set_bit(EV_KEY, idev->evbit);
set_bit(KEY_RFKILL, idev->keybit);
return input_register_device(idev);
}
static struct acpi_driver acer_wireless_driver = {
.name = "Acer Wireless Radio Control Driver",
.class = "hotkey",
.ids = acer_wireless_acpi_ids,
.ops = {
.add = acer_wireless_add,
.notify = acer_wireless_notify,
},
};
module_acpi_driver(acer_wireless_driver);
MODULE_DESCRIPTION("Acer Wireless Radio Control Driver");
MODULE_AUTHOR("Chris Chiu <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/platform/x86/acer-wireless.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* GPD Pocket fan controller driver
*
* Copyright (C) 2017 Hans de Goede <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/devm-helpers.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/thermal.h>
#include <linux/workqueue.h>
#define MAX_SPEED 3
#define TEMP_LIMIT0_DEFAULT 55000
#define TEMP_LIMIT1_DEFAULT 60000
#define TEMP_LIMIT2_DEFAULT 65000
#define HYSTERESIS_DEFAULT 3000
#define SPEED_ON_AC_DEFAULT 2
static int temp_limits[3] = {
TEMP_LIMIT0_DEFAULT, TEMP_LIMIT1_DEFAULT, TEMP_LIMIT2_DEFAULT,
};
module_param_array(temp_limits, int, NULL, 0444);
MODULE_PARM_DESC(temp_limits,
"Millicelsius values above which the fan speed increases");
static int hysteresis = HYSTERESIS_DEFAULT;
module_param(hysteresis, int, 0444);
MODULE_PARM_DESC(hysteresis,
"Hysteresis in millicelsius before lowering the fan speed");
static int speed_on_ac = SPEED_ON_AC_DEFAULT;
module_param(speed_on_ac, int, 0444);
MODULE_PARM_DESC(speed_on_ac,
"minimum fan speed to allow when system is powered by AC");
struct gpd_pocket_fan_data {
struct device *dev;
struct thermal_zone_device *dts0;
struct thermal_zone_device *dts1;
struct gpio_desc *gpio0;
struct gpio_desc *gpio1;
struct delayed_work work;
int last_speed;
};
static void gpd_pocket_fan_set_speed(struct gpd_pocket_fan_data *fan, int speed)
{
if (speed == fan->last_speed)
return;
gpiod_direction_output(fan->gpio0, !!(speed & 1));
gpiod_direction_output(fan->gpio1, !!(speed & 2));
fan->last_speed = speed;
}
static int gpd_pocket_fan_min_speed(void)
{
if (power_supply_is_system_supplied())
return speed_on_ac;
else
return 0;
}
static void gpd_pocket_fan_worker(struct work_struct *work)
{
struct gpd_pocket_fan_data *fan =
container_of(work, struct gpd_pocket_fan_data, work.work);
int t0, t1, temp, speed, min_speed, i;
if (thermal_zone_get_temp(fan->dts0, &t0) ||
thermal_zone_get_temp(fan->dts1, &t1)) {
dev_warn(fan->dev, "Error getting temperature\n");
speed = MAX_SPEED;
goto set_speed;
}
temp = max(t0, t1);
speed = fan->last_speed;
min_speed = gpd_pocket_fan_min_speed();
/* Determine minimum speed */
for (i = min_speed; i < ARRAY_SIZE(temp_limits); i++) {
if (temp < temp_limits[i])
break;
}
if (speed < i)
speed = i;
/* Use hysteresis before lowering speed again */
for (i = min_speed; i < ARRAY_SIZE(temp_limits); i++) {
if (temp <= (temp_limits[i] - hysteresis))
break;
}
if (speed > i)
speed = i;
if (fan->last_speed <= 0 && speed)
speed = MAX_SPEED; /* kick start motor */
set_speed:
gpd_pocket_fan_set_speed(fan, speed);
/* When mostly idle (low temp/speed), slow down the poll interval. */
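	/* 4000 / (speed + 1): speed 0 polls every 4 s, MAX_SPEED every 1 s. */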
queue_delayed_work(system_wq, &fan->work,
msecs_to_jiffies(4000 / (speed + 1)));
}
static void gpd_pocket_fan_force_update(struct gpd_pocket_fan_data *fan)
{
fan->last_speed = -1;
mod_delayed_work(system_wq, &fan->work, 0);
}
static int gpd_pocket_fan_probe(struct platform_device *pdev)
{
struct gpd_pocket_fan_data *fan;
int i, ret;
for (i = 0; i < ARRAY_SIZE(temp_limits); i++) {
if (temp_limits[i] < 20000 || temp_limits[i] > 90000) {
dev_err(&pdev->dev, "Invalid temp-limit %d (must be between 20000 and 90000)\n",
temp_limits[i]);
temp_limits[0] = TEMP_LIMIT0_DEFAULT;
temp_limits[1] = TEMP_LIMIT1_DEFAULT;
temp_limits[2] = TEMP_LIMIT2_DEFAULT;
break;
}
}
if (hysteresis < 1000 || hysteresis > 10000) {
dev_err(&pdev->dev, "Invalid hysteresis %d (must be between 1000 and 10000)\n",
hysteresis);
hysteresis = HYSTERESIS_DEFAULT;
}
if (speed_on_ac < 0 || speed_on_ac > MAX_SPEED) {
dev_err(&pdev->dev, "Invalid speed_on_ac %d (must be between 0 and 3)\n",
speed_on_ac);
speed_on_ac = SPEED_ON_AC_DEFAULT;
}
fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
if (!fan)
return -ENOMEM;
fan->dev = &pdev->dev;
ret = devm_delayed_work_autocancel(&pdev->dev, &fan->work,
gpd_pocket_fan_worker);
if (ret)
return ret;
/* Note this returns a "weak" reference which we don't need to free */
fan->dts0 = thermal_zone_get_zone_by_name("soc_dts0");
if (IS_ERR(fan->dts0))
return -EPROBE_DEFER;
fan->dts1 = thermal_zone_get_zone_by_name("soc_dts1");
if (IS_ERR(fan->dts1))
return -EPROBE_DEFER;
fan->gpio0 = devm_gpiod_get_index(fan->dev, NULL, 0, GPIOD_ASIS);
if (IS_ERR(fan->gpio0))
return PTR_ERR(fan->gpio0);
fan->gpio1 = devm_gpiod_get_index(fan->dev, NULL, 1, GPIOD_ASIS);
if (IS_ERR(fan->gpio1))
return PTR_ERR(fan->gpio1);
gpd_pocket_fan_force_update(fan);
platform_set_drvdata(pdev, fan);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int gpd_pocket_fan_suspend(struct device *dev)
{
struct gpd_pocket_fan_data *fan = dev_get_drvdata(dev);
cancel_delayed_work_sync(&fan->work);
gpd_pocket_fan_set_speed(fan, gpd_pocket_fan_min_speed());
return 0;
}
static int gpd_pocket_fan_resume(struct device *dev)
{
struct gpd_pocket_fan_data *fan = dev_get_drvdata(dev);
gpd_pocket_fan_force_update(fan);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(gpd_pocket_fan_pm_ops,
gpd_pocket_fan_suspend,
gpd_pocket_fan_resume);
static struct acpi_device_id gpd_pocket_fan_acpi_match[] = {
{ "FAN02501" },
{},
};
MODULE_DEVICE_TABLE(acpi, gpd_pocket_fan_acpi_match);
static struct platform_driver gpd_pocket_fan_driver = {
.probe = gpd_pocket_fan_probe,
.driver = {
.name = "gpd_pocket_fan",
.acpi_match_table = gpd_pocket_fan_acpi_match,
.pm = &gpd_pocket_fan_pm_ops,
},
};
module_platform_driver(gpd_pocket_fan_driver);
MODULE_AUTHOR("Hans de Goede <[email protected]");
MODULE_DESCRIPTION("GPD pocket fan driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/x86/gpd-pocket-fan.c |
// SPDX-License-Identifier: GPL-2.0
/*
* adv_swbutton.c - Software Button Interface Driver.
*
* (C) Copyright 2020 Advantech Corporation, Inc
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/acpi.h>
#include <linux/platform_device.h>
#define ACPI_BUTTON_HID_SWBTN "AHC0310"
#define ACPI_BUTTON_NOTIFY_SWBTN_RELEASE 0x86
#define ACPI_BUTTON_NOTIFY_SWBTN_PRESSED 0x85
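/* The firmware notifies 0x85 on press and 0x86 on release; both are reported
 * to user space as KEY_PROG1 events (see adv_swbutton_notify()).
 */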
struct adv_swbutton {
struct input_dev *input;
char phys[32];
};
/*-------------------------------------------------------------------------
* Driver Interface
*--------------------------------------------------------------------------
*/
static void adv_swbutton_notify(acpi_handle handle, u32 event, void *context)
{
struct platform_device *device = context;
struct adv_swbutton *button = dev_get_drvdata(&device->dev);
switch (event) {
case ACPI_BUTTON_NOTIFY_SWBTN_RELEASE:
input_report_key(button->input, KEY_PROG1, 0);
input_sync(button->input);
break;
case ACPI_BUTTON_NOTIFY_SWBTN_PRESSED:
input_report_key(button->input, KEY_PROG1, 1);
input_sync(button->input);
break;
default:
dev_dbg(&device->dev, "Unsupported event [0x%x]\n", event);
}
}
static int adv_swbutton_probe(struct platform_device *device)
{
struct adv_swbutton *button;
struct input_dev *input;
acpi_handle handle = ACPI_HANDLE(&device->dev);
acpi_status status;
int error;
button = devm_kzalloc(&device->dev, sizeof(*button), GFP_KERNEL);
if (!button)
return -ENOMEM;
dev_set_drvdata(&device->dev, button);
input = devm_input_allocate_device(&device->dev);
if (!input)
return -ENOMEM;
button->input = input;
snprintf(button->phys, sizeof(button->phys), "%s/button/input0", ACPI_BUTTON_HID_SWBTN);
input->name = "Advantech Software Button";
input->phys = button->phys;
input->id.bustype = BUS_HOST;
input->dev.parent = &device->dev;
set_bit(EV_REP, input->evbit);
input_set_capability(input, EV_KEY, KEY_PROG1);
error = input_register_device(input);
if (error)
return error;
device_init_wakeup(&device->dev, true);
status = acpi_install_notify_handler(handle,
ACPI_DEVICE_NOTIFY,
adv_swbutton_notify,
device);
if (ACPI_FAILURE(status)) {
dev_err(&device->dev, "Error installing notify handler\n");
return -EIO;
}
return 0;
}
static void adv_swbutton_remove(struct platform_device *device)
{
acpi_handle handle = ACPI_HANDLE(&device->dev);
acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY,
adv_swbutton_notify);
}
static const struct acpi_device_id button_device_ids[] = {
{ACPI_BUTTON_HID_SWBTN, 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, button_device_ids);
static struct platform_driver adv_swbutton_driver = {
.driver = {
.name = "adv_swbutton",
.acpi_match_table = button_device_ids,
},
.probe = adv_swbutton_probe,
.remove_new = adv_swbutton_remove,
};
module_platform_driver(adv_swbutton_driver);
MODULE_AUTHOR("Andrea Ho");
MODULE_DESCRIPTION("Advantech ACPI SW Button Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/platform/x86/adv_swbutton.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ACPI-WMI mapping driver
*
* Copyright (C) 2007-2008 Carlos Corbacho <[email protected]>
*
* GUID parsing code from ldm.c is:
* Copyright (C) 2001,2002 Richard Russon <[email protected]>
* Copyright (c) 2001-2007 Anton Altaparmakov
* Copyright (C) 2001,2002 Jakob Kemi <[email protected]>
*
* WMI bus infrastructure by Andrew Lutomirski and Darren Hart:
* Copyright (C) 2015 Andrew Lutomirski
* Copyright (C) 2017 VMware, Inc. All Rights Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
#include <linux/bits.h>
#include <linux/build_bug.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/uuid.h>
#include <linux/wmi.h>
#include <linux/fs.h>
#include <uapi/linux/wmi.h>
MODULE_AUTHOR("Carlos Corbacho");
MODULE_DESCRIPTION("ACPI-WMI Mapping Driver");
MODULE_LICENSE("GPL");
static LIST_HEAD(wmi_block_list);
struct guid_block {
guid_t guid;
union {
char object_id[2];
struct {
unsigned char notify_id;
unsigned char reserved;
};
};
u8 instance_count;
u8 flags;
} __packed;
static_assert(sizeof(typeof_member(struct guid_block, guid)) == 16);
static_assert(sizeof(struct guid_block) == 20);
static_assert(__alignof__(struct guid_block) == 1);
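/*
 * Illustrative layout of one 20-byte _WDG entry (example values only):
 *   bytes  0-15: GUID
 *   bytes 16-17: object_id "BA" (or notify_id + reserved for event blocks)
 *   byte     18: instance_count
 *   byte     19: flags, e.g. ACPI_WMI_METHOD
 */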
enum { /* wmi_block flags */
WMI_READ_TAKES_NO_ARGS,
WMI_PROBED,
};
struct wmi_block {
struct wmi_device dev;
struct list_head list;
struct guid_block gblock;
struct miscdevice char_dev;
struct mutex char_mutex;
struct acpi_device *acpi_device;
wmi_notify_handler handler;
void *handler_data;
u64 req_buf_size;
unsigned long flags;
};
/*
* If the GUID data block is marked as expensive, we must enable and
 * explicitly disable data collection.
*/
#define ACPI_WMI_EXPENSIVE BIT(0)
#define ACPI_WMI_METHOD BIT(1) /* GUID is a method */
#define ACPI_WMI_STRING BIT(2) /* GUID takes & returns a string */
#define ACPI_WMI_EVENT BIT(3) /* GUID is an event */
static bool debug_event;
module_param(debug_event, bool, 0444);
MODULE_PARM_DESC(debug_event,
"Log WMI Events [0/1]");
static bool debug_dump_wdg;
module_param(debug_dump_wdg, bool, 0444);
MODULE_PARM_DESC(debug_dump_wdg,
"Dump available WMI interfaces [0/1]");
static const struct acpi_device_id wmi_device_ids[] = {
{"PNP0C14", 0},
{"pnp0c14", 0},
{ }
};
MODULE_DEVICE_TABLE(acpi, wmi_device_ids);
/* allow duplicate GUIDs as these device drivers use struct wmi_driver */
static const char * const allow_duplicates[] = {
"05901221-D566-11D1-B2F0-00A0C9062910", /* wmi-bmof */
"8A42EA14-4F2A-FD45-6422-0087F7A7E608", /* dell-wmi-ddv */
NULL
};
/*
* GUID parsing functions
*/
static acpi_status find_guid(const char *guid_string, struct wmi_block **out)
{
guid_t guid_input;
struct wmi_block *wblock;
if (!guid_string)
return AE_BAD_PARAMETER;
if (guid_parse(guid_string, &guid_input))
return AE_BAD_PARAMETER;
list_for_each_entry(wblock, &wmi_block_list, list) {
if (guid_equal(&wblock->gblock.guid, &guid_input)) {
if (out)
*out = wblock;
return AE_OK;
}
}
return AE_NOT_FOUND;
}
static bool guid_parse_and_compare(const char *string, const guid_t *guid)
{
guid_t guid_input;
if (guid_parse(string, &guid_input))
return false;
return guid_equal(&guid_input, guid);
}
static const void *find_guid_context(struct wmi_block *wblock,
struct wmi_driver *wdriver)
{
const struct wmi_device_id *id;
id = wdriver->id_table;
if (!id)
return NULL;
while (*id->guid_string) {
if (guid_parse_and_compare(id->guid_string, &wblock->gblock.guid))
return id->context;
id++;
}
return NULL;
}
static int get_subobj_info(acpi_handle handle, const char *pathname,
struct acpi_device_info **info)
{
struct acpi_device_info *dummy_info, **info_ptr;
acpi_handle subobj_handle;
acpi_status status;
status = acpi_get_handle(handle, (char *)pathname, &subobj_handle);
if (status == AE_NOT_FOUND)
return -ENOENT;
else if (ACPI_FAILURE(status))
return -EIO;
info_ptr = info ? info : &dummy_info;
status = acpi_get_object_info(subobj_handle, info_ptr);
if (ACPI_FAILURE(status))
return -EIO;
if (!info)
kfree(dummy_info);
return 0;
}
static acpi_status wmi_method_enable(struct wmi_block *wblock, bool enable)
{
struct guid_block *block;
char method[5];
acpi_status status;
acpi_handle handle;
block = &wblock->gblock;
handle = wblock->acpi_device->handle;
snprintf(method, 5, "WE%02X", block->notify_id);
status = acpi_execute_simple_method(handle, method, enable);
if (status == AE_NOT_FOUND)
return AE_OK;
return status;
}
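/* e.g. an event block with notify_id 0xD0 is enabled via ACPI method "WED0". */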
#define WMI_ACPI_METHOD_NAME_SIZE 5
static inline void get_acpi_method_name(const struct wmi_block *wblock,
const char method,
char buffer[static WMI_ACPI_METHOD_NAME_SIZE])
{
static_assert(ARRAY_SIZE(wblock->gblock.object_id) == 2);
static_assert(WMI_ACPI_METHOD_NAME_SIZE >= 5);
buffer[0] = 'W';
buffer[1] = method;
buffer[2] = wblock->gblock.object_id[0];
buffer[3] = wblock->gblock.object_id[1];
buffer[4] = '\0';
}
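/* e.g. object_id "BA" with method 'Q' yields the ACPI method name "WQBA". */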
static inline acpi_object_type get_param_acpi_type(const struct wmi_block *wblock)
{
if (wblock->gblock.flags & ACPI_WMI_STRING)
return ACPI_TYPE_STRING;
else
return ACPI_TYPE_BUFFER;
}
static acpi_status get_event_data(const struct wmi_block *wblock, struct acpi_buffer *out)
{
union acpi_object param = {
.integer = {
.type = ACPI_TYPE_INTEGER,
.value = wblock->gblock.notify_id,
}
};
struct acpi_object_list input = {
.count = 1,
.pointer = ¶m,
};
return acpi_evaluate_object(wblock->acpi_device->handle, "_WED", &input, out);
}
/*
* Exported WMI functions
*/
/**
* set_required_buffer_size - Sets the buffer size needed for performing IOCTL
* @wdev: A wmi bus device from a driver
* @length: Required buffer size
*
 * Stores the required buffer size; the buffer itself is allocated later in
 * wmi_dev_probe() when the driver binds.
*
* Return: 0 on success or a negative error code for failure.
*/
int set_required_buffer_size(struct wmi_device *wdev, u64 length)
{
struct wmi_block *wblock;
wblock = container_of(wdev, struct wmi_block, dev);
wblock->req_buf_size = length;
return 0;
}
EXPORT_SYMBOL_GPL(set_required_buffer_size);
/**
* wmi_instance_count - Get number of WMI object instances
* @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
*
* Get the number of WMI object instances.
*
* Returns: Number of WMI object instances or negative error code.
*/
int wmi_instance_count(const char *guid_string)
{
struct wmi_block *wblock;
acpi_status status;
status = find_guid(guid_string, &wblock);
if (ACPI_FAILURE(status)) {
if (status == AE_BAD_PARAMETER)
return -EINVAL;
return -ENODEV;
}
return wmidev_instance_count(&wblock->dev);
}
EXPORT_SYMBOL_GPL(wmi_instance_count);
/**
* wmidev_instance_count - Get number of WMI object instances
* @wdev: A wmi bus device from a driver
*
* Get the number of WMI object instances.
*
* Returns: Number of WMI object instances.
*/
u8 wmidev_instance_count(struct wmi_device *wdev)
{
struct wmi_block *wblock = container_of(wdev, struct wmi_block, dev);
return wblock->gblock.instance_count;
}
EXPORT_SYMBOL_GPL(wmidev_instance_count);
/**
* wmi_evaluate_method - Evaluate a WMI method (deprecated)
* @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
* @instance: Instance index
* @method_id: Method ID to call
* @in: Buffer containing input for the method call
* @out: Empty buffer to return the method results
*
* Call an ACPI-WMI method, the caller must free @out.
*
* Return: acpi_status signaling success or error.
*/
acpi_status wmi_evaluate_method(const char *guid_string, u8 instance, u32 method_id,
const struct acpi_buffer *in, struct acpi_buffer *out)
{
struct wmi_block *wblock = NULL;
acpi_status status;
status = find_guid(guid_string, &wblock);
if (ACPI_FAILURE(status))
return status;
return wmidev_evaluate_method(&wblock->dev, instance, method_id,
in, out);
}
EXPORT_SYMBOL_GPL(wmi_evaluate_method);
/**
* wmidev_evaluate_method - Evaluate a WMI method
* @wdev: A wmi bus device from a driver
* @instance: Instance index
* @method_id: Method ID to call
* @in: Buffer containing input for the method call
* @out: Empty buffer to return the method results
*
* Call an ACPI-WMI method, the caller must free @out.
*
* Return: acpi_status signaling success or error.
*/
acpi_status wmidev_evaluate_method(struct wmi_device *wdev, u8 instance, u32 method_id,
const struct acpi_buffer *in, struct acpi_buffer *out)
{
struct guid_block *block;
struct wmi_block *wblock;
acpi_handle handle;
struct acpi_object_list input;
union acpi_object params[3];
char method[WMI_ACPI_METHOD_NAME_SIZE];
wblock = container_of(wdev, struct wmi_block, dev);
block = &wblock->gblock;
handle = wblock->acpi_device->handle;
if (!(block->flags & ACPI_WMI_METHOD))
return AE_BAD_DATA;
if (block->instance_count <= instance)
return AE_BAD_PARAMETER;
input.count = 2;
input.pointer = params;
params[0].type = ACPI_TYPE_INTEGER;
params[0].integer.value = instance;
params[1].type = ACPI_TYPE_INTEGER;
params[1].integer.value = method_id;
if (in) {
input.count = 3;
params[2].type = get_param_acpi_type(wblock);
params[2].buffer.length = in->length;
params[2].buffer.pointer = in->pointer;
}
get_acpi_method_name(wblock, 'M', method);
return acpi_evaluate_object(handle, method, &input, out);
}
EXPORT_SYMBOL_GPL(wmidev_evaluate_method);
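/*
 * Illustrative call sequence from a WMI driver (sketch only; METHOD_ID and
 * the input layout are driver-specific placeholders):
 *
 *	struct acpi_buffer in = { sizeof(args), &args };
 *	struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
 *
 *	if (ACPI_FAILURE(wmidev_evaluate_method(wdev, 0, METHOD_ID, &in, &out)))
 *		return -EIO;
 *	...inspect out.pointer (a union acpi_object)...
 *	kfree(out.pointer);
 */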
static acpi_status __query_block(struct wmi_block *wblock, u8 instance,
struct acpi_buffer *out)
{
struct guid_block *block;
acpi_handle handle;
acpi_status status, wc_status = AE_ERROR;
struct acpi_object_list input;
union acpi_object wq_params[1];
char wc_method[WMI_ACPI_METHOD_NAME_SIZE];
char method[WMI_ACPI_METHOD_NAME_SIZE];
if (!out)
return AE_BAD_PARAMETER;
block = &wblock->gblock;
handle = wblock->acpi_device->handle;
if (block->instance_count <= instance)
return AE_BAD_PARAMETER;
/* Check GUID is a data block */
if (block->flags & (ACPI_WMI_EVENT | ACPI_WMI_METHOD))
return AE_ERROR;
input.count = 1;
input.pointer = wq_params;
wq_params[0].type = ACPI_TYPE_INTEGER;
wq_params[0].integer.value = instance;
if (instance == 0 && test_bit(WMI_READ_TAKES_NO_ARGS, &wblock->flags))
input.count = 0;
/*
* If ACPI_WMI_EXPENSIVE, call the relevant WCxx method first to
* enable collection.
*/
if (block->flags & ACPI_WMI_EXPENSIVE) {
get_acpi_method_name(wblock, 'C', wc_method);
/*
* Some GUIDs break the specification by declaring themselves
* expensive, but have no corresponding WCxx method. So we
* should not fail if this happens.
*/
wc_status = acpi_execute_simple_method(handle, wc_method, 1);
}
get_acpi_method_name(wblock, 'Q', method);
status = acpi_evaluate_object(handle, method, &input, out);
/*
* If ACPI_WMI_EXPENSIVE, call the relevant WCxx method, even if
* the WQxx method failed - we should disable collection anyway.
*/
if ((block->flags & ACPI_WMI_EXPENSIVE) && ACPI_SUCCESS(wc_status)) {
/*
* Ignore whether this WCxx call succeeds or not since
* the previously executed WQxx method call might have
* succeeded, and returning the failing status code
* of this call would throw away the result of the WQxx
* call, potentially leaking memory.
*/
acpi_execute_simple_method(handle, wc_method, 0);
}
return status;
}
/**
* wmi_query_block - Return contents of a WMI block (deprecated)
* @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
* @instance: Instance index
* @out: Empty buffer to return the contents of the data block to
*
 * Query an ACPI-WMI block, the caller must free @out.
*
 * Return: acpi_status signaling success or error.
*/
acpi_status wmi_query_block(const char *guid_string, u8 instance,
struct acpi_buffer *out)
{
struct wmi_block *wblock;
acpi_status status;
status = find_guid(guid_string, &wblock);
if (ACPI_FAILURE(status))
return status;
return __query_block(wblock, instance, out);
}
EXPORT_SYMBOL_GPL(wmi_query_block);
/**
* wmidev_block_query - Return contents of a WMI block
* @wdev: A wmi bus device from a driver
* @instance: Instance index
*
* Query an ACPI-WMI block, the caller must free the result.
*
* Return: ACPI object containing the content of the WMI block.
*/
union acpi_object *wmidev_block_query(struct wmi_device *wdev, u8 instance)
{
struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
struct wmi_block *wblock = container_of(wdev, struct wmi_block, dev);
if (ACPI_FAILURE(__query_block(wblock, instance, &out)))
return NULL;
return out.pointer;
}
EXPORT_SYMBOL_GPL(wmidev_block_query);
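/*
 * Illustrative use (sketch) - the returned object must be freed by the caller:
 *
 *	union acpi_object *obj = wmidev_block_query(wdev, 0);
 *
 *	if (!obj)
 *		return -EIO;
 *	if (obj->type == ACPI_TYPE_BUFFER)
 *		...consume obj->buffer.pointer / obj->buffer.length...
 *	kfree(obj);
 */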
/**
* wmi_set_block - Write to a WMI block (deprecated)
* @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
* @instance: Instance index
* @in: Buffer containing new values for the data block
*
* Write the contents of the input buffer to an ACPI-WMI data block.
*
* Return: acpi_status signaling success or error.
*/
acpi_status wmi_set_block(const char *guid_string, u8 instance,
const struct acpi_buffer *in)
{
struct wmi_block *wblock = NULL;
struct guid_block *block;
acpi_handle handle;
struct acpi_object_list input;
union acpi_object params[2];
char method[WMI_ACPI_METHOD_NAME_SIZE];
acpi_status status;
if (!in)
return AE_BAD_DATA;
status = find_guid(guid_string, &wblock);
if (ACPI_FAILURE(status))
return status;
block = &wblock->gblock;
handle = wblock->acpi_device->handle;
if (block->instance_count <= instance)
return AE_BAD_PARAMETER;
/* Check GUID is a data block */
if (block->flags & (ACPI_WMI_EVENT | ACPI_WMI_METHOD))
return AE_ERROR;
input.count = 2;
input.pointer = params;
params[0].type = ACPI_TYPE_INTEGER;
params[0].integer.value = instance;
params[1].type = get_param_acpi_type(wblock);
params[1].buffer.length = in->length;
params[1].buffer.pointer = in->pointer;
get_acpi_method_name(wblock, 'S', method);
return acpi_evaluate_object(handle, method, &input, NULL);
}
EXPORT_SYMBOL_GPL(wmi_set_block);
static void wmi_dump_wdg(const struct guid_block *g)
{
pr_info("%pUL:\n", &g->guid);
if (g->flags & ACPI_WMI_EVENT)
pr_info("\tnotify_id: 0x%02X\n", g->notify_id);
else
pr_info("\tobject_id: %2pE\n", g->object_id);
pr_info("\tinstance_count: %d\n", g->instance_count);
pr_info("\tflags: %#x", g->flags);
if (g->flags) {
if (g->flags & ACPI_WMI_EXPENSIVE)
pr_cont(" ACPI_WMI_EXPENSIVE");
if (g->flags & ACPI_WMI_METHOD)
pr_cont(" ACPI_WMI_METHOD");
if (g->flags & ACPI_WMI_STRING)
pr_cont(" ACPI_WMI_STRING");
if (g->flags & ACPI_WMI_EVENT)
pr_cont(" ACPI_WMI_EVENT");
}
pr_cont("\n");
}
static void wmi_notify_debug(u32 value, void *context)
{
struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
acpi_status status;
status = wmi_get_event_data(value, &response);
if (status != AE_OK) {
pr_info("bad event status 0x%x\n", status);
return;
}
obj = response.pointer;
if (!obj)
return;
pr_info("DEBUG: event 0x%02X ", value);
switch (obj->type) {
case ACPI_TYPE_BUFFER:
pr_cont("BUFFER_TYPE - length %u\n", obj->buffer.length);
break;
case ACPI_TYPE_STRING:
pr_cont("STRING_TYPE - %s\n", obj->string.pointer);
break;
case ACPI_TYPE_INTEGER:
pr_cont("INTEGER_TYPE - %llu\n", obj->integer.value);
break;
case ACPI_TYPE_PACKAGE:
pr_cont("PACKAGE_TYPE - %u elements\n", obj->package.count);
break;
default:
pr_cont("object type 0x%X\n", obj->type);
}
kfree(obj);
}
/**
* wmi_install_notify_handler - Register handler for WMI events (deprecated)
* @guid: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
* @handler: Function to handle notifications
* @data: Data to be returned to handler when event is fired
*
* Register a handler for events sent to the ACPI-WMI mapper device.
*
* Return: acpi_status signaling success or error.
*/
acpi_status wmi_install_notify_handler(const char *guid,
wmi_notify_handler handler,
void *data)
{
struct wmi_block *block;
acpi_status status = AE_NOT_EXIST;
guid_t guid_input;
if (!guid || !handler)
return AE_BAD_PARAMETER;
if (guid_parse(guid, &guid_input))
return AE_BAD_PARAMETER;
list_for_each_entry(block, &wmi_block_list, list) {
acpi_status wmi_status;
if (guid_equal(&block->gblock.guid, &guid_input)) {
if (block->handler &&
block->handler != wmi_notify_debug)
return AE_ALREADY_ACQUIRED;
block->handler = handler;
block->handler_data = data;
wmi_status = wmi_method_enable(block, true);
if ((wmi_status != AE_OK) ||
((wmi_status == AE_OK) && (status == AE_NOT_EXIST)))
status = wmi_status;
}
}
return status;
}
EXPORT_SYMBOL_GPL(wmi_install_notify_handler);
/**
* wmi_remove_notify_handler - Unregister handler for WMI events (deprecated)
* @guid: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
*
* Unregister handler for events sent to the ACPI-WMI mapper device.
*
* Return: acpi_status signaling success or error.
*/
acpi_status wmi_remove_notify_handler(const char *guid)
{
struct wmi_block *block;
acpi_status status = AE_NOT_EXIST;
guid_t guid_input;
if (!guid)
return AE_BAD_PARAMETER;
if (guid_parse(guid, &guid_input))
return AE_BAD_PARAMETER;
list_for_each_entry(block, &wmi_block_list, list) {
acpi_status wmi_status;
if (guid_equal(&block->gblock.guid, &guid_input)) {
if (!block->handler ||
block->handler == wmi_notify_debug)
return AE_NULL_ENTRY;
if (debug_event) {
block->handler = wmi_notify_debug;
status = AE_OK;
} else {
wmi_status = wmi_method_enable(block, false);
block->handler = NULL;
block->handler_data = NULL;
if ((wmi_status != AE_OK) ||
((wmi_status == AE_OK) &&
(status == AE_NOT_EXIST)))
status = wmi_status;
}
}
}
return status;
}
EXPORT_SYMBOL_GPL(wmi_remove_notify_handler);
/**
* wmi_get_event_data - Get WMI data associated with an event (deprecated)
*
* @event: Event to find
* @out: Buffer to hold event data
*
 * Get extra data associated with a WMI event, the caller needs to free @out.
*
* Return: acpi_status signaling success or error.
*/
acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out)
{
struct wmi_block *wblock;
list_for_each_entry(wblock, &wmi_block_list, list) {
struct guid_block *gblock = &wblock->gblock;
if ((gblock->flags & ACPI_WMI_EVENT) && gblock->notify_id == event)
return get_event_data(wblock, out);
}
return AE_NOT_FOUND;
}
EXPORT_SYMBOL_GPL(wmi_get_event_data);
/**
* wmi_has_guid - Check if a GUID is available
* @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
*
* Check if a given GUID is defined by _WDG.
*
* Return: True if GUID is available, false otherwise.
*/
bool wmi_has_guid(const char *guid_string)
{
return ACPI_SUCCESS(find_guid(guid_string, NULL));
}
EXPORT_SYMBOL_GPL(wmi_has_guid);
/**
* wmi_get_acpi_device_uid() - Get _UID name of ACPI device that defines GUID (deprecated)
* @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
*
* Find the _UID of ACPI device associated with this WMI GUID.
*
* Return: The ACPI _UID field value or NULL if the WMI GUID was not found.
*/
char *wmi_get_acpi_device_uid(const char *guid_string)
{
struct wmi_block *wblock = NULL;
acpi_status status;
status = find_guid(guid_string, &wblock);
if (ACPI_FAILURE(status))
return NULL;
return acpi_device_uid(wblock->acpi_device);
}
EXPORT_SYMBOL_GPL(wmi_get_acpi_device_uid);
#define dev_to_wblock(__dev) container_of_const(__dev, struct wmi_block, dev.dev)
#define dev_to_wdev(__dev) container_of_const(__dev, struct wmi_device, dev)
static inline struct wmi_driver *drv_to_wdrv(struct device_driver *drv)
{
return container_of(drv, struct wmi_driver, driver);
}
/*
* sysfs interface
*/
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct wmi_block *wblock = dev_to_wblock(dev);
return sysfs_emit(buf, "wmi:%pUL\n", &wblock->gblock.guid);
}
static DEVICE_ATTR_RO(modalias);
static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct wmi_block *wblock = dev_to_wblock(dev);
return sysfs_emit(buf, "%pUL\n", &wblock->gblock.guid);
}
static DEVICE_ATTR_RO(guid);
static ssize_t instance_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct wmi_block *wblock = dev_to_wblock(dev);
return sysfs_emit(buf, "%d\n", (int)wblock->gblock.instance_count);
}
static DEVICE_ATTR_RO(instance_count);
static ssize_t expensive_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct wmi_block *wblock = dev_to_wblock(dev);
return sysfs_emit(buf, "%d\n",
(wblock->gblock.flags & ACPI_WMI_EXPENSIVE) != 0);
}
static DEVICE_ATTR_RO(expensive);
static struct attribute *wmi_attrs[] = {
&dev_attr_modalias.attr,
&dev_attr_guid.attr,
&dev_attr_instance_count.attr,
&dev_attr_expensive.attr,
NULL
};
ATTRIBUTE_GROUPS(wmi);
static ssize_t notify_id_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct wmi_block *wblock = dev_to_wblock(dev);
return sysfs_emit(buf, "%02X\n", (unsigned int)wblock->gblock.notify_id);
}
static DEVICE_ATTR_RO(notify_id);
static struct attribute *wmi_event_attrs[] = {
&dev_attr_notify_id.attr,
NULL
};
ATTRIBUTE_GROUPS(wmi_event);
static ssize_t object_id_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct wmi_block *wblock = dev_to_wblock(dev);
return sysfs_emit(buf, "%c%c\n", wblock->gblock.object_id[0],
wblock->gblock.object_id[1]);
}
static DEVICE_ATTR_RO(object_id);
static ssize_t setable_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct wmi_device *wdev = dev_to_wdev(dev);
return sysfs_emit(buf, "%d\n", (int)wdev->setable);
}
static DEVICE_ATTR_RO(setable);
static struct attribute *wmi_data_attrs[] = {
&dev_attr_object_id.attr,
&dev_attr_setable.attr,
NULL
};
ATTRIBUTE_GROUPS(wmi_data);
static struct attribute *wmi_method_attrs[] = {
&dev_attr_object_id.attr,
NULL
};
ATTRIBUTE_GROUPS(wmi_method);
static int wmi_dev_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct wmi_block *wblock = dev_to_wblock(dev);
if (add_uevent_var(env, "MODALIAS=wmi:%pUL", &wblock->gblock.guid))
return -ENOMEM;
if (add_uevent_var(env, "WMI_GUID=%pUL", &wblock->gblock.guid))
return -ENOMEM;
return 0;
}
static void wmi_dev_release(struct device *dev)
{
struct wmi_block *wblock = dev_to_wblock(dev);
kfree(wblock);
}
static int wmi_dev_match(struct device *dev, struct device_driver *driver)
{
struct wmi_driver *wmi_driver = drv_to_wdrv(driver);
struct wmi_block *wblock = dev_to_wblock(dev);
const struct wmi_device_id *id = wmi_driver->id_table;
if (id == NULL)
return 0;
while (*id->guid_string) {
if (guid_parse_and_compare(id->guid_string, &wblock->gblock.guid))
return 1;
id++;
}
return 0;
}
static int wmi_char_open(struct inode *inode, struct file *filp)
{
const char *driver_name = filp->f_path.dentry->d_iname;
struct wmi_block *wblock;
struct wmi_block *next;
list_for_each_entry_safe(wblock, next, &wmi_block_list, list) {
if (!wblock->dev.dev.driver)
continue;
if (strcmp(driver_name, wblock->dev.dev.driver->name) == 0) {
filp->private_data = wblock;
break;
}
}
if (!filp->private_data)
return -ENODEV;
return nonseekable_open(inode, filp);
}
static ssize_t wmi_char_read(struct file *filp, char __user *buffer,
size_t length, loff_t *offset)
{
struct wmi_block *wblock = filp->private_data;
return simple_read_from_buffer(buffer, length, offset,
&wblock->req_buf_size,
sizeof(wblock->req_buf_size));
}
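/*
 * Userspace protocol sketch: a read() on the character device returns the
 * required buffer size (req_buf_size, a u64); userspace then issues a WMI_IOC
 * ioctl whose payload begins with its own length field, as checked in
 * wmi_ioctl() below.
 */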
static long wmi_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct wmi_ioctl_buffer __user *input =
(struct wmi_ioctl_buffer __user *) arg;
struct wmi_block *wblock = filp->private_data;
struct wmi_ioctl_buffer *buf;
struct wmi_driver *wdriver;
int ret;
if (_IOC_TYPE(cmd) != WMI_IOC)
return -ENOTTY;
	/* make sure we're not calling a higher instance than exists */
if (_IOC_NR(cmd) >= wblock->gblock.instance_count)
return -EINVAL;
mutex_lock(&wblock->char_mutex);
buf = wblock->handler_data;
if (get_user(buf->length, &input->length)) {
dev_dbg(&wblock->dev.dev, "Read length from user failed\n");
ret = -EFAULT;
goto out_ioctl;
}
/* if it's too small, abort */
if (buf->length < wblock->req_buf_size) {
dev_err(&wblock->dev.dev,
"Buffer %lld too small, need at least %lld\n",
buf->length, wblock->req_buf_size);
ret = -EINVAL;
goto out_ioctl;
}
	/* if it's too big, warn; the driver will only use what is needed */
if (buf->length > wblock->req_buf_size)
dev_warn(&wblock->dev.dev,
"Buffer %lld is bigger than required %lld\n",
buf->length, wblock->req_buf_size);
/* copy the structure from userspace */
if (copy_from_user(buf, input, wblock->req_buf_size)) {
dev_dbg(&wblock->dev.dev, "Copy %llu from user failed\n",
wblock->req_buf_size);
ret = -EFAULT;
goto out_ioctl;
}
/* let the driver do any filtering and do the call */
wdriver = drv_to_wdrv(wblock->dev.dev.driver);
if (!try_module_get(wdriver->driver.owner)) {
ret = -EBUSY;
goto out_ioctl;
}
ret = wdriver->filter_callback(&wblock->dev, cmd, buf);
module_put(wdriver->driver.owner);
if (ret)
goto out_ioctl;
/* return the result (only up to our internal buffer size) */
if (copy_to_user(input, buf, wblock->req_buf_size)) {
dev_dbg(&wblock->dev.dev, "Copy %llu to user failed\n",
wblock->req_buf_size);
ret = -EFAULT;
}
out_ioctl:
mutex_unlock(&wblock->char_mutex);
return ret;
}
static const struct file_operations wmi_fops = {
.owner = THIS_MODULE,
.read = wmi_char_read,
.open = wmi_char_open,
.unlocked_ioctl = wmi_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
static int wmi_dev_probe(struct device *dev)
{
struct wmi_block *wblock = dev_to_wblock(dev);
struct wmi_driver *wdriver = drv_to_wdrv(dev->driver);
int ret = 0;
char *buf;
if (ACPI_FAILURE(wmi_method_enable(wblock, true)))
dev_warn(dev, "failed to enable device -- probing anyway\n");
if (wdriver->probe) {
ret = wdriver->probe(dev_to_wdev(dev),
find_guid_context(wblock, wdriver));
if (ret != 0)
goto probe_failure;
}
/* driver wants a character device made */
if (wdriver->filter_callback) {
		/* check that the required buffer size was declared by the driver or MOF */
if (!wblock->req_buf_size) {
dev_err(&wblock->dev.dev,
"Required buffer size not set\n");
ret = -EINVAL;
goto probe_failure;
}
wblock->handler_data = kmalloc(wblock->req_buf_size,
GFP_KERNEL);
if (!wblock->handler_data) {
ret = -ENOMEM;
goto probe_failure;
}
buf = kasprintf(GFP_KERNEL, "wmi/%s", wdriver->driver.name);
if (!buf) {
ret = -ENOMEM;
goto probe_string_failure;
}
wblock->char_dev.minor = MISC_DYNAMIC_MINOR;
wblock->char_dev.name = buf;
wblock->char_dev.fops = &wmi_fops;
wblock->char_dev.mode = 0444;
ret = misc_register(&wblock->char_dev);
if (ret) {
dev_warn(dev, "failed to register char dev: %d\n", ret);
ret = -ENOMEM;
goto probe_misc_failure;
}
}
set_bit(WMI_PROBED, &wblock->flags);
return 0;
probe_misc_failure:
kfree(buf);
probe_string_failure:
kfree(wblock->handler_data);
probe_failure:
if (ACPI_FAILURE(wmi_method_enable(wblock, false)))
dev_warn(dev, "failed to disable device\n");
return ret;
}
static void wmi_dev_remove(struct device *dev)
{
struct wmi_block *wblock = dev_to_wblock(dev);
struct wmi_driver *wdriver = drv_to_wdrv(dev->driver);
clear_bit(WMI_PROBED, &wblock->flags);
if (wdriver->filter_callback) {
misc_deregister(&wblock->char_dev);
kfree(wblock->char_dev.name);
kfree(wblock->handler_data);
}
if (wdriver->remove)
wdriver->remove(dev_to_wdev(dev));
if (ACPI_FAILURE(wmi_method_enable(wblock, false)))
dev_warn(dev, "failed to disable device\n");
}
static struct class wmi_bus_class = {
.name = "wmi_bus",
};
static struct bus_type wmi_bus_type = {
.name = "wmi",
.dev_groups = wmi_groups,
.match = wmi_dev_match,
.uevent = wmi_dev_uevent,
.probe = wmi_dev_probe,
.remove = wmi_dev_remove,
};
static const struct device_type wmi_type_event = {
.name = "event",
.groups = wmi_event_groups,
.release = wmi_dev_release,
};
static const struct device_type wmi_type_method = {
.name = "method",
.groups = wmi_method_groups,
.release = wmi_dev_release,
};
static const struct device_type wmi_type_data = {
.name = "data",
.groups = wmi_data_groups,
.release = wmi_dev_release,
};
/*
* _WDG is a static list that is only parsed at startup,
* so it's safe to count entries without extra protection.
*/
static int guid_count(const guid_t *guid)
{
struct wmi_block *wblock;
int count = 0;
list_for_each_entry(wblock, &wmi_block_list, list) {
if (guid_equal(&wblock->gblock.guid, guid))
count++;
}
return count;
}
static int wmi_create_device(struct device *wmi_bus_dev,
struct wmi_block *wblock,
struct acpi_device *device)
{
struct acpi_device_info *info;
char method[WMI_ACPI_METHOD_NAME_SIZE];
int result;
uint count;
if (wblock->gblock.flags & ACPI_WMI_EVENT) {
wblock->dev.dev.type = &wmi_type_event;
goto out_init;
}
if (wblock->gblock.flags & ACPI_WMI_METHOD) {
wblock->dev.dev.type = &wmi_type_method;
mutex_init(&wblock->char_mutex);
goto out_init;
}
/*
* Data Block Query Control Method (WQxx by convention) is
* required per the WMI documentation. If it is not present,
* we ignore this data block.
*/
get_acpi_method_name(wblock, 'Q', method);
result = get_subobj_info(device->handle, method, &info);
if (result) {
dev_warn(wmi_bus_dev,
"%s data block query control method not found\n",
method);
return result;
}
wblock->dev.dev.type = &wmi_type_data;
/*
* The Microsoft documentation specifically states:
*
* Data blocks registered with only a single instance
* can ignore the parameter.
*
* ACPICA will get mad at us if we call the method with the wrong number
* of arguments, so check what our method expects. (On some Dell
* laptops, WQxx may not be a method at all.)
*/
if (info->type != ACPI_TYPE_METHOD || info->param_count == 0)
set_bit(WMI_READ_TAKES_NO_ARGS, &wblock->flags);
kfree(info);
get_acpi_method_name(wblock, 'S', method);
result = get_subobj_info(device->handle, method, NULL);
if (result == 0)
wblock->dev.setable = true;
out_init:
wblock->dev.dev.bus = &wmi_bus_type;
wblock->dev.dev.parent = wmi_bus_dev;
count = guid_count(&wblock->gblock.guid);
if (count)
dev_set_name(&wblock->dev.dev, "%pUL-%d", &wblock->gblock.guid, count);
else
dev_set_name(&wblock->dev.dev, "%pUL", &wblock->gblock.guid);
device_initialize(&wblock->dev.dev);
return 0;
}
static void wmi_free_devices(struct acpi_device *device)
{
struct wmi_block *wblock, *next;
/* Delete devices for all the GUIDs */
list_for_each_entry_safe(wblock, next, &wmi_block_list, list) {
if (wblock->acpi_device == device) {
list_del(&wblock->list);
device_unregister(&wblock->dev.dev);
}
}
}
static bool guid_already_parsed_for_legacy(struct acpi_device *device, const guid_t *guid)
{
struct wmi_block *wblock;
list_for_each_entry(wblock, &wmi_block_list, list) {
/* skip warning and register if we know the driver will use struct wmi_driver */
for (int i = 0; allow_duplicates[i] != NULL; i++) {
if (guid_parse_and_compare(allow_duplicates[i], guid))
return false;
}
if (guid_equal(&wblock->gblock.guid, guid)) {
/*
* Because we historically didn't track the relationship
* between GUIDs and ACPI nodes, we don't know whether
* we need to suppress GUIDs that are unique on a
* given node but duplicated across nodes.
*/
dev_warn(&device->dev, "duplicate WMI GUID %pUL (first instance was on %s)\n",
guid, dev_name(&wblock->acpi_device->dev));
return true;
}
}
return false;
}
/*
* Parse the _WDG method for the GUID data blocks
*/
static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
{
struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL};
const struct guid_block *gblock;
struct wmi_block *wblock, *next;
union acpi_object *obj;
acpi_status status;
int retval = 0;
u32 i, total;
status = acpi_evaluate_object(device->handle, "_WDG", NULL, &out);
if (ACPI_FAILURE(status))
return -ENXIO;
obj = out.pointer;
if (!obj)
return -ENXIO;
if (obj->type != ACPI_TYPE_BUFFER) {
retval = -ENXIO;
goto out_free_pointer;
}
gblock = (const struct guid_block *)obj->buffer.pointer;
total = obj->buffer.length / sizeof(struct guid_block);
for (i = 0; i < total; i++) {
if (debug_dump_wdg)
wmi_dump_wdg(&gblock[i]);
if (guid_already_parsed_for_legacy(device, &gblock[i].guid))
continue;
wblock = kzalloc(sizeof(*wblock), GFP_KERNEL);
if (!wblock) {
retval = -ENOMEM;
break;
}
wblock->acpi_device = device;
wblock->gblock = gblock[i];
retval = wmi_create_device(wmi_bus_dev, wblock, device);
if (retval) {
kfree(wblock);
continue;
}
list_add_tail(&wblock->list, &wmi_block_list);
if (debug_event) {
wblock->handler = wmi_notify_debug;
wmi_method_enable(wblock, true);
}
}
/*
* Now that all of the devices are created, add them to the
* device tree and probe subdrivers.
*/
list_for_each_entry_safe(wblock, next, &wmi_block_list, list) {
if (wblock->acpi_device != device)
continue;
retval = device_add(&wblock->dev.dev);
if (retval) {
dev_err(wmi_bus_dev, "failed to register %pUL\n",
&wblock->gblock.guid);
if (debug_event)
wmi_method_enable(wblock, false);
list_del(&wblock->list);
put_device(&wblock->dev.dev);
}
}
out_free_pointer:
kfree(out.pointer);
return retval;
}
/*
 * WMI can have EmbeddedControl access regions, in which case we just want to
 * hand these off to the EC driver.
*/
static acpi_status
acpi_wmi_ec_space_handler(u32 function, acpi_physical_address address,
u32 bits, u64 *value,
void *handler_context, void *region_context)
{
int result = 0, i = 0;
u8 temp = 0;
if ((address > 0xFF) || !value)
return AE_BAD_PARAMETER;
if (function != ACPI_READ && function != ACPI_WRITE)
return AE_BAD_PARAMETER;
if (bits != 8)
return AE_BAD_PARAMETER;
if (function == ACPI_READ) {
result = ec_read(address, &temp);
(*value) |= ((u64)temp) << i;
} else {
temp = 0xff & ((*value) >> i);
result = ec_write(address, temp);
}
switch (result) {
case -EINVAL:
return AE_BAD_PARAMETER;
case -ENODEV:
return AE_NOT_FOUND;
case -ETIME:
return AE_TIME;
default:
return AE_OK;
}
}
static void acpi_wmi_notify_handler(acpi_handle handle, u32 event,
void *context)
{
struct wmi_block *wblock = NULL, *iter;
list_for_each_entry(iter, &wmi_block_list, list) {
struct guid_block *block = &iter->gblock;
if (iter->acpi_device->handle == handle &&
(block->flags & ACPI_WMI_EVENT) &&
(block->notify_id == event)) {
wblock = iter;
break;
}
}
if (!wblock)
return;
/* If a driver is bound, then notify the driver. */
if (test_bit(WMI_PROBED, &wblock->flags) && wblock->dev.dev.driver) {
struct wmi_driver *driver = drv_to_wdrv(wblock->dev.dev.driver);
struct acpi_buffer evdata = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_status status;
if (!driver->no_notify_data) {
status = get_event_data(wblock, &evdata);
if (ACPI_FAILURE(status)) {
dev_warn(&wblock->dev.dev, "failed to get event data\n");
return;
}
}
if (driver->notify)
driver->notify(&wblock->dev, evdata.pointer);
kfree(evdata.pointer);
} else if (wblock->handler) {
/* Legacy handler */
wblock->handler(event, wblock->handler_data);
}
if (debug_event)
pr_info("DEBUG: GUID %pUL event 0x%02X\n", &wblock->gblock.guid, event);
acpi_bus_generate_netlink_event(
wblock->acpi_device->pnp.device_class,
dev_name(&wblock->dev.dev),
event, 0);
}
static void acpi_wmi_remove(struct platform_device *device)
{
struct acpi_device *acpi_device = ACPI_COMPANION(&device->dev);
acpi_remove_notify_handler(acpi_device->handle, ACPI_ALL_NOTIFY,
acpi_wmi_notify_handler);
acpi_remove_address_space_handler(acpi_device->handle,
ACPI_ADR_SPACE_EC, &acpi_wmi_ec_space_handler);
wmi_free_devices(acpi_device);
device_unregister(dev_get_drvdata(&device->dev));
}
static int acpi_wmi_probe(struct platform_device *device)
{
struct acpi_device *acpi_device;
struct device *wmi_bus_dev;
acpi_status status;
int error;
acpi_device = ACPI_COMPANION(&device->dev);
if (!acpi_device) {
dev_err(&device->dev, "ACPI companion is missing\n");
return -ENODEV;
}
status = acpi_install_address_space_handler(acpi_device->handle,
ACPI_ADR_SPACE_EC,
&acpi_wmi_ec_space_handler,
NULL, NULL);
if (ACPI_FAILURE(status)) {
dev_err(&device->dev, "Error installing EC region handler\n");
return -ENODEV;
}
status = acpi_install_notify_handler(acpi_device->handle,
ACPI_ALL_NOTIFY,
acpi_wmi_notify_handler,
NULL);
if (ACPI_FAILURE(status)) {
dev_err(&device->dev, "Error installing notify handler\n");
error = -ENODEV;
goto err_remove_ec_handler;
}
wmi_bus_dev = device_create(&wmi_bus_class, &device->dev, MKDEV(0, 0),
NULL, "wmi_bus-%s", dev_name(&device->dev));
if (IS_ERR(wmi_bus_dev)) {
error = PTR_ERR(wmi_bus_dev);
goto err_remove_notify_handler;
}
dev_set_drvdata(&device->dev, wmi_bus_dev);
error = parse_wdg(wmi_bus_dev, acpi_device);
if (error) {
pr_err("Failed to parse WDG method\n");
goto err_remove_busdev;
}
return 0;
err_remove_busdev:
device_unregister(wmi_bus_dev);
err_remove_notify_handler:
acpi_remove_notify_handler(acpi_device->handle, ACPI_ALL_NOTIFY,
acpi_wmi_notify_handler);
err_remove_ec_handler:
acpi_remove_address_space_handler(acpi_device->handle,
ACPI_ADR_SPACE_EC,
&acpi_wmi_ec_space_handler);
return error;
}
int __must_check __wmi_driver_register(struct wmi_driver *driver,
struct module *owner)
{
driver->driver.owner = owner;
driver->driver.bus = &wmi_bus_type;
return driver_register(&driver->driver);
}
EXPORT_SYMBOL(__wmi_driver_register);
/**
* wmi_driver_unregister() - Unregister a WMI driver
* @driver: WMI driver to unregister
*
* Unregisters a WMI driver from the WMI bus.
*/
void wmi_driver_unregister(struct wmi_driver *driver)
{
driver_unregister(&driver->driver);
}
EXPORT_SYMBOL(wmi_driver_unregister);
static struct platform_driver acpi_wmi_driver = {
.driver = {
.name = "acpi-wmi",
.acpi_match_table = wmi_device_ids,
},
.probe = acpi_wmi_probe,
.remove_new = acpi_wmi_remove,
};
static int __init acpi_wmi_init(void)
{
int error;
if (acpi_disabled)
return -ENODEV;
error = class_register(&wmi_bus_class);
if (error)
return error;
error = bus_register(&wmi_bus_type);
if (error)
goto err_unreg_class;
error = platform_driver_register(&acpi_wmi_driver);
if (error) {
pr_err("Error loading mapper\n");
goto err_unreg_bus;
}
return 0;
err_unreg_bus:
bus_unregister(&wmi_bus_type);
err_unreg_class:
class_unregister(&wmi_bus_class);
return error;
}
static void __exit acpi_wmi_exit(void)
{
platform_driver_unregister(&acpi_wmi_driver);
bus_unregister(&wmi_bus_type);
class_unregister(&wmi_bus_class);
}
subsys_initcall_sync(acpi_wmi_init);
module_exit(acpi_wmi_exit);
| linux-master | drivers/platform/x86/wmi.c |
// SPDX-License-Identifier: GPL-2.0
/* WMI driver for Xiaomi Laptops */
#include <linux/acpi.h>
#include <linux/input.h>
#include <linux/module.h>
#include <linux/wmi.h>
#include <uapi/linux/input-event-codes.h>
#define XIAOMI_KEY_FN_ESC_0 "A2095CCE-0491-44E7-BA27-F8ED8F88AA86"
#define XIAOMI_KEY_FN_ESC_1 "7BBE8E39-B486-473D-BA13-66F75C5805CD"
#define XIAOMI_KEY_FN_FN "409B028D-F06B-4C7C-8BBB-EE133A6BD87E"
#define XIAOMI_KEY_CAPSLOCK "83FE7607-053A-4644-822A-21532C621FC7"
#define XIAOMI_KEY_FN_F7 "76E9027C-95D0-4180-8692-DA6747DD1C2D"
#define XIAOMI_DEVICE(guid, key) \
.guid_string = (guid), \
.context = &(const unsigned int){key}
struct xiaomi_wmi {
struct input_dev *input_dev;
unsigned int key_code;
};
static int xiaomi_wmi_probe(struct wmi_device *wdev, const void *context)
{
struct xiaomi_wmi *data;
if (wdev == NULL || context == NULL)
return -EINVAL;
data = devm_kzalloc(&wdev->dev, sizeof(struct xiaomi_wmi), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
dev_set_drvdata(&wdev->dev, data);
data->input_dev = devm_input_allocate_device(&wdev->dev);
if (data->input_dev == NULL)
return -ENOMEM;
data->input_dev->name = "Xiaomi WMI keys";
data->input_dev->phys = "wmi/input0";
data->key_code = *((const unsigned int *)context);
set_bit(EV_KEY, data->input_dev->evbit);
set_bit(data->key_code, data->input_dev->keybit);
return input_register_device(data->input_dev);
}
static void xiaomi_wmi_notify(struct wmi_device *wdev, union acpi_object *dummy)
{
struct xiaomi_wmi *data;
if (wdev == NULL)
return;
data = dev_get_drvdata(&wdev->dev);
if (data == NULL)
return;
input_report_key(data->input_dev, data->key_code, 1);
input_sync(data->input_dev);
input_report_key(data->input_dev, data->key_code, 0);
input_sync(data->input_dev);
}
static const struct wmi_device_id xiaomi_wmi_id_table[] = {
// { XIAOMI_DEVICE(XIAOMI_KEY_FN_ESC_0, KEY_FN_ESC) },
// { XIAOMI_DEVICE(XIAOMI_KEY_FN_ESC_1, KEY_FN_ESC) },
{ XIAOMI_DEVICE(XIAOMI_KEY_FN_FN, KEY_PROG1) },
// { XIAOMI_DEVICE(XIAOMI_KEY_CAPSLOCK, KEY_CAPSLOCK) },
{ XIAOMI_DEVICE(XIAOMI_KEY_FN_F7, KEY_CUT) },
/* Terminating entry */
{ }
};
static struct wmi_driver xiaomi_wmi_driver = {
.driver = {
.name = "xiaomi-wmi",
},
.id_table = xiaomi_wmi_id_table,
.probe = xiaomi_wmi_probe,
.notify = xiaomi_wmi_notify,
};
module_wmi_driver(xiaomi_wmi_driver);
MODULE_DEVICE_TABLE(wmi, xiaomi_wmi_id_table);
MODULE_AUTHOR("Mattias Jacobsson");
MODULE_DESCRIPTION("Xiaomi WMI driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/platform/x86/xiaomi-wmi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Asus Wireless Radio Control Driver
*
* Copyright (C) 2015-2016 Endless Mobile, Inc.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/acpi.h>
#include <linux/input.h>
#include <linux/pci_ids.h>
#include <linux/leds.h>
struct hswc_params {
u8 on;
u8 off;
u8 status;
};
struct asus_wireless_data {
struct input_dev *idev;
struct acpi_device *adev;
const struct hswc_params *hswc_params;
struct workqueue_struct *wq;
struct work_struct led_work;
struct led_classdev led;
int led_state;
};
static const struct hswc_params atk4001_id_params = {
.on = 0x0,
.off = 0x1,
.status = 0x2,
};
static const struct hswc_params atk4002_id_params = {
.on = 0x5,
.off = 0x4,
.status = 0x2,
};
static const struct acpi_device_id device_ids[] = {
{"ATK4001", (kernel_ulong_t)&atk4001_id_params},
{"ATK4002", (kernel_ulong_t)&atk4002_id_params},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, device_ids);
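/*
 * e.g. on ATK4002 hardware, evaluating HSWC with argument 0x5 switches the
 * airplane-mode LED on, 0x4 switches it off and 0x2 queries the current
 * state (see the hswc_params tables above).
 */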
static acpi_status asus_wireless_method(acpi_handle handle, const char *method,
int param, u64 *ret)
{
struct acpi_object_list p;
union acpi_object obj;
acpi_status s;
acpi_handle_debug(handle, "Evaluating method %s, parameter %#x\n",
method, param);
obj.type = ACPI_TYPE_INTEGER;
obj.integer.value = param;
p.count = 1;
p.pointer = &obj;
s = acpi_evaluate_integer(handle, (acpi_string) method, &p, ret);
if (ACPI_FAILURE(s))
acpi_handle_err(handle,
"Failed to eval method %s, param %#x (%d)\n",
method, param, s);
else
acpi_handle_debug(handle, "%s returned %#llx\n", method, *ret);
return s;
}
static enum led_brightness led_state_get(struct led_classdev *led)
{
struct asus_wireless_data *data;
acpi_status s;
u64 ret;
data = container_of(led, struct asus_wireless_data, led);
s = asus_wireless_method(acpi_device_handle(data->adev), "HSWC",
data->hswc_params->status, &ret);
if (ACPI_SUCCESS(s) && ret == data->hswc_params->on)
return LED_FULL;
return LED_OFF;
}
static void led_state_update(struct work_struct *work)
{
struct asus_wireless_data *data;
u64 ret;
data = container_of(work, struct asus_wireless_data, led_work);
asus_wireless_method(acpi_device_handle(data->adev), "HSWC",
data->led_state, &ret);
}
static void led_state_set(struct led_classdev *led, enum led_brightness value)
{
struct asus_wireless_data *data;
data = container_of(led, struct asus_wireless_data, led);
data->led_state = value == LED_OFF ? data->hswc_params->off :
data->hswc_params->on;
queue_work(data->wq, &data->led_work);
}
static void asus_wireless_notify(struct acpi_device *adev, u32 event)
{
struct asus_wireless_data *data = acpi_driver_data(adev);
dev_dbg(&adev->dev, "event=%#x\n", event);
if (event != 0x88) {
dev_notice(&adev->dev, "Unknown ASHS event: %#x\n", event);
return;
}
input_report_key(data->idev, KEY_RFKILL, 1);
input_sync(data->idev);
input_report_key(data->idev, KEY_RFKILL, 0);
input_sync(data->idev);
}
static int asus_wireless_add(struct acpi_device *adev)
{
struct asus_wireless_data *data;
const struct acpi_device_id *id;
int err;
data = devm_kzalloc(&adev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
adev->driver_data = data;
data->adev = adev;
data->idev = devm_input_allocate_device(&adev->dev);
if (!data->idev)
return -ENOMEM;
data->idev->name = "Asus Wireless Radio Control";
data->idev->phys = "asus-wireless/input0";
data->idev->id.bustype = BUS_HOST;
data->idev->id.vendor = PCI_VENDOR_ID_ASUSTEK;
set_bit(EV_KEY, data->idev->evbit);
set_bit(KEY_RFKILL, data->idev->keybit);
err = input_register_device(data->idev);
if (err)
return err;
for (id = device_ids; id->id[0]; id++) {
if (!strcmp((char *) id->id, acpi_device_hid(adev))) {
data->hswc_params =
(const struct hswc_params *)id->driver_data;
break;
}
}
if (!data->hswc_params)
return 0;
data->wq = create_singlethread_workqueue("asus_wireless_workqueue");
if (!data->wq)
return -ENOMEM;
INIT_WORK(&data->led_work, led_state_update);
data->led.name = "asus-wireless::airplane";
data->led.brightness_set = led_state_set;
data->led.brightness_get = led_state_get;
data->led.flags = LED_CORE_SUSPENDRESUME;
data->led.max_brightness = 1;
data->led.default_trigger = "rfkill-none";
err = devm_led_classdev_register(&adev->dev, &data->led);
if (err)
destroy_workqueue(data->wq);
return err;
}
static void asus_wireless_remove(struct acpi_device *adev)
{
struct asus_wireless_data *data = acpi_driver_data(adev);
if (data->wq) {
devm_led_classdev_unregister(&adev->dev, &data->led);
destroy_workqueue(data->wq);
}
}
static struct acpi_driver asus_wireless_driver = {
.name = "Asus Wireless Radio Control Driver",
.class = "hotkey",
.ids = device_ids,
.ops = {
.add = asus_wireless_add,
.remove = asus_wireless_remove,
.notify = asus_wireless_notify,
},
};
module_acpi_driver(asus_wireless_driver);
MODULE_DESCRIPTION("Asus Wireless Radio Control Driver");
MODULE_AUTHOR("João Paulo Rechi Vita <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/x86/asus-wireless.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Samsung Laptop driver
*
* Copyright (C) 2009,2011 Greg Kroah-Hartman ([email protected])
* Copyright (C) 2009,2011 Novell Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/backlight.h>
#include <linux/leds.h>
#include <linux/fb.h>
#include <linux/dmi.h>
#include <linux/platform_device.h>
#include <linux/rfkill.h>
#include <linux/acpi.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/efi.h>
#include <linux/suspend.h>
#include <acpi/video.h>
/*
* This driver is needed because a number of Samsung laptops do not hook
* their control settings through ACPI. So we have to poke around in the
 * BIOS to handle things like brightness values and "special" key controls.
*/
/*
* We have 0 - 8 as valid brightness levels. The specs say that level 0 should
 * be reserved by the BIOS (which really doesn't make much sense), so we tell
 * userspace that the value is 0 - 7 and then just tell the hardware 1 - 8.
*/
#define MAX_BRIGHT 0x07
#define SABI_IFACE_MAIN 0x00
#define SABI_IFACE_SUB 0x02
#define SABI_IFACE_COMPLETE 0x04
#define SABI_IFACE_DATA 0x05
#define WL_STATUS_WLAN 0x0
#define WL_STATUS_BT 0x2
/* Structure get/set data using sabi */
struct sabi_data {
union {
struct {
u32 d0;
u32 d1;
u16 d2;
u8 d3;
};
u8 data[11];
};
};
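/*
 * The named fields overlay data[] without padding: d0 = data[0..3],
 * d1 = data[4..7], d2 = data[8..9], d3 = data[10].
 */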
struct sabi_header_offsets {
u8 port;
u8 re_mem;
u8 iface_func;
u8 en_mem;
u8 data_offset;
u8 data_segment;
};
struct sabi_commands {
/*
* Brightness is 0 - 8, as described above.
* Value 0 is for the BIOS to use
*/
u16 get_brightness;
u16 set_brightness;
/*
* first byte:
* 0x00 - wireless is off
* 0x01 - wireless is on
* second byte:
* 0x02 - 3G is off
* 0x03 - 3G is on
* TODO, verify 3G is correct, that doesn't seem right...
*/
u16 get_wireless_button;
u16 set_wireless_button;
/* 0 is off, 1 is on */
u16 get_backlight;
u16 set_backlight;
/*
* 0x80 or 0x00 - no action
* 0x81 - recovery key pressed
*/
u16 get_recovery_mode;
u16 set_recovery_mode;
/*
* on seclinux: 0 is low, 1 is high,
* on swsmi: 0 is normal, 1 is silent, 2 is turbo
*/
u16 get_performance_level;
u16 set_performance_level;
/* 0x80 is off, 0x81 is on */
u16 get_battery_life_extender;
u16 set_battery_life_extender;
/* 0x80 is off, 0x81 is on */
u16 get_usb_charge;
u16 set_usb_charge;
/* the first byte is for bluetooth and the third one is for wlan */
u16 get_wireless_status;
u16 set_wireless_status;
/* 0x80 is off, 0x81 is on */
u16 get_lid_handling;
u16 set_lid_handling;
/* 0x81 to read, (0x82 | level << 8) to set, 0xaabb to enable */
u16 kbd_backlight;
/*
* Tell the BIOS that Linux is running on this machine.
* 81 is on, 80 is off
*/
u16 set_linux;
};
struct sabi_performance_level {
const char *name;
u16 value;
};
struct sabi_config {
int sabi_version;
const char *test_string;
u16 main_function;
const struct sabi_header_offsets header_offsets;
const struct sabi_commands commands;
const struct sabi_performance_level performance_levels[4];
u8 min_brightness;
u8 max_brightness;
};
static const struct sabi_config sabi_configs[] = {
{
/* I don't know if it is really 2, but it is
* less than 3 anyway */
.sabi_version = 2,
.test_string = "SECLINUX",
.main_function = 0x4c49,
.header_offsets = {
.port = 0x00,
.re_mem = 0x02,
.iface_func = 0x03,
.en_mem = 0x04,
.data_offset = 0x05,
.data_segment = 0x07,
},
.commands = {
.get_brightness = 0x00,
.set_brightness = 0x01,
.get_wireless_button = 0x02,
.set_wireless_button = 0x03,
.get_backlight = 0x04,
.set_backlight = 0x05,
.get_recovery_mode = 0x06,
.set_recovery_mode = 0x07,
.get_performance_level = 0x08,
.set_performance_level = 0x09,
.get_battery_life_extender = 0xFFFF,
.set_battery_life_extender = 0xFFFF,
.get_usb_charge = 0xFFFF,
.set_usb_charge = 0xFFFF,
.get_wireless_status = 0xFFFF,
.set_wireless_status = 0xFFFF,
.get_lid_handling = 0xFFFF,
.set_lid_handling = 0xFFFF,
.kbd_backlight = 0xFFFF,
.set_linux = 0x0a,
},
.performance_levels = {
{
.name = "silent",
.value = 0,
},
{
.name = "normal",
.value = 1,
},
{ },
},
.min_brightness = 1,
.max_brightness = 8,
},
{
.sabi_version = 3,
.test_string = "SwSmi@",
.main_function = 0x5843,
.header_offsets = {
.port = 0x00,
.re_mem = 0x04,
.iface_func = 0x02,
.en_mem = 0x03,
.data_offset = 0x05,
.data_segment = 0x07,
},
.commands = {
.get_brightness = 0x10,
.set_brightness = 0x11,
.get_wireless_button = 0x12,
.set_wireless_button = 0x13,
.get_backlight = 0x2d,
.set_backlight = 0x2e,
.get_recovery_mode = 0xff,
.set_recovery_mode = 0xff,
.get_performance_level = 0x31,
.set_performance_level = 0x32,
.get_battery_life_extender = 0x65,
.set_battery_life_extender = 0x66,
.get_usb_charge = 0x67,
.set_usb_charge = 0x68,
.get_wireless_status = 0x69,
.set_wireless_status = 0x6a,
.get_lid_handling = 0x6d,
.set_lid_handling = 0x6e,
.kbd_backlight = 0x78,
.set_linux = 0xff,
},
.performance_levels = {
{
.name = "normal",
.value = 0,
},
{
.name = "silent",
.value = 1,
},
{
.name = "overclock",
.value = 2,
},
{ },
},
.min_brightness = 0,
.max_brightness = 8,
},
{ },
};
/*
* samsung-laptop/ - debugfs root directory
* f0000_segment - dump f0000 segment
* command - current command
* data - current data
* d0, d1, d2, d3 - data fields
* call - call SABI using command and data
*
 * This allows calling arbitrary SABI commands without
* modifying the driver at all.
* For example, setting the keyboard backlight brightness to 5
*
* echo 0x78 > command
* echo 0x0582 > d0
* echo 0 > d1
* echo 0 > d2
* echo 0 > d3
* cat call
*/
struct samsung_laptop_debug {
struct dentry *root;
struct sabi_data data;
u16 command;
struct debugfs_blob_wrapper f0000_wrapper;
struct debugfs_blob_wrapper data_wrapper;
struct debugfs_blob_wrapper sdiag_wrapper;
};
struct samsung_laptop;
struct samsung_rfkill {
struct samsung_laptop *samsung;
struct rfkill *rfkill;
enum rfkill_type type;
};
struct samsung_laptop {
const struct sabi_config *config;
void __iomem *sabi;
void __iomem *sabi_iface;
void __iomem *f0000_segment;
struct mutex sabi_mutex;
struct platform_device *platform_device;
struct backlight_device *backlight_device;
struct samsung_rfkill wlan;
struct samsung_rfkill bluetooth;
struct led_classdev kbd_led;
int kbd_led_wk;
struct workqueue_struct *led_workqueue;
struct work_struct kbd_led_work;
struct samsung_laptop_debug debug;
struct samsung_quirks *quirks;
struct notifier_block pm_nb;
bool handle_backlight;
bool has_stepping_quirk;
char sdiag[64];
};
struct samsung_quirks {
bool four_kbd_backlight_levels;
bool enable_kbd_backlight;
bool lid_handling;
};
static struct samsung_quirks samsung_unknown = {};
static struct samsung_quirks samsung_np740u3e = {
.four_kbd_backlight_levels = true,
.enable_kbd_backlight = true,
};
static struct samsung_quirks samsung_lid_handling = {
.lid_handling = true,
};
static bool force;
module_param(force, bool, 0);
MODULE_PARM_DESC(force,
"Disable the DMI check and forces the driver to be loaded");
static bool debug;
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debug enabled or not");
static int sabi_command(struct samsung_laptop *samsung, u16 command,
struct sabi_data *in,
struct sabi_data *out)
{
const struct sabi_config *config = samsung->config;
int ret = 0;
u16 port = readw(samsung->sabi + config->header_offsets.port);
u8 complete, iface_data;
mutex_lock(&samsung->sabi_mutex);
if (debug) {
if (in)
pr_info("SABI command:0x%04x "
"data:{0x%08x, 0x%08x, 0x%04x, 0x%02x}",
command, in->d0, in->d1, in->d2, in->d3);
else
pr_info("SABI command:0x%04x", command);
}
/* enable memory to be able to write to it */
outb(readb(samsung->sabi + config->header_offsets.en_mem), port);
/* write out the command */
writew(config->main_function, samsung->sabi_iface + SABI_IFACE_MAIN);
writew(command, samsung->sabi_iface + SABI_IFACE_SUB);
writeb(0, samsung->sabi_iface + SABI_IFACE_COMPLETE);
if (in) {
writel(in->d0, samsung->sabi_iface + SABI_IFACE_DATA);
writel(in->d1, samsung->sabi_iface + SABI_IFACE_DATA + 4);
writew(in->d2, samsung->sabi_iface + SABI_IFACE_DATA + 8);
writeb(in->d3, samsung->sabi_iface + SABI_IFACE_DATA + 10);
}
outb(readb(samsung->sabi + config->header_offsets.iface_func), port);
/* write protect memory to make it safe */
outb(readb(samsung->sabi + config->header_offsets.re_mem), port);
/* see if the command actually succeeded */
complete = readb(samsung->sabi_iface + SABI_IFACE_COMPLETE);
iface_data = readb(samsung->sabi_iface + SABI_IFACE_DATA);
	/*
	 * iface_data == 0xFF happens when a command is not known, so we
	 * only warn in debug mode, since we will probably issue some
	 * unknown commands at startup to find out which features are
	 * supported.
	 */
if (complete != 0xaa || (iface_data == 0xff && debug))
pr_warn("SABI command 0x%04x failed with"
" completion flag 0x%02x and interface data 0x%02x",
command, complete, iface_data);
if (complete != 0xaa || iface_data == 0xff) {
ret = -EINVAL;
goto exit;
}
if (out) {
out->d0 = readl(samsung->sabi_iface + SABI_IFACE_DATA);
out->d1 = readl(samsung->sabi_iface + SABI_IFACE_DATA + 4);
out->d2 = readw(samsung->sabi_iface + SABI_IFACE_DATA + 2);
out->d3 = readb(samsung->sabi_iface + SABI_IFACE_DATA + 1);
}
if (debug && out) {
pr_info("SABI return data:{0x%08x, 0x%08x, 0x%04x, 0x%02x}",
out->d0, out->d1, out->d2, out->d3);
}
exit:
mutex_unlock(&samsung->sabi_mutex);
return ret;
}
/* simple wrappers usable with most commands */
static int sabi_set_commandb(struct samsung_laptop *samsung,
u16 command, u8 data)
{
struct sabi_data in = { { { .d0 = 0, .d1 = 0, .d2 = 0, .d3 = 0 } } };
in.data[0] = data;
return sabi_command(samsung, command, &in, NULL);
}
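/*
 * Illustrative only: callers pass one of the command ids from
 * samsung->config->commands, e.g. turning the panel backlight on, as
 * update_status() does below:
 *
 *	sabi_set_commandb(samsung, commands->set_backlight, 1);
 */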
static int read_brightness(struct samsung_laptop *samsung)
{
const struct sabi_config *config = samsung->config;
const struct sabi_commands *commands = &samsung->config->commands;
struct sabi_data sretval;
int user_brightness = 0;
int retval;
retval = sabi_command(samsung, commands->get_brightness,
NULL, &sretval);
if (retval)
return retval;
user_brightness = sretval.data[0];
if (user_brightness > config->min_brightness)
user_brightness -= config->min_brightness;
else
user_brightness = 0;
return user_brightness;
}
static void set_brightness(struct samsung_laptop *samsung, u8 user_brightness)
{
const struct sabi_config *config = samsung->config;
const struct sabi_commands *commands = &samsung->config->commands;
u8 user_level = user_brightness + config->min_brightness;
if (samsung->has_stepping_quirk && user_level != 0) {
/*
* short circuit if the specified level is what's already set
* to prevent the screen from flickering needlessly
*/
if (user_brightness == read_brightness(samsung))
return;
sabi_set_commandb(samsung, commands->set_brightness, 0);
}
sabi_set_commandb(samsung, commands->set_brightness, user_level);
}
static int get_brightness(struct backlight_device *bd)
{
struct samsung_laptop *samsung = bl_get_data(bd);
return read_brightness(samsung);
}
static void check_for_stepping_quirk(struct samsung_laptop *samsung)
{
int initial_level;
int check_level;
int orig_level = read_brightness(samsung);
/*
* Some laptops exhibit the strange behaviour of stepping toward
* (rather than setting) the brightness except when changing to/from
* brightness level 0. This behaviour is checked for here and worked
* around in set_brightness.
*/
if (orig_level == 0)
set_brightness(samsung, 1);
initial_level = read_brightness(samsung);
if (initial_level <= 2)
check_level = initial_level + 2;
else
check_level = initial_level - 2;
samsung->has_stepping_quirk = false;
set_brightness(samsung, check_level);
if (read_brightness(samsung) != check_level) {
samsung->has_stepping_quirk = true;
pr_info("enabled workaround for brightness stepping quirk\n");
}
set_brightness(samsung, orig_level);
}
static int update_status(struct backlight_device *bd)
{
struct samsung_laptop *samsung = bl_get_data(bd);
const struct sabi_commands *commands = &samsung->config->commands;
set_brightness(samsung, bd->props.brightness);
if (bd->props.power == FB_BLANK_UNBLANK)
sabi_set_commandb(samsung, commands->set_backlight, 1);
else
sabi_set_commandb(samsung, commands->set_backlight, 0);
return 0;
}
static const struct backlight_ops backlight_ops = {
.get_brightness = get_brightness,
.update_status = update_status,
};
static int seclinux_rfkill_set(void *data, bool blocked)
{
struct samsung_rfkill *srfkill = data;
struct samsung_laptop *samsung = srfkill->samsung;
const struct sabi_commands *commands = &samsung->config->commands;
return sabi_set_commandb(samsung, commands->set_wireless_button,
!blocked);
}
static const struct rfkill_ops seclinux_rfkill_ops = {
.set_block = seclinux_rfkill_set,
};
static int swsmi_wireless_status(struct samsung_laptop *samsung,
struct sabi_data *data)
{
const struct sabi_commands *commands = &samsung->config->commands;
return sabi_command(samsung, commands->get_wireless_status,
NULL, data);
}
static int swsmi_rfkill_set(void *priv, bool blocked)
{
struct samsung_rfkill *srfkill = priv;
struct samsung_laptop *samsung = srfkill->samsung;
const struct sabi_commands *commands = &samsung->config->commands;
struct sabi_data data;
int ret, i;
ret = swsmi_wireless_status(samsung, &data);
if (ret)
return ret;
/* Don't set the state for non-present devices */
for (i = 0; i < 4; i++)
if (data.data[i] == 0x02)
			data.data[i] = 0;
if (srfkill->type == RFKILL_TYPE_WLAN)
data.data[WL_STATUS_WLAN] = !blocked;
else if (srfkill->type == RFKILL_TYPE_BLUETOOTH)
data.data[WL_STATUS_BT] = !blocked;
return sabi_command(samsung, commands->set_wireless_status,
&data, &data);
}
static void swsmi_rfkill_query(struct rfkill *rfkill, void *priv)
{
struct samsung_rfkill *srfkill = priv;
struct samsung_laptop *samsung = srfkill->samsung;
struct sabi_data data;
int ret;
ret = swsmi_wireless_status(samsung, &data);
if (ret)
		return;
if (srfkill->type == RFKILL_TYPE_WLAN)
ret = data.data[WL_STATUS_WLAN];
else if (srfkill->type == RFKILL_TYPE_BLUETOOTH)
ret = data.data[WL_STATUS_BT];
else
		return;
rfkill_set_sw_state(rfkill, !ret);
}
static const struct rfkill_ops swsmi_rfkill_ops = {
.set_block = swsmi_rfkill_set,
.query = swsmi_rfkill_query,
};
static ssize_t get_performance_level(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct samsung_laptop *samsung = dev_get_drvdata(dev);
const struct sabi_config *config = samsung->config;
const struct sabi_commands *commands = &config->commands;
struct sabi_data sretval;
int retval;
int i;
/* Read the state */
retval = sabi_command(samsung, commands->get_performance_level,
NULL, &sretval);
if (retval)
return retval;
/* The logic is backwards, yeah, lots of fun... */
for (i = 0; config->performance_levels[i].name; ++i) {
if (sretval.data[0] == config->performance_levels[i].value)
return sprintf(buf, "%s\n", config->performance_levels[i].name);
}
return sprintf(buf, "%s\n", "unknown");
}
static ssize_t set_performance_level(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
struct samsung_laptop *samsung = dev_get_drvdata(dev);
const struct sabi_config *config = samsung->config;
const struct sabi_commands *commands = &config->commands;
int i;
if (count < 1)
return count;
for (i = 0; config->performance_levels[i].name; ++i) {
const struct sabi_performance_level *level =
&config->performance_levels[i];
if (!strncasecmp(level->name, buf, strlen(level->name))) {
sabi_set_commandb(samsung,
commands->set_performance_level,
level->value);
break;
}
}
if (!config->performance_levels[i].name)
return -EINVAL;
return count;
}
static DEVICE_ATTR(performance_level, 0644,
get_performance_level, set_performance_level);
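/*
 * Resulting sysfs usage (sketch; the path assumes the "samsung" platform
 * device registered in samsung_platform_init() below, and "silent" is one
 * of the performance level names from the config tables above):
 *
 *	cat /sys/devices/platform/samsung/performance_level
 *	echo silent > /sys/devices/platform/samsung/performance_level
 */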
static int read_battery_life_extender(struct samsung_laptop *samsung)
{
const struct sabi_commands *commands = &samsung->config->commands;
struct sabi_data data;
int retval;
if (commands->get_battery_life_extender == 0xFFFF)
return -ENODEV;
memset(&data, 0, sizeof(data));
data.data[0] = 0x80;
retval = sabi_command(samsung, commands->get_battery_life_extender,
&data, &data);
if (retval)
return retval;
if (data.data[0] != 0 && data.data[0] != 1)
return -ENODEV;
return data.data[0];
}
static int write_battery_life_extender(struct samsung_laptop *samsung,
int enabled)
{
const struct sabi_commands *commands = &samsung->config->commands;
struct sabi_data data;
memset(&data, 0, sizeof(data));
data.data[0] = 0x80 | enabled;
return sabi_command(samsung, commands->set_battery_life_extender,
&data, NULL);
}
static ssize_t get_battery_life_extender(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct samsung_laptop *samsung = dev_get_drvdata(dev);
int ret;
ret = read_battery_life_extender(samsung);
if (ret < 0)
return ret;
return sprintf(buf, "%d\n", ret);
}
static ssize_t set_battery_life_extender(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct samsung_laptop *samsung = dev_get_drvdata(dev);
int ret, value;
if (!count || kstrtoint(buf, 0, &value) != 0)
return -EINVAL;
ret = write_battery_life_extender(samsung, !!value);
if (ret < 0)
return ret;
return count;
}
static DEVICE_ATTR(battery_life_extender, 0644,
get_battery_life_extender, set_battery_life_extender);
static int read_usb_charge(struct samsung_laptop *samsung)
{
const struct sabi_commands *commands = &samsung->config->commands;
struct sabi_data data;
int retval;
if (commands->get_usb_charge == 0xFFFF)
return -ENODEV;
memset(&data, 0, sizeof(data));
data.data[0] = 0x80;
retval = sabi_command(samsung, commands->get_usb_charge,
&data, &data);
if (retval)
return retval;
if (data.data[0] != 0 && data.data[0] != 1)
return -ENODEV;
return data.data[0];
}
static int write_usb_charge(struct samsung_laptop *samsung,
int enabled)
{
const struct sabi_commands *commands = &samsung->config->commands;
struct sabi_data data;
memset(&data, 0, sizeof(data));
data.data[0] = 0x80 | enabled;
return sabi_command(samsung, commands->set_usb_charge,
&data, NULL);
}
static ssize_t get_usb_charge(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct samsung_laptop *samsung = dev_get_drvdata(dev);
int ret;
ret = read_usb_charge(samsung);
if (ret < 0)
return ret;
return sprintf(buf, "%d\n", ret);
}
static ssize_t set_usb_charge(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct samsung_laptop *samsung = dev_get_drvdata(dev);
int ret, value;
if (!count || kstrtoint(buf, 0, &value) != 0)
return -EINVAL;
ret = write_usb_charge(samsung, !!value);
if (ret < 0)
return ret;
return count;
}
static DEVICE_ATTR(usb_charge, 0644,
get_usb_charge, set_usb_charge);
static int read_lid_handling(struct samsung_laptop *samsung)
{
const struct sabi_commands *commands = &samsung->config->commands;
struct sabi_data data;
int retval;
if (commands->get_lid_handling == 0xFFFF)
return -ENODEV;
memset(&data, 0, sizeof(data));
retval = sabi_command(samsung, commands->get_lid_handling,
&data, &data);
if (retval)
return retval;
return data.data[0] & 0x1;
}
static int write_lid_handling(struct samsung_laptop *samsung,
int enabled)
{
const struct sabi_commands *commands = &samsung->config->commands;
struct sabi_data data;
memset(&data, 0, sizeof(data));
data.data[0] = 0x80 | enabled;
return sabi_command(samsung, commands->set_lid_handling,
&data, NULL);
}
static ssize_t get_lid_handling(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct samsung_laptop *samsung = dev_get_drvdata(dev);
int ret;
ret = read_lid_handling(samsung);
if (ret < 0)
return ret;
return sprintf(buf, "%d\n", ret);
}
static ssize_t set_lid_handling(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct samsung_laptop *samsung = dev_get_drvdata(dev);
int ret, value;
if (!count || kstrtoint(buf, 0, &value) != 0)
return -EINVAL;
ret = write_lid_handling(samsung, !!value);
if (ret < 0)
return ret;
return count;
}
static DEVICE_ATTR(lid_handling, 0644,
get_lid_handling, set_lid_handling);
static struct attribute *platform_attributes[] = {
&dev_attr_performance_level.attr,
&dev_attr_battery_life_extender.attr,
&dev_attr_usb_charge.attr,
&dev_attr_lid_handling.attr,
NULL
};
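/*
 * Scan the mapped 0xf0000 segment for the ASCII signature in testStr.
 * Returns the offset of the last matched byte, or 0xffff if the signature
 * was not found (callers rely on this sentinel). For example, a successful
 * search for "SDiaG@" returns the offset of the trailing '@', which
 * samsung_sabi_diag() then steps past with loca += 1.
 */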
static int find_signature(void __iomem *memcheck, const char *testStr)
{
int i = 0;
int loca;
for (loca = 0; loca < 0xffff; loca++) {
char temp = readb(memcheck + loca);
if (temp == testStr[i]) {
if (i == strlen(testStr)-1)
break;
++i;
} else {
i = 0;
}
}
return loca;
}
static void samsung_rfkill_exit(struct samsung_laptop *samsung)
{
if (samsung->wlan.rfkill) {
rfkill_unregister(samsung->wlan.rfkill);
rfkill_destroy(samsung->wlan.rfkill);
samsung->wlan.rfkill = NULL;
}
if (samsung->bluetooth.rfkill) {
rfkill_unregister(samsung->bluetooth.rfkill);
rfkill_destroy(samsung->bluetooth.rfkill);
samsung->bluetooth.rfkill = NULL;
}
}
static int samsung_new_rfkill(struct samsung_laptop *samsung,
struct samsung_rfkill *arfkill,
const char *name, enum rfkill_type type,
const struct rfkill_ops *ops,
int blocked)
{
struct rfkill **rfkill = &arfkill->rfkill;
int ret;
arfkill->type = type;
arfkill->samsung = samsung;
*rfkill = rfkill_alloc(name, &samsung->platform_device->dev,
type, ops, arfkill);
if (!*rfkill)
return -EINVAL;
if (blocked != -1)
rfkill_init_sw_state(*rfkill, blocked);
ret = rfkill_register(*rfkill);
if (ret) {
rfkill_destroy(*rfkill);
*rfkill = NULL;
return ret;
}
return 0;
}
static int __init samsung_rfkill_init_seclinux(struct samsung_laptop *samsung)
{
return samsung_new_rfkill(samsung, &samsung->wlan, "samsung-wlan",
RFKILL_TYPE_WLAN, &seclinux_rfkill_ops, -1);
}
static int __init samsung_rfkill_init_swsmi(struct samsung_laptop *samsung)
{
struct sabi_data data;
int ret;
ret = swsmi_wireless_status(samsung, &data);
if (ret) {
		/*
		 * Some swsmi laptops use the old seclinux way to control
		 * wireless devices.
		 */
if (ret == -EINVAL)
ret = samsung_rfkill_init_seclinux(samsung);
return ret;
}
	/* 0x02 seems to mean that the device is not present/available */
if (data.data[WL_STATUS_WLAN] != 0x02)
ret = samsung_new_rfkill(samsung, &samsung->wlan,
"samsung-wlan",
RFKILL_TYPE_WLAN,
&swsmi_rfkill_ops,
!data.data[WL_STATUS_WLAN]);
if (ret)
goto exit;
if (data.data[WL_STATUS_BT] != 0x02)
ret = samsung_new_rfkill(samsung, &samsung->bluetooth,
"samsung-bluetooth",
RFKILL_TYPE_BLUETOOTH,
&swsmi_rfkill_ops,
!data.data[WL_STATUS_BT]);
if (ret)
goto exit;
exit:
if (ret)
samsung_rfkill_exit(samsung);
return ret;
}
static int __init samsung_rfkill_init(struct samsung_laptop *samsung)
{
if (samsung->config->sabi_version == 2)
return samsung_rfkill_init_seclinux(samsung);
if (samsung->config->sabi_version == 3)
return samsung_rfkill_init_swsmi(samsung);
return 0;
}
static void samsung_lid_handling_exit(struct samsung_laptop *samsung)
{
if (samsung->quirks->lid_handling)
write_lid_handling(samsung, 0);
}
static int __init samsung_lid_handling_init(struct samsung_laptop *samsung)
{
int retval = 0;
if (samsung->quirks->lid_handling)
retval = write_lid_handling(samsung, 1);
return retval;
}
static int kbd_backlight_enable(struct samsung_laptop *samsung)
{
const struct sabi_commands *commands = &samsung->config->commands;
struct sabi_data data;
int retval;
if (commands->kbd_backlight == 0xFFFF)
return -ENODEV;
memset(&data, 0, sizeof(data));
data.d0 = 0xaabb;
retval = sabi_command(samsung, commands->kbd_backlight,
&data, &data);
if (retval)
return retval;
if (data.d0 != 0xccdd)
return -ENODEV;
return 0;
}
static int kbd_backlight_read(struct samsung_laptop *samsung)
{
const struct sabi_commands *commands = &samsung->config->commands;
struct sabi_data data;
int retval;
memset(&data, 0, sizeof(data));
data.data[0] = 0x81;
retval = sabi_command(samsung, commands->kbd_backlight,
&data, &data);
if (retval)
return retval;
return data.data[0];
}
static int kbd_backlight_write(struct samsung_laptop *samsung, int brightness)
{
const struct sabi_commands *commands = &samsung->config->commands;
struct sabi_data data;
memset(&data, 0, sizeof(data));
data.d0 = 0x82 | ((brightness & 0xFF) << 8);
return sabi_command(samsung, commands->kbd_backlight,
&data, NULL);
}
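/*
 * The encoding above matches the debugfs example near the top of this
 * file: brightness 5 gives d0 == 0x82 | (5 << 8) == 0x0582.
 */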
static void kbd_led_update(struct work_struct *work)
{
struct samsung_laptop *samsung;
samsung = container_of(work, struct samsung_laptop, kbd_led_work);
kbd_backlight_write(samsung, samsung->kbd_led_wk);
}
static void kbd_led_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct samsung_laptop *samsung;
samsung = container_of(led_cdev, struct samsung_laptop, kbd_led);
if (value > samsung->kbd_led.max_brightness)
value = samsung->kbd_led.max_brightness;
samsung->kbd_led_wk = value;
queue_work(samsung->led_workqueue, &samsung->kbd_led_work);
}
static enum led_brightness kbd_led_get(struct led_classdev *led_cdev)
{
struct samsung_laptop *samsung;
samsung = container_of(led_cdev, struct samsung_laptop, kbd_led);
return kbd_backlight_read(samsung);
}
static void samsung_leds_exit(struct samsung_laptop *samsung)
{
led_classdev_unregister(&samsung->kbd_led);
if (samsung->led_workqueue)
destroy_workqueue(samsung->led_workqueue);
}
static int __init samsung_leds_init(struct samsung_laptop *samsung)
{
int ret = 0;
samsung->led_workqueue = create_singlethread_workqueue("led_workqueue");
if (!samsung->led_workqueue)
return -ENOMEM;
if (kbd_backlight_enable(samsung) >= 0) {
INIT_WORK(&samsung->kbd_led_work, kbd_led_update);
samsung->kbd_led.name = "samsung::kbd_backlight";
samsung->kbd_led.brightness_set = kbd_led_set;
samsung->kbd_led.brightness_get = kbd_led_get;
samsung->kbd_led.max_brightness = 8;
if (samsung->quirks->four_kbd_backlight_levels)
samsung->kbd_led.max_brightness = 4;
ret = led_classdev_register(&samsung->platform_device->dev,
&samsung->kbd_led);
}
if (ret)
samsung_leds_exit(samsung);
return ret;
}
static void samsung_backlight_exit(struct samsung_laptop *samsung)
{
if (samsung->backlight_device) {
backlight_device_unregister(samsung->backlight_device);
samsung->backlight_device = NULL;
}
}
static int __init samsung_backlight_init(struct samsung_laptop *samsung)
{
struct backlight_device *bd;
struct backlight_properties props;
if (!samsung->handle_backlight)
return 0;
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_PLATFORM;
props.max_brightness = samsung->config->max_brightness -
samsung->config->min_brightness;
bd = backlight_device_register("samsung",
&samsung->platform_device->dev,
samsung, &backlight_ops,
&props);
if (IS_ERR(bd))
return PTR_ERR(bd);
samsung->backlight_device = bd;
samsung->backlight_device->props.brightness = read_brightness(samsung);
samsung->backlight_device->props.power = FB_BLANK_UNBLANK;
backlight_update_status(samsung->backlight_device);
return 0;
}
static umode_t samsung_sysfs_is_visible(struct kobject *kobj,
struct attribute *attr, int idx)
{
struct device *dev = kobj_to_dev(kobj);
struct samsung_laptop *samsung = dev_get_drvdata(dev);
bool ok = true;
if (attr == &dev_attr_performance_level.attr)
ok = !!samsung->config->performance_levels[0].name;
if (attr == &dev_attr_battery_life_extender.attr)
ok = !!(read_battery_life_extender(samsung) >= 0);
if (attr == &dev_attr_usb_charge.attr)
ok = !!(read_usb_charge(samsung) >= 0);
if (attr == &dev_attr_lid_handling.attr)
ok = !!(read_lid_handling(samsung) >= 0);
return ok ? attr->mode : 0;
}
static const struct attribute_group platform_attribute_group = {
.is_visible = samsung_sysfs_is_visible,
.attrs = platform_attributes
};
static void samsung_sysfs_exit(struct samsung_laptop *samsung)
{
struct platform_device *device = samsung->platform_device;
sysfs_remove_group(&device->dev.kobj, &platform_attribute_group);
}
static int __init samsung_sysfs_init(struct samsung_laptop *samsung)
{
struct platform_device *device = samsung->platform_device;
return sysfs_create_group(&device->dev.kobj, &platform_attribute_group);
}
static int samsung_laptop_call_show(struct seq_file *m, void *data)
{
struct samsung_laptop *samsung = m->private;
struct sabi_data *sdata = &samsung->debug.data;
int ret;
seq_printf(m, "SABI 0x%04x {0x%08x, 0x%08x, 0x%04x, 0x%02x}\n",
samsung->debug.command,
sdata->d0, sdata->d1, sdata->d2, sdata->d3);
ret = sabi_command(samsung, samsung->debug.command, sdata, sdata);
if (ret) {
seq_printf(m, "SABI command 0x%04x failed\n",
samsung->debug.command);
return ret;
}
seq_printf(m, "SABI {0x%08x, 0x%08x, 0x%04x, 0x%02x}\n",
sdata->d0, sdata->d1, sdata->d2, sdata->d3);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(samsung_laptop_call);
static void samsung_debugfs_exit(struct samsung_laptop *samsung)
{
debugfs_remove_recursive(samsung->debug.root);
}
static void samsung_debugfs_init(struct samsung_laptop *samsung)
{
struct dentry *root;
root = debugfs_create_dir("samsung-laptop", NULL);
samsung->debug.root = root;
samsung->debug.f0000_wrapper.data = samsung->f0000_segment;
samsung->debug.f0000_wrapper.size = 0xffff;
samsung->debug.data_wrapper.data = &samsung->debug.data;
samsung->debug.data_wrapper.size = sizeof(samsung->debug.data);
samsung->debug.sdiag_wrapper.data = samsung->sdiag;
samsung->debug.sdiag_wrapper.size = strlen(samsung->sdiag);
debugfs_create_u16("command", 0644, root, &samsung->debug.command);
debugfs_create_u32("d0", 0644, root, &samsung->debug.data.d0);
debugfs_create_u32("d1", 0644, root, &samsung->debug.data.d1);
debugfs_create_u16("d2", 0644, root, &samsung->debug.data.d2);
debugfs_create_u8("d3", 0644, root, &samsung->debug.data.d3);
debugfs_create_blob("data", 0444, root, &samsung->debug.data_wrapper);
debugfs_create_blob("f0000_segment", 0400, root,
&samsung->debug.f0000_wrapper);
debugfs_create_file("call", 0444, root, samsung,
&samsung_laptop_call_fops);
debugfs_create_blob("sdiag", 0444, root, &samsung->debug.sdiag_wrapper);
}
static void samsung_sabi_exit(struct samsung_laptop *samsung)
{
const struct sabi_config *config = samsung->config;
/* Turn off "Linux" mode in the BIOS */
if (config && config->commands.set_linux != 0xff)
sabi_set_commandb(samsung, config->commands.set_linux, 0x80);
if (samsung->sabi_iface) {
iounmap(samsung->sabi_iface);
samsung->sabi_iface = NULL;
}
if (samsung->f0000_segment) {
iounmap(samsung->f0000_segment);
samsung->f0000_segment = NULL;
}
samsung->config = NULL;
}
static __init void samsung_sabi_infos(struct samsung_laptop *samsung, int loca,
unsigned int ifaceP)
{
const struct sabi_config *config = samsung->config;
printk(KERN_DEBUG "This computer supports SABI==%x\n",
loca + 0xf0000 - 6);
printk(KERN_DEBUG "SABI header:\n");
printk(KERN_DEBUG " SMI Port Number = 0x%04x\n",
readw(samsung->sabi + config->header_offsets.port));
printk(KERN_DEBUG " SMI Interface Function = 0x%02x\n",
readb(samsung->sabi + config->header_offsets.iface_func));
printk(KERN_DEBUG " SMI enable memory buffer = 0x%02x\n",
readb(samsung->sabi + config->header_offsets.en_mem));
printk(KERN_DEBUG " SMI restore memory buffer = 0x%02x\n",
readb(samsung->sabi + config->header_offsets.re_mem));
printk(KERN_DEBUG " SABI data offset = 0x%04x\n",
readw(samsung->sabi + config->header_offsets.data_offset));
printk(KERN_DEBUG " SABI data segment = 0x%04x\n",
readw(samsung->sabi + config->header_offsets.data_segment));
printk(KERN_DEBUG " SABI pointer = 0x%08x\n", ifaceP);
}
static void __init samsung_sabi_diag(struct samsung_laptop *samsung)
{
int loca = find_signature(samsung->f0000_segment, "SDiaG@");
int i;
if (loca == 0xffff)
		return;
/* Example:
* Ident: @SDiaG@686XX-N90X3A/966-SEC-07HL-S90X3A
*
* Product name: 90X3A
* BIOS Version: 07HL
*/
loca += 1;
for (i = 0; loca < 0xffff && i < sizeof(samsung->sdiag) - 1; loca++) {
char temp = readb(samsung->f0000_segment + loca);
if (isalnum(temp) || temp == '/' || temp == '-')
samsung->sdiag[i++] = temp;
else
			break;
}
if (debug && samsung->sdiag[0])
pr_info("sdiag: %s", samsung->sdiag);
}
static int __init samsung_sabi_init(struct samsung_laptop *samsung)
{
const struct sabi_config *config = NULL;
const struct sabi_commands *commands;
unsigned int ifaceP;
int loca = 0xffff;
int ret = 0;
int i;
samsung->f0000_segment = ioremap(0xf0000, 0xffff);
if (!samsung->f0000_segment) {
if (debug || force)
pr_err("Can't map the segment at 0xf0000\n");
ret = -EINVAL;
goto exit;
}
samsung_sabi_diag(samsung);
/* Try to find one of the signatures in memory to find the header */
for (i = 0; sabi_configs[i].test_string != NULL; ++i) {
samsung->config = &sabi_configs[i];
loca = find_signature(samsung->f0000_segment,
samsung->config->test_string);
if (loca != 0xffff)
break;
}
if (loca == 0xffff) {
if (debug || force)
pr_err("This computer does not support SABI\n");
ret = -ENODEV;
goto exit;
}
config = samsung->config;
commands = &config->commands;
/* point to the SMI port Number */
loca += 1;
samsung->sabi = (samsung->f0000_segment + loca);
/* Get a pointer to the SABI Interface */
ifaceP = (readw(samsung->sabi + config->header_offsets.data_segment) & 0x0ffff) << 4;
ifaceP += readw(samsung->sabi + config->header_offsets.data_offset) & 0x0ffff;
if (debug)
samsung_sabi_infos(samsung, loca, ifaceP);
samsung->sabi_iface = ioremap(ifaceP, 16);
if (!samsung->sabi_iface) {
pr_err("Can't remap %x\n", ifaceP);
ret = -EINVAL;
goto exit;
}
/* Turn on "Linux" mode in the BIOS */
if (commands->set_linux != 0xff) {
int retval = sabi_set_commandb(samsung,
commands->set_linux, 0x81);
if (retval) {
pr_warn("Linux mode was not set!\n");
ret = -ENODEV;
goto exit;
}
}
/* Check for stepping quirk */
if (samsung->handle_backlight)
check_for_stepping_quirk(samsung);
pr_info("detected SABI interface: %s\n",
samsung->config->test_string);
exit:
if (ret)
samsung_sabi_exit(samsung);
return ret;
}
static void samsung_platform_exit(struct samsung_laptop *samsung)
{
if (samsung->platform_device) {
platform_device_unregister(samsung->platform_device);
samsung->platform_device = NULL;
}
}
static int samsung_pm_notification(struct notifier_block *nb,
unsigned long val, void *ptr)
{
struct samsung_laptop *samsung;
samsung = container_of(nb, struct samsung_laptop, pm_nb);
if (val == PM_POST_HIBERNATION &&
samsung->quirks->enable_kbd_backlight)
kbd_backlight_enable(samsung);
if (val == PM_POST_HIBERNATION && samsung->quirks->lid_handling)
write_lid_handling(samsung, 1);
return 0;
}
static int __init samsung_platform_init(struct samsung_laptop *samsung)
{
struct platform_device *pdev;
pdev = platform_device_register_simple("samsung", PLATFORM_DEVID_NONE, NULL, 0);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
samsung->platform_device = pdev;
platform_set_drvdata(samsung->platform_device, samsung);
return 0;
}
static struct samsung_quirks *quirks;
static int __init samsung_dmi_matched(const struct dmi_system_id *d)
{
quirks = d->driver_data;
return 0;
}
static const struct dmi_system_id samsung_dmi_table[] __initconst = {
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR,
"SAMSUNG ELECTRONICS CO., LTD."),
DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */
},
},
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR,
"SAMSUNG ELECTRONICS CO., LTD."),
DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /* Laptop */
},
},
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR,
"SAMSUNG ELECTRONICS CO., LTD."),
DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
},
},
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR,
"SAMSUNG ELECTRONICS CO., LTD."),
DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */
},
},
/* DMI ids for laptops with bad Chassis Type */
{
.ident = "R40/R41",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
DMI_MATCH(DMI_PRODUCT_NAME, "R40/R41"),
DMI_MATCH(DMI_BOARD_NAME, "R40/R41"),
},
},
/* Specific DMI ids for laptop with quirks */
{
.callback = samsung_dmi_matched,
.ident = "730U3E/740U3E",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
DMI_MATCH(DMI_PRODUCT_NAME, "730U3E/740U3E"),
},
.driver_data = &samsung_np740u3e,
},
{
.callback = samsung_dmi_matched,
.ident = "300V3Z/300V4Z/300V5Z",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
DMI_MATCH(DMI_PRODUCT_NAME, "300V3Z/300V4Z/300V5Z"),
},
.driver_data = &samsung_lid_handling,
},
{ },
};
MODULE_DEVICE_TABLE(dmi, samsung_dmi_table);
static struct platform_device *samsung_platform_device;
static int __init samsung_init(void)
{
struct samsung_laptop *samsung;
int ret;
if (efi_enabled(EFI_BOOT))
return -ENODEV;
quirks = &samsung_unknown;
if (!force && !dmi_check_system(samsung_dmi_table))
return -ENODEV;
samsung = kzalloc(sizeof(*samsung), GFP_KERNEL);
if (!samsung)
return -ENOMEM;
mutex_init(&samsung->sabi_mutex);
samsung->handle_backlight = true;
samsung->quirks = quirks;
if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
samsung->handle_backlight = false;
ret = samsung_platform_init(samsung);
if (ret)
goto error_platform;
ret = samsung_sabi_init(samsung);
if (ret)
goto error_sabi;
ret = samsung_sysfs_init(samsung);
if (ret)
goto error_sysfs;
ret = samsung_backlight_init(samsung);
if (ret)
goto error_backlight;
ret = samsung_rfkill_init(samsung);
if (ret)
goto error_rfkill;
ret = samsung_leds_init(samsung);
if (ret)
goto error_leds;
ret = samsung_lid_handling_init(samsung);
if (ret)
goto error_lid_handling;
samsung_debugfs_init(samsung);
samsung->pm_nb.notifier_call = samsung_pm_notification;
register_pm_notifier(&samsung->pm_nb);
samsung_platform_device = samsung->platform_device;
return ret;
error_lid_handling:
samsung_leds_exit(samsung);
error_leds:
samsung_rfkill_exit(samsung);
error_rfkill:
samsung_backlight_exit(samsung);
error_backlight:
samsung_sysfs_exit(samsung);
error_sysfs:
samsung_sabi_exit(samsung);
error_sabi:
samsung_platform_exit(samsung);
error_platform:
kfree(samsung);
return ret;
}
static void __exit samsung_exit(void)
{
struct samsung_laptop *samsung;
samsung = platform_get_drvdata(samsung_platform_device);
unregister_pm_notifier(&samsung->pm_nb);
samsung_debugfs_exit(samsung);
samsung_lid_handling_exit(samsung);
samsung_leds_exit(samsung);
samsung_rfkill_exit(samsung);
samsung_backlight_exit(samsung);
samsung_sysfs_exit(samsung);
samsung_sabi_exit(samsung);
samsung_platform_exit(samsung);
kfree(samsung);
samsung_platform_device = NULL;
}
module_init(samsung_init);
module_exit(samsung_exit);
MODULE_AUTHOR("Greg Kroah-Hartman <[email protected]>");
MODULE_DESCRIPTION("Samsung Backlight driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/x86/samsung-laptop.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Intel Atom SoC Power Management Controller Driver
* Copyright (c) 2014-2015,2017,2022 Intel Corporation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/platform_data/x86/clk-pmc-atom.h>
#include <linux/platform_data/x86/pmc_atom.h>
#include <linux/platform_data/x86/simatic-ipc.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
struct pmc_bit_map {
const char *name;
u32 bit_mask;
};
struct pmc_reg_map {
const struct pmc_bit_map *d3_sts_0;
const struct pmc_bit_map *d3_sts_1;
const struct pmc_bit_map *func_dis;
const struct pmc_bit_map *func_dis_2;
const struct pmc_bit_map *pss;
};
struct pmc_data {
const struct pmc_reg_map *map;
const struct pmc_clk *clks;
};
struct pmc_dev {
u32 base_addr;
void __iomem *regmap;
const struct pmc_reg_map *map;
#ifdef CONFIG_DEBUG_FS
struct dentry *dbgfs_dir;
#endif /* CONFIG_DEBUG_FS */
bool init;
};
static struct pmc_dev pmc_device;
static u32 acpi_base_addr;
static const struct pmc_clk byt_clks[] = {
{
.name = "xtal",
.freq = 25000000,
.parent_name = NULL,
},
{
.name = "pll",
.freq = 19200000,
.parent_name = "xtal",
},
{}
};
static const struct pmc_clk cht_clks[] = {
{
.name = "xtal",
.freq = 19200000,
.parent_name = NULL,
},
{}
};
static const struct pmc_bit_map d3_sts_0_map[] = {
{"LPSS1_F0_DMA", BIT_LPSS1_F0_DMA},
{"LPSS1_F1_PWM1", BIT_LPSS1_F1_PWM1},
{"LPSS1_F2_PWM2", BIT_LPSS1_F2_PWM2},
{"LPSS1_F3_HSUART1", BIT_LPSS1_F3_HSUART1},
{"LPSS1_F4_HSUART2", BIT_LPSS1_F4_HSUART2},
{"LPSS1_F5_SPI", BIT_LPSS1_F5_SPI},
{"LPSS1_F6_Reserved", BIT_LPSS1_F6_XXX},
{"LPSS1_F7_Reserved", BIT_LPSS1_F7_XXX},
{"SCC_EMMC", BIT_SCC_EMMC},
{"SCC_SDIO", BIT_SCC_SDIO},
{"SCC_SDCARD", BIT_SCC_SDCARD},
{"SCC_MIPI", BIT_SCC_MIPI},
{"HDA", BIT_HDA},
{"LPE", BIT_LPE},
{"OTG", BIT_OTG},
{"USH", BIT_USH},
{"GBE", BIT_GBE},
{"SATA", BIT_SATA},
{"USB_EHCI", BIT_USB_EHCI},
{"SEC", BIT_SEC},
{"PCIE_PORT0", BIT_PCIE_PORT0},
{"PCIE_PORT1", BIT_PCIE_PORT1},
{"PCIE_PORT2", BIT_PCIE_PORT2},
{"PCIE_PORT3", BIT_PCIE_PORT3},
{"LPSS2_F0_DMA", BIT_LPSS2_F0_DMA},
{"LPSS2_F1_I2C1", BIT_LPSS2_F1_I2C1},
{"LPSS2_F2_I2C2", BIT_LPSS2_F2_I2C2},
{"LPSS2_F3_I2C3", BIT_LPSS2_F3_I2C3},
{"LPSS2_F3_I2C4", BIT_LPSS2_F4_I2C4},
{"LPSS2_F5_I2C5", BIT_LPSS2_F5_I2C5},
{"LPSS2_F6_I2C6", BIT_LPSS2_F6_I2C6},
{"LPSS2_F7_I2C7", BIT_LPSS2_F7_I2C7},
{}
};
static struct pmc_bit_map byt_d3_sts_1_map[] = {
{"SMB", BIT_SMB},
{"OTG_SS_PHY", BIT_OTG_SS_PHY},
{"USH_SS_PHY", BIT_USH_SS_PHY},
{"DFX", BIT_DFX},
{}
};
static struct pmc_bit_map cht_d3_sts_1_map[] = {
{"SMB", BIT_SMB},
{"GMM", BIT_STS_GMM},
{"ISH", BIT_STS_ISH},
{}
};
static struct pmc_bit_map cht_func_dis_2_map[] = {
{"SMB", BIT_SMB},
{"GMM", BIT_FD_GMM},
{"ISH", BIT_FD_ISH},
{}
};
static const struct pmc_bit_map byt_pss_map[] = {
{"GBE", PMC_PSS_BIT_GBE},
{"SATA", PMC_PSS_BIT_SATA},
{"HDA", PMC_PSS_BIT_HDA},
{"SEC", PMC_PSS_BIT_SEC},
{"PCIE", PMC_PSS_BIT_PCIE},
{"LPSS", PMC_PSS_BIT_LPSS},
{"LPE", PMC_PSS_BIT_LPE},
{"DFX", PMC_PSS_BIT_DFX},
{"USH_CTRL", PMC_PSS_BIT_USH_CTRL},
{"USH_SUS", PMC_PSS_BIT_USH_SUS},
{"USH_VCCS", PMC_PSS_BIT_USH_VCCS},
{"USH_VCCA", PMC_PSS_BIT_USH_VCCA},
{"OTG_CTRL", PMC_PSS_BIT_OTG_CTRL},
{"OTG_VCCS", PMC_PSS_BIT_OTG_VCCS},
{"OTG_VCCA_CLK", PMC_PSS_BIT_OTG_VCCA_CLK},
{"OTG_VCCA", PMC_PSS_BIT_OTG_VCCA},
{"USB", PMC_PSS_BIT_USB},
{"USB_SUS", PMC_PSS_BIT_USB_SUS},
{}
};
static const struct pmc_bit_map cht_pss_map[] = {
{"SATA", PMC_PSS_BIT_SATA},
{"HDA", PMC_PSS_BIT_HDA},
{"SEC", PMC_PSS_BIT_SEC},
{"PCIE", PMC_PSS_BIT_PCIE},
{"LPSS", PMC_PSS_BIT_LPSS},
{"LPE", PMC_PSS_BIT_LPE},
{"UFS", PMC_PSS_BIT_CHT_UFS},
{"UXD", PMC_PSS_BIT_CHT_UXD},
{"UXD_FD", PMC_PSS_BIT_CHT_UXD_FD},
{"UX_ENG", PMC_PSS_BIT_CHT_UX_ENG},
{"USB_SUS", PMC_PSS_BIT_CHT_USB_SUS},
{"GMM", PMC_PSS_BIT_CHT_GMM},
{"ISH", PMC_PSS_BIT_CHT_ISH},
{"DFX_MASTER", PMC_PSS_BIT_CHT_DFX_MASTER},
{"DFX_CLUSTER1", PMC_PSS_BIT_CHT_DFX_CLUSTER1},
{"DFX_CLUSTER2", PMC_PSS_BIT_CHT_DFX_CLUSTER2},
{"DFX_CLUSTER3", PMC_PSS_BIT_CHT_DFX_CLUSTER3},
{"DFX_CLUSTER4", PMC_PSS_BIT_CHT_DFX_CLUSTER4},
{"DFX_CLUSTER5", PMC_PSS_BIT_CHT_DFX_CLUSTER5},
{}
};
static const struct pmc_reg_map byt_reg_map = {
.d3_sts_0 = d3_sts_0_map,
.d3_sts_1 = byt_d3_sts_1_map,
.func_dis = d3_sts_0_map,
.func_dis_2 = byt_d3_sts_1_map,
.pss = byt_pss_map,
};
static const struct pmc_reg_map cht_reg_map = {
.d3_sts_0 = d3_sts_0_map,
.d3_sts_1 = cht_d3_sts_1_map,
.func_dis = d3_sts_0_map,
.func_dis_2 = cht_func_dis_2_map,
.pss = cht_pss_map,
};
static const struct pmc_data byt_data = {
.map = &byt_reg_map,
.clks = byt_clks,
};
static const struct pmc_data cht_data = {
.map = &cht_reg_map,
.clks = cht_clks,
};
static inline u32 pmc_reg_read(struct pmc_dev *pmc, int reg_offset)
{
return readl(pmc->regmap + reg_offset);
}
static inline void pmc_reg_write(struct pmc_dev *pmc, int reg_offset, u32 val)
{
writel(val, pmc->regmap + reg_offset);
}
int pmc_atom_read(int offset, u32 *value)
{
struct pmc_dev *pmc = &pmc_device;
if (!pmc->init)
return -ENODEV;
*value = pmc_reg_read(pmc, offset);
return 0;
}
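/*
 * Consumer sketch (illustrative; PMC_PSS is one of the register offsets
 * from <linux/platform_data/x86/pmc_atom.h>, also used by the debugfs
 * code below):
 *
 *	u32 pss;
 *
 *	if (!pmc_atom_read(PMC_PSS, &pss))
 *		pr_debug("power island states: %#x\n", pss);
 */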
static void pmc_power_off(void)
{
u16 pm1_cnt_port;
u32 pm1_cnt_value;
pr_info("Preparing to enter system sleep state S5\n");
pm1_cnt_port = acpi_base_addr + PM1_CNT;
pm1_cnt_value = inl(pm1_cnt_port);
pm1_cnt_value &= ~SLEEP_TYPE_MASK;
pm1_cnt_value |= SLEEP_TYPE_S5;
pm1_cnt_value |= SLEEP_ENABLE;
outl(pm1_cnt_value, pm1_cnt_port);
}
static void pmc_hw_reg_setup(struct pmc_dev *pmc)
{
/*
* Disable PMC S0IX_WAKE_EN events coming from:
* - LPC clock run
* - GPIO_SUS ored dedicated IRQs
* - GPIO_SCORE ored dedicated IRQs
* - GPIO_SUS shared IRQ
* - GPIO_SCORE shared IRQ
*/
pmc_reg_write(pmc, PMC_S0IX_WAKE_EN, (u32)PMC_WAKE_EN_SETTING);
}
#ifdef CONFIG_DEBUG_FS
static void pmc_dev_state_print(struct seq_file *s, int reg_index,
u32 sts, const struct pmc_bit_map *sts_map,
u32 fd, const struct pmc_bit_map *fd_map)
{
int offset = PMC_REG_BIT_WIDTH * reg_index;
int index;
for (index = 0; sts_map[index].name; index++) {
seq_printf(s, "Dev: %-2d - %-32s\tState: %s [%s]\n",
offset + index, sts_map[index].name,
fd_map[index].bit_mask & fd ? "Disabled" : "Enabled ",
sts_map[index].bit_mask & sts ? "D3" : "D0");
}
}
static int pmc_dev_state_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmc = s->private;
const struct pmc_reg_map *m = pmc->map;
u32 func_dis, func_dis_2;
u32 d3_sts_0, d3_sts_1;
func_dis = pmc_reg_read(pmc, PMC_FUNC_DIS);
func_dis_2 = pmc_reg_read(pmc, PMC_FUNC_DIS_2);
d3_sts_0 = pmc_reg_read(pmc, PMC_D3_STS_0);
d3_sts_1 = pmc_reg_read(pmc, PMC_D3_STS_1);
/* Low part */
pmc_dev_state_print(s, 0, d3_sts_0, m->d3_sts_0, func_dis, m->func_dis);
/* High part */
pmc_dev_state_print(s, 1, d3_sts_1, m->d3_sts_1, func_dis_2, m->func_dis_2);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_dev_state);
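/*
 * Illustrative dev_state output line (spacing approximate):
 *
 *	Dev: 0  - LPSS1_F0_DMA	State: Enabled  [D0]
 */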
static int pmc_pss_state_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmc = s->private;
const struct pmc_bit_map *map = pmc->map->pss;
u32 pss = pmc_reg_read(pmc, PMC_PSS);
int index;
for (index = 0; map[index].name; index++) {
seq_printf(s, "Island: %-2d - %-32s\tState: %s\n",
index, map[index].name,
map[index].bit_mask & pss ? "Off" : "On");
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_pss_state);
static int pmc_sleep_tmr_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmc = s->private;
u64 s0ir_tmr, s0i1_tmr, s0i2_tmr, s0i3_tmr, s0_tmr;
s0ir_tmr = (u64)pmc_reg_read(pmc, PMC_S0IR_TMR) << PMC_TMR_SHIFT;
s0i1_tmr = (u64)pmc_reg_read(pmc, PMC_S0I1_TMR) << PMC_TMR_SHIFT;
s0i2_tmr = (u64)pmc_reg_read(pmc, PMC_S0I2_TMR) << PMC_TMR_SHIFT;
s0i3_tmr = (u64)pmc_reg_read(pmc, PMC_S0I3_TMR) << PMC_TMR_SHIFT;
s0_tmr = (u64)pmc_reg_read(pmc, PMC_S0_TMR) << PMC_TMR_SHIFT;
seq_printf(s, "S0IR Residency:\t%lldus\n", s0ir_tmr);
seq_printf(s, "S0I1 Residency:\t%lldus\n", s0i1_tmr);
seq_printf(s, "S0I2 Residency:\t%lldus\n", s0i2_tmr);
seq_printf(s, "S0I3 Residency:\t%lldus\n", s0i3_tmr);
seq_printf(s, "S0 Residency:\t%lldus\n", s0_tmr);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_sleep_tmr);
static void pmc_dbgfs_register(struct pmc_dev *pmc)
{
struct dentry *dir;
dir = debugfs_create_dir("pmc_atom", NULL);
pmc->dbgfs_dir = dir;
debugfs_create_file("dev_state", S_IFREG | S_IRUGO, dir, pmc,
&pmc_dev_state_fops);
debugfs_create_file("pss_state", S_IFREG | S_IRUGO, dir, pmc,
&pmc_pss_state_fops);
debugfs_create_file("sleep_state", S_IFREG | S_IRUGO, dir, pmc,
&pmc_sleep_tmr_fops);
}
#else
static void pmc_dbgfs_register(struct pmc_dev *pmc)
{
}
#endif /* CONFIG_DEBUG_FS */
static bool pmc_clk_is_critical = true;
static int dmi_callback(const struct dmi_system_id *d)
{
pr_info("%s: PMC critical clocks quirk enabled\n", d->ident);
return 1;
}
static int dmi_callback_siemens(const struct dmi_system_id *d)
{
u32 st_id;
if (dmi_walk(simatic_ipc_find_dmi_entry_helper, &st_id))
goto out;
if (st_id == SIMATIC_IPC_IPC227E || st_id == SIMATIC_IPC_IPC277E)
return dmi_callback(d);
out:
pmc_clk_is_critical = false;
return 1;
}
/*
* Some systems need one or more of their pmc_plt_clks to be
* marked as critical.
*/
static const struct dmi_system_id critclk_systems[] = {
{
/* pmc_plt_clk0 is used for an external HSIC USB HUB */
.ident = "MPL CEC1x",
.callback = dmi_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MPL AG"),
DMI_MATCH(DMI_PRODUCT_NAME, "CEC10 Family"),
},
},
{
/*
* Lex System / Lex Computech Co. makes a lot of Bay Trail
* based embedded boards which often come with multiple
* ethernet controllers using multiple pmc_plt_clks. See:
* https://www.lex.com.tw/products/embedded-ipc-board/
*/
.ident = "Lex BayTrail",
.callback = dmi_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Lex BayTrail"),
},
},
{
/* pmc_plt_clk* - are used for ethernet controllers */
.ident = "Beckhoff Baytrail",
.callback = dmi_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
DMI_MATCH(DMI_PRODUCT_FAMILY, "CBxx63"),
},
},
{
.ident = "SIEMENS AG",
.callback = dmi_callback_siemens,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SIEMENS AG"),
},
},
{}
};
static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap,
const struct pmc_data *pmc_data)
{
struct platform_device *clkdev;
struct pmc_clk_data *clk_data;
clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
if (!clk_data)
return -ENOMEM;
clk_data->base = pmc_regmap; /* offset is added by client */
clk_data->clks = pmc_data->clks;
if (dmi_check_system(critclk_systems))
clk_data->critical = pmc_clk_is_critical;
clkdev = platform_device_register_data(&pdev->dev, "clk-pmc-atom",
PLATFORM_DEVID_NONE,
clk_data, sizeof(*clk_data));
if (IS_ERR(clkdev)) {
kfree(clk_data);
return PTR_ERR(clkdev);
}
kfree(clk_data);
return 0;
}
static int pmc_setup_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct pmc_dev *pmc = &pmc_device;
const struct pmc_data *data = (struct pmc_data *)ent->driver_data;
const struct pmc_reg_map *map = data->map;
int ret;
/* Obtain ACPI base address */
pci_read_config_dword(pdev, ACPI_BASE_ADDR_OFFSET, &acpi_base_addr);
acpi_base_addr &= ACPI_BASE_ADDR_MASK;
/* Install power off function */
if (acpi_base_addr != 0 && pm_power_off == NULL)
pm_power_off = pmc_power_off;
pci_read_config_dword(pdev, PMC_BASE_ADDR_OFFSET, &pmc->base_addr);
pmc->base_addr &= PMC_BASE_ADDR_MASK;
pmc->regmap = ioremap(pmc->base_addr, PMC_MMIO_REG_LEN);
if (!pmc->regmap) {
dev_err(&pdev->dev, "error: ioremap failed\n");
return -ENOMEM;
}
pmc->map = map;
/* PMC hardware registers setup */
pmc_hw_reg_setup(pmc);
pmc_dbgfs_register(pmc);
/* Register platform clocks - PMC_PLT_CLK [0..5] */
ret = pmc_setup_clks(pdev, pmc->regmap, data);
if (ret)
dev_warn(&pdev->dev, "platform clocks register failed: %d\n",
ret);
pmc->init = true;
return ret;
}
/* Data for PCI driver interface used by pci_match_id() call below */
static const struct pci_device_id pmc_pci_ids[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_VLV_PMC), (kernel_ulong_t)&byt_data },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_CHT_PMC), (kernel_ulong_t)&cht_data },
{}
};
static int __init pmc_atom_init(void)
{
struct pci_dev *pdev = NULL;
const struct pci_device_id *ent;
/*
	 * We look for our device - PCU PMC.
	 * We assume that there is at most one such device.
	 *
	 * We can't use the plain pci_driver mechanism, as the device is
	 * really a multi-function device. The main driver that binds to
	 * the pci_device is lpc_ich, so we have to find & bind to the
	 * device this way.
*/
for_each_pci_dev(pdev) {
ent = pci_match_id(pmc_pci_ids, pdev);
if (ent)
return pmc_setup_dev(pdev, ent);
}
/* Device not found */
return -ENODEV;
}
device_initcall(pmc_atom_init);
/*
MODULE_AUTHOR("Aubrey Li <[email protected]>");
MODULE_DESCRIPTION("Intel Atom SoC Power Management Controller Interface");
MODULE_LICENSE("GPL v2");
*/
| linux-master | drivers/platform/x86/pmc_atom.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the Intel SCU IPC mechanism
*
* (C) Copyright 2008-2010,2015 Intel Corporation
* Author: Sreedhara DS ([email protected])
*
 * The SCU, running on an ARC processor, communicates with other entities
 * running on the IA core through an IPC mechanism, which in turn handles
 * the messaging between the IA core and the SCU.
 * The SCU has two IPC mechanisms, IPC-1 and IPC-2. IPC-1 is used between
 * the IA32 core and the SCU, whereas IPC-2 is used between the P-Unit and
 * the SCU. This driver deals with IPC-1 and provides an API for power
 * control unit registers (e.g. MSIC) along with other APIs.
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/intel_scu_ipc.h>
/* IPC defines the following message types */
#define IPCMSG_PCNTRL 0xff /* Power controller unit read/write */
/* Command id associated with message IPCMSG_PCNTRL */
#define IPC_CMD_PCNTRL_W 0 /* Register write */
#define IPC_CMD_PCNTRL_R 1 /* Register read */
#define IPC_CMD_PCNTRL_M 2 /* Register read-modify-write */
/*
* IPC register summary
*
 * IPC register blocks are memory mapped at a fixed address in PCI BAR 0.
* To read or write information to the SCU, driver writes to IPC-1 memory
* mapped registers. The following is the IPC mechanism
*
* 1. IA core cDMI interface claims this transaction and converts it to a
* Transaction Layer Packet (TLP) message which is sent across the cDMI.
*
* 2. South Complex cDMI block receives this message and writes it to
* the IPC-1 register block, causing an interrupt to the SCU
*
* 3. SCU firmware decodes this interrupt and IPC message and the appropriate
* message handler is called within firmware.
*/
#define IPC_WWBUF_SIZE 20 /* IPC Write buffer Size */
#define IPC_RWBUF_SIZE 20 /* IPC Read buffer Size */
#define IPC_IOC 0x100 /* IPC command register IOC bit */
struct intel_scu_ipc_dev {
struct device dev;
struct resource mem;
struct module *owner;
int irq;
void __iomem *ipc_base;
struct completion cmd_complete;
};
#define IPC_STATUS 0x04
#define IPC_STATUS_IRQ BIT(2)
#define IPC_STATUS_ERR BIT(1)
#define IPC_STATUS_BUSY BIT(0)
/*
* IPC Write/Read Buffers:
* 16 byte buffer for sending and receiving data to and from SCU.
*/
#define IPC_WRITE_BUFFER 0x80
#define IPC_READ_BUFFER 0x90
/* Timeout in jiffies */
#define IPC_TIMEOUT (10 * HZ)
static struct intel_scu_ipc_dev *ipcdev; /* Only one for now */
static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */
static struct class intel_scu_ipc_class = {
.name = "intel_scu_ipc",
};
/**
* intel_scu_ipc_dev_get() - Get SCU IPC instance
*
* The recommended new API takes SCU IPC instance as parameter and this
* function can be called by driver to get the instance. This also makes
* sure the driver providing the IPC functionality cannot be unloaded
* while the caller has the instance.
*
* Call intel_scu_ipc_dev_put() to release the instance.
*
* Returns %NULL if SCU IPC is not currently available.
*/
struct intel_scu_ipc_dev *intel_scu_ipc_dev_get(void)
{
struct intel_scu_ipc_dev *scu = NULL;
mutex_lock(&ipclock);
if (ipcdev) {
get_device(&ipcdev->dev);
/*
* Prevent the IPC provider from being unloaded while it
* is being used.
*/
if (!try_module_get(ipcdev->owner))
put_device(&ipcdev->dev);
else
scu = ipcdev;
}
mutex_unlock(&ipclock);
return scu;
}
EXPORT_SYMBOL_GPL(intel_scu_ipc_dev_get);
/**
* intel_scu_ipc_dev_put() - Put SCU IPC instance
* @scu: SCU IPC instance
*
* This function releases the SCU IPC instance retrieved from
* intel_scu_ipc_dev_get() and allows the driver providing IPC to be
* unloaded.
*/
void intel_scu_ipc_dev_put(struct intel_scu_ipc_dev *scu)
{
if (scu) {
module_put(scu->owner);
put_device(&scu->dev);
}
}
EXPORT_SYMBOL_GPL(intel_scu_ipc_dev_put);
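/*
 * Typical consumer pattern (sketch; the register address 0x30 is a made-up
 * placeholder, not a real MSIC register):
 *
 *	struct intel_scu_ipc_dev *scu = intel_scu_ipc_dev_get();
 *
 *	if (scu) {
 *		u8 val;
 *
 *		intel_scu_ipc_dev_ioread8(scu, 0x30, &val);
 *		intel_scu_ipc_dev_put(scu);
 *	}
 */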
struct intel_scu_ipc_devres {
struct intel_scu_ipc_dev *scu;
};
static void devm_intel_scu_ipc_dev_release(struct device *dev, void *res)
{
struct intel_scu_ipc_devres *dr = res;
struct intel_scu_ipc_dev *scu = dr->scu;
intel_scu_ipc_dev_put(scu);
}
/**
* devm_intel_scu_ipc_dev_get() - Allocate managed SCU IPC device
* @dev: Device requesting the SCU IPC device
*
* The recommended new API takes SCU IPC instance as parameter and this
* function can be called by driver to get the instance. This also makes
* sure the driver providing the IPC functionality cannot be unloaded
* while the caller has the instance.
*
* Returns %NULL if SCU IPC is not currently available.
*/
struct intel_scu_ipc_dev *devm_intel_scu_ipc_dev_get(struct device *dev)
{
struct intel_scu_ipc_devres *dr;
struct intel_scu_ipc_dev *scu;
dr = devres_alloc(devm_intel_scu_ipc_dev_release, sizeof(*dr), GFP_KERNEL);
if (!dr)
return NULL;
scu = intel_scu_ipc_dev_get();
if (!scu) {
devres_free(dr);
return NULL;
}
dr->scu = scu;
devres_add(dev, dr);
return scu;
}
EXPORT_SYMBOL_GPL(devm_intel_scu_ipc_dev_get);
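/*
 * Sketch of the managed variant in a consumer driver's probe() (the
 * deferral is an assumption about how a caller might handle the provider
 * not being registered yet):
 *
 *	scu = devm_intel_scu_ipc_dev_get(&pdev->dev);
 *	if (!scu)
 *		return -EPROBE_DEFER;
 */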
/*
* Send ipc command
* Command Register (Write Only):
* A write to this register results in an interrupt to the SCU core processor
* Format:
* |rfu2(8) | size(8) | command id(4) | rfu1(3) | ioc(1) | command(8)|
*/
static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd)
{
reinit_completion(&scu->cmd_complete);
writel(cmd | IPC_IOC, scu->ipc_base);
}
/*
* Write ipc data
* IPC Write Buffer (Write Only):
* 16-byte buffer for sending data associated with IPC command to
* SCU. Size of the data is specified in the IPC_COMMAND_REG register
*/
static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32 offset)
{
writel(data, scu->ipc_base + IPC_WRITE_BUFFER + offset);
}
/*
* Status Register (Read Only):
* Driver will read this register to get the ready/busy status of the IPC
* block and error status of the IPC command that was just processed by SCU
* Format:
* |rfu3(8)|error code(8)|initiator id(8)|cmd id(4)|rfu1(2)|error(1)|busy(1)|
*/
static inline u8 ipc_read_status(struct intel_scu_ipc_dev *scu)
{
return __raw_readl(scu->ipc_base + IPC_STATUS);
}
/* Read ipc byte data */
static inline u8 ipc_data_readb(struct intel_scu_ipc_dev *scu, u32 offset)
{
return readb(scu->ipc_base + IPC_READ_BUFFER + offset);
}
/* Read ipc u32 data */
static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
{
return readl(scu->ipc_base + IPC_READ_BUFFER + offset);
}
/* Wait until the SCU is no longer busy */
static inline int busy_loop(struct intel_scu_ipc_dev *scu)
{
u8 status;
int err;
err = readx_poll_timeout(ipc_read_status, scu, status, !(status & IPC_STATUS_BUSY),
100, jiffies_to_usecs(IPC_TIMEOUT));
if (err)
return err;
return (status & IPC_STATUS_ERR) ? -EIO : 0;
}
/* Wait until the IPC IOC interrupt is received or the timeout expires */
static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
{
int status;
wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT);
status = ipc_read_status(scu);
if (status & IPC_STATUS_BUSY)
return -ETIMEDOUT;
if (status & IPC_STATUS_ERR)
return -EIO;
return 0;
}
static int intel_scu_ipc_check_status(struct intel_scu_ipc_dev *scu)
{
return scu->irq > 0 ? ipc_wait_for_interrupt(scu) : busy_loop(scu);
}
static struct intel_scu_ipc_dev *intel_scu_ipc_get(struct intel_scu_ipc_dev *scu)
{
u8 status;
if (!scu)
scu = ipcdev;
if (!scu)
return ERR_PTR(-ENODEV);
status = ipc_read_status(scu);
if (status & IPC_STATUS_BUSY) {
dev_dbg(&scu->dev, "device is busy\n");
return ERR_PTR(-EBUSY);
}
return scu;
}
/* Read/Write power control (PMIC in Langwell, MSIC in PenWell) registers */
static int pwr_reg_rdwr(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
u32 count, u32 op, u32 id)
{
int nc;
u32 offset = 0;
int err;
u8 cbuf[IPC_WWBUF_SIZE];
u32 *wbuf = (u32 *)&cbuf;
memset(cbuf, 0, sizeof(cbuf));
mutex_lock(&ipclock);
scu = intel_scu_ipc_get(scu);
if (IS_ERR(scu)) {
mutex_unlock(&ipclock);
return PTR_ERR(scu);
}
for (nc = 0; nc < count; nc++, offset += 2) {
cbuf[offset] = addr[nc];
cbuf[offset + 1] = addr[nc] >> 8;
}
if (id == IPC_CMD_PCNTRL_R) {
for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
ipc_data_writel(scu, wbuf[nc], offset);
ipc_command(scu, (count * 2) << 16 | id << 12 | 0 << 8 | op);
} else if (id == IPC_CMD_PCNTRL_W) {
for (nc = 0; nc < count; nc++, offset += 1)
cbuf[offset] = data[nc];
for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
ipc_data_writel(scu, wbuf[nc], offset);
ipc_command(scu, (count * 3) << 16 | id << 12 | 0 << 8 | op);
} else if (id == IPC_CMD_PCNTRL_M) {
cbuf[offset] = data[0];
cbuf[offset + 1] = data[1];
ipc_data_writel(scu, wbuf[0], 0); /* Write wbuff */
ipc_command(scu, 4 << 16 | id << 12 | 0 << 8 | op);
}
err = intel_scu_ipc_check_status(scu);
if (!err && id == IPC_CMD_PCNTRL_R) { /* Read rbuf */
/* Workaround: values are read as 0 without memcpy_fromio */
memcpy_fromio(cbuf, scu->ipc_base + 0x90, 16);
for (nc = 0; nc < count; nc++)
data[nc] = ipc_data_readb(scu, nc);
}
mutex_unlock(&ipclock);
return err;
}
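/*
 * Worked example of the command word built above: reading a single PMIC
 * register uses count == 1, id == IPC_CMD_PCNTRL_R (1) and
 * op == IPCMSG_PCNTRL (0xff), so the value written is
 * (2 << 16) | (1 << 12) | 0xff == 0x000210ff, before ipc_command()
 * ORs in IPC_IOC.
 */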
/**
* intel_scu_ipc_dev_ioread8() - Read a byte via the SCU
* @scu: Optional SCU IPC instance
* @addr: Register on SCU
* @data: Return pointer for read byte
*
* Read a single register. Returns %0 on success or an error code. All
* locking between SCU accesses is handled for the caller.
*
* This function may sleep.
*/
int intel_scu_ipc_dev_ioread8(struct intel_scu_ipc_dev *scu, u16 addr, u8 *data)
{
return pwr_reg_rdwr(scu, &addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_ioread8);
/**
* intel_scu_ipc_dev_iowrite8() - Write a byte via the SCU
* @scu: Optional SCU IPC instance
* @addr: Register on SCU
* @data: Byte to write
*
* Write a single register. Returns %0 on success or an error code. All
* locking between SCU accesses is handled for the caller.
*
* This function may sleep.
*/
int intel_scu_ipc_dev_iowrite8(struct intel_scu_ipc_dev *scu, u16 addr, u8 data)
{
return pwr_reg_rdwr(scu, &addr, &data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_iowrite8);
/**
* intel_scu_ipc_dev_readv() - Read a set of registers
* @scu: Optional SCU IPC instance
* @addr: Register list
* @data: Bytes to return
* @len: Length of array
*
* Read registers. Returns %0 on success or an error code. All locking
* between SCU accesses is handled for the caller.
*
* The largest array length permitted by the hardware is 5 items.
*
* This function may sleep.
*/
int intel_scu_ipc_dev_readv(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
size_t len)
{
return pwr_reg_rdwr(scu, addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_readv);
/**
* intel_scu_ipc_dev_writev() - Write a set of registers
* @scu: Optional SCU IPC instance
* @addr: Register list
* @data: Bytes to write
* @len: Length of array
*
* Write registers. Returns %0 on success or an error code. All locking
* between SCU accesses is handled for the caller.
*
* The largest array length permitted by the hardware is 5 items.
*
* This function may sleep.
*/
int intel_scu_ipc_dev_writev(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
size_t len)
{
return pwr_reg_rdwr(scu, addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_writev);
/**
* intel_scu_ipc_dev_update() - Update a register
* @scu: Optional SCU IPC instance
* @addr: Register address
* @data: Bits to update
* @mask: Mask of bits to update
*
 * Read-modify-write a power control unit register. @data holds the new
 * bit values and @mask is a bitmap indicating which bits to update:
 * %0 = leave this bit unmodified, %1 = modify this bit. Returns %0 on
 * success or an error code.
*
* This function may sleep. Locking between SCU accesses is handled
* for the caller.
*/
int intel_scu_ipc_dev_update(struct intel_scu_ipc_dev *scu, u16 addr, u8 data,
u8 mask)
{
u8 tmp[2] = { data, mask };
return pwr_reg_rdwr(scu, &addr, tmp, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_M);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_update);
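/*
 * For instance, setting bit 0 of a register while leaving the other bits
 * untouched (the address 0x30 is a placeholder):
 *
 *	intel_scu_ipc_dev_update(scu, 0x30, 0x01, 0x01);
 */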
/**
* intel_scu_ipc_dev_simple_command() - Send a simple command
* @scu: Optional SCU IPC instance
* @cmd: Command
* @sub: Sub type
*
* Issue a simple command to the SCU. Do not use this interface if you must
* then access data as any data values may be overwritten by another SCU
* access by the time this function returns.
*
* This function may sleep. Locking for SCU accesses is handled for the
* caller.
*/
int intel_scu_ipc_dev_simple_command(struct intel_scu_ipc_dev *scu, int cmd,
int sub)
{
u32 cmdval;
int err;
mutex_lock(&ipclock);
scu = intel_scu_ipc_get(scu);
if (IS_ERR(scu)) {
mutex_unlock(&ipclock);
return PTR_ERR(scu);
}
cmdval = sub << 12 | cmd;
ipc_command(scu, cmdval);
err = intel_scu_ipc_check_status(scu);
mutex_unlock(&ipclock);
if (err)
dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
return err;
}
EXPORT_SYMBOL(intel_scu_ipc_dev_simple_command);
/**
* intel_scu_ipc_dev_command_with_size() - Command with data
* @scu: Optional SCU IPC instance
* @cmd: Command
* @sub: Sub type
* @in: Input data
* @inlen: Input length in bytes
* @size: Input size written to the IPC command register in whatever
* units (dword, byte) the particular firmware requires. Normally
* should be the same as @inlen.
* @out: Output data
* @outlen: Output length in bytes
*
* Issue a command to the SCU which involves data transfers. Do the
* data copies under the lock but leave it for the caller to interpret.
*/
int intel_scu_ipc_dev_command_with_size(struct intel_scu_ipc_dev *scu, int cmd,
int sub, const void *in, size_t inlen,
size_t size, void *out, size_t outlen)
{
size_t outbuflen = DIV_ROUND_UP(outlen, sizeof(u32));
size_t inbuflen = DIV_ROUND_UP(inlen, sizeof(u32));
u32 cmdval, inbuf[4] = {};
int i, err;
if (inbuflen > 4 || outbuflen > 4)
return -EINVAL;
mutex_lock(&ipclock);
scu = intel_scu_ipc_get(scu);
if (IS_ERR(scu)) {
mutex_unlock(&ipclock);
return PTR_ERR(scu);
}
memcpy(inbuf, in, inlen);
for (i = 0; i < inbuflen; i++)
ipc_data_writel(scu, inbuf[i], 4 * i);
cmdval = (size << 16) | (sub << 12) | cmd;
ipc_command(scu, cmdval);
err = intel_scu_ipc_check_status(scu);
if (!err) {
u32 outbuf[4] = {};
for (i = 0; i < outbuflen; i++)
outbuf[i] = ipc_data_readl(scu, 4 * i);
memcpy(out, outbuf, outlen);
}
mutex_unlock(&ipclock);
if (err)
dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
return err;
}
EXPORT_SYMBOL(intel_scu_ipc_dev_command_with_size);
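/*
 * Hedged usage sketch: read four bytes back from a command that takes no
 * input (the cmd/sub values 0xA0/0 are placeholders, not real SCU
 * firmware opcodes):
 *
 *	u32 out;
 *
 *	err = intel_scu_ipc_dev_command_with_size(scu, 0xA0, 0, NULL, 0, 0,
 *						  &out, sizeof(out));
 */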
/*
 * The interrupt handler is called when the ioc bit of IPC_COMMAND_REG is
 * set to 1. In that case the caller must wait for the interrupt handler,
 * which in turn unblocks it by completing cmd_complete.
 *
 * This is edge triggered, so we need take no action to clear anything.
*/
static irqreturn_t ioc(int irq, void *dev_id)
{
struct intel_scu_ipc_dev *scu = dev_id;
int status = ipc_read_status(scu);
writel(status | IPC_STATUS_IRQ, scu->ipc_base + IPC_STATUS);
complete(&scu->cmd_complete);
return IRQ_HANDLED;
}
static void intel_scu_ipc_release(struct device *dev)
{
struct intel_scu_ipc_dev *scu;
scu = container_of(dev, struct intel_scu_ipc_dev, dev);
if (scu->irq > 0)
free_irq(scu->irq, scu);
iounmap(scu->ipc_base);
release_mem_region(scu->mem.start, resource_size(&scu->mem));
kfree(scu);
}
/**
* __intel_scu_ipc_register() - Register SCU IPC device
* @parent: Parent device
* @scu_data: Data used to configure SCU IPC
* @owner: Module registering the SCU IPC device
*
* Call this function to register SCU IPC mechanism under @parent.
* Returns pointer to the new SCU IPC device or ERR_PTR() in case of
* failure. The caller may use the returned instance if it needs to do
* SCU IPC calls itself.
*/
struct intel_scu_ipc_dev *
__intel_scu_ipc_register(struct device *parent,
const struct intel_scu_ipc_data *scu_data,
struct module *owner)
{
int err;
struct intel_scu_ipc_dev *scu;
void __iomem *ipc_base;
mutex_lock(&ipclock);
/* We support only one IPC */
if (ipcdev) {
err = -EBUSY;
goto err_unlock;
}
scu = kzalloc(sizeof(*scu), GFP_KERNEL);
if (!scu) {
err = -ENOMEM;
goto err_unlock;
}
scu->owner = owner;
scu->dev.parent = parent;
scu->dev.class = &intel_scu_ipc_class;
scu->dev.release = intel_scu_ipc_release;
if (!request_mem_region(scu_data->mem.start, resource_size(&scu_data->mem),
"intel_scu_ipc")) {
err = -EBUSY;
goto err_free;
}
ipc_base = ioremap(scu_data->mem.start, resource_size(&scu_data->mem));
if (!ipc_base) {
err = -ENOMEM;
goto err_release;
}
scu->ipc_base = ipc_base;
scu->mem = scu_data->mem;
scu->irq = scu_data->irq;
init_completion(&scu->cmd_complete);
if (scu->irq > 0) {
err = request_irq(scu->irq, ioc, 0, "intel_scu_ipc", scu);
if (err)
goto err_unmap;
}
/*
* After this point intel_scu_ipc_release() takes care of
* releasing the SCU IPC resources once refcount drops to zero.
*/
dev_set_name(&scu->dev, "intel_scu_ipc");
err = device_register(&scu->dev);
if (err) {
put_device(&scu->dev);
goto err_unlock;
}
/* Assign device at last */
ipcdev = scu;
mutex_unlock(&ipclock);
return scu;
err_unmap:
iounmap(ipc_base);
err_release:
release_mem_region(scu_data->mem.start, resource_size(&scu_data->mem));
err_free:
kfree(scu);
err_unlock:
mutex_unlock(&ipclock);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(__intel_scu_ipc_register);
/**
* intel_scu_ipc_unregister() - Unregister SCU IPC
* @scu: SCU IPC handle
*
* This unregisters the SCU IPC device and releases the acquired
* resources once the refcount goes to zero.
*/
void intel_scu_ipc_unregister(struct intel_scu_ipc_dev *scu)
{
mutex_lock(&ipclock);
if (!WARN_ON(!ipcdev)) {
ipcdev = NULL;
device_unregister(&scu->dev);
}
mutex_unlock(&ipclock);
}
EXPORT_SYMBOL_GPL(intel_scu_ipc_unregister);
static void devm_intel_scu_ipc_unregister(struct device *dev, void *res)
{
struct intel_scu_ipc_devres *dr = res;
struct intel_scu_ipc_dev *scu = dr->scu;
intel_scu_ipc_unregister(scu);
}
/**
* __devm_intel_scu_ipc_register() - Register managed SCU IPC device
* @parent: Parent device
* @scu_data: Data used to configure SCU IPC
* @owner: Module registering the SCU IPC device
*
* Call this function to register managed SCU IPC mechanism under
* @parent. Returns pointer to the new SCU IPC device or ERR_PTR() in
* case of failure. The caller may use the returned instance if it needs
* to do SCU IPC calls itself.
*/
struct intel_scu_ipc_dev *
__devm_intel_scu_ipc_register(struct device *parent,
const struct intel_scu_ipc_data *scu_data,
struct module *owner)
{
struct intel_scu_ipc_devres *dr;
struct intel_scu_ipc_dev *scu;
dr = devres_alloc(devm_intel_scu_ipc_unregister, sizeof(*dr), GFP_KERNEL);
if (!dr)
return NULL;
scu = __intel_scu_ipc_register(parent, scu_data, owner);
if (IS_ERR(scu)) {
devres_free(dr);
return scu;
}
dr->scu = scu;
devres_add(parent, dr);
return scu;
}
EXPORT_SYMBOL_GPL(__devm_intel_scu_ipc_register);
static int __init intel_scu_ipc_init(void)
{
return class_register(&intel_scu_ipc_class);
}
subsys_initcall(intel_scu_ipc_init);
static void __exit intel_scu_ipc_exit(void)
{
class_unregister(&intel_scu_ipc_class);
}
module_exit(intel_scu_ipc_exit);
| linux-master | drivers/platform/x86/intel_scu_ipc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Platform driver for the Intel SCU.
*
* Copyright (C) 2019, Intel Corporation
* Authors: Divya Sasidharan <[email protected]>
* Mika Westerberg <[email protected]>
* Rajmohan Mani <[email protected]>
*/
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <asm/intel_scu_ipc.h>
static int intel_scu_platform_probe(struct platform_device *pdev)
{
struct intel_scu_ipc_data scu_data = {};
struct intel_scu_ipc_dev *scu;
const struct resource *res;
scu_data.irq = platform_get_irq_optional(pdev, 0);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENOMEM;
scu_data.mem = *res;
scu = devm_intel_scu_ipc_register(&pdev->dev, &scu_data);
if (IS_ERR(scu))
return PTR_ERR(scu);
platform_set_drvdata(pdev, scu);
return 0;
}
static const struct acpi_device_id intel_scu_acpi_ids[] = {
{ "INTC1026" },
{}
};
MODULE_DEVICE_TABLE(acpi, intel_scu_acpi_ids);
static struct platform_driver intel_scu_platform_driver = {
.probe = intel_scu_platform_probe,
.driver = {
.name = "intel_scu",
.acpi_match_table = intel_scu_acpi_ids,
},
};
module_platform_driver(intel_scu_platform_driver);
MODULE_AUTHOR("Divya Sasidharan <[email protected]>");
MODULE_AUTHOR("Mika Westerberg <[email protected]");
MODULE_AUTHOR("Rajmohan Mani <[email protected]>");
MODULE_DESCRIPTION("Intel SCU platform driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/platform/x86/intel_scu_pltdrv.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/acpi.h>
#include <linux/backlight.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_data/x86/nvidia-wmi-ec-backlight.h>
#include <linux/types.h>
#include <linux/wmi.h>
#include <acpi/video.h>
static bool force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Force loading (disable acpi_backlight=xxx checks)");
/**
* wmi_brightness_notify() - helper function for calling WMI-wrapped ACPI method
* @w: Pointer to the struct wmi_device identified by %WMI_BRIGHTNESS_GUID
* @id: The WMI method ID to call (e.g. %WMI_BRIGHTNESS_METHOD_LEVEL or
* %WMI_BRIGHTNESS_METHOD_SOURCE)
* @mode: The operation to perform on the method (e.g. %WMI_BRIGHTNESS_MODE_SET
* or %WMI_BRIGHTNESS_MODE_GET)
* @val: Pointer to a value passed in by the caller when @mode is
* %WMI_BRIGHTNESS_MODE_SET, or a value passed out to caller when @mode
* is %WMI_BRIGHTNESS_MODE_GET or %WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL.
*
* Returns 0 on success, or a negative error number on failure.
*/
static int wmi_brightness_notify(struct wmi_device *w, enum wmi_brightness_method id, enum wmi_brightness_mode mode, u32 *val)
{
struct wmi_brightness_args args = {
.mode = mode,
.val = 0,
.ret = 0,
};
struct acpi_buffer buf = { (acpi_size)sizeof(args), &args };
acpi_status status;
if (id < WMI_BRIGHTNESS_METHOD_LEVEL ||
id >= WMI_BRIGHTNESS_METHOD_MAX ||
mode < WMI_BRIGHTNESS_MODE_GET || mode >= WMI_BRIGHTNESS_MODE_MAX)
return -EINVAL;
if (mode == WMI_BRIGHTNESS_MODE_SET)
args.val = *val;
status = wmidev_evaluate_method(w, 0, id, &buf, &buf);
if (ACPI_FAILURE(status)) {
dev_err(&w->dev, "EC backlight control failed: %s\n",
acpi_format_exception(status));
return -EIO;
}
if (mode != WMI_BRIGHTNESS_MODE_SET)
*val = args.ret;
return 0;
}
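/*
 * Illustrative sketch (guarded out): the helper above can also query the
 * SOURCE method that its kernel-doc mentions but that this driver never
 * calls itself.
 */
#if 0
static int example_get_source(struct wmi_device *wdev, u32 *source)
{
return wmi_brightness_notify(wdev, WMI_BRIGHTNESS_METHOD_SOURCE,
WMI_BRIGHTNESS_MODE_GET, source);
}
#endif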
static int nvidia_wmi_ec_backlight_update_status(struct backlight_device *bd)
{
struct wmi_device *wdev = bl_get_data(bd);
return wmi_brightness_notify(wdev, WMI_BRIGHTNESS_METHOD_LEVEL,
WMI_BRIGHTNESS_MODE_SET,
&bd->props.brightness);
}
static int nvidia_wmi_ec_backlight_get_brightness(struct backlight_device *bd)
{
struct wmi_device *wdev = bl_get_data(bd);
u32 level;
int ret;
ret = wmi_brightness_notify(wdev, WMI_BRIGHTNESS_METHOD_LEVEL,
WMI_BRIGHTNESS_MODE_GET, &level);
if (ret < 0)
return ret;
return level;
}
static const struct backlight_ops nvidia_wmi_ec_backlight_ops = {
.update_status = nvidia_wmi_ec_backlight_update_status,
.get_brightness = nvidia_wmi_ec_backlight_get_brightness,
};
static int nvidia_wmi_ec_backlight_probe(struct wmi_device *wdev, const void *ctx)
{
struct backlight_properties props = {};
struct backlight_device *bdev;
int ret;
/* drivers/acpi/video_detect.c also checks that SOURCE == EC */
if (!force && acpi_video_get_backlight_type() != acpi_backlight_nvidia_wmi_ec)
return -ENODEV;
/*
* Identify this backlight device as a firmware device so that it can
* be prioritized over any exposed GPU-driven raw device(s).
*/
props.type = BACKLIGHT_FIRMWARE;
ret = wmi_brightness_notify(wdev, WMI_BRIGHTNESS_METHOD_LEVEL,
WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL,
&props.max_brightness);
if (ret)
return ret;
ret = wmi_brightness_notify(wdev, WMI_BRIGHTNESS_METHOD_LEVEL,
WMI_BRIGHTNESS_MODE_GET, &props.brightness);
if (ret)
return ret;
bdev = devm_backlight_device_register(&wdev->dev,
"nvidia_wmi_ec_backlight",
&wdev->dev, wdev,
&nvidia_wmi_ec_backlight_ops,
&props);
return PTR_ERR_OR_ZERO(bdev);
}
static const struct wmi_device_id nvidia_wmi_ec_backlight_id_table[] = {
{ .guid_string = WMI_BRIGHTNESS_GUID },
{ }
};
MODULE_DEVICE_TABLE(wmi, nvidia_wmi_ec_backlight_id_table);
static struct wmi_driver nvidia_wmi_ec_backlight_driver = {
.driver = {
.name = "nvidia-wmi-ec-backlight",
},
.probe = nvidia_wmi_ec_backlight_probe,
.id_table = nvidia_wmi_ec_backlight_id_table,
};
module_wmi_driver(nvidia_wmi_ec_backlight_driver);
MODULE_AUTHOR("Daniel Dadap <[email protected]>");
MODULE_DESCRIPTION("NVIDIA WMI EC Backlight driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/x86/nvidia-wmi-ec-backlight.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*-*-linux-c-*-*/
/*
Copyright (C) 2007,2008 Jonathan Woithe <[email protected]>
Copyright (C) 2008 Peter Gruber <[email protected]>
Copyright (C) 2008 Tony Vroon <[email protected]>
Based on earlier work:
Copyright (C) 2003 Shane Spencer <[email protected]>
Adrian Yee <[email protected]>
Templated from msi-laptop.c and thinkpad_acpi.c which is copyright
by its respective authors.
*/
/*
* fujitsu-laptop.c - Fujitsu laptop support, providing access to additional
* features made available on a range of Fujitsu laptops including the
* P2xxx/P5xxx/S6xxx/S7xxx series.
*
* This driver implements a vendor-specific backlight control interface for
* Fujitsu laptops and provides support for hotkeys present on certain Fujitsu
* laptops.
*
* This driver has been tested on a Fujitsu Lifebook S6410, S7020 and
* P8010. It should work on most P-series and S-series Lifebooks, but
* YMMV.
*
* The module parameter use_alt_lcd_levels switches between different ACPI
* brightness controls which are used by different Fujitsu laptops. In most
* cases the correct method is automatically detected. "use_alt_lcd_levels=1"
* is applicable for a Fujitsu Lifebook S6410 if autodetection fails.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/dmi.h>
#include <linux/backlight.h>
#include <linux/fb.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/kfifo.h>
#include <linux/leds.h>
#include <linux/platform_device.h>
#include <acpi/video.h>
#define FUJITSU_DRIVER_VERSION "0.6.0"
#define FUJITSU_LCD_N_LEVELS 8
#define ACPI_FUJITSU_CLASS "fujitsu"
#define ACPI_FUJITSU_BL_HID "FUJ02B1"
#define ACPI_FUJITSU_BL_DRIVER_NAME "Fujitsu laptop FUJ02B1 ACPI brightness driver"
#define ACPI_FUJITSU_BL_DEVICE_NAME "Fujitsu FUJ02B1"
#define ACPI_FUJITSU_LAPTOP_HID "FUJ02E3"
#define ACPI_FUJITSU_LAPTOP_DRIVER_NAME "Fujitsu laptop FUJ02E3 ACPI hotkeys driver"
#define ACPI_FUJITSU_LAPTOP_DEVICE_NAME "Fujitsu FUJ02E3"
#define ACPI_FUJITSU_NOTIFY_CODE 0x80
/* FUNC interface - command values */
#define FUNC_FLAGS BIT(12)
#define FUNC_LEDS (BIT(12) | BIT(0))
#define FUNC_BUTTONS (BIT(12) | BIT(1))
#define FUNC_BACKLIGHT (BIT(12) | BIT(2))
/* FUNC interface - responses */
#define UNSUPPORTED_CMD 0x80000000
/* FUNC interface - status flags */
#define FLAG_RFKILL BIT(5)
#define FLAG_LID BIT(8)
#define FLAG_DOCK BIT(9)
#define FLAG_TOUCHPAD_TOGGLE BIT(26)
#define FLAG_MICMUTE BIT(29)
#define FLAG_SOFTKEYS (FLAG_RFKILL | FLAG_TOUCHPAD_TOGGLE | FLAG_MICMUTE)
/* FUNC interface - LED control */
#define FUNC_LED_OFF BIT(0)
#define FUNC_LED_ON (BIT(0) | BIT(16) | BIT(17))
#define LOGOLAMP_POWERON BIT(13)
#define LOGOLAMP_ALWAYS BIT(14)
#define KEYBOARD_LAMPS BIT(8)
#define RADIO_LED_ON BIT(5)
#define ECO_LED BIT(16)
#define ECO_LED_ON BIT(19)
/* FUNC interface - backlight power control */
#define BACKLIGHT_PARAM_POWER BIT(2)
#define BACKLIGHT_OFF (BIT(0) | BIT(1))
#define BACKLIGHT_ON 0
/* Scancodes read from the GIRB register */
#define KEY1_CODE 0x410
#define KEY2_CODE 0x411
#define KEY3_CODE 0x412
#define KEY4_CODE 0x413
#define KEY5_CODE 0x420
/* Hotkey ringbuffer limits */
#define MAX_HOTKEY_RINGBUFFER_SIZE 100
#define RINGBUFFERSIZE 40
/* Module parameters */
static int use_alt_lcd_levels = -1;
static bool disable_brightness_adjust;
/* Device controlling the backlight and associated keys */
struct fujitsu_bl {
struct input_dev *input;
char phys[32];
struct backlight_device *bl_device;
unsigned int max_brightness;
unsigned int brightness_level;
};
static struct fujitsu_bl *fujitsu_bl;
/* Device used to access hotkeys and other features on the laptop */
struct fujitsu_laptop {
struct input_dev *input;
char phys[32];
struct platform_device *pf_device;
struct kfifo fifo;
spinlock_t fifo_lock;
int flags_supported;
int flags_state;
};
static struct acpi_device *fext;
/* Fujitsu ACPI interface function */
static int call_fext_func(struct acpi_device *device,
int func, int op, int feature, int state)
{
union acpi_object params[4] = {
{ .integer.type = ACPI_TYPE_INTEGER, .integer.value = func },
{ .integer.type = ACPI_TYPE_INTEGER, .integer.value = op },
{ .integer.type = ACPI_TYPE_INTEGER, .integer.value = feature },
{ .integer.type = ACPI_TYPE_INTEGER, .integer.value = state }
};
struct acpi_object_list arg_list = { 4, params };
unsigned long long value;
acpi_status status;
status = acpi_evaluate_integer(device->handle, "FUNC", &arg_list,
&value);
if (ACPI_FAILURE(status)) {
acpi_handle_err(device->handle, "Failed to evaluate FUNC\n");
return -ENODEV;
}
acpi_handle_debug(device->handle,
"FUNC 0x%x (args 0x%x, 0x%x, 0x%x) returned 0x%x\n",
func, op, feature, state, (int)value);
return value;
}
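/*
 * Illustrative sketch (guarded out): reading the supported-feature mask
 * and the current status flags, mirroring what acpi_fujitsu_laptop_add()
 * does further below (op 0x0 reads the supported mask, op 0x4 reads the
 * live state).
 */
#if 0
static void example_read_flags(struct acpi_device *device)
{
int supported = call_fext_func(device, FUNC_FLAGS, 0x0, 0x0, 0x0);
int state = call_fext_func(device, FUNC_FLAGS, 0x4, 0x0, 0x0);
acpi_handle_debug(device->handle, "flags: %x/%x\n", supported, state);
}
#endif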
/* Hardware access for LCD brightness control */
static int set_lcd_level(struct acpi_device *device, int level)
{
struct fujitsu_bl *priv = acpi_driver_data(device);
acpi_status status;
char *method;
switch (use_alt_lcd_levels) {
case -1:
if (acpi_has_method(device->handle, "SBL2"))
method = "SBL2";
else
method = "SBLL";
break;
case 1:
method = "SBL2";
break;
default:
method = "SBLL";
break;
}
acpi_handle_debug(device->handle, "set lcd level via %s [%d]\n", method,
level);
if (level < 0 || level >= priv->max_brightness)
return -EINVAL;
status = acpi_execute_simple_method(device->handle, method, level);
if (ACPI_FAILURE(status)) {
acpi_handle_err(device->handle, "Failed to evaluate %s\n",
method);
return -ENODEV;
}
priv->brightness_level = level;
return 0;
}
static int get_lcd_level(struct acpi_device *device)
{
struct fujitsu_bl *priv = acpi_driver_data(device);
unsigned long long state = 0;
acpi_status status = AE_OK;
acpi_handle_debug(device->handle, "get lcd level via GBLL\n");
status = acpi_evaluate_integer(device->handle, "GBLL", NULL, &state);
if (ACPI_FAILURE(status))
return 0;
priv->brightness_level = state & 0x0fffffff;
return priv->brightness_level;
}
static int get_max_brightness(struct acpi_device *device)
{
struct fujitsu_bl *priv = acpi_driver_data(device);
unsigned long long state = 0;
acpi_status status = AE_OK;
acpi_handle_debug(device->handle, "get max lcd level via RBLL\n");
status = acpi_evaluate_integer(device->handle, "RBLL", NULL, &state);
if (ACPI_FAILURE(status))
return -1;
priv->max_brightness = state;
return priv->max_brightness;
}
/* Backlight device stuff */
static int bl_get_brightness(struct backlight_device *b)
{
struct acpi_device *device = bl_get_data(b);
return b->props.power == FB_BLANK_POWERDOWN ? 0 : get_lcd_level(device);
}
static int bl_update_status(struct backlight_device *b)
{
struct acpi_device *device = bl_get_data(b);
if (fext) {
if (b->props.power == FB_BLANK_POWERDOWN)
call_fext_func(fext, FUNC_BACKLIGHT, 0x1,
BACKLIGHT_PARAM_POWER, BACKLIGHT_OFF);
else
call_fext_func(fext, FUNC_BACKLIGHT, 0x1,
BACKLIGHT_PARAM_POWER, BACKLIGHT_ON);
}
return set_lcd_level(device, b->props.brightness);
}
static const struct backlight_ops fujitsu_bl_ops = {
.get_brightness = bl_get_brightness,
.update_status = bl_update_status,
};
static ssize_t lid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct fujitsu_laptop *priv = dev_get_drvdata(dev);
if (!(priv->flags_supported & FLAG_LID))
return sprintf(buf, "unknown\n");
if (priv->flags_state & FLAG_LID)
return sprintf(buf, "open\n");
else
return sprintf(buf, "closed\n");
}
static ssize_t dock_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct fujitsu_laptop *priv = dev_get_drvdata(dev);
if (!(priv->flags_supported & FLAG_DOCK))
return sprintf(buf, "unknown\n");
if (priv->flags_state & FLAG_DOCK)
return sprintf(buf, "docked\n");
else
return sprintf(buf, "undocked\n");
}
static ssize_t radios_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct fujitsu_laptop *priv = dev_get_drvdata(dev);
if (!(priv->flags_supported & FLAG_RFKILL))
return sprintf(buf, "unknown\n");
if (priv->flags_state & FLAG_RFKILL)
return sprintf(buf, "on\n");
else
return sprintf(buf, "killed\n");
}
static DEVICE_ATTR_RO(lid);
static DEVICE_ATTR_RO(dock);
static DEVICE_ATTR_RO(radios);
static struct attribute *fujitsu_pf_attributes[] = {
&dev_attr_lid.attr,
&dev_attr_dock.attr,
&dev_attr_radios.attr,
NULL
};
static const struct attribute_group fujitsu_pf_attribute_group = {
.attrs = fujitsu_pf_attributes
};
static struct platform_driver fujitsu_pf_driver = {
.driver = {
.name = "fujitsu-laptop",
}
};
/* ACPI device for LCD brightness control */
static const struct key_entry keymap_backlight[] = {
{ KE_KEY, true, { KEY_BRIGHTNESSUP } },
{ KE_KEY, false, { KEY_BRIGHTNESSDOWN } },
{ KE_END, 0 }
};
static int acpi_fujitsu_bl_input_setup(struct acpi_device *device)
{
struct fujitsu_bl *priv = acpi_driver_data(device);
int ret;
priv->input = devm_input_allocate_device(&device->dev);
if (!priv->input)
return -ENOMEM;
snprintf(priv->phys, sizeof(priv->phys), "%s/video/input0",
acpi_device_hid(device));
priv->input->name = acpi_device_name(device);
priv->input->phys = priv->phys;
priv->input->id.bustype = BUS_HOST;
priv->input->id.product = 0x06;
ret = sparse_keymap_setup(priv->input, keymap_backlight, NULL);
if (ret)
return ret;
return input_register_device(priv->input);
}
static int fujitsu_backlight_register(struct acpi_device *device)
{
struct fujitsu_bl *priv = acpi_driver_data(device);
const struct backlight_properties props = {
.brightness = priv->brightness_level,
.max_brightness = priv->max_brightness - 1,
.type = BACKLIGHT_PLATFORM
};
struct backlight_device *bd;
bd = devm_backlight_device_register(&device->dev, "fujitsu-laptop",
&device->dev, device,
&fujitsu_bl_ops, &props);
if (IS_ERR(bd))
return PTR_ERR(bd);
priv->bl_device = bd;
return 0;
}
static int acpi_fujitsu_bl_add(struct acpi_device *device)
{
struct fujitsu_bl *priv;
int ret;
if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
return -ENODEV;
priv = devm_kzalloc(&device->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
fujitsu_bl = priv;
strcpy(acpi_device_name(device), ACPI_FUJITSU_BL_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_FUJITSU_CLASS);
device->driver_data = priv;
pr_info("ACPI: %s [%s]\n",
acpi_device_name(device), acpi_device_bid(device));
if (get_max_brightness(device) <= 0)
priv->max_brightness = FUJITSU_LCD_N_LEVELS;
get_lcd_level(device);
ret = acpi_fujitsu_bl_input_setup(device);
if (ret)
return ret;
return fujitsu_backlight_register(device);
}
/* Brightness notify */
static void acpi_fujitsu_bl_notify(struct acpi_device *device, u32 event)
{
struct fujitsu_bl *priv = acpi_driver_data(device);
int oldb, newb;
if (event != ACPI_FUJITSU_NOTIFY_CODE) {
acpi_handle_info(device->handle, "unsupported event [0x%x]\n",
event);
sparse_keymap_report_event(priv->input, -1, 1, true);
return;
}
oldb = priv->brightness_level;
get_lcd_level(device);
newb = priv->brightness_level;
acpi_handle_debug(device->handle,
"brightness button event [%i -> %i]\n", oldb, newb);
if (oldb == newb)
return;
if (!disable_brightness_adjust)
set_lcd_level(device, newb);
sparse_keymap_report_event(priv->input, oldb < newb, 1, true);
}
/* ACPI device for hotkey handling */
static const struct key_entry keymap_default[] = {
{ KE_KEY, KEY1_CODE, { KEY_PROG1 } },
{ KE_KEY, KEY2_CODE, { KEY_PROG2 } },
{ KE_KEY, KEY3_CODE, { KEY_PROG3 } },
{ KE_KEY, KEY4_CODE, { KEY_PROG4 } },
{ KE_KEY, KEY5_CODE, { KEY_RFKILL } },
/* Soft keys read from status flags */
{ KE_KEY, FLAG_RFKILL, { KEY_RFKILL } },
{ KE_KEY, FLAG_TOUCHPAD_TOGGLE, { KEY_TOUCHPAD_TOGGLE } },
{ KE_KEY, FLAG_MICMUTE, { KEY_MICMUTE } },
{ KE_END, 0 }
};
static const struct key_entry keymap_s64x0[] = {
{ KE_KEY, KEY1_CODE, { KEY_SCREENLOCK } }, /* "Lock" */
{ KE_KEY, KEY2_CODE, { KEY_HELP } }, /* "Mobility Center" */
{ KE_KEY, KEY3_CODE, { KEY_PROG3 } },
{ KE_KEY, KEY4_CODE, { KEY_PROG4 } },
{ KE_END, 0 }
};
static const struct key_entry keymap_p8010[] = {
{ KE_KEY, KEY1_CODE, { KEY_HELP } }, /* "Support" */
{ KE_KEY, KEY2_CODE, { KEY_PROG2 } },
{ KE_KEY, KEY3_CODE, { KEY_SWITCHVIDEOMODE } }, /* "Presentation" */
{ KE_KEY, KEY4_CODE, { KEY_WWW } }, /* "WWW" */
{ KE_END, 0 }
};
static const struct key_entry *keymap = keymap_default;
static int fujitsu_laptop_dmi_keymap_override(const struct dmi_system_id *id)
{
pr_info("Identified laptop model '%s'\n", id->ident);
keymap = id->driver_data;
return 1;
}
static const struct dmi_system_id fujitsu_laptop_dmi_table[] = {
{
.callback = fujitsu_laptop_dmi_keymap_override,
.ident = "Fujitsu Siemens S6410",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S6410"),
},
.driver_data = (void *)keymap_s64x0
},
{
.callback = fujitsu_laptop_dmi_keymap_override,
.ident = "Fujitsu Siemens S6420",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S6420"),
},
.driver_data = (void *)keymap_s64x0
},
{
.callback = fujitsu_laptop_dmi_keymap_override,
.ident = "Fujitsu LifeBook P8010",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P8010"),
},
.driver_data = (void *)keymap_p8010
},
{}
};
static int acpi_fujitsu_laptop_input_setup(struct acpi_device *device)
{
struct fujitsu_laptop *priv = acpi_driver_data(device);
int ret;
priv->input = devm_input_allocate_device(&device->dev);
if (!priv->input)
return -ENOMEM;
snprintf(priv->phys, sizeof(priv->phys), "%s/input0",
acpi_device_hid(device));
priv->input->name = acpi_device_name(device);
priv->input->phys = priv->phys;
priv->input->id.bustype = BUS_HOST;
dmi_check_system(fujitsu_laptop_dmi_table);
ret = sparse_keymap_setup(priv->input, keymap, NULL);
if (ret)
return ret;
return input_register_device(priv->input);
}
static int fujitsu_laptop_platform_add(struct acpi_device *device)
{
struct fujitsu_laptop *priv = acpi_driver_data(device);
int ret;
priv->pf_device = platform_device_alloc("fujitsu-laptop", PLATFORM_DEVID_NONE);
if (!priv->pf_device)
return -ENOMEM;
platform_set_drvdata(priv->pf_device, priv);
ret = platform_device_add(priv->pf_device);
if (ret)
goto err_put_platform_device;
ret = sysfs_create_group(&priv->pf_device->dev.kobj,
&fujitsu_pf_attribute_group);
if (ret)
goto err_del_platform_device;
return 0;
err_del_platform_device:
platform_device_del(priv->pf_device);
err_put_platform_device:
platform_device_put(priv->pf_device);
return ret;
}
static void fujitsu_laptop_platform_remove(struct acpi_device *device)
{
struct fujitsu_laptop *priv = acpi_driver_data(device);
sysfs_remove_group(&priv->pf_device->dev.kobj,
&fujitsu_pf_attribute_group);
platform_device_unregister(priv->pf_device);
}
static int logolamp_set(struct led_classdev *cdev,
enum led_brightness brightness)
{
struct acpi_device *device = to_acpi_device(cdev->dev->parent);
int poweron = FUNC_LED_ON, always = FUNC_LED_ON;
int ret;
if (brightness < LED_HALF)
poweron = FUNC_LED_OFF;
if (brightness < LED_FULL)
always = FUNC_LED_OFF;
ret = call_fext_func(device, FUNC_LEDS, 0x1, LOGOLAMP_POWERON, poweron);
if (ret < 0)
return ret;
return call_fext_func(device, FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, always);
}
static enum led_brightness logolamp_get(struct led_classdev *cdev)
{
struct acpi_device *device = to_acpi_device(cdev->dev->parent);
int ret;
ret = call_fext_func(device, FUNC_LEDS, 0x2, LOGOLAMP_ALWAYS, 0x0);
if (ret == FUNC_LED_ON)
return LED_FULL;
ret = call_fext_func(device, FUNC_LEDS, 0x2, LOGOLAMP_POWERON, 0x0);
if (ret == FUNC_LED_ON)
return LED_HALF;
return LED_OFF;
}
static int kblamps_set(struct led_classdev *cdev,
enum led_brightness brightness)
{
struct acpi_device *device = to_acpi_device(cdev->dev->parent);
if (brightness >= LED_FULL)
return call_fext_func(device, FUNC_LEDS, 0x1, KEYBOARD_LAMPS,
FUNC_LED_ON);
else
return call_fext_func(device, FUNC_LEDS, 0x1, KEYBOARD_LAMPS,
FUNC_LED_OFF);
}
static enum led_brightness kblamps_get(struct led_classdev *cdev)
{
struct acpi_device *device = to_acpi_device(cdev->dev->parent);
enum led_brightness brightness = LED_OFF;
if (call_fext_func(device,
FUNC_LEDS, 0x2, KEYBOARD_LAMPS, 0x0) == FUNC_LED_ON)
brightness = LED_FULL;
return brightness;
}
static int radio_led_set(struct led_classdev *cdev,
enum led_brightness brightness)
{
struct acpi_device *device = to_acpi_device(cdev->dev->parent);
if (brightness >= LED_FULL)
return call_fext_func(device, FUNC_FLAGS, 0x5, RADIO_LED_ON,
RADIO_LED_ON);
else
return call_fext_func(device, FUNC_FLAGS, 0x5, RADIO_LED_ON,
0x0);
}
static enum led_brightness radio_led_get(struct led_classdev *cdev)
{
struct acpi_device *device = to_acpi_device(cdev->dev->parent);
enum led_brightness brightness = LED_OFF;
if (call_fext_func(device, FUNC_FLAGS, 0x4, 0x0, 0x0) & RADIO_LED_ON)
brightness = LED_FULL;
return brightness;
}
static int eco_led_set(struct led_classdev *cdev,
enum led_brightness brightness)
{
struct acpi_device *device = to_acpi_device(cdev->dev->parent);
int curr;
curr = call_fext_func(device, FUNC_LEDS, 0x2, ECO_LED, 0x0);
if (brightness >= LED_FULL)
return call_fext_func(device, FUNC_LEDS, 0x1, ECO_LED,
curr | ECO_LED_ON);
else
return call_fext_func(device, FUNC_LEDS, 0x1, ECO_LED,
curr & ~ECO_LED_ON);
}
static enum led_brightness eco_led_get(struct led_classdev *cdev)
{
struct acpi_device *device = to_acpi_device(cdev->dev->parent);
enum led_brightness brightness = LED_OFF;
if (call_fext_func(device, FUNC_LEDS, 0x2, ECO_LED, 0x0) & ECO_LED_ON)
brightness = LED_FULL;
return brightness;
}
static int acpi_fujitsu_laptop_leds_register(struct acpi_device *device)
{
struct fujitsu_laptop *priv = acpi_driver_data(device);
struct led_classdev *led;
int ret;
if (call_fext_func(device,
FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) {
led = devm_kzalloc(&device->dev, sizeof(*led), GFP_KERNEL);
if (!led)
return -ENOMEM;
led->name = "fujitsu::logolamp";
led->brightness_set_blocking = logolamp_set;
led->brightness_get = logolamp_get;
ret = devm_led_classdev_register(&device->dev, led);
if (ret)
return ret;
}
if ((call_fext_func(device,
FUNC_LEDS, 0x0, 0x0, 0x0) & KEYBOARD_LAMPS) &&
(call_fext_func(device, FUNC_BUTTONS, 0x0, 0x0, 0x0) == 0x0)) {
led = devm_kzalloc(&device->dev, sizeof(*led), GFP_KERNEL);
if (!led)
return -ENOMEM;
led->name = "fujitsu::kblamps";
led->brightness_set_blocking = kblamps_set;
led->brightness_get = kblamps_get;
ret = devm_led_classdev_register(&device->dev, led);
if (ret)
return ret;
}
/*
* Some Fujitsu laptops have a radio toggle button in place of a slide
* switch and all such machines appear to also have an RF LED. Based on
* comparing DSDT tables of four Fujitsu Lifebook models (E744, E751,
* S7110, S8420; the first one has a radio toggle button, the other
* three have slide switches), bit 17 of flags_supported (the value
* returned by method S000 of ACPI device FUJ02E3) seems to indicate
* whether given model has a radio toggle button.
*/
if (priv->flags_supported & BIT(17)) {
led = devm_kzalloc(&device->dev, sizeof(*led), GFP_KERNEL);
if (!led)
return -ENOMEM;
led->name = "fujitsu::radio_led";
led->brightness_set_blocking = radio_led_set;
led->brightness_get = radio_led_get;
led->default_trigger = "rfkill-any";
ret = devm_led_classdev_register(&device->dev, led);
if (ret)
return ret;
}
/*
 * Support for the eco LED is not always signaled in the bit corresponding
 * to the bit used to control the LED. According to the DSDT table,
 * bit 14 seems to indicate the presence of said LED as well.
 * Confirm by testing the status.
 */
if ((call_fext_func(device, FUNC_LEDS, 0x0, 0x0, 0x0) & BIT(14)) &&
(call_fext_func(device,
FUNC_LEDS, 0x2, ECO_LED, 0x0) != UNSUPPORTED_CMD)) {
led = devm_kzalloc(&device->dev, sizeof(*led), GFP_KERNEL);
if (!led)
return -ENOMEM;
led->name = "fujitsu::eco_led";
led->brightness_set_blocking = eco_led_set;
led->brightness_get = eco_led_get;
ret = devm_led_classdev_register(&device->dev, led);
if (ret)
return ret;
}
return 0;
}
static int acpi_fujitsu_laptop_add(struct acpi_device *device)
{
struct fujitsu_laptop *priv;
int ret, i = 0;
priv = devm_kzalloc(&device->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
WARN_ONCE(fext, "More than one FUJ02E3 ACPI device was found. Driver may not work as intended.");
fext = device;
strcpy(acpi_device_name(device), ACPI_FUJITSU_LAPTOP_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_FUJITSU_CLASS);
device->driver_data = priv;
/* kfifo */
spin_lock_init(&priv->fifo_lock);
ret = kfifo_alloc(&priv->fifo, RINGBUFFERSIZE * sizeof(int),
GFP_KERNEL);
if (ret)
return ret;
pr_info("ACPI: %s [%s]\n",
acpi_device_name(device), acpi_device_bid(device));
while (call_fext_func(device, FUNC_BUTTONS, 0x1, 0x0, 0x0) != 0 &&
i++ < MAX_HOTKEY_RINGBUFFER_SIZE)
; /* No action, result is discarded */
acpi_handle_debug(device->handle, "Discarded %i ringbuffer entries\n",
i);
priv->flags_supported = call_fext_func(device, FUNC_FLAGS, 0x0, 0x0,
0x0);
/*
 * Make sure our bitmask of supported functions is cleared if the
 * RFKILL function block is not implemented, like on the S7020.
 */
if (priv->flags_supported == UNSUPPORTED_CMD)
priv->flags_supported = 0;
if (priv->flags_supported)
priv->flags_state = call_fext_func(device, FUNC_FLAGS, 0x4, 0x0,
0x0);
/* Suspect this is a keymap of the application panel, print it */
acpi_handle_info(device->handle, "BTNI: [0x%x]\n",
call_fext_func(device, FUNC_BUTTONS, 0x0, 0x0, 0x0));
/* Sync backlight power status */
if (fujitsu_bl && fujitsu_bl->bl_device &&
acpi_video_get_backlight_type() == acpi_backlight_vendor) {
if (call_fext_func(fext, FUNC_BACKLIGHT, 0x2,
BACKLIGHT_PARAM_POWER, 0x0) == BACKLIGHT_OFF)
fujitsu_bl->bl_device->props.power = FB_BLANK_POWERDOWN;
else
fujitsu_bl->bl_device->props.power = FB_BLANK_UNBLANK;
}
ret = acpi_fujitsu_laptop_input_setup(device);
if (ret)
goto err_free_fifo;
ret = acpi_fujitsu_laptop_leds_register(device);
if (ret)
goto err_free_fifo;
ret = fujitsu_laptop_platform_add(device);
if (ret)
goto err_free_fifo;
return 0;
err_free_fifo:
kfifo_free(&priv->fifo);
return ret;
}
static void acpi_fujitsu_laptop_remove(struct acpi_device *device)
{
struct fujitsu_laptop *priv = acpi_driver_data(device);
fujitsu_laptop_platform_remove(device);
kfifo_free(&priv->fifo);
}
static void acpi_fujitsu_laptop_press(struct acpi_device *device, int scancode)
{
struct fujitsu_laptop *priv = acpi_driver_data(device);
int ret;
ret = kfifo_in_locked(&priv->fifo, (unsigned char *)&scancode,
sizeof(scancode), &priv->fifo_lock);
if (ret != sizeof(scancode)) {
dev_info(&priv->input->dev, "Could not push scancode [0x%x]\n",
scancode);
return;
}
sparse_keymap_report_event(priv->input, scancode, 1, false);
dev_dbg(&priv->input->dev, "Push scancode into ringbuffer [0x%x]\n",
scancode);
}
static void acpi_fujitsu_laptop_release(struct acpi_device *device)
{
struct fujitsu_laptop *priv = acpi_driver_data(device);
int scancode, ret;
while (true) {
ret = kfifo_out_locked(&priv->fifo, (unsigned char *)&scancode,
sizeof(scancode), &priv->fifo_lock);
if (ret != sizeof(scancode))
return;
sparse_keymap_report_event(priv->input, scancode, 0, false);
dev_dbg(&priv->input->dev,
"Pop scancode from ringbuffer [0x%x]\n", scancode);
}
}
static void acpi_fujitsu_laptop_notify(struct acpi_device *device, u32 event)
{
struct fujitsu_laptop *priv = acpi_driver_data(device);
unsigned long flags;
int scancode, i = 0;
unsigned int irb;
if (event != ACPI_FUJITSU_NOTIFY_CODE) {
acpi_handle_info(device->handle, "Unsupported event [0x%x]\n",
event);
sparse_keymap_report_event(priv->input, -1, 1, true);
return;
}
if (priv->flags_supported)
priv->flags_state = call_fext_func(device, FUNC_FLAGS, 0x4, 0x0,
0x0);
while ((irb = call_fext_func(device,
FUNC_BUTTONS, 0x1, 0x0, 0x0)) != 0 &&
i++ < MAX_HOTKEY_RINGBUFFER_SIZE) {
scancode = irb & 0x4ff;
if (sparse_keymap_entry_from_scancode(priv->input, scancode))
acpi_fujitsu_laptop_press(device, scancode);
else if (scancode == 0)
acpi_fujitsu_laptop_release(device);
else
acpi_handle_info(device->handle,
"Unknown GIRB result [%x]\n", irb);
}
/*
 * First seen on the Skylake-based Lifebook E736/E746/E756, the
* touchpad toggle hotkey (Fn+F4) is handled in software. Other models
* have since added additional "soft keys". These are reported in the
* status flags queried using FUNC_FLAGS.
*/
if (priv->flags_supported & (FLAG_SOFTKEYS)) {
flags = call_fext_func(device, FUNC_FLAGS, 0x1, 0x0, 0x0);
flags &= (FLAG_SOFTKEYS);
for_each_set_bit(i, &flags, BITS_PER_LONG)
sparse_keymap_report_event(priv->input, BIT(i), 1, true);
}
}
/* Initialization */
static const struct acpi_device_id fujitsu_bl_device_ids[] = {
{ACPI_FUJITSU_BL_HID, 0},
{"", 0},
};
static struct acpi_driver acpi_fujitsu_bl_driver = {
.name = ACPI_FUJITSU_BL_DRIVER_NAME,
.class = ACPI_FUJITSU_CLASS,
.ids = fujitsu_bl_device_ids,
.ops = {
.add = acpi_fujitsu_bl_add,
.notify = acpi_fujitsu_bl_notify,
},
};
static const struct acpi_device_id fujitsu_laptop_device_ids[] = {
{ACPI_FUJITSU_LAPTOP_HID, 0},
{"", 0},
};
static struct acpi_driver acpi_fujitsu_laptop_driver = {
.name = ACPI_FUJITSU_LAPTOP_DRIVER_NAME,
.class = ACPI_FUJITSU_CLASS,
.ids = fujitsu_laptop_device_ids,
.ops = {
.add = acpi_fujitsu_laptop_add,
.remove = acpi_fujitsu_laptop_remove,
.notify = acpi_fujitsu_laptop_notify,
},
};
static const struct acpi_device_id fujitsu_ids[] __used = {
{ACPI_FUJITSU_BL_HID, 0},
{ACPI_FUJITSU_LAPTOP_HID, 0},
{"", 0}
};
MODULE_DEVICE_TABLE(acpi, fujitsu_ids);
static int __init fujitsu_init(void)
{
int ret;
ret = acpi_bus_register_driver(&acpi_fujitsu_bl_driver);
if (ret)
return ret;
/* Register platform stuff */
ret = platform_driver_register(&fujitsu_pf_driver);
if (ret)
goto err_unregister_acpi;
/* Register laptop driver */
ret = acpi_bus_register_driver(&acpi_fujitsu_laptop_driver);
if (ret)
goto err_unregister_platform_driver;
pr_info("driver " FUJITSU_DRIVER_VERSION " successfully loaded\n");
return 0;
err_unregister_platform_driver:
platform_driver_unregister(&fujitsu_pf_driver);
err_unregister_acpi:
acpi_bus_unregister_driver(&acpi_fujitsu_bl_driver);
return ret;
}
static void __exit fujitsu_cleanup(void)
{
acpi_bus_unregister_driver(&acpi_fujitsu_laptop_driver);
platform_driver_unregister(&fujitsu_pf_driver);
acpi_bus_unregister_driver(&acpi_fujitsu_bl_driver);
pr_info("driver unloaded\n");
}
module_init(fujitsu_init);
module_exit(fujitsu_cleanup);
module_param(use_alt_lcd_levels, int, 0644);
MODULE_PARM_DESC(use_alt_lcd_levels, "Interface used for setting LCD brightness level (-1 = auto, 0 = force SBLL, 1 = force SBL2)");
module_param(disable_brightness_adjust, bool, 0644);
MODULE_PARM_DESC(disable_brightness_adjust, "Disable LCD brightness adjustment");
MODULE_AUTHOR("Jonathan Woithe, Peter Gruber, Tony Vroon");
MODULE_DESCRIPTION("Fujitsu laptop extras support");
MODULE_VERSION(FUJITSU_DRIVER_VERSION);
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/x86/fujitsu-laptop.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Primary to Sideband (P2SB) bridge access support
*
* Copyright (c) 2017, 2021-2022 Intel Corporation.
*
* Authors: Andy Shevchenko <[email protected]>
* Jonathan Yong <[email protected]>
*/
#include <linux/bits.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/platform_data/x86/p2sb.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#define P2SBC 0xe0
#define P2SBC_HIDE BIT(8)
#define P2SB_DEVFN_DEFAULT PCI_DEVFN(31, 1)
static const struct x86_cpu_id p2sb_cpu_ids[] = {
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, PCI_DEVFN(13, 0)),
{}
};
static int p2sb_get_devfn(unsigned int *devfn)
{
unsigned int fn = P2SB_DEVFN_DEFAULT;
const struct x86_cpu_id *id;
id = x86_match_cpu(p2sb_cpu_ids);
if (id)
fn = (unsigned int)id->driver_data;
*devfn = fn;
return 0;
}
/* Copy resource from the first BAR of the device in question */
static int p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
{
struct resource *bar0 = &pdev->resource[0];
/* Make sure we have no dangling pointers in the output */
memset(mem, 0, sizeof(*mem));
/*
* We copy only selected fields from the original resource.
* Because a PCI device will be removed soon, we may not use
* any allocated data, hence we may not copy any pointers.
*/
mem->start = bar0->start;
mem->end = bar0->end;
mem->flags = bar0->flags;
mem->desc = bar0->desc;
return 0;
}
static int p2sb_scan_and_read(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
{
struct pci_dev *pdev;
int ret;
pdev = pci_scan_single_device(bus, devfn);
if (!pdev)
return -ENODEV;
ret = p2sb_read_bar0(pdev, mem);
pci_stop_and_remove_bus_device(pdev);
return ret;
}
/**
* p2sb_bar - Get Primary to Sideband (P2SB) bridge device BAR
* @bus: PCI bus to communicate with
* @devfn: PCI slot and function to communicate with
* @mem: memory resource to be filled in
*
 * The BIOS prevents the P2SB device from being enumerated by the PCI
 * subsystem, so we need to unhide it and then hide it again to look up
 * the BAR.
 *
 * If @bus is NULL, bus 0 in domain 0 will be used.
 * If @devfn is 0, it will be replaced by the devfn of the P2SB device.
*
* Caller must provide a valid pointer to @mem.
*
* Locking is handled by pci_rescan_remove_lock mutex.
*
* Return:
* 0 on success or appropriate errno value on error.
*/
int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
{
struct pci_dev *pdev_p2sb;
unsigned int devfn_p2sb;
u32 value = P2SBC_HIDE;
int ret;
/* Get devfn for P2SB device itself */
ret = p2sb_get_devfn(&devfn_p2sb);
if (ret)
return ret;
/* if @bus is NULL, use bus 0 in domain 0 */
bus = bus ?: pci_find_bus(0, 0);
/*
* Prevent concurrent PCI bus scan from seeing the P2SB device and
* removing via sysfs while it is temporarily exposed.
*/
pci_lock_rescan_remove();
/* Unhide the P2SB device, if needed */
pci_bus_read_config_dword(bus, devfn_p2sb, P2SBC, &value);
if (value & P2SBC_HIDE)
pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, 0);
pdev_p2sb = pci_scan_single_device(bus, devfn_p2sb);
if (devfn)
ret = p2sb_scan_and_read(bus, devfn, mem);
else
ret = p2sb_read_bar0(pdev_p2sb, mem);
pci_stop_and_remove_bus_device(pdev_p2sb);
/* Hide the P2SB device, if it was hidden */
if (value & P2SBC_HIDE)
pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, P2SBC_HIDE);
pci_unlock_rescan_remove();
if (ret)
return ret;
if (mem->flags == 0)
return -ENODEV;
return 0;
}
EXPORT_SYMBOL_GPL(p2sb_bar);
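/*
 * Illustrative sketch (guarded out): a typical lookup of the P2SB BAR
 * itself. Passing a NULL bus selects bus 0 in domain 0 and a zero devfn
 * selects the P2SB device, as documented above.
 */
#if 0
static int example_find_p2sb_bar(void)
{
struct resource mem;
int ret;
ret = p2sb_bar(NULL, 0, &mem);
if (ret)
return ret;
pr_info("P2SB BAR: %pR\n", &mem);
return 0;
}
#endif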
| linux-master | drivers/platform/x86/p2sb.c |
// SPDX-License-Identifier: GPL-2.0
/*
* PCI driver for the Intel SCU.
*
* Copyright (C) 2008-2010, 2015, 2020 Intel Corporation
* Authors: Sreedhara DS ([email protected])
* Mika Westerberg <[email protected]>
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <asm/intel-mid.h>
#include <asm/intel_scu_ipc.h>
static int intel_scu_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct intel_scu_ipc_data scu_data = {};
struct intel_scu_ipc_dev *scu;
int ret;
ret = pcim_enable_device(pdev);
if (ret)
return ret;
scu_data.mem = pdev->resource[0];
scu_data.irq = pdev->irq;
scu = intel_scu_ipc_register(&pdev->dev, &scu_data);
return PTR_ERR_OR_ZERO(scu);
}
static const struct pci_device_id pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x080e) },
{ PCI_VDEVICE(INTEL, 0x082a) },
{ PCI_VDEVICE(INTEL, 0x08ea) },
{ PCI_VDEVICE(INTEL, 0x0a94) },
{ PCI_VDEVICE(INTEL, 0x11a0) },
{ PCI_VDEVICE(INTEL, 0x1a94) },
{ PCI_VDEVICE(INTEL, 0x5a94) },
{}
};
static struct pci_driver intel_scu_pci_driver = {
.driver = {
.suppress_bind_attrs = true,
},
.name = "intel_scu",
.id_table = pci_ids,
.probe = intel_scu_pci_probe,
};
builtin_pci_driver(intel_scu_pci_driver);
| linux-master | drivers/platform/x86/intel_scu_pcidrv.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Firmware attributes class helper module */
#include <linux/mutex.h>
#include <linux/device/class.h>
#include <linux/module.h>
#include "firmware_attributes_class.h"
static DEFINE_MUTEX(fw_attr_lock);
static int fw_attr_inuse;
static struct class firmware_attributes_class = {
.name = "firmware-attributes",
};
int fw_attributes_class_get(struct class **fw_attr_class)
{
int err;
mutex_lock(&fw_attr_lock);
if (!fw_attr_inuse) { /* first time the class is being used */
err = class_register(&firmware_attributes_class);
if (err) {
mutex_unlock(&fw_attr_lock);
return err;
}
}
fw_attr_inuse++;
*fw_attr_class = &firmware_attributes_class;
mutex_unlock(&fw_attr_lock);
return 0;
}
EXPORT_SYMBOL_GPL(fw_attributes_class_get);
int fw_attributes_class_put(void)
{
mutex_lock(&fw_attr_lock);
if (!fw_attr_inuse) {
mutex_unlock(&fw_attr_lock);
return -EINVAL;
}
fw_attr_inuse--;
if (!fw_attr_inuse) /* No more consumers */
class_unregister(&firmware_attributes_class);
mutex_unlock(&fw_attr_lock);
return 0;
}
EXPORT_SYMBOL_GPL(fw_attributes_class_put);
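/*
 * Illustrative sketch (guarded out): how a consumer pairs the get/put
 * helpers above. Everything between the two calls is left to the driver;
 * the function names are invented.
 */
#if 0
static struct class *example_fw_attr_class;
static int example_register(void)
{
int err;
err = fw_attributes_class_get(&example_fw_attr_class);
if (err)
return err;
/* ... create attribute devices under example_fw_attr_class ... */
return 0;
}
static void example_unregister(void)
{
/* ... remove the attribute devices ... */
fw_attributes_class_put();
}
#endif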
MODULE_AUTHOR("Mark Pearson <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/x86/firmware_attributes_class.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* lg-laptop.c - LG Gram ACPI features and hotkeys Driver
*
* Copyright (C) 2018 Matan Ziv-Av <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <acpi/battery.h>
#define LED_DEVICE(_name, max, flag) struct led_classdev _name = { \
.name = __stringify(_name), \
.max_brightness = max, \
.brightness_set = _name##_set, \
.brightness_get = _name##_get, \
.flags = flag, \
}
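/*
 * For reference, the later LED_DEVICE(tpad_led, 1, 0) use expands to
 * roughly:
 *
 *	struct led_classdev tpad_led = {
 *		.name = "tpad_led",
 *		.max_brightness = 1,
 *		.brightness_set = tpad_led_set,
 *		.brightness_get = tpad_led_get,
 *		.flags = 0,
 *	};
 */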
MODULE_AUTHOR("Matan Ziv-Av");
MODULE_DESCRIPTION("LG WMI Hotkey Driver");
MODULE_LICENSE("GPL");
#define WMI_EVENT_GUID0 "E4FB94F9-7F2B-4173-AD1A-CD1D95086248"
#define WMI_EVENT_GUID1 "023B133E-49D1-4E10-B313-698220140DC2"
#define WMI_EVENT_GUID2 "37BE1AC0-C3F2-4B1F-BFBE-8FDEAF2814D6"
#define WMI_EVENT_GUID3 "911BAD44-7DF8-4FBB-9319-BABA1C4B293B"
#define WMI_METHOD_WMAB "C3A72B38-D3EF-42D3-8CBB-D5A57049F66D"
#define WMI_METHOD_WMBB "2B4F501A-BD3C-4394-8DCF-00A7D2BC8210"
#define WMI_EVENT_GUID WMI_EVENT_GUID0
#define WMAB_METHOD "\\XINI.WMAB"
#define WMBB_METHOD "\\XINI.WMBB"
#define SB_GGOV_METHOD "\\_SB.GGOV"
#define GOV_TLED 0x2020008
#define WM_GET 1
#define WM_SET 2
#define WM_KEY_LIGHT 0x400
#define WM_TLED 0x404
#define WM_FN_LOCK 0x407
#define WM_BATT_LIMIT 0x61
#define WM_READER_MODE 0xBF
#define WM_FAN_MODE 0x33
#define WMBB_USB_CHARGE 0x10B
#define WMBB_BATT_LIMIT 0x10C
#define PLATFORM_NAME "lg-laptop"
MODULE_ALIAS("wmi:" WMI_EVENT_GUID0);
MODULE_ALIAS("wmi:" WMI_EVENT_GUID1);
MODULE_ALIAS("wmi:" WMI_EVENT_GUID2);
MODULE_ALIAS("wmi:" WMI_EVENT_GUID3);
MODULE_ALIAS("wmi:" WMI_METHOD_WMAB);
MODULE_ALIAS("wmi:" WMI_METHOD_WMBB);
static struct platform_device *pf_device;
static struct input_dev *wmi_input_dev;
static u32 inited;
#define INIT_INPUT_WMI_0 0x01
#define INIT_INPUT_WMI_2 0x02
#define INIT_INPUT_ACPI 0x04
#define INIT_SPARSE_KEYMAP 0x80
static int battery_limit_use_wmbb;
static struct led_classdev kbd_backlight;
static enum led_brightness get_kbd_backlight_level(void);
static const struct key_entry wmi_keymap[] = {
{KE_KEY, 0x70, {KEY_F15} }, /* LG control panel (F1) */
{KE_KEY, 0x74, {KEY_F21} }, /* Touchpad toggle (F5) */
{KE_KEY, 0xf020000, {KEY_F14} }, /* Read mode (F9) */
{KE_KEY, 0x10000000, {KEY_F16} },/* Keyboard backlight (F8) - pressing
* this key both sends an event and
* changes backlight level.
*/
{KE_KEY, 0x80, {KEY_RFKILL} },
{KE_END, 0}
};
static int ggov(u32 arg0)
{
union acpi_object args[1];
union acpi_object *r;
acpi_status status;
acpi_handle handle;
struct acpi_object_list arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
int res;
args[0].type = ACPI_TYPE_INTEGER;
args[0].integer.value = arg0;
status = acpi_get_handle(NULL, (acpi_string) SB_GGOV_METHOD, &handle);
if (ACPI_FAILURE(status)) {
pr_err("Cannot get handle");
return -ENODEV;
}
arg.count = 1;
arg.pointer = args;
status = acpi_evaluate_object(handle, NULL, &arg, &buffer);
if (ACPI_FAILURE(status)) {
acpi_handle_err(handle, "GGOV: call failed.\n");
return -EINVAL;
}
r = buffer.pointer;
if (r->type != ACPI_TYPE_INTEGER) {
kfree(r);
return -EINVAL;
}
res = r->integer.value;
kfree(r);
return res;
}
static union acpi_object *lg_wmab(u32 method, u32 arg1, u32 arg2)
{
union acpi_object args[3];
acpi_status status;
acpi_handle handle;
struct acpi_object_list arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
args[0].type = ACPI_TYPE_INTEGER;
args[0].integer.value = method;
args[1].type = ACPI_TYPE_INTEGER;
args[1].integer.value = arg1;
args[2].type = ACPI_TYPE_INTEGER;
args[2].integer.value = arg2;
status = acpi_get_handle(NULL, (acpi_string) WMAB_METHOD, &handle);
if (ACPI_FAILURE(status)) {
pr_err("Cannot get handle");
return NULL;
}
arg.count = 3;
arg.pointer = args;
status = acpi_evaluate_object(handle, NULL, &arg, &buffer);
if (ACPI_FAILURE(status)) {
acpi_handle_err(handle, "WMAB: call failed.\n");
return NULL;
}
return buffer.pointer;
}
static union acpi_object *lg_wmbb(u32 method_id, u32 arg1, u32 arg2)
{
union acpi_object args[3];
acpi_status status;
acpi_handle handle;
struct acpi_object_list arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
u8 buf[32];
*(u32 *)buf = method_id;
*(u32 *)(buf + 4) = arg1;
*(u32 *)(buf + 16) = arg2;
args[0].type = ACPI_TYPE_INTEGER;
args[0].integer.value = 0; /* ignored */
args[1].type = ACPI_TYPE_INTEGER;
args[1].integer.value = 1; /* Must be 1 or 2. Does not matter which */
args[2].type = ACPI_TYPE_BUFFER;
args[2].buffer.length = 32;
args[2].buffer.pointer = buf;
status = acpi_get_handle(NULL, (acpi_string)WMBB_METHOD, &handle);
if (ACPI_FAILURE(status)) {
pr_err("Cannot get handle");
return NULL;
}
arg.count = 3;
arg.pointer = args;
status = acpi_evaluate_object(handle, NULL, &arg, &buffer);
if (ACPI_FAILURE(status)) {
acpi_handle_err(handle, "WMBB: call failed.\n");
return NULL;
}
return (union acpi_object *)buffer.pointer;
}
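/*
 * Illustrative sketch (guarded out): reading the battery care limit
 * through the WMBB interface, as charge_control_end_threshold_show() does
 * below on models that set battery_limit_use_wmbb. The result byte lives
 * at offset 0x10 of the returned buffer.
 */
#if 0
static int example_read_batt_limit(void)
{
union acpi_object *r;
int limit;
r = lg_wmbb(WMBB_BATT_LIMIT, WM_GET, 0);
if (!r)
return -EIO;
if (r->type != ACPI_TYPE_BUFFER) {
kfree(r);
return -EIO;
}
limit = r->buffer.pointer[0x10];
kfree(r);
return limit;
}
#endif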
static void wmi_notify(u32 value, void *context)
{
struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
acpi_status status;
long data = (long)context;
pr_debug("event guid %li\n", data);
status = wmi_get_event_data(value, &response);
if (ACPI_FAILURE(status)) {
pr_err("Bad event status 0x%x\n", status);
return;
}
obj = (union acpi_object *)response.pointer;
if (!obj)
return;
if (obj->type == ACPI_TYPE_INTEGER) {
int eventcode = obj->integer.value;
struct key_entry *key;
if (eventcode == 0x10000000) {
led_classdev_notify_brightness_hw_changed(
&kbd_backlight, get_kbd_backlight_level());
} else {
key = sparse_keymap_entry_from_scancode(
wmi_input_dev, eventcode);
if (key && key->type == KE_KEY)
sparse_keymap_report_entry(wmi_input_dev,
key, 1, true);
}
}
pr_debug("Type: %i Eventcode: 0x%llx\n", obj->type,
obj->integer.value);
kfree(response.pointer);
}
static void wmi_input_setup(void)
{
acpi_status status;
wmi_input_dev = input_allocate_device();
if (wmi_input_dev) {
wmi_input_dev->name = "LG WMI hotkeys";
wmi_input_dev->phys = "wmi/input0";
wmi_input_dev->id.bustype = BUS_HOST;
if (sparse_keymap_setup(wmi_input_dev, wmi_keymap, NULL) ||
input_register_device(wmi_input_dev)) {
pr_info("Cannot initialize input device");
input_free_device(wmi_input_dev);
return;
}
inited |= INIT_SPARSE_KEYMAP;
status = wmi_install_notify_handler(WMI_EVENT_GUID0, wmi_notify,
(void *)0);
if (ACPI_SUCCESS(status))
inited |= INIT_INPUT_WMI_0;
status = wmi_install_notify_handler(WMI_EVENT_GUID2, wmi_notify,
(void *)2);
if (ACPI_SUCCESS(status))
inited |= INIT_INPUT_WMI_2;
} else {
pr_info("Cannot allocate input device");
}
}
static void acpi_notify(struct acpi_device *device, u32 event)
{
struct key_entry *key;
acpi_handle_debug(device->handle, "notify: %d\n", event);
if (inited & INIT_SPARSE_KEYMAP) {
key = sparse_keymap_entry_from_scancode(wmi_input_dev, 0x80);
if (key && key->type == KE_KEY)
sparse_keymap_report_entry(wmi_input_dev, key, 1, true);
}
}
static ssize_t fan_mode_store(struct device *dev,
struct device_attribute *attr,
const char *buffer, size_t count)
{
bool value;
union acpi_object *r;
u32 m;
int ret;
ret = kstrtobool(buffer, &value);
if (ret)
return ret;
r = lg_wmab(WM_FAN_MODE, WM_GET, 0);
if (!r)
return -EIO;
if (r->type != ACPI_TYPE_INTEGER) {
kfree(r);
return -EIO;
}
m = r->integer.value;
kfree(r);
r = lg_wmab(WM_FAN_MODE, WM_SET, (m & 0xffffff0f) | (value << 4));
kfree(r);
r = lg_wmab(WM_FAN_MODE, WM_SET, (m & 0xfffffff0) | value);
kfree(r);
return count;
}
static ssize_t fan_mode_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
unsigned int status;
union acpi_object *r;
r = lg_wmab(WM_FAN_MODE, WM_GET, 0);
if (!r)
return -EIO;
if (r->type != ACPI_TYPE_INTEGER) {
kfree(r);
return -EIO;
}
status = r->integer.value & 0x01;
kfree(r);
return sysfs_emit(buffer, "%d\n", status);
}
static ssize_t usb_charge_store(struct device *dev,
struct device_attribute *attr,
const char *buffer, size_t count)
{
bool value;
union acpi_object *r;
int ret;
ret = kstrtobool(buffer, &value);
if (ret)
return ret;
r = lg_wmbb(WMBB_USB_CHARGE, WM_SET, value);
if (!r)
return -EIO;
kfree(r);
return count;
}
static ssize_t usb_charge_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
unsigned int status;
union acpi_object *r;
r = lg_wmbb(WMBB_USB_CHARGE, WM_GET, 0);
if (!r)
return -EIO;
if (r->type != ACPI_TYPE_BUFFER) {
kfree(r);
return -EIO;
}
status = !!r->buffer.pointer[0x10];
kfree(r);
return sysfs_emit(buffer, "%d\n", status);
}
static ssize_t reader_mode_store(struct device *dev,
struct device_attribute *attr,
const char *buffer, size_t count)
{
bool value;
union acpi_object *r;
int ret;
ret = kstrtobool(buffer, &value);
if (ret)
return ret;
r = lg_wmab(WM_READER_MODE, WM_SET, value);
if (!r)
return -EIO;
kfree(r);
return count;
}
static ssize_t reader_mode_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
unsigned int status;
union acpi_object *r;
r = lg_wmab(WM_READER_MODE, WM_GET, 0);
if (!r)
return -EIO;
if (r->type != ACPI_TYPE_INTEGER) {
kfree(r);
return -EIO;
}
status = !!r->integer.value;
kfree(r);
return sysfs_emit(buffer, "%d\n", status);
}
static ssize_t fn_lock_store(struct device *dev,
struct device_attribute *attr,
const char *buffer, size_t count)
{
bool value;
union acpi_object *r;
int ret;
ret = kstrtobool(buffer, &value);
if (ret)
return ret;
r = lg_wmab(WM_FN_LOCK, WM_SET, value);
if (!r)
return -EIO;
kfree(r);
return count;
}
static ssize_t fn_lock_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
unsigned int status;
union acpi_object *r;
r = lg_wmab(WM_FN_LOCK, WM_GET, 0);
if (!r)
return -EIO;
if (r->type != ACPI_TYPE_BUFFER) {
kfree(r);
return -EIO;
}
status = !!r->buffer.pointer[0];
kfree(r);
return sysfs_emit(buffer, "%d\n", status);
}
static ssize_t charge_control_end_threshold_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned long value;
int ret;
ret = kstrtoul(buf, 10, &value);
if (ret)
return ret;
if (value == 100 || value == 80) {
union acpi_object *r;
if (battery_limit_use_wmbb)
r = lg_wmbb(WMBB_BATT_LIMIT, WM_SET, value);
else
r = lg_wmab(WM_BATT_LIMIT, WM_SET, value);
if (!r)
return -EIO;
kfree(r);
return count;
}
return -EINVAL;
}
static ssize_t charge_control_end_threshold_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
unsigned int status;
union acpi_object *r;
if (battery_limit_use_wmbb) {
r = lg_wmbb(WMBB_BATT_LIMIT, WM_GET, 0);
if (!r)
return -EIO;
if (r->type != ACPI_TYPE_BUFFER) {
kfree(r);
return -EIO;
}
status = r->buffer.pointer[0x10];
} else {
r = lg_wmab(WM_BATT_LIMIT, WM_GET, 0);
if (!r)
return -EIO;
if (r->type != ACPI_TYPE_INTEGER) {
kfree(r);
return -EIO;
}
status = r->integer.value;
}
kfree(r);
if (status != 80 && status != 100)
status = 0;
return sysfs_emit(buf, "%d\n", status);
}
static ssize_t battery_care_limit_show(struct device *dev,
struct device_attribute *attr,
char *buffer)
{
return charge_control_end_threshold_show(dev, attr, buffer);
}
static ssize_t battery_care_limit_store(struct device *dev,
struct device_attribute *attr,
const char *buffer, size_t count)
{
return charge_control_end_threshold_store(dev, attr, buffer, count);
}
static DEVICE_ATTR_RW(fan_mode);
static DEVICE_ATTR_RW(usb_charge);
static DEVICE_ATTR_RW(reader_mode);
static DEVICE_ATTR_RW(fn_lock);
static DEVICE_ATTR_RW(charge_control_end_threshold);
static DEVICE_ATTR_RW(battery_care_limit);
static int lg_battery_add(struct power_supply *battery, struct acpi_battery_hook *hook)
{
if (device_create_file(&battery->dev,
&dev_attr_charge_control_end_threshold))
return -ENODEV;
return 0;
}
static int lg_battery_remove(struct power_supply *battery, struct acpi_battery_hook *hook)
{
device_remove_file(&battery->dev,
&dev_attr_charge_control_end_threshold);
return 0;
}
static struct acpi_battery_hook battery_hook = {
.add_battery = lg_battery_add,
.remove_battery = lg_battery_remove,
.name = "LG Battery Extension",
};
static struct attribute *dev_attributes[] = {
&dev_attr_fan_mode.attr,
&dev_attr_usb_charge.attr,
&dev_attr_reader_mode.attr,
&dev_attr_fn_lock.attr,
&dev_attr_battery_care_limit.attr,
NULL
};
static const struct attribute_group dev_attribute_group = {
.attrs = dev_attributes,
};
static void tpad_led_set(struct led_classdev *cdev,
enum led_brightness brightness)
{
union acpi_object *r;
r = lg_wmab(WM_TLED, WM_SET, brightness > LED_OFF);
kfree(r);
}
static enum led_brightness tpad_led_get(struct led_classdev *cdev)
{
return ggov(GOV_TLED) > 0 ? LED_ON : LED_OFF;
}
static LED_DEVICE(tpad_led, 1, 0);
static void kbd_backlight_set(struct led_classdev *cdev,
enum led_brightness brightness)
{
u32 val;
union acpi_object *r;
val = 0x22;
if (brightness <= LED_OFF)
val = 0;
if (brightness >= LED_FULL)
val = 0x24;
r = lg_wmab(WM_KEY_LIGHT, WM_SET, val);
kfree(r);
}
static enum led_brightness get_kbd_backlight_level(void)
{
union acpi_object *r;
int val;
r = lg_wmab(WM_KEY_LIGHT, WM_GET, 0);
if (!r)
return LED_OFF;
if (r->type != ACPI_TYPE_BUFFER || r->buffer.pointer[1] != 0x05) {
kfree(r);
return LED_OFF;
}
switch (r->buffer.pointer[0] & 0x27) {
case 0x24:
val = LED_FULL;
break;
case 0x22:
val = LED_HALF;
break;
default:
val = LED_OFF;
}
kfree(r);
return val;
}
static enum led_brightness kbd_backlight_get(struct led_classdev *cdev)
{
return get_kbd_backlight_level();
}
static LED_DEVICE(kbd_backlight, 255, LED_BRIGHT_HW_CHANGED);
static void wmi_input_destroy(void)
{
if (inited & INIT_INPUT_WMI_2)
wmi_remove_notify_handler(WMI_EVENT_GUID2);
if (inited & INIT_INPUT_WMI_0)
wmi_remove_notify_handler(WMI_EVENT_GUID0);
if (inited & INIT_SPARSE_KEYMAP)
input_unregister_device(wmi_input_dev);
inited &= ~(INIT_INPUT_WMI_0 | INIT_INPUT_WMI_2 | INIT_SPARSE_KEYMAP);
}
static struct platform_driver pf_driver = {
.driver = {
.name = PLATFORM_NAME,
}
};
static int acpi_add(struct acpi_device *device)
{
int ret;
const char *product;
int year = 2017;
if (pf_device)
return 0;
ret = platform_driver_register(&pf_driver);
if (ret)
return ret;
pf_device = platform_device_register_simple(PLATFORM_NAME,
PLATFORM_DEVID_NONE,
NULL, 0);
if (IS_ERR(pf_device)) {
ret = PTR_ERR(pf_device);
pf_device = NULL;
pr_err("unable to register platform device\n");
goto out_platform_registered;
}
product = dmi_get_system_info(DMI_PRODUCT_NAME);
if (product && strlen(product) > 4)
switch (product[4]) {
case '5':
if (strlen(product) > 5)
switch (product[5]) {
case 'N':
year = 2021;
break;
case '0':
year = 2016;
break;
default:
year = 2022;
}
break;
case '6':
year = 2016;
break;
case '7':
year = 2017;
break;
case '8':
year = 2018;
break;
case '9':
year = 2019;
break;
case '0':
if (strlen(product) > 5)
switch (product[5]) {
case 'N':
year = 2020;
break;
case 'P':
year = 2021;
break;
default:
year = 2022;
}
break;
default:
year = 2019;
}
pr_info("product: %s year: %d\n", product, year);
if (year >= 2019)
battery_limit_use_wmbb = 1;
ret = sysfs_create_group(&pf_device->dev.kobj, &dev_attribute_group);
if (ret)
goto out_platform_device;
/* LEDs are optional */
led_classdev_register(&pf_device->dev, &kbd_backlight);
led_classdev_register(&pf_device->dev, &tpad_led);
wmi_input_setup();
battery_hook_register(&battery_hook);
return 0;
out_platform_device:
platform_device_unregister(pf_device);
out_platform_registered:
platform_driver_unregister(&pf_driver);
return ret;
}
static void acpi_remove(struct acpi_device *device)
{
sysfs_remove_group(&pf_device->dev.kobj, &dev_attribute_group);
led_classdev_unregister(&tpad_led);
led_classdev_unregister(&kbd_backlight);
battery_hook_unregister(&battery_hook);
wmi_input_destroy();
platform_device_unregister(pf_device);
pf_device = NULL;
platform_driver_unregister(&pf_driver);
}
static const struct acpi_device_id device_ids[] = {
{"LGEX0815", 0},
{"", 0}
};
MODULE_DEVICE_TABLE(acpi, device_ids);
static struct acpi_driver acpi_driver = {
.name = "LG Gram Laptop Support",
.class = "lg-laptop",
.ids = device_ids,
.ops = {
.add = acpi_add,
.remove = acpi_remove,
.notify = acpi_notify,
},
.owner = THIS_MODULE,
};
static int __init acpi_init(void)
{
int result;
result = acpi_bus_register_driver(&acpi_driver);
if (result < 0) {
pr_debug("Error registering driver\n");
return -ENODEV;
}
return 0;
}
static void __exit acpi_exit(void)
{
acpi_bus_unregister_driver(&acpi_driver);
}
module_init(acpi_init);
module_exit(acpi_exit);
| linux-master | drivers/platform/x86/lg-laptop.c |