// SPDX-License-Identifier: GPL-2.0
/*
* TI TPS68470 PMIC operation region driver
*
* Copyright (C) 2017 Intel Corporation. All rights reserved.
*
* Author: Rajmohan Mani <[email protected]>
*
* Based on drivers/acpi/pmic/intel_pmic* drivers
*/
#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/mfd/tps68470.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
struct tps68470_pmic_table {
u32 address; /* operation region address */
u32 reg; /* corresponding register */
u32 bitmask; /* bit mask for power, clock */
};
#define TI_PMIC_POWER_OPREGION_ID 0xB0
#define TI_PMIC_VR_VAL_OPREGION_ID 0xB1
#define TI_PMIC_CLOCK_OPREGION_ID 0xB2
#define TI_PMIC_CLKFREQ_OPREGION_ID 0xB3
struct tps68470_pmic_opregion {
struct mutex lock;
struct regmap *regmap;
};
#define S_IO_I2C_EN (BIT(0) | BIT(1))
static const struct tps68470_pmic_table power_table[] = {
{
.address = 0x00,
.reg = TPS68470_REG_S_I2C_CTL,
.bitmask = S_IO_I2C_EN,
/* S_I2C_CTL */
},
{
.address = 0x04,
.reg = TPS68470_REG_VCMCTL,
.bitmask = BIT(0),
/* VCMCTL */
},
{
.address = 0x08,
.reg = TPS68470_REG_VAUX1CTL,
.bitmask = BIT(0),
/* VAUX1_CTL */
},
{
.address = 0x0C,
.reg = TPS68470_REG_VAUX2CTL,
.bitmask = BIT(0),
/* VAUX2CTL */
},
{
.address = 0x10,
.reg = TPS68470_REG_VACTL,
.bitmask = BIT(0),
/* VACTL */
},
{
.address = 0x14,
.reg = TPS68470_REG_VDCTL,
.bitmask = BIT(0),
/* VDCTL */
},
};
/* Table to set voltage regulator value */
static const struct tps68470_pmic_table vr_val_table[] = {
{
.address = 0x00,
.reg = TPS68470_REG_VSIOVAL,
.bitmask = TPS68470_VSIOVAL_IOVOLT_MASK,
/* TPS68470_REG_VSIOVAL */
},
{
.address = 0x04,
.reg = TPS68470_REG_VIOVAL,
.bitmask = TPS68470_VIOVAL_IOVOLT_MASK,
/* TPS68470_REG_VIOVAL */
},
{
.address = 0x08,
.reg = TPS68470_REG_VCMVAL,
.bitmask = TPS68470_VCMVAL_VCVOLT_MASK,
/* TPS68470_REG_VCMVAL */
},
{
.address = 0x0C,
.reg = TPS68470_REG_VAUX1VAL,
.bitmask = TPS68470_VAUX1VAL_AUX1VOLT_MASK,
/* TPS68470_REG_VAUX1VAL */
},
{
.address = 0x10,
.reg = TPS68470_REG_VAUX2VAL,
.bitmask = TPS68470_VAUX2VAL_AUX2VOLT_MASK,
/* TPS68470_REG_VAUX2VAL */
},
{
.address = 0x14,
.reg = TPS68470_REG_VAVAL,
.bitmask = TPS68470_VAVAL_AVOLT_MASK,
/* TPS68470_REG_VAVAL */
},
{
.address = 0x18,
.reg = TPS68470_REG_VDVAL,
.bitmask = TPS68470_VDVAL_DVOLT_MASK,
/* TPS68470_REG_VDVAL */
},
};
/* Table to configure clock frequency */
static const struct tps68470_pmic_table clk_freq_table[] = {
{
.address = 0x00,
.reg = TPS68470_REG_POSTDIV2,
.bitmask = BIT(0) | BIT(1),
/* TPS68470_REG_POSTDIV2 */
},
{
.address = 0x04,
.reg = TPS68470_REG_BOOSTDIV,
.bitmask = 0x1F,
/* TPS68470_REG_BOOSTDIV */
},
{
.address = 0x08,
.reg = TPS68470_REG_BUCKDIV,
.bitmask = 0x0F,
/* TPS68470_REG_BUCKDIV */
},
{
.address = 0x0C,
.reg = TPS68470_REG_PLLSWR,
.bitmask = 0x13,
/* TPS68470_REG_PLLSWR */
},
{
.address = 0x10,
.reg = TPS68470_REG_XTALDIV,
.bitmask = 0xFF,
/* TPS68470_REG_XTALDIV */
},
{
.address = 0x14,
.reg = TPS68470_REG_PLLDIV,
.bitmask = 0xFF,
/* TPS68470_REG_PLLDIV */
},
{
.address = 0x18,
.reg = TPS68470_REG_POSTDIV,
.bitmask = 0x83,
/* TPS68470_REG_POSTDIV */
},
};
/* Table to configure and enable clocks */
static const struct tps68470_pmic_table clk_table[] = {
{
.address = 0x00,
.reg = TPS68470_REG_PLLCTL,
.bitmask = 0xF5,
/* TPS68470_REG_PLLCTL */
},
{
.address = 0x04,
.reg = TPS68470_REG_PLLCTL2,
.bitmask = BIT(0),
/* TPS68470_REG_PLLCTL2 */
},
{
.address = 0x08,
.reg = TPS68470_REG_CLKCFG1,
.bitmask = TPS68470_CLKCFG1_MODE_A_MASK |
TPS68470_CLKCFG1_MODE_B_MASK,
/* TPS68470_REG_CLKCFG1 */
},
{
.address = 0x0C,
.reg = TPS68470_REG_CLKCFG2,
.bitmask = TPS68470_CLKCFG1_MODE_A_MASK |
TPS68470_CLKCFG1_MODE_B_MASK,
/* TPS68470_REG_CLKCFG2 */
},
};
static int pmic_get_reg_bit(u64 address,
const struct tps68470_pmic_table *table,
const unsigned int table_size, int *reg,
int *bitmask)
{
u64 i;
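/* Each opregion field is 32 bits (4 bytes) wide, so the byte address maps directly to a table index. */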
i = address / 4;
if (i >= table_size)
return -ENOENT;
if (!reg || !bitmask)
return -EINVAL;
*reg = table[i].reg;
*bitmask = table[i].bitmask;
return 0;
}
static int tps68470_pmic_get_power(struct regmap *regmap, int reg,
int bitmask, u64 *value)
{
unsigned int data;
if (regmap_read(regmap, reg, &data))
return -EIO;
*value = (data & bitmask) ? 1 : 0;
return 0;
}
static int tps68470_pmic_get_vr_val(struct regmap *regmap, int reg,
int bitmask, u64 *value)
{
unsigned int data;
if (regmap_read(regmap, reg, &data))
return -EIO;
*value = data & bitmask;
return 0;
}
static int tps68470_pmic_get_clk(struct regmap *regmap, int reg,
int bitmask, u64 *value)
{
unsigned int data;
if (regmap_read(regmap, reg, &data))
return -EIO;
*value = (data & bitmask) ? 1 : 0;
return 0;
}
static int tps68470_pmic_get_clk_freq(struct regmap *regmap, int reg,
int bitmask, u64 *value)
{
unsigned int data;
if (regmap_read(regmap, reg, &data))
return -EIO;
*value = data & bitmask;
return 0;
}
static int ti_tps68470_regmap_update_bits(struct regmap *regmap, int reg,
int bitmask, u64 value)
{
return regmap_update_bits(regmap, reg, bitmask, value);
}
static acpi_status tps68470_pmic_common_handler(u32 function,
acpi_physical_address address,
u32 bits, u64 *value,
void *region_context,
int (*get)(struct regmap *,
int, int, u64 *),
int (*update)(struct regmap *,
int, int, u64),
const struct tps68470_pmic_table *tbl,
unsigned int tbl_size)
{
struct tps68470_pmic_opregion *opregion = region_context;
struct regmap *regmap = opregion->regmap;
int reg, ret, bitmask;
if (bits != 32)
return AE_BAD_PARAMETER;
ret = pmic_get_reg_bit(address, tbl, tbl_size, &reg, &bitmask);
if (ret < 0)
return AE_BAD_PARAMETER;
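/* Reject write values larger than the field's bitmask. */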
if (function == ACPI_WRITE && *value > bitmask)
return AE_BAD_PARAMETER;
mutex_lock(&opregion->lock);
ret = (function == ACPI_READ) ?
get(regmap, reg, bitmask, value) :
update(regmap, reg, bitmask, *value);
mutex_unlock(&opregion->lock);
return ret ? AE_ERROR : AE_OK;
}
static acpi_status tps68470_pmic_cfreq_handler(u32 function,
acpi_physical_address address,
u32 bits, u64 *value,
void *handler_context,
void *region_context)
{
return tps68470_pmic_common_handler(function, address, bits, value,
region_context,
tps68470_pmic_get_clk_freq,
ti_tps68470_regmap_update_bits,
clk_freq_table,
ARRAY_SIZE(clk_freq_table));
}
static acpi_status tps68470_pmic_clk_handler(u32 function,
acpi_physical_address address, u32 bits,
u64 *value, void *handler_context,
void *region_context)
{
return tps68470_pmic_common_handler(function, address, bits, value,
region_context,
tps68470_pmic_get_clk,
ti_tps68470_regmap_update_bits,
clk_table,
ARRAY_SIZE(clk_table));
}
static acpi_status tps68470_pmic_vrval_handler(u32 function,
acpi_physical_address address,
u32 bits, u64 *value,
void *handler_context,
void *region_context)
{
return tps68470_pmic_common_handler(function, address, bits, value,
region_context,
tps68470_pmic_get_vr_val,
ti_tps68470_regmap_update_bits,
vr_val_table,
ARRAY_SIZE(vr_val_table));
}
static acpi_status tps68470_pmic_pwr_handler(u32 function,
acpi_physical_address address,
u32 bits, u64 *value,
void *handler_context,
void *region_context)
{
if (bits != 32)
return AE_BAD_PARAMETER;
/* set/clear for bit 0, bits 0 and 1 together */
if (function == ACPI_WRITE &&
!(*value == 0 || *value == 1 || *value == 3)) {
return AE_BAD_PARAMETER;
}
return tps68470_pmic_common_handler(function, address, bits, value,
region_context,
tps68470_pmic_get_power,
ti_tps68470_regmap_update_bits,
power_table,
ARRAY_SIZE(power_table));
}
static int tps68470_pmic_opregion_probe(struct platform_device *pdev)
{
struct regmap *tps68470_regmap = dev_get_drvdata(pdev->dev.parent);
acpi_handle handle = ACPI_HANDLE(pdev->dev.parent);
struct device *dev = &pdev->dev;
struct tps68470_pmic_opregion *opregion;
acpi_status status;
if (!dev || !tps68470_regmap) {
dev_warn(dev, "dev or regmap is NULL\n");
return -EINVAL;
}
if (!handle) {
dev_warn(dev, "acpi handle is NULL\n");
return -ENODEV;
}
opregion = devm_kzalloc(dev, sizeof(*opregion), GFP_KERNEL);
if (!opregion)
return -ENOMEM;
mutex_init(&opregion->lock);
opregion->regmap = tps68470_regmap;
status = acpi_install_address_space_handler(handle,
TI_PMIC_POWER_OPREGION_ID,
tps68470_pmic_pwr_handler,
NULL, opregion);
if (ACPI_FAILURE(status))
goto out_mutex_destroy;
status = acpi_install_address_space_handler(handle,
TI_PMIC_VR_VAL_OPREGION_ID,
tps68470_pmic_vrval_handler,
NULL, opregion);
if (ACPI_FAILURE(status))
goto out_remove_power_handler;
status = acpi_install_address_space_handler(handle,
TI_PMIC_CLOCK_OPREGION_ID,
tps68470_pmic_clk_handler,
NULL, opregion);
if (ACPI_FAILURE(status))
goto out_remove_vr_val_handler;
status = acpi_install_address_space_handler(handle,
TI_PMIC_CLKFREQ_OPREGION_ID,
tps68470_pmic_cfreq_handler,
NULL, opregion);
if (ACPI_FAILURE(status))
goto out_remove_clk_handler;
return 0;
out_remove_clk_handler:
acpi_remove_address_space_handler(handle, TI_PMIC_CLOCK_OPREGION_ID,
tps68470_pmic_clk_handler);
out_remove_vr_val_handler:
acpi_remove_address_space_handler(handle, TI_PMIC_VR_VAL_OPREGION_ID,
tps68470_pmic_vrval_handler);
out_remove_power_handler:
acpi_remove_address_space_handler(handle, TI_PMIC_POWER_OPREGION_ID,
tps68470_pmic_pwr_handler);
out_mutex_destroy:
mutex_destroy(&opregion->lock);
return -ENODEV;
}
static struct platform_driver tps68470_pmic_opregion_driver = {
.probe = tps68470_pmic_opregion_probe,
.driver = {
.name = "tps68470_pmic_opregion",
},
};
builtin_platform_driver(tps68470_pmic_opregion_driver);
| linux-master | drivers/acpi/pmic/tps68470_pmic.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ARM APMT table support.
* Design document number: ARM DEN0117.
*
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES.
*
*/
#define pr_fmt(fmt) "ACPI: APMT: " fmt
#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include "init.h"
#define DEV_NAME "arm-cs-arch-pmu"
/* There can be up to 3 resources: page 0 and 1 address, and interrupt. */
#define DEV_MAX_RESOURCE_COUNT 3
/* Root pointer to the mapped APMT table */
static struct acpi_table_header *apmt_table;
static int __init apmt_init_resources(struct resource *res,
struct acpi_apmt_node *node)
{
int irq, trigger;
int num_res = 0;
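/* Register page 0 is always present; page 1 is added below only when the DUAL_PAGE flag is set. */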
res[num_res].start = node->base_address0;
res[num_res].end = node->base_address0 + SZ_4K - 1;
res[num_res].flags = IORESOURCE_MEM;
num_res++;
if (node->flags & ACPI_APMT_FLAGS_DUAL_PAGE) {
res[num_res].start = node->base_address1;
res[num_res].end = node->base_address1 + SZ_4K - 1;
res[num_res].flags = IORESOURCE_MEM;
num_res++;
}
if (node->ovflw_irq != 0) {
trigger = (node->ovflw_irq_flags & ACPI_APMT_OVFLW_IRQ_FLAGS_MODE);
trigger = (trigger == ACPI_APMT_OVFLW_IRQ_FLAGS_MODE_LEVEL) ?
ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
irq = acpi_register_gsi(NULL, node->ovflw_irq, trigger,
ACPI_ACTIVE_HIGH);
if (irq <= 0) {
pr_warn("APMT could not register gsi hwirq %d\n", irq);
return num_res;
}
res[num_res].start = irq;
res[num_res].end = irq;
res[num_res].flags = IORESOURCE_IRQ;
num_res++;
}
return num_res;
}
/**
* apmt_add_platform_device() - Allocate a platform device for APMT node
* @node: Pointer to device ACPI APMT node
* @fwnode: fwnode associated with the APMT node
*
* Returns: 0 on success, <0 on failure
*/
static int __init apmt_add_platform_device(struct acpi_apmt_node *node,
struct fwnode_handle *fwnode)
{
struct platform_device *pdev;
int ret, count;
struct resource res[DEV_MAX_RESOURCE_COUNT];
pdev = platform_device_alloc(DEV_NAME, PLATFORM_DEVID_AUTO);
if (!pdev)
return -ENOMEM;
memset(res, 0, sizeof(res));
count = apmt_init_resources(res, node);
ret = platform_device_add_resources(pdev, res, count);
if (ret)
goto dev_put;
/*
* Add a copy of APMT node pointer to platform_data to be used to
* retrieve APMT data information.
*/
ret = platform_device_add_data(pdev, &node, sizeof(node));
if (ret)
goto dev_put;
pdev->dev.fwnode = fwnode;
ret = platform_device_add(pdev);
if (ret)
goto dev_put;
return 0;
dev_put:
platform_device_put(pdev);
return ret;
}
static int __init apmt_init_platform_devices(void)
{
struct acpi_apmt_node *apmt_node;
struct acpi_table_apmt *apmt;
struct fwnode_handle *fwnode;
u64 offset, end;
int ret;
/*
* apmt_table and apmt both point to the start of APMT table, but
* have different struct types
*/
apmt = (struct acpi_table_apmt *)apmt_table;
offset = sizeof(*apmt);
end = apmt->header.length;
while (offset < end) {
apmt_node = ACPI_ADD_PTR(struct acpi_apmt_node, apmt,
offset);
fwnode = acpi_alloc_fwnode_static();
if (!fwnode)
return -ENOMEM;
ret = apmt_add_platform_device(apmt_node, fwnode);
if (ret) {
acpi_free_fwnode_static(fwnode);
return ret;
}
offset += apmt_node->length;
}
return 0;
}
void __init acpi_apmt_init(void)
{
acpi_status status;
int ret;
/**
* APMT table nodes will be used at runtime after the apmt init,
* so we don't need to call acpi_put_table() to release
* the APMT table mapping.
*/
status = acpi_get_table(ACPI_SIG_APMT, 0, &apmt_table);
if (ACPI_FAILURE(status)) {
if (status != AE_NOT_FOUND) {
const char *msg = acpi_format_exception(status);
pr_err("Failed to get APMT table, %s\n", msg);
}
return;
}
ret = apmt_init_platform_devices();
if (ret) {
pr_err("Failed to initialize APMT platform devices, ret: %d\n", ret);
acpi_put_table(apmt_table);
}
}
| linux-master | drivers/acpi/arm64/apmt.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/acpi.h>
#include "init.h"
void __init acpi_arm_init(void)
{
if (IS_ENABLED(CONFIG_ACPI_AGDI))
acpi_agdi_init();
if (IS_ENABLED(CONFIG_ACPI_APMT))
acpi_apmt_init();
if (IS_ENABLED(CONFIG_ACPI_IORT))
acpi_iort_init();
if (IS_ENABLED(CONFIG_ARM_AMBA))
acpi_amba_init();
}
| linux-master | drivers/acpi/arm64/init.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file implements handling of
* Arm Generic Diagnostic Dump and Reset Interface table (AGDI)
*
* Copyright (c) 2022, Ampere Computing LLC
*/
#define pr_fmt(fmt) "ACPI: AGDI: " fmt
#include <linux/acpi.h>
#include <linux/arm_sdei.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include "init.h"
struct agdi_data {
int sdei_event;
};
static int agdi_sdei_handler(u32 sdei_event, struct pt_regs *regs, void *arg)
{
nmi_panic(regs, "Arm Generic Diagnostic Dump and Reset SDEI event issued");
return 0;
}
static int agdi_sdei_probe(struct platform_device *pdev,
struct agdi_data *adata)
{
int err;
err = sdei_event_register(adata->sdei_event, agdi_sdei_handler, pdev);
if (err) {
dev_err(&pdev->dev, "Failed to register for SDEI event %d",
adata->sdei_event);
return err;
}
err = sdei_event_enable(adata->sdei_event);
if (err) {
sdei_event_unregister(adata->sdei_event);
dev_err(&pdev->dev, "Failed to enable event %d\n",
adata->sdei_event);
return err;
}
return 0;
}
static int agdi_probe(struct platform_device *pdev)
{
struct agdi_data *adata = dev_get_platdata(&pdev->dev);
if (!adata)
return -EINVAL;
return agdi_sdei_probe(pdev, adata);
}
static int agdi_remove(struct platform_device *pdev)
{
struct agdi_data *adata = dev_get_platdata(&pdev->dev);
int err, i;
err = sdei_event_disable(adata->sdei_event);
if (err) {
dev_err(&pdev->dev, "Failed to disable sdei-event #%d (%pe)\n",
adata->sdei_event, ERR_PTR(err));
return 0;
}
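/* sdei_event_unregister() returns -EINPROGRESS while the event is still being handled; retry a few times before giving up. */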
for (i = 0; i < 3; i++) {
err = sdei_event_unregister(adata->sdei_event);
if (err != -EINPROGRESS)
break;
schedule();
}
if (err)
dev_err(&pdev->dev, "Failed to unregister sdei-event #%d (%pe)\n",
adata->sdei_event, ERR_PTR(err));
return 0;
}
static struct platform_driver agdi_driver = {
.driver = {
.name = "agdi",
},
.probe = agdi_probe,
.remove = agdi_remove,
};
void __init acpi_agdi_init(void)
{
struct acpi_table_agdi *agdi_table;
struct agdi_data pdata;
struct platform_device *pdev;
acpi_status status;
status = acpi_get_table(ACPI_SIG_AGDI, 0,
(struct acpi_table_header **) &agdi_table);
if (ACPI_FAILURE(status))
return;
if (agdi_table->flags & ACPI_AGDI_SIGNALING_MODE) {
pr_warn("Interrupt signaling is not supported");
goto err_put_table;
}
pdata.sdei_event = agdi_table->sdei_event;
pdev = platform_device_register_data(NULL, "agdi", 0, &pdata, sizeof(pdata));
if (IS_ERR(pdev))
goto err_put_table;
if (platform_driver_register(&agdi_driver))
platform_device_unregister(pdev);
err_put_table:
acpi_put_table((struct acpi_table_header *)agdi_table);
}
| linux-master | drivers/acpi/arm64/agdi.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
void acpi_arch_dma_setup(struct device *dev)
{
int ret;
u64 end, mask;
u64 size = 0;
const struct bus_dma_region *map = NULL;
/*
* If @dev is expected to be DMA-capable then the bus code that created
* it should have initialised its dma_mask pointer by this point. For
* now, we'll continue the legacy behaviour of coercing it to the
* coherent mask if not, but we'll no longer do so quietly.
*/
if (!dev->dma_mask) {
dev_warn(dev, "DMA mask not set\n");
dev->dma_mask = &dev->coherent_dma_mask;
}
if (dev->coherent_dma_mask)
size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
else
size = 1ULL << 32;
ret = acpi_dma_get_range(dev, &map);
if (!ret && map) {
const struct bus_dma_region *r = map;
for (end = 0; r->size; r++) {
if (r->dma_start + r->size - 1 > end)
end = r->dma_start + r->size - 1;
}
size = end + 1;
dev->dma_range_map = map;
}
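/* No _DMA ranges were found; fall back to IORT for the device's addressing limit. */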
if (ret == -ENODEV)
ret = iort_dma_get_ranges(dev, &size);
if (!ret) {
/*
* Limit coherent and dma mask based on size retrieved from
* firmware.
*/
end = size - 1;
mask = DMA_BIT_MASK(ilog2(end) + 1);
dev->bus_dma_limit = end;
dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask);
*dev->dma_mask = min(*dev->dma_mask, mask);
}
}
| linux-master | drivers/acpi/arm64/dma.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016, Semihalf
* Author: Tomasz Nowicki <[email protected]>
*
* This file implements early detection/parsing of I/O mapping
* reported to OS through firmware via I/O Remapping Table (IORT)
* IORT document number: ARM DEN 0049A
*/
#define pr_fmt(fmt) "ACPI: IORT: " fmt
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>
#include "init.h"
#define IORT_TYPE_MASK(type) (1 << (type))
#define IORT_MSI_TYPE (1 << ACPI_IORT_NODE_ITS_GROUP)
#define IORT_IOMMU_TYPE ((1 << ACPI_IORT_NODE_SMMU) | \
(1 << ACPI_IORT_NODE_SMMU_V3))
struct iort_its_msi_chip {
struct list_head list;
struct fwnode_handle *fw_node;
phys_addr_t base_addr;
u32 translation_id;
};
struct iort_fwnode {
struct list_head list;
struct acpi_iort_node *iort_node;
struct fwnode_handle *fwnode;
};
static LIST_HEAD(iort_fwnode_list);
static DEFINE_SPINLOCK(iort_fwnode_lock);
/**
* iort_set_fwnode() - Create iort_fwnode and use it to register
* iommu data in the iort_fwnode_list
*
* @iort_node: IORT table node associated with the IOMMU
* @fwnode: fwnode associated with the IORT node
*
* Returns: 0 on success
* <0 on failure
*/
static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
struct fwnode_handle *fwnode)
{
struct iort_fwnode *np;
np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);
if (WARN_ON(!np))
return -ENOMEM;
INIT_LIST_HEAD(&np->list);
np->iort_node = iort_node;
np->fwnode = fwnode;
spin_lock(&iort_fwnode_lock);
list_add_tail(&np->list, &iort_fwnode_list);
spin_unlock(&iort_fwnode_lock);
return 0;
}
/**
* iort_get_fwnode() - Retrieve fwnode associated with an IORT node
*
* @node: IORT table node to be looked-up
*
* Returns: fwnode_handle pointer on success, NULL on failure
*/
static inline struct fwnode_handle *iort_get_fwnode(
struct acpi_iort_node *node)
{
struct iort_fwnode *curr;
struct fwnode_handle *fwnode = NULL;
spin_lock(&iort_fwnode_lock);
list_for_each_entry(curr, &iort_fwnode_list, list) {
if (curr->iort_node == node) {
fwnode = curr->fwnode;
break;
}
}
spin_unlock(&iort_fwnode_lock);
return fwnode;
}
/**
* iort_delete_fwnode() - Delete fwnode associated with an IORT node
*
* @node: IORT table node associated with fwnode to delete
*/
static inline void iort_delete_fwnode(struct acpi_iort_node *node)
{
struct iort_fwnode *curr, *tmp;
spin_lock(&iort_fwnode_lock);
list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
if (curr->iort_node == node) {
list_del(&curr->list);
kfree(curr);
break;
}
}
spin_unlock(&iort_fwnode_lock);
}
/**
* iort_get_iort_node() - Retrieve iort_node associated with an fwnode
*
* @fwnode: fwnode associated with device to be looked-up
*
* Returns: iort_node pointer on success, NULL on failure
*/
static inline struct acpi_iort_node *iort_get_iort_node(
struct fwnode_handle *fwnode)
{
struct iort_fwnode *curr;
struct acpi_iort_node *iort_node = NULL;
spin_lock(&iort_fwnode_lock);
list_for_each_entry(curr, &iort_fwnode_list, list) {
if (curr->fwnode == fwnode) {
iort_node = curr->iort_node;
break;
}
}
spin_unlock(&iort_fwnode_lock);
return iort_node;
}
typedef acpi_status (*iort_find_node_callback)
(struct acpi_iort_node *node, void *context);
/* Root pointer to the mapped IORT table */
static struct acpi_table_header *iort_table;
static LIST_HEAD(iort_msi_chip_list);
static DEFINE_SPINLOCK(iort_msi_chip_lock);
/**
* iort_register_domain_token() - register domain token along with related
* ITS ID and base address to the list from where we can get it back later on.
* @trans_id: ITS ID.
* @base: ITS base address.
* @fw_node: Domain token.
*
* Returns: 0 on success, -ENOMEM if no memory when allocating list element
*/
int iort_register_domain_token(int trans_id, phys_addr_t base,
struct fwnode_handle *fw_node)
{
struct iort_its_msi_chip *its_msi_chip;
its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL);
if (!its_msi_chip)
return -ENOMEM;
its_msi_chip->fw_node = fw_node;
its_msi_chip->translation_id = trans_id;
its_msi_chip->base_addr = base;
spin_lock(&iort_msi_chip_lock);
list_add(&its_msi_chip->list, &iort_msi_chip_list);
spin_unlock(&iort_msi_chip_lock);
return 0;
}
/**
* iort_deregister_domain_token() - Deregister domain token based on ITS ID
* @trans_id: ITS ID.
*
* Returns: none.
*/
void iort_deregister_domain_token(int trans_id)
{
struct iort_its_msi_chip *its_msi_chip, *t;
spin_lock(&iort_msi_chip_lock);
list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
if (its_msi_chip->translation_id == trans_id) {
list_del(&its_msi_chip->list);
kfree(its_msi_chip);
break;
}
}
spin_unlock(&iort_msi_chip_lock);
}
/**
* iort_find_domain_token() - Find domain token based on given ITS ID
* @trans_id: ITS ID.
*
* Returns: domain token when find on the list, NULL otherwise
*/
struct fwnode_handle *iort_find_domain_token(int trans_id)
{
struct fwnode_handle *fw_node = NULL;
struct iort_its_msi_chip *its_msi_chip;
spin_lock(&iort_msi_chip_lock);
list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
if (its_msi_chip->translation_id == trans_id) {
fw_node = its_msi_chip->fw_node;
break;
}
}
spin_unlock(&iort_msi_chip_lock);
return fw_node;
}
static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
iort_find_node_callback callback,
void *context)
{
struct acpi_iort_node *iort_node, *iort_end;
struct acpi_table_iort *iort;
int i;
if (!iort_table)
return NULL;
/* Get the first IORT node */
iort = (struct acpi_table_iort *)iort_table;
iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
iort->node_offset);
iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
iort_table->length);
for (i = 0; i < iort->node_count; i++) {
if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
"IORT node pointer overflows, bad table!\n"))
return NULL;
if (iort_node->type == type &&
ACPI_SUCCESS(callback(iort_node, context)))
return iort_node;
iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
iort_node->length);
}
return NULL;
}
static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
void *context)
{
struct device *dev = context;
acpi_status status = AE_NOT_FOUND;
if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_device *adev;
struct acpi_iort_named_component *ncomp;
struct device *nc_dev = dev;
/*
* Walk the device tree to find a device with an
* ACPI companion; there is no point in scanning
* IORT for a device matching a named component if
* the device does not have an ACPI companion to
* start with.
*/
do {
adev = ACPI_COMPANION(nc_dev);
if (adev)
break;
nc_dev = nc_dev->parent;
} while (nc_dev);
if (!adev)
goto out;
status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
if (ACPI_FAILURE(status)) {
dev_warn(nc_dev, "Can't get device full path name\n");
goto out;
}
ncomp = (struct acpi_iort_named_component *)node->node_data;
status = !strcmp(ncomp->device_name, buf.pointer) ?
AE_OK : AE_NOT_FOUND;
acpi_os_free(buf.pointer);
} else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
struct acpi_iort_root_complex *pci_rc;
struct pci_bus *bus;
bus = to_pci_bus(dev);
pci_rc = (struct acpi_iort_root_complex *)node->node_data;
/*
* It is assumed that PCI segment numbers map one-to-one
* with root complexes. Each segment number can represent only
* one root complex.
*/
status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
AE_OK : AE_NOT_FOUND;
}
out:
return status;
}
static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
u32 *rid_out, bool check_overlap)
{
/* Single mapping does not care for input id */
if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
*rid_out = map->output_base;
return 0;
}
pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
map, type);
return -ENXIO;
}
if (rid_in < map->input_base ||
(rid_in > map->input_base + map->id_count))
return -ENXIO;
if (check_overlap) {
/*
* We already found a mapping for this input ID at the end of
* another region. If it coincides with the start of this
* region, we assume the prior match was due to the off-by-1
* issue mentioned below, and allow it to be superseded.
* Otherwise, things are *really* broken, and we just disregard
* duplicate matches entirely to retain compatibility.
*/
pr_err(FW_BUG "[map %p] conflicting mapping for input ID 0x%x\n",
map, rid_in);
if (rid_in != map->input_base)
return -ENXIO;
pr_err(FW_BUG "applying workaround.\n");
}
*rid_out = map->output_base + (rid_in - map->input_base);
/*
* Due to confusion regarding the meaning of the id_count field (which
* carries the number of IDs *minus 1*), we may have to disregard this
* match if it is at the end of the range, and overlaps with the start
* of another one.
*/
if (map->id_count > 0 && rid_in == map->input_base + map->id_count)
return -EAGAIN;
return 0;
}
static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
u32 *id_out, int index)
{
struct acpi_iort_node *parent;
struct acpi_iort_id_mapping *map;
if (!node->mapping_offset || !node->mapping_count ||
index >= node->mapping_count)
return NULL;
map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
node->mapping_offset + index * sizeof(*map));
/* Firmware bug! */
if (!map->output_reference) {
pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
node, node->type);
return NULL;
}
parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
map->output_reference);
if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX ||
node->type == ACPI_IORT_NODE_SMMU_V3 ||
node->type == ACPI_IORT_NODE_PMCG) {
*id_out = map->output_base;
return parent;
}
}
return NULL;
}
#ifndef ACPI_IORT_SMMU_V3_DEVICEID_VALID
#define ACPI_IORT_SMMU_V3_DEVICEID_VALID (1 << 4)
#endif
static int iort_get_id_mapping_index(struct acpi_iort_node *node)
{
struct acpi_iort_smmu_v3 *smmu;
struct acpi_iort_pmcg *pmcg;
switch (node->type) {
case ACPI_IORT_NODE_SMMU_V3:
/*
* SMMUv3 dev ID mapping index was introduced in revision 1
* table, not available in revision 0
*/
if (node->revision < 1)
return -EINVAL;
smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
/*
* Until IORT E.e (node rev. 5), the ID mapping index was
* defined to be valid unless all interrupts are GSIV-based.
*/
if (node->revision < 5) {
if (smmu->event_gsiv && smmu->pri_gsiv &&
smmu->gerr_gsiv && smmu->sync_gsiv)
return -EINVAL;
} else if (!(smmu->flags & ACPI_IORT_SMMU_V3_DEVICEID_VALID)) {
return -EINVAL;
}
if (smmu->id_mapping_index >= node->mapping_count) {
pr_err(FW_BUG "[node %p type %d] ID mapping index overflows valid mappings\n",
node, node->type);
return -EINVAL;
}
return smmu->id_mapping_index;
case ACPI_IORT_NODE_PMCG:
pmcg = (struct acpi_iort_pmcg *)node->node_data;
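/* PMCGs that signal overflow through a GSIV (or that have no mappings) have no MSI device ID mapping; otherwise mapping index 0 is used. */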
if (pmcg->overflow_gsiv || node->mapping_count == 0)
return -EINVAL;
return 0;
default:
return -EINVAL;
}
}
static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
u32 id_in, u32 *id_out,
u8 type_mask)
{
u32 id = id_in;
/* Parse the ID mapping tree to find specified node type */
while (node) {
struct acpi_iort_id_mapping *map;
int i, index, rc = 0;
u32 out_ref = 0, map_id = id;
if (IORT_TYPE_MASK(node->type) & type_mask) {
if (id_out)
*id_out = id;
return node;
}
if (!node->mapping_offset || !node->mapping_count)
goto fail_map;
map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
node->mapping_offset);
/* Firmware bug! */
if (!map->output_reference) {
pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
node, node->type);
goto fail_map;
}
/*
* Get the special ID mapping index (if any) and skip its
* associated ID map to prevent erroneous multi-stage
* IORT ID translations.
*/
index = iort_get_id_mapping_index(node);
/* Do the ID translation */
for (i = 0; i < node->mapping_count; i++, map++) {
/* if it is special mapping index, skip it */
if (i == index)
continue;
rc = iort_id_map(map, node->type, map_id, &id, out_ref);
if (!rc)
break;
if (rc == -EAGAIN)
out_ref = map->output_reference;
}
if (i == node->mapping_count && !out_ref)
goto fail_map;
node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
rc ? out_ref : map->output_reference);
}
fail_map:
/* Map input ID to output ID unchanged on mapping failure */
if (id_out)
*id_out = id_in;
return NULL;
}
static struct acpi_iort_node *iort_node_map_platform_id(
struct acpi_iort_node *node, u32 *id_out, u8 type_mask,
int index)
{
struct acpi_iort_node *parent;
u32 id;
/* step 1: retrieve the initial dev id */
parent = iort_node_get_id(node, &id, index);
if (!parent)
return NULL;
/*
* optional step 2: map the initial dev id if its parent is not
* the target type we want, map it again for the use cases such
* as NC (named component) -> SMMU -> ITS. If the type is matched,
* return the initial dev id and its parent pointer directly.
*/
if (!(IORT_TYPE_MASK(parent->type) & type_mask))
parent = iort_node_map_id(parent, id, id_out, type_mask);
else
if (id_out)
*id_out = id;
return parent;
}
static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
{
struct pci_bus *pbus;
if (!dev_is_pci(dev)) {
struct acpi_iort_node *node;
/*
* scan iort_fwnode_list to see if it's an iort platform
* device (such as SMMU, PMCG); its iort node was already cached
* and associated with fwnode when iort platform devices
* were initialized.
*/
node = iort_get_iort_node(dev->fwnode);
if (node)
return node;
/*
* if not, then it should be a platform device defined in
* DSDT/SSDT (with Named Component node in IORT)
*/
return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
iort_match_node_callback, dev);
}
pbus = to_pci_dev(dev)->bus;
return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
iort_match_node_callback, &pbus->dev);
}
/**
* iort_msi_map_id() - Map a MSI input ID for a device
* @dev: The device for which the mapping is to be done.
* @input_id: The device input ID.
*
* Returns: mapped MSI ID on success, input ID otherwise
*/
u32 iort_msi_map_id(struct device *dev, u32 input_id)
{
struct acpi_iort_node *node;
u32 dev_id;
node = iort_find_dev_node(dev);
if (!node)
return input_id;
iort_node_map_id(node, input_id, &dev_id, IORT_MSI_TYPE);
return dev_id;
}
/**
* iort_pmsi_get_dev_id() - Get the device id for a device
* @dev: The device for which the mapping is to be done.
* @dev_id: The device ID found.
*
* Returns: 0 for successful find a dev id, -ENODEV on error
*/
int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
{
int i, index;
struct acpi_iort_node *node;
node = iort_find_dev_node(dev);
if (!node)
return -ENODEV;
index = iort_get_id_mapping_index(node);
/* if there is a valid index, go get the dev_id directly */
if (index >= 0) {
if (iort_node_get_id(node, dev_id, index))
return 0;
} else {
for (i = 0; i < node->mapping_count; i++) {
if (iort_node_map_platform_id(node, dev_id,
IORT_MSI_TYPE, i))
return 0;
}
}
return -ENODEV;
}
static int __maybe_unused iort_find_its_base(u32 its_id, phys_addr_t *base)
{
struct iort_its_msi_chip *its_msi_chip;
int ret = -ENODEV;
spin_lock(&iort_msi_chip_lock);
list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
if (its_msi_chip->translation_id == its_id) {
*base = its_msi_chip->base_addr;
ret = 0;
break;
}
}
spin_unlock(&iort_msi_chip_lock);
return ret;
}
/**
* iort_dev_find_its_id() - Find the ITS identifier for a device
* @dev: The device.
* @id: Device's ID
* @idx: Index of the ITS identifier list.
* @its_id: ITS identifier.
*
* Returns: 0 on success, appropriate error value otherwise
*/
static int iort_dev_find_its_id(struct device *dev, u32 id,
unsigned int idx, int *its_id)
{
struct acpi_iort_its_group *its;
struct acpi_iort_node *node;
node = iort_find_dev_node(dev);
if (!node)
return -ENXIO;
node = iort_node_map_id(node, id, NULL, IORT_MSI_TYPE);
if (!node)
return -ENXIO;
/* Move to ITS specific data */
its = (struct acpi_iort_its_group *)node->node_data;
if (idx >= its->its_count) {
dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
idx, its->its_count);
return -ENXIO;
}
*its_id = its->identifiers[idx];
return 0;
}
/**
* iort_get_device_domain() - Find MSI domain related to a device
* @dev: The device.
* @id: Requester ID for the device.
* @bus_token: irq domain bus token.
*
* Returns: the MSI domain for this device, NULL otherwise
*/
struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
enum irq_domain_bus_token bus_token)
{
struct fwnode_handle *handle;
int its_id;
if (iort_dev_find_its_id(dev, id, 0, &its_id))
return NULL;
handle = iort_find_domain_token(its_id);
if (!handle)
return NULL;
return irq_find_matching_fwnode(handle, bus_token);
}
static void iort_set_device_domain(struct device *dev,
struct acpi_iort_node *node)
{
struct acpi_iort_its_group *its;
struct acpi_iort_node *msi_parent;
struct acpi_iort_id_mapping *map;
struct fwnode_handle *iort_fwnode;
struct irq_domain *domain;
int index;
index = iort_get_id_mapping_index(node);
if (index < 0)
return;
map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
node->mapping_offset + index * sizeof(*map));
/* Firmware bug! */
if (!map->output_reference ||
!(map->flags & ACPI_IORT_ID_SINGLE_MAPPING)) {
pr_err(FW_BUG "[node %p type %d] Invalid MSI mapping\n",
node, node->type);
return;
}
msi_parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
map->output_reference);
if (!msi_parent || msi_parent->type != ACPI_IORT_NODE_ITS_GROUP)
return;
/* Move to ITS specific data */
its = (struct acpi_iort_its_group *)msi_parent->node_data;
iort_fwnode = iort_find_domain_token(its->identifiers[0]);
if (!iort_fwnode)
return;
domain = irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
if (domain)
dev_set_msi_domain(dev, domain);
}
/**
* iort_get_platform_device_domain() - Find MSI domain related to a
* platform device
* @dev: the dev pointer associated with the platform device
*
* Returns: the MSI domain for this device, NULL otherwise
*/
static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
{
struct acpi_iort_node *node, *msi_parent = NULL;
struct fwnode_handle *iort_fwnode;
struct acpi_iort_its_group *its;
int i;
/* find its associated iort node */
node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
iort_match_node_callback, dev);
if (!node)
return NULL;
/* then find its msi parent node */
for (i = 0; i < node->mapping_count; i++) {
msi_parent = iort_node_map_platform_id(node, NULL,
IORT_MSI_TYPE, i);
if (msi_parent)
break;
}
if (!msi_parent)
return NULL;
/* Move to ITS specific data */
its = (struct acpi_iort_its_group *)msi_parent->node_data;
iort_fwnode = iort_find_domain_token(its->identifiers[0]);
if (!iort_fwnode)
return NULL;
return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
}
void acpi_configure_pmsi_domain(struct device *dev)
{
struct irq_domain *msi_domain;
msi_domain = iort_get_platform_device_domain(dev);
if (msi_domain)
dev_set_msi_domain(dev, msi_domain);
}
#ifdef CONFIG_IOMMU_API
static void iort_rmr_free(struct device *dev,
struct iommu_resv_region *region)
{
struct iommu_iort_rmr_data *rmr_data;
rmr_data = container_of(region, struct iommu_iort_rmr_data, rr);
kfree(rmr_data->sids);
kfree(rmr_data);
}
static struct iommu_iort_rmr_data *iort_rmr_alloc(
struct acpi_iort_rmr_desc *rmr_desc,
int prot, enum iommu_resv_type type,
u32 *sids, u32 num_sids)
{
struct iommu_iort_rmr_data *rmr_data;
struct iommu_resv_region *region;
u32 *sids_copy;
u64 addr = rmr_desc->base_address, size = rmr_desc->length;
rmr_data = kmalloc(sizeof(*rmr_data), GFP_KERNEL);
if (!rmr_data)
return NULL;
/* Create a copy of SIDs array to associate with this rmr_data */
sids_copy = kmemdup(sids, num_sids * sizeof(*sids), GFP_KERNEL);
if (!sids_copy) {
kfree(rmr_data);
return NULL;
}
rmr_data->sids = sids_copy;
rmr_data->num_sids = num_sids;
if (!IS_ALIGNED(addr, SZ_64K) || !IS_ALIGNED(size, SZ_64K)) {
/* PAGE align base addr and size */
addr &= PAGE_MASK;
size = PAGE_ALIGN(size + offset_in_page(rmr_desc->base_address));
pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] not aligned to 64K, continue with [0x%llx - 0x%llx]\n",
rmr_desc->base_address,
rmr_desc->base_address + rmr_desc->length - 1,
addr, addr + size - 1);
}
region = &rmr_data->rr;
INIT_LIST_HEAD(&region->list);
region->start = addr;
region->length = size;
region->prot = prot;
region->type = type;
region->free = iort_rmr_free;
return rmr_data;
}
static void iort_rmr_desc_check_overlap(struct acpi_iort_rmr_desc *desc,
u32 count)
{
int i, j;
for (i = 0; i < count; i++) {
u64 end, start = desc[i].base_address, length = desc[i].length;
if (!length) {
pr_err(FW_BUG "RMR descriptor[0x%llx] with zero length, continue anyway\n",
start);
continue;
}
end = start + length - 1;
/* Check for address overlap */
for (j = i + 1; j < count; j++) {
u64 e_start = desc[j].base_address;
u64 e_end = e_start + desc[j].length - 1;
if (start <= e_end && end >= e_start)
pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] overlaps, continue anyway\n",
start, end);
}
}
}
/*
* Please note, we will keep the already allocated RMR reserve
* regions in case of a memory allocation failure.
*/
static void iort_get_rmrs(struct acpi_iort_node *node,
struct acpi_iort_node *smmu,
u32 *sids, u32 num_sids,
struct list_head *head)
{
struct acpi_iort_rmr *rmr = (struct acpi_iort_rmr *)node->node_data;
struct acpi_iort_rmr_desc *rmr_desc;
int i;
rmr_desc = ACPI_ADD_PTR(struct acpi_iort_rmr_desc, node,
rmr->rmr_offset);
iort_rmr_desc_check_overlap(rmr_desc, rmr->rmr_count);
for (i = 0; i < rmr->rmr_count; i++, rmr_desc++) {
struct iommu_iort_rmr_data *rmr_data;
enum iommu_resv_type type;
int prot = IOMMU_READ | IOMMU_WRITE;
if (rmr->flags & ACPI_IORT_RMR_REMAP_PERMITTED)
type = IOMMU_RESV_DIRECT_RELAXABLE;
else
type = IOMMU_RESV_DIRECT;
if (rmr->flags & ACPI_IORT_RMR_ACCESS_PRIVILEGE)
prot |= IOMMU_PRIV;
/* Attributes 0x00 - 0x03 represent device memory */
if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) <=
ACPI_IORT_RMR_ATTR_DEVICE_GRE)
prot |= IOMMU_MMIO;
else if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) ==
ACPI_IORT_RMR_ATTR_NORMAL_IWB_OWB)
prot |= IOMMU_CACHE;
rmr_data = iort_rmr_alloc(rmr_desc, prot, type,
sids, num_sids);
if (!rmr_data)
return;
list_add_tail(&rmr_data->rr.list, head);
}
}
static u32 *iort_rmr_alloc_sids(u32 *sids, u32 count, u32 id_start,
u32 new_count)
{
u32 *new_sids;
u32 total_count = count + new_count;
int i;
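/* Grow the array and append the new contiguous range of stream IDs. */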
new_sids = krealloc_array(sids, count + new_count,
sizeof(*new_sids), GFP_KERNEL);
if (!new_sids)
return NULL;
for (i = count; i < total_count; i++)
new_sids[i] = id_start++;
return new_sids;
}
static bool iort_rmr_has_dev(struct device *dev, u32 id_start,
u32 id_count)
{
int i;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
/*
* Make sure the kernel has preserved the boot firmware PCIe
* configuration. This is required to ensure that the RMR PCIe
* StreamIDs are still valid (Refer: ARM DEN 0049E.d Section 3.1.1.5).
*/
if (dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(dev);
struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);
if (!host->preserve_config)
return false;
}
for (i = 0; i < fwspec->num_ids; i++) {
if (fwspec->ids[i] >= id_start &&
fwspec->ids[i] <= id_start + id_count)
return true;
}
return false;
}
static void iort_node_get_rmr_info(struct acpi_iort_node *node,
struct acpi_iort_node *iommu,
struct device *dev, struct list_head *head)
{
struct acpi_iort_node *smmu = NULL;
struct acpi_iort_rmr *rmr;
struct acpi_iort_id_mapping *map;
u32 *sids = NULL;
u32 num_sids = 0;
int i;
if (!node->mapping_offset || !node->mapping_count) {
pr_err(FW_BUG "Invalid ID mapping, skipping RMR node %p\n",
node);
return;
}
rmr = (struct acpi_iort_rmr *)node->node_data;
if (!rmr->rmr_offset || !rmr->rmr_count)
return;
map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
node->mapping_offset);
/*
* Go through the ID mappings and see if we have a match for SMMU
* and dev (if !NULL). If found, get the sids for the Node.
* Please note, id_count is equal to the number of IDs in the
* range minus one.
*/
for (i = 0; i < node->mapping_count; i++, map++) {
struct acpi_iort_node *parent;
parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
map->output_reference);
if (parent != iommu)
continue;
/* If dev is valid, check RMR node corresponds to the dev SID */
if (dev && !iort_rmr_has_dev(dev, map->output_base,
map->id_count))
continue;
/* Retrieve SIDs associated with the Node. */
sids = iort_rmr_alloc_sids(sids, num_sids, map->output_base,
map->id_count + 1);
if (!sids)
return;
num_sids += map->id_count + 1;
}
if (!sids)
return;
iort_get_rmrs(node, smmu, sids, num_sids, head);
kfree(sids);
}
static void iort_find_rmrs(struct acpi_iort_node *iommu, struct device *dev,
struct list_head *head)
{
struct acpi_table_iort *iort;
struct acpi_iort_node *iort_node, *iort_end;
int i;
/* Only supports ARM DEN 0049E.d onwards */
if (iort_table->revision < 5)
return;
iort = (struct acpi_table_iort *)iort_table;
iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
iort->node_offset);
iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
iort_table->length);
for (i = 0; i < iort->node_count; i++) {
if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
"IORT node pointer overflows, bad table!\n"))
return;
if (iort_node->type == ACPI_IORT_NODE_RMR)
iort_node_get_rmr_info(iort_node, iommu, dev, head);
iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
iort_node->length);
}
}
/*
* Populate the RMR list associated with a given IOMMU and dev(if provided).
* If dev is NULL, the function populates all the RMRs associated with the
* given IOMMU.
*/
static void iort_iommu_rmr_get_resv_regions(struct fwnode_handle *iommu_fwnode,
struct device *dev,
struct list_head *head)
{
struct acpi_iort_node *iommu;
iommu = iort_get_iort_node(iommu_fwnode);
if (!iommu)
return;
iort_find_rmrs(iommu, dev, head);
}
static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
{
struct acpi_iort_node *iommu;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
iommu = iort_get_iort_node(fwspec->iommu_fwnode);
if (iommu && (iommu->type == ACPI_IORT_NODE_SMMU_V3)) {
struct acpi_iort_smmu_v3 *smmu;
smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data;
if (smmu->model == ACPI_IORT_SMMU_V3_HISILICON_HI161X)
return iommu;
}
return NULL;
}
/*
* Retrieve platform specific HW MSI reserve regions.
* The ITS interrupt translation spaces (ITS_base + SZ_64K, SZ_64K)
* associated with the device are the HW MSI reserved regions.
*/
static void iort_iommu_msi_get_resv_regions(struct device *dev,
struct list_head *head)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct acpi_iort_its_group *its;
struct acpi_iort_node *iommu_node, *its_node = NULL;
int i;
iommu_node = iort_get_msi_resv_iommu(dev);
if (!iommu_node)
return;
/*
* Current logic to reserve ITS regions relies on HW topologies
* where a given PCI or named component maps its IDs to only one
* ITS group; if a PCI or named component can map its IDs to
* different ITS groups through IORT mappings this function has
* to be reworked to ensure we reserve regions for all ITS groups
* a given PCI or named component may map IDs to.
*/
for (i = 0; i < fwspec->num_ids; i++) {
its_node = iort_node_map_id(iommu_node,
fwspec->ids[i],
NULL, IORT_MSI_TYPE);
if (its_node)
break;
}
if (!its_node)
return;
/* Move to ITS specific data */
its = (struct acpi_iort_its_group *)its_node->node_data;
for (i = 0; i < its->its_count; i++) {
phys_addr_t base;
if (!iort_find_its_base(its->identifiers[i], &base)) {
int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
struct iommu_resv_region *region;
region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
prot, IOMMU_RESV_MSI,
GFP_KERNEL);
if (region)
list_add_tail(&region->list, head);
}
}
}
/**
* iort_iommu_get_resv_regions - Generic helper to retrieve reserved regions.
* @dev: Device from iommu_get_resv_regions()
* @head: Reserved region list from iommu_get_resv_regions()
*/
void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
iort_iommu_msi_get_resv_regions(dev, head);
iort_iommu_rmr_get_resv_regions(fwspec->iommu_fwnode, dev, head);
}
/**
* iort_get_rmr_sids - Retrieve IORT RMR node reserved regions with
* associated StreamIDs information.
* @iommu_fwnode: fwnode associated with IOMMU
* @head: Reserved region list
*/
void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode,
struct list_head *head)
{
iort_iommu_rmr_get_resv_regions(iommu_fwnode, NULL, head);
}
EXPORT_SYMBOL_GPL(iort_get_rmr_sids);
/**
* iort_put_rmr_sids - Free memory allocated for RMR reserved regions.
* @iommu_fwnode: fwnode associated with IOMMU
* @head: Reserved region list
*/
void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode,
struct list_head *head)
{
struct iommu_resv_region *entry, *next;
list_for_each_entry_safe(entry, next, head, list)
entry->free(NULL, entry);
}
EXPORT_SYMBOL_GPL(iort_put_rmr_sids);
static inline bool iort_iommu_driver_enabled(u8 type)
{
switch (type) {
case ACPI_IORT_NODE_SMMU_V3:
return IS_ENABLED(CONFIG_ARM_SMMU_V3);
case ACPI_IORT_NODE_SMMU:
return IS_ENABLED(CONFIG_ARM_SMMU);
default:
pr_warn("IORT node type %u does not describe an SMMU\n", type);
return false;
}
}
static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node)
{
struct acpi_iort_root_complex *pci_rc;
pci_rc = (struct acpi_iort_root_complex *)node->node_data;
return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED;
}
static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
u32 streamid)
{
const struct iommu_ops *ops;
struct fwnode_handle *iort_fwnode;
if (!node)
return -ENODEV;
iort_fwnode = iort_get_fwnode(node);
if (!iort_fwnode)
return -ENODEV;
/*
* If the ops look-up fails, this means that either
* the SMMU drivers have not been probed yet or that
* the SMMU drivers are not built in the kernel;
* Depending on whether the SMMU drivers are built-in
* in the kernel or not, defer the IOMMU configuration
* or just abort it.
*/
ops = iommu_ops_from_fwnode(iort_fwnode);
if (!ops)
return iort_iommu_driver_enabled(node->type) ?
-EPROBE_DEFER : -ENODEV;
return acpi_iommu_fwspec_init(dev, streamid, iort_fwnode, ops);
}
struct iort_pci_alias_info {
struct device *dev;
struct acpi_iort_node *node;
};
static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
{
struct iort_pci_alias_info *info = data;
struct acpi_iort_node *parent;
u32 streamid;
parent = iort_node_map_id(info->node, alias, &streamid,
IORT_IOMMU_TYPE);
return iort_iommu_xlate(info->dev, parent, streamid);
}
static void iort_named_component_init(struct device *dev,
struct acpi_iort_node *node)
{
struct property_entry props[3] = {};
struct acpi_iort_named_component *nc;
nc = (struct acpi_iort_named_component *)node->node_data;
props[0] = PROPERTY_ENTRY_U32("pasid-num-bits",
FIELD_GET(ACPI_IORT_NC_PASID_BITS,
nc->node_flags));
if (nc->node_flags & ACPI_IORT_NC_STALL_SUPPORTED)
props[1] = PROPERTY_ENTRY_BOOL("dma-can-stall");
if (device_create_managed_software_node(dev, props, NULL))
dev_warn(dev, "Could not add device properties\n");
}
static int iort_nc_iommu_map(struct device *dev, struct acpi_iort_node *node)
{
struct acpi_iort_node *parent;
int err = -ENODEV, i = 0;
u32 streamid = 0;
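/* Walk every output ID mapping of the named component and translate each one to its IOMMU parent. */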
do {
parent = iort_node_map_platform_id(node, &streamid,
IORT_IOMMU_TYPE,
i++);
if (parent)
err = iort_iommu_xlate(dev, parent, streamid);
} while (parent && !err);
return err;
}
static int iort_nc_iommu_map_id(struct device *dev,
struct acpi_iort_node *node,
const u32 *in_id)
{
struct acpi_iort_node *parent;
u32 streamid;
parent = iort_node_map_id(node, *in_id, &streamid, IORT_IOMMU_TYPE);
if (parent)
return iort_iommu_xlate(dev, parent, streamid);
return -ENODEV;
}
/**
* iort_iommu_configure_id - Set-up IOMMU configuration for a device.
*
* @dev: device to configure
* @id_in: optional input id const value pointer
*
* Returns: 0 on success, <0 on failure
*/
int iort_iommu_configure_id(struct device *dev, const u32 *id_in)
{
struct acpi_iort_node *node;
int err = -ENODEV;
if (dev_is_pci(dev)) {
struct iommu_fwspec *fwspec;
struct pci_bus *bus = to_pci_dev(dev)->bus;
struct iort_pci_alias_info info = { .dev = dev };
node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
iort_match_node_callback, &bus->dev);
if (!node)
return -ENODEV;
info.node = node;
err = pci_for_each_dma_alias(to_pci_dev(dev),
iort_pci_iommu_init, &info);
fwspec = dev_iommu_fwspec_get(dev);
if (fwspec && iort_pci_rc_supports_ats(node))
fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
} else {
node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
iort_match_node_callback, dev);
if (!node)
return -ENODEV;
err = id_in ? iort_nc_iommu_map_id(dev, node, id_in) :
iort_nc_iommu_map(dev, node);
if (!err)
iort_named_component_init(dev, node);
}
return err;
}
#else
void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
{ }
int iort_iommu_configure_id(struct device *dev, const u32 *input_id)
{ return -ENODEV; }
#endif
static int nc_dma_get_range(struct device *dev, u64 *size)
{
struct acpi_iort_node *node;
struct acpi_iort_named_component *ncomp;
node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
iort_match_node_callback, dev);
if (!node)
return -ENODEV;
ncomp = (struct acpi_iort_named_component *)node->node_data;
if (!ncomp->memory_address_limit) {
pr_warn(FW_BUG "Named component missing memory address limit\n");
return -EINVAL;
}
*size = ncomp->memory_address_limit >= 64 ? U64_MAX :
1ULL<<ncomp->memory_address_limit;
return 0;
}
static int rc_dma_get_range(struct device *dev, u64 *size)
{
struct acpi_iort_node *node;
struct acpi_iort_root_complex *rc;
struct pci_bus *pbus = to_pci_dev(dev)->bus;
node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
iort_match_node_callback, &pbus->dev);
if (!node || node->revision < 1)
return -ENODEV;
rc = (struct acpi_iort_root_complex *)node->node_data;
if (!rc->memory_address_limit) {
pr_warn(FW_BUG "Root complex missing memory address limit\n");
return -EINVAL;
}
*size = rc->memory_address_limit >= 64 ? U64_MAX :
1ULL<<rc->memory_address_limit;
return 0;
}
/**
* iort_dma_get_ranges() - Look up DMA addressing limit for the device
* @dev: device to lookup
* @size: DMA range size result pointer
*
* Return: 0 on success, an error otherwise.
*/
int iort_dma_get_ranges(struct device *dev, u64 *size)
{
if (dev_is_pci(dev))
return rc_dma_get_range(dev, size);
else
return nc_dma_get_range(dev, size);
}
static void __init acpi_iort_register_irq(int hwirq, const char *name,
int trigger,
struct resource *res)
{
int irq = acpi_register_gsi(NULL, hwirq, trigger,
ACPI_ACTIVE_HIGH);
if (irq <= 0) {
pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
name);
return;
}
res->start = irq;
res->end = irq;
res->flags = IORESOURCE_IRQ;
res->name = name;
}
static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
{
struct acpi_iort_smmu_v3 *smmu;
/* Always present mem resource */
int num_res = 1;
/* Retrieve SMMUv3 specific data */
smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
if (smmu->event_gsiv)
num_res++;
if (smmu->pri_gsiv)
num_res++;
if (smmu->gerr_gsiv)
num_res++;
if (smmu->sync_gsiv)
num_res++;
return num_res;
}
static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu)
{
/*
* Cavium ThunderX2 implementation doesn't support unique
* irq lines. Use a single irq line for all the SMMUv3 interrupts.
*/
if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
return false;
/*
* ThunderX2 doesn't support MSIs from the SMMU, so we're checking
* SPI numbers here.
*/
return smmu->event_gsiv == smmu->pri_gsiv &&
smmu->event_gsiv == smmu->gerr_gsiv &&
smmu->event_gsiv == smmu->sync_gsiv;
}
static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu)
{
/*
* Override the size, for Cavium ThunderX2 implementation
* which doesn't support the page 1 SMMU register space.
*/
if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
return SZ_64K;
return SZ_128K;
}
static void __init arm_smmu_v3_init_resources(struct resource *res,
struct acpi_iort_node *node)
{
struct acpi_iort_smmu_v3 *smmu;
int num_res = 0;
/* Retrieve SMMUv3 specific data */
smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
res[num_res].start = smmu->base_address;
res[num_res].end = smmu->base_address +
arm_smmu_v3_resource_size(smmu) - 1;
res[num_res].flags = IORESOURCE_MEM;
num_res++;
if (arm_smmu_v3_is_combined_irq(smmu)) {
if (smmu->event_gsiv)
acpi_iort_register_irq(smmu->event_gsiv, "combined",
ACPI_EDGE_SENSITIVE,
&res[num_res++]);
} else {
if (smmu->event_gsiv)
acpi_iort_register_irq(smmu->event_gsiv, "eventq",
ACPI_EDGE_SENSITIVE,
&res[num_res++]);
if (smmu->pri_gsiv)
acpi_iort_register_irq(smmu->pri_gsiv, "priq",
ACPI_EDGE_SENSITIVE,
&res[num_res++]);
if (smmu->gerr_gsiv)
acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
ACPI_EDGE_SENSITIVE,
&res[num_res++]);
if (smmu->sync_gsiv)
acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
ACPI_EDGE_SENSITIVE,
&res[num_res++]);
}
}
static void __init arm_smmu_v3_dma_configure(struct device *dev,
struct acpi_iort_node *node)
{
struct acpi_iort_smmu_v3 *smmu;
enum dev_dma_attr attr;
/* Retrieve SMMUv3 specific data */
smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ?
DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
/* We expect the dma masks to be equivalent for all SMMUv3 set-ups */
dev->dma_mask = &dev->coherent_dma_mask;
/* Configure DMA for the page table walker */
acpi_dma_configure(dev, attr);
}
#if defined(CONFIG_ACPI_NUMA)
/*
* set numa proximity domain for smmuv3 device
*/
static int __init arm_smmu_v3_set_proximity(struct device *dev,
struct acpi_iort_node *node)
{
struct acpi_iort_smmu_v3 *smmu;
smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
int dev_node = pxm_to_node(smmu->pxm);
if (dev_node != NUMA_NO_NODE && !node_online(dev_node))
return -EINVAL;
set_dev_node(dev, dev_node);
pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
smmu->base_address,
smmu->pxm);
}
return 0;
}
#else
#define arm_smmu_v3_set_proximity NULL
#endif
static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
{
struct acpi_iort_smmu *smmu;
/* Retrieve SMMU specific data */
smmu = (struct acpi_iort_smmu *)node->node_data;
/*
* Only consider the global fault interrupt and ignore the
* configuration access interrupt.
*
* MMIO address and global fault interrupt resources are always
* present so add them to the context interrupt count as a static
* value.
*/
return smmu->context_interrupt_count + 2;
}
static void __init arm_smmu_init_resources(struct resource *res,
struct acpi_iort_node *node)
{
struct acpi_iort_smmu *smmu;
int i, hw_irq, trigger, num_res = 0;
u64 *ctx_irq, *glb_irq;
/* Retrieve SMMU specific data */
smmu = (struct acpi_iort_smmu *)node->node_data;
res[num_res].start = smmu->base_address;
res[num_res].end = smmu->base_address + smmu->span - 1;
res[num_res].flags = IORESOURCE_MEM;
num_res++;
glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
/* Global IRQs */
hw_irq = IORT_IRQ_MASK(glb_irq[0]);
trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);
acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
&res[num_res++]);
/* Context IRQs */
ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
for (i = 0; i < smmu->context_interrupt_count; i++) {
hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);
acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
&res[num_res++]);
}
}
static void __init arm_smmu_dma_configure(struct device *dev,
struct acpi_iort_node *node)
{
struct acpi_iort_smmu *smmu;
enum dev_dma_attr attr;
/* Retrieve SMMU specific data */
smmu = (struct acpi_iort_smmu *)node->node_data;
attr = (smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) ?
DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
/* We expect the dma masks to be equivalent for SMMU set-ups */
dev->dma_mask = &dev->coherent_dma_mask;
/* Configure DMA for the page table walker */
acpi_dma_configure(dev, attr);
}
static int __init arm_smmu_v3_pmcg_count_resources(struct acpi_iort_node *node)
{
struct acpi_iort_pmcg *pmcg;
/* Retrieve PMCG specific data */
pmcg = (struct acpi_iort_pmcg *)node->node_data;
/*
* There are always 2 memory resources.
* If the overflow_gsiv is present then add that for a total of 3.
*/
return pmcg->overflow_gsiv ? 3 : 2;
}
static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
struct acpi_iort_node *node)
{
struct acpi_iort_pmcg *pmcg;
/* Retrieve PMCG specific data */
pmcg = (struct acpi_iort_pmcg *)node->node_data;
res[0].start = pmcg->page0_base_address;
res[0].end = pmcg->page0_base_address + SZ_4K - 1;
res[0].flags = IORESOURCE_MEM;
/*
* The initial version in DEN0049C lacked a way to describe register
* page 1, which makes it broken for most PMCG implementations; in
* that case, just let the driver fail gracefully if it expects to
* find a second memory resource.
*/
if (node->revision > 0) {
res[1].start = pmcg->page1_base_address;
res[1].end = pmcg->page1_base_address + SZ_4K - 1;
res[1].flags = IORESOURCE_MEM;
}
if (pmcg->overflow_gsiv)
acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow",
ACPI_EDGE_SENSITIVE, &res[2]);
}
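/*
 * Known quirky PMCG implementations, matched against the ACPI OEM and table
 * IDs so that the corresponding model can be passed to the driver as
 * platform data.
 */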
static struct acpi_platform_list pmcg_plat_info[] __initdata = {
/* HiSilicon Hip08 Platform */
{"HISI ", "HIP08 ", 0, ACPI_SIG_IORT, greater_than_or_equal,
"Erratum #162001800, Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP08},
/* HiSilicon Hip09 Platform */
{"HISI ", "HIP09 ", 0, ACPI_SIG_IORT, greater_than_or_equal,
"Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
{ }
};
static int __init arm_smmu_v3_pmcg_add_platdata(struct platform_device *pdev)
{
u32 model;
int idx;
idx = acpi_match_platform_list(pmcg_plat_info);
if (idx >= 0)
model = pmcg_plat_info[idx].data;
else
model = IORT_SMMU_V3_PMCG_GENERIC;
return platform_device_add_data(pdev, &model, sizeof(model));
}
struct iort_dev_config {
const char *name;
int (*dev_init)(struct acpi_iort_node *node);
void (*dev_dma_configure)(struct device *dev,
struct acpi_iort_node *node);
int (*dev_count_resources)(struct acpi_iort_node *node);
void (*dev_init_resources)(struct resource *res,
struct acpi_iort_node *node);
int (*dev_set_proximity)(struct device *dev,
struct acpi_iort_node *node);
int (*dev_add_platdata)(struct platform_device *pdev);
};
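/*
 * Per IORT node-type configuration: iort_add_platform_device() uses these
 * callbacks to count and initialise resources, set the NUMA proximity,
 * configure DMA and attach platform data for the device being created.
 */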
static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = {
.name = "arm-smmu-v3",
.dev_dma_configure = arm_smmu_v3_dma_configure,
.dev_count_resources = arm_smmu_v3_count_resources,
.dev_init_resources = arm_smmu_v3_init_resources,
.dev_set_proximity = arm_smmu_v3_set_proximity,
};
static const struct iort_dev_config iort_arm_smmu_cfg __initconst = {
.name = "arm-smmu",
.dev_dma_configure = arm_smmu_dma_configure,
.dev_count_resources = arm_smmu_count_resources,
.dev_init_resources = arm_smmu_init_resources,
};
static const struct iort_dev_config iort_arm_smmu_v3_pmcg_cfg __initconst = {
.name = "arm-smmu-v3-pmcg",
.dev_count_resources = arm_smmu_v3_pmcg_count_resources,
.dev_init_resources = arm_smmu_v3_pmcg_init_resources,
.dev_add_platdata = arm_smmu_v3_pmcg_add_platdata,
};
static __init const struct iort_dev_config *iort_get_dev_cfg(
struct acpi_iort_node *node)
{
switch (node->type) {
case ACPI_IORT_NODE_SMMU_V3:
return &iort_arm_smmu_v3_cfg;
case ACPI_IORT_NODE_SMMU:
return &iort_arm_smmu_cfg;
case ACPI_IORT_NODE_PMCG:
return &iort_arm_smmu_v3_pmcg_cfg;
default:
return NULL;
}
}
/**
* iort_add_platform_device() - Allocate a platform device for IORT node
* @node: Pointer to device ACPI IORT node
* @ops: Pointer to IORT device config struct
*
 * Returns: 0 on success, <0 on failure
*/
static int __init iort_add_platform_device(struct acpi_iort_node *node,
const struct iort_dev_config *ops)
{
struct fwnode_handle *fwnode;
struct platform_device *pdev;
struct resource *r;
int ret, count;
pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
if (!pdev)
return -ENOMEM;
if (ops->dev_set_proximity) {
ret = ops->dev_set_proximity(&pdev->dev, node);
if (ret)
goto dev_put;
}
count = ops->dev_count_resources(node);
r = kcalloc(count, sizeof(*r), GFP_KERNEL);
if (!r) {
ret = -ENOMEM;
goto dev_put;
}
ops->dev_init_resources(r, node);
ret = platform_device_add_resources(pdev, r, count);
/*
* Resources are duplicated in platform_device_add_resources,
* free their allocated memory
*/
kfree(r);
if (ret)
goto dev_put;
/*
 * Platform devices based on PMCG nodes use platform_data to
 * pass the hardware model info to the driver. For others, add
 * a copy of the IORT node pointer to platform_data, to be used to
 * retrieve IORT data.
*/
if (ops->dev_add_platdata)
ret = ops->dev_add_platdata(pdev);
else
ret = platform_device_add_data(pdev, &node, sizeof(node));
if (ret)
goto dev_put;
fwnode = iort_get_fwnode(node);
if (!fwnode) {
ret = -ENODEV;
goto dev_put;
}
pdev->dev.fwnode = fwnode;
if (ops->dev_dma_configure)
ops->dev_dma_configure(&pdev->dev, node);
iort_set_device_domain(&pdev->dev, node);
ret = platform_device_add(pdev);
if (ret)
goto dma_deconfigure;
return 0;
dma_deconfigure:
arch_teardown_dma_ops(&pdev->dev);
dev_put:
platform_device_put(pdev);
return ret;
}
#ifdef CONFIG_PCI
static void __init iort_enable_acs(struct acpi_iort_node *iort_node)
{
static bool acs_enabled __initdata;
if (acs_enabled)
return;
if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
struct acpi_iort_node *parent;
struct acpi_iort_id_mapping *map;
int i;
map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node,
iort_node->mapping_offset);
for (i = 0; i < iort_node->mapping_count; i++, map++) {
if (!map->output_reference)
continue;
parent = ACPI_ADD_PTR(struct acpi_iort_node,
iort_table, map->output_reference);
/*
* If we detect a RC->SMMU mapping, make sure
* we enable ACS on the system.
*/
if ((parent->type == ACPI_IORT_NODE_SMMU) ||
(parent->type == ACPI_IORT_NODE_SMMU_V3)) {
pci_request_acs();
acs_enabled = true;
return;
}
}
}
}
#else
static inline void iort_enable_acs(struct acpi_iort_node *iort_node) { }
#endif
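/*
 * Walk every node in the IORT, enable PCI ACS where a root complex maps to
 * an SMMU, and create a platform device (with an associated fwnode) for each
 * node type that has a device configuration registered above.
 */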
static void __init iort_init_platform_devices(void)
{
struct acpi_iort_node *iort_node, *iort_end;
struct acpi_table_iort *iort;
struct fwnode_handle *fwnode;
int i, ret;
const struct iort_dev_config *ops;
/*
 * iort_table and iort both point to the start of the IORT table, but
* have different struct types
*/
iort = (struct acpi_table_iort *)iort_table;
/* Get the first IORT node */
iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
iort->node_offset);
iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
iort_table->length);
for (i = 0; i < iort->node_count; i++) {
if (iort_node >= iort_end) {
pr_err("iort node pointer overflows, bad table\n");
return;
}
iort_enable_acs(iort_node);
ops = iort_get_dev_cfg(iort_node);
if (ops) {
fwnode = acpi_alloc_fwnode_static();
if (!fwnode)
return;
iort_set_fwnode(iort_node, fwnode);
ret = iort_add_platform_device(iort_node, ops);
if (ret) {
iort_delete_fwnode(iort_node);
acpi_free_fwnode_static(fwnode);
return;
}
}
iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
iort_node->length);
}
}
void __init acpi_iort_init(void)
{
acpi_status status;
/* iort_table will be used at runtime after the iort init,
* so we don't need to call acpi_put_table() to release
* the IORT table mapping.
*/
status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
if (ACPI_FAILURE(status)) {
if (status != AE_NOT_FOUND) {
const char *msg = acpi_format_exception(status);
pr_err("Failed to get table, %s\n", msg);
}
return;
}
iort_init_platform_devices();
}
#ifdef CONFIG_ZONE_DMA
/*
* Extract the highest CPU physical address accessible to all DMA masters in
* the system. PHYS_ADDR_MAX is returned when no constrained device is found.
*/
phys_addr_t __init acpi_iort_dma_get_max_cpu_address(void)
{
phys_addr_t limit = PHYS_ADDR_MAX;
struct acpi_iort_node *node, *end;
struct acpi_table_iort *iort;
acpi_status status;
int i;
if (acpi_disabled)
return limit;
status = acpi_get_table(ACPI_SIG_IORT, 0,
(struct acpi_table_header **)&iort);
if (ACPI_FAILURE(status))
return limit;
node = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->node_offset);
end = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->header.length);
for (i = 0; i < iort->node_count; i++) {
if (node >= end)
break;
switch (node->type) {
struct acpi_iort_named_component *ncomp;
struct acpi_iort_root_complex *rc;
phys_addr_t local_limit;
case ACPI_IORT_NODE_NAMED_COMPONENT:
ncomp = (struct acpi_iort_named_component *)node->node_data;
local_limit = DMA_BIT_MASK(ncomp->memory_address_limit);
limit = min_not_zero(limit, local_limit);
break;
case ACPI_IORT_NODE_PCI_ROOT_COMPLEX:
if (node->revision < 1)
break;
rc = (struct acpi_iort_root_complex *)node->node_data;
local_limit = DMA_BIT_MASK(rc->memory_address_limit);
limit = min_not_zero(limit, local_limit);
break;
}
node = ACPI_ADD_PTR(struct acpi_iort_node, node, node->length);
}
acpi_put_table(&iort->header);
return limit;
}
#endif
| linux-master | drivers/acpi/arm64/iort.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ARM Specific GTDT table Support
*
* Copyright (C) 2016, Linaro Ltd.
* Author: Daniel Lezcano <[email protected]>
* Fu Wei <[email protected]>
* Hanjun Guo <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <clocksource/arm_arch_timer.h>
#undef pr_fmt
#define pr_fmt(fmt) "ACPI GTDT: " fmt
/**
* struct acpi_gtdt_descriptor - Store the key info of GTDT for all functions
* @gtdt: The pointer to the struct acpi_table_gtdt of GTDT table.
* @gtdt_end: The pointer to the end of GTDT table.
* @platform_timer: The pointer to the start of Platform Timer Structure
*
 * This struct stores the key info of the GTDT table; it should be initialized
 * by acpi_gtdt_init().
*/
struct acpi_gtdt_descriptor {
struct acpi_table_gtdt *gtdt;
void *gtdt_end;
void *platform_timer;
};
static struct acpi_gtdt_descriptor acpi_gtdt_desc __initdata;
static inline __init void *next_platform_timer(void *platform_timer)
{
struct acpi_gtdt_header *gh = platform_timer;
platform_timer += gh->length;
if (platform_timer < acpi_gtdt_desc.gtdt_end)
return platform_timer;
return NULL;
}
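/* Iterate over all Platform Timer Structures in the GTDT. */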
#define for_each_platform_timer(_g) \
for (_g = acpi_gtdt_desc.platform_timer; _g; \
_g = next_platform_timer(_g))
static inline bool is_timer_block(void *platform_timer)
{
struct acpi_gtdt_header *gh = platform_timer;
return gh->type == ACPI_GTDT_TYPE_TIMER_BLOCK;
}
static inline bool is_non_secure_watchdog(void *platform_timer)
{
struct acpi_gtdt_header *gh = platform_timer;
struct acpi_gtdt_watchdog *wd = platform_timer;
if (gh->type != ACPI_GTDT_TYPE_WATCHDOG)
return false;
return !(wd->timer_flags & ACPI_GTDT_WATCHDOG_SECURE);
}
static int __init map_gt_gsi(u32 interrupt, u32 flags)
{
int trigger, polarity;
trigger = (flags & ACPI_GTDT_INTERRUPT_MODE) ? ACPI_EDGE_SENSITIVE
: ACPI_LEVEL_SENSITIVE;
polarity = (flags & ACPI_GTDT_INTERRUPT_POLARITY) ? ACPI_ACTIVE_LOW
: ACPI_ACTIVE_HIGH;
return acpi_register_gsi(NULL, interrupt, trigger, polarity);
}
/**
* acpi_gtdt_map_ppi() - Map the PPIs of per-cpu arch_timer.
* @type: the type of PPI.
*
* Note: Secure state is not managed by the kernel on ARM64 systems.
 * So we only handle the non-secure timer PPIs;
 * ARCH_TIMER_PHYS_SECURE_PPI is treated as an invalid type.
*
* Return: the mapped PPI value, 0 if error.
*/
int __init acpi_gtdt_map_ppi(int type)
{
struct acpi_table_gtdt *gtdt = acpi_gtdt_desc.gtdt;
switch (type) {
case ARCH_TIMER_PHYS_NONSECURE_PPI:
return map_gt_gsi(gtdt->non_secure_el1_interrupt,
gtdt->non_secure_el1_flags);
case ARCH_TIMER_VIRT_PPI:
return map_gt_gsi(gtdt->virtual_timer_interrupt,
gtdt->virtual_timer_flags);
case ARCH_TIMER_HYP_PPI:
return map_gt_gsi(gtdt->non_secure_el2_interrupt,
gtdt->non_secure_el2_flags);
default:
pr_err("Failed to map timer interrupt: invalid type.\n");
}
return 0;
}
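/*
 * A sketch of typical usage (the caller and variable names here are
 * illustrative, not taken from this file): the per-cpu arch timer code maps
 * each non-secure PPI in turn, e.g.
 *
 *	ppi = acpi_gtdt_map_ppi(ARCH_TIMER_VIRT_PPI);
 *
 * and treats a return value of 0 as "no usable interrupt".
 */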
/**
 * acpi_gtdt_c3stop() - Get c3stop info from GTDT according to the type of PPI.
* @type: the type of PPI.
*
* Return: true if the timer HW state is lost when a CPU enters an idle state,
* false otherwise
*/
bool __init acpi_gtdt_c3stop(int type)
{
struct acpi_table_gtdt *gtdt = acpi_gtdt_desc.gtdt;
switch (type) {
case ARCH_TIMER_PHYS_NONSECURE_PPI:
return !(gtdt->non_secure_el1_flags & ACPI_GTDT_ALWAYS_ON);
case ARCH_TIMER_VIRT_PPI:
return !(gtdt->virtual_timer_flags & ACPI_GTDT_ALWAYS_ON);
case ARCH_TIMER_HYP_PPI:
return !(gtdt->non_secure_el2_flags & ACPI_GTDT_ALWAYS_ON);
default:
pr_err("Failed to get c3stop info: invalid type.\n");
}
return false;
}
/**
* acpi_gtdt_init() - Get the info of GTDT table to prepare for further init.
* @table: The pointer to GTDT table.
 * @platform_timer_count: It points to an integer variable which is used
* for storing the number of platform timers.
* This pointer could be NULL, if the caller
* doesn't need this info.
*
* Return: 0 if success, -EINVAL if error.
*/
int __init acpi_gtdt_init(struct acpi_table_header *table,
int *platform_timer_count)
{
void *platform_timer;
struct acpi_table_gtdt *gtdt;
gtdt = container_of(table, struct acpi_table_gtdt, header);
acpi_gtdt_desc.gtdt = gtdt;
acpi_gtdt_desc.gtdt_end = (void *)table + table->length;
acpi_gtdt_desc.platform_timer = NULL;
if (platform_timer_count)
*platform_timer_count = 0;
if (table->revision < 2) {
pr_warn("Revision:%d doesn't support Platform Timers.\n",
table->revision);
return 0;
}
if (!gtdt->platform_timer_count) {
pr_debug("No Platform Timer.\n");
return 0;
}
platform_timer = (void *)gtdt + gtdt->platform_timer_offset;
if (platform_timer < (void *)table + sizeof(struct acpi_table_gtdt)) {
pr_err(FW_BUG "invalid timer data.\n");
return -EINVAL;
}
acpi_gtdt_desc.platform_timer = platform_timer;
if (platform_timer_count)
*platform_timer_count = gtdt->platform_timer_count;
return 0;
}
static int __init gtdt_parse_timer_block(struct acpi_gtdt_timer_block *block,
struct arch_timer_mem *timer_mem)
{
int i;
struct arch_timer_mem_frame *frame;
struct acpi_gtdt_timer_entry *gtdt_frame;
if (!block->timer_count) {
pr_err(FW_BUG "GT block present, but frame count is zero.\n");
return -ENODEV;
}
if (block->timer_count > ARCH_TIMER_MEM_MAX_FRAMES) {
pr_err(FW_BUG "GT block lists %d frames, ACPI spec only allows 8\n",
block->timer_count);
return -EINVAL;
}
timer_mem->cntctlbase = (phys_addr_t)block->block_address;
/*
* The CNTCTLBase frame is 4KB (register offsets 0x000 - 0xFFC).
* See ARM DDI 0487A.k_iss10775, page I1-5129, Table I1-3
* "CNTCTLBase memory map".
*/
timer_mem->size = SZ_4K;
gtdt_frame = (void *)block + block->timer_offset;
if (gtdt_frame + block->timer_count != (void *)block + block->header.length)
return -EINVAL;
/*
* Get the GT timer Frame data for every GT Block Timer
*/
for (i = 0; i < block->timer_count; i++, gtdt_frame++) {
if (gtdt_frame->common_flags & ACPI_GTDT_GT_IS_SECURE_TIMER)
continue;
if (gtdt_frame->frame_number >= ARCH_TIMER_MEM_MAX_FRAMES ||
!gtdt_frame->base_address || !gtdt_frame->timer_interrupt)
goto error;
frame = &timer_mem->frame[gtdt_frame->frame_number];
/* duplicate frame */
if (frame->valid)
goto error;
frame->phys_irq = map_gt_gsi(gtdt_frame->timer_interrupt,
gtdt_frame->timer_flags);
if (frame->phys_irq <= 0) {
pr_warn("failed to map physical timer irq in frame %d.\n",
gtdt_frame->frame_number);
goto error;
}
if (gtdt_frame->virtual_timer_interrupt) {
frame->virt_irq =
map_gt_gsi(gtdt_frame->virtual_timer_interrupt,
gtdt_frame->virtual_timer_flags);
if (frame->virt_irq <= 0) {
pr_warn("failed to map virtual timer irq in frame %d.\n",
gtdt_frame->frame_number);
goto error;
}
} else {
pr_debug("virtual timer in frame %d not implemented.\n",
gtdt_frame->frame_number);
}
frame->cntbase = gtdt_frame->base_address;
/*
* The CNTBaseN frame is 4KB (register offsets 0x000 - 0xFFC).
* See ARM DDI 0487A.k_iss10775, page I1-5130, Table I1-4
* "CNTBaseN memory map".
*/
frame->size = SZ_4K;
frame->valid = true;
}
return 0;
error:
do {
if (gtdt_frame->common_flags & ACPI_GTDT_GT_IS_SECURE_TIMER ||
gtdt_frame->frame_number >= ARCH_TIMER_MEM_MAX_FRAMES)
continue;
frame = &timer_mem->frame[gtdt_frame->frame_number];
if (frame->phys_irq > 0)
acpi_unregister_gsi(gtdt_frame->timer_interrupt);
frame->phys_irq = 0;
if (frame->virt_irq > 0)
acpi_unregister_gsi(gtdt_frame->virtual_timer_interrupt);
frame->virt_irq = 0;
} while (i-- >= 0 && gtdt_frame--);
return -EINVAL;
}
/**
* acpi_arch_timer_mem_init() - Get the info of all GT blocks in GTDT table.
* @timer_mem: The pointer to the array of struct arch_timer_mem for returning
 * the result of parsing. The number of elements in this array should
 * be platform_timer_count (the total number of platform timers).
 * @timer_count: It points to an integer variable which is used for storing the
* number of GT blocks we have parsed.
*
* Return: 0 if success, -EINVAL/-ENODEV if error.
*/
int __init acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem,
int *timer_count)
{
int ret;
void *platform_timer;
*timer_count = 0;
for_each_platform_timer(platform_timer) {
if (is_timer_block(platform_timer)) {
ret = gtdt_parse_timer_block(platform_timer, timer_mem);
if (ret)
return ret;
timer_mem++;
(*timer_count)++;
}
}
if (*timer_count)
pr_info("found %d memory-mapped timer block(s).\n",
*timer_count);
return 0;
}
/*
* Initialize a SBSA generic Watchdog platform device info from GTDT
*/
static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
int index)
{
struct platform_device *pdev;
int irq;
/*
 * According to the SBSA specification, the size of the refresh and control
 * frames of the SBSA Generic Watchdog is SZ_4K (offset 0x000 – 0xFFF).
*/
struct resource res[] = {
DEFINE_RES_MEM(wd->control_frame_address, SZ_4K),
DEFINE_RES_MEM(wd->refresh_frame_address, SZ_4K),
{},
};
int nr_res = ARRAY_SIZE(res);
pr_debug("found a Watchdog (0x%llx/0x%llx gsi:%u flags:0x%x).\n",
wd->refresh_frame_address, wd->control_frame_address,
wd->timer_interrupt, wd->timer_flags);
if (!(wd->refresh_frame_address && wd->control_frame_address)) {
pr_err(FW_BUG "failed to get the Watchdog base address.\n");
return -EINVAL;
}
irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags);
res[2] = (struct resource)DEFINE_RES_IRQ(irq);
if (irq <= 0) {
pr_warn("failed to map the Watchdog interrupt.\n");
nr_res--;
}
/*
* Add a platform device named "sbsa-gwdt" to match the platform driver.
* "sbsa-gwdt": SBSA(Server Base System Architecture) Generic Watchdog
* The platform driver can get device info below by matching this name.
*/
pdev = platform_device_register_simple("sbsa-gwdt", index, res, nr_res);
if (IS_ERR(pdev)) {
if (irq > 0)
acpi_unregister_gsi(wd->timer_interrupt);
return PTR_ERR(pdev);
}
return 0;
}
static int __init gtdt_sbsa_gwdt_init(void)
{
void *platform_timer;
struct acpi_table_header *table;
int ret, timer_count, gwdt_count = 0;
if (acpi_disabled)
return 0;
if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_GTDT, 0, &table)))
return -EINVAL;
/*
* Note: Even though the global variable acpi_gtdt_desc has been
* initialized by acpi_gtdt_init() while initializing the arch timers,
* when we call this function to get SBSA watchdogs info from GTDT, the
* pointers stashed in it are stale (since they are early temporary
* mappings carried out before acpi_permanent_mmap is set) and we need
 * to re-initialize them with permanently mapped pointer values so that
 * GTDT parsing is possible.
*/
ret = acpi_gtdt_init(table, &timer_count);
if (ret || !timer_count)
goto out_put_gtdt;
for_each_platform_timer(platform_timer) {
if (is_non_secure_watchdog(platform_timer)) {
ret = gtdt_import_sbsa_gwdt(platform_timer, gwdt_count);
if (ret)
break;
gwdt_count++;
}
}
if (gwdt_count)
pr_info("found %d SBSA generic Watchdog(s).\n", gwdt_count);
out_put_gtdt:
acpi_put_table(table);
return ret;
}
device_initcall(gtdt_sbsa_gwdt_init);
| linux-master | drivers/acpi/arm64/gtdt.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ACPI support for platform bus type.
*
* Copyright (C) 2015, Linaro Ltd
* Author: Graeme Gregory <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/amba/bus.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include "init.h"
static const struct acpi_device_id amba_id_list[] = {
{"ARMH0061", 0}, /* PL061 GPIO Device */
{"ARMH0330", 0}, /* ARM DMA Controller DMA-330 */
{"ARMHC501", 0}, /* ARM CoreSight ETR */
{"ARMHC502", 0}, /* ARM CoreSight STM */
{"ARMHC503", 0}, /* ARM CoreSight Debug */
{"ARMHC979", 0}, /* ARM CoreSight TPIU */
{"ARMHC97C", 0}, /* ARM CoreSight SoC-400 TMC, SoC-600 ETF/ETB */
{"ARMHC98D", 0}, /* ARM CoreSight Dynamic Replicator */
{"ARMHC9CA", 0}, /* ARM CoreSight CATU */
{"ARMHC9FF", 0}, /* ARM CoreSight Dynamic Funnel */
{"", 0},
};
static void amba_register_dummy_clk(void)
{
static struct clk *amba_dummy_clk;
/* If clock already registered */
if (amba_dummy_clk)
return;
amba_dummy_clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, 0, 0);
clk_register_clkdev(amba_dummy_clk, "apb_pclk", NULL);
}
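/*
 * Scan handler attach callback: build an AMBA device from the ACPI node's
 * memory and interrupt resources and register it on the AMBA bus.
 */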
static int amba_handler_attach(struct acpi_device *adev,
const struct acpi_device_id *id)
{
struct acpi_device *parent = acpi_dev_parent(adev);
struct amba_device *dev;
struct resource_entry *rentry;
struct list_head resource_list;
bool address_found = false;
int irq_no = 0;
int ret;
/* If the ACPI node already has a physical device attached, skip it. */
if (adev->physical_node_count)
return 0;
dev = amba_device_alloc(dev_name(&adev->dev), 0, 0);
if (!dev) {
dev_err(&adev->dev, "%s(): amba_device_alloc() failed\n",
__func__);
return -ENOMEM;
}
INIT_LIST_HEAD(&resource_list);
ret = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
if (ret < 0)
goto err_free;
list_for_each_entry(rentry, &resource_list, node) {
switch (resource_type(rentry->res)) {
case IORESOURCE_MEM:
if (!address_found) {
dev->res = *rentry->res;
dev->res.name = dev_name(&dev->dev);
address_found = true;
}
break;
case IORESOURCE_IRQ:
if (irq_no < AMBA_NR_IRQS)
dev->irq[irq_no++] = rentry->res->start;
break;
default:
dev_warn(&adev->dev, "Invalid resource\n");
break;
}
}
acpi_dev_free_resource_list(&resource_list);
/*
* If the ACPI node has a parent and that parent has a physical device
* attached to it, that physical device should be the parent of
* the amba device we are about to create.
*/
if (parent)
dev->dev.parent = acpi_get_first_physical_node(parent);
ACPI_COMPANION_SET(&dev->dev, adev);
ret = amba_device_add(dev, &iomem_resource);
if (ret) {
dev_err(&adev->dev, "%s(): amba_device_add() failed (%d)\n",
__func__, ret);
goto err_free;
}
return 1;
err_free:
amba_device_put(dev);
return ret;
}
static struct acpi_scan_handler amba_handler = {
.ids = amba_id_list,
.attach = amba_handler_attach,
};
void __init acpi_amba_init(void)
{
amba_register_dummy_clk();
acpi_scan_add_handler(&amba_handler);
}
| linux-master | drivers/acpi/arm64/amba.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Trusted Foundations support for ARM CPUs
*
* Copyright (c) 2013, NVIDIA Corporation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/firmware/trusted_foundations.h>
#include <asm/firmware.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/outercache.h>
#define TF_CACHE_MAINT 0xfffff100
#define TF_CACHE_ENABLE 1
#define TF_CACHE_DISABLE 2
#define TF_CACHE_REENABLE 4
#define TF_SET_CPU_BOOT_ADDR_SMC 0xfffff200
#define TF_CPU_PM 0xfffffffc
#define TF_CPU_PM_S3 0xffffffe3
#define TF_CPU_PM_S2 0xffffffe6
#define TF_CPU_PM_S2_NO_MC_CLK 0xffffffe5
#define TF_CPU_PM_S1 0xffffffe4
#define TF_CPU_PM_S1_NOFLUSH_L2 0xffffffe7
static unsigned long tf_idle_mode = TF_PM_MODE_NONE;
static unsigned long cpu_boot_addr;
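/*
 * Issue a Trusted Foundations SMC: the command type is passed in r0 and the
 * two arguments in r1/r2, with r3/r4 zeroed and r4-r11 saved and restored
 * around the call.
 */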
static void tf_generic_smc(u32 type, u32 arg1, u32 arg2)
{
register u32 r0 asm("r0") = type;
register u32 r1 asm("r1") = arg1;
register u32 r2 asm("r2") = arg2;
asm volatile(
".arch_extension sec\n\t"
"stmfd sp!, {r4 - r11}\n\t"
__asmeq("%0", "r0")
__asmeq("%1", "r1")
__asmeq("%2", "r2")
"mov r3, #0\n\t"
"mov r4, #0\n\t"
"smc #0\n\t"
"ldmfd sp!, {r4 - r11}\n\t"
:
: "r" (r0), "r" (r1), "r" (r2)
: "memory", "r3", "r12", "lr");
}
static int tf_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
{
cpu_boot_addr = boot_addr;
tf_generic_smc(TF_SET_CPU_BOOT_ADDR_SMC, cpu_boot_addr, 0);
return 0;
}
static int tf_prepare_idle(unsigned long mode)
{
switch (mode) {
case TF_PM_MODE_LP0:
tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S3, cpu_boot_addr);
break;
case TF_PM_MODE_LP1:
tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S2, cpu_boot_addr);
break;
case TF_PM_MODE_LP1_NO_MC_CLK:
tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S2_NO_MC_CLK,
cpu_boot_addr);
break;
case TF_PM_MODE_LP2:
tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S1, cpu_boot_addr);
break;
case TF_PM_MODE_LP2_NOFLUSH_L2:
tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S1_NOFLUSH_L2,
cpu_boot_addr);
break;
case TF_PM_MODE_NONE:
break;
default:
return -EINVAL;
}
tf_idle_mode = mode;
return 0;
}
#ifdef CONFIG_CACHE_L2X0
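/*
 * Secure write hook for the L2C-310 outer cache: enabling or disabling the
 * cache must go through the firmware, so L2X0_CTRL writes are translated
 * into TF_CACHE_MAINT SMCs here.
 */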
static void tf_cache_write_sec(unsigned long val, unsigned int reg)
{
u32 enable_op, l2x0_way_mask = 0xff;
switch (reg) {
case L2X0_CTRL:
if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_ASSOCIATIVITY_16)
l2x0_way_mask = 0xffff;
switch (tf_idle_mode) {
case TF_PM_MODE_LP2:
enable_op = TF_CACHE_REENABLE;
break;
default:
enable_op = TF_CACHE_ENABLE;
break;
}
if (val == L2X0_CTRL_EN)
tf_generic_smc(TF_CACHE_MAINT, enable_op,
l2x0_saved_regs.aux_ctrl);
else
tf_generic_smc(TF_CACHE_MAINT, TF_CACHE_DISABLE,
l2x0_way_mask);
break;
default:
break;
}
}
static int tf_init_cache(void)
{
outer_cache.write_sec = tf_cache_write_sec;
return 0;
}
#endif /* CONFIG_CACHE_L2X0 */
static const struct firmware_ops trusted_foundations_ops = {
.set_cpu_boot_addr = tf_set_cpu_boot_addr,
.prepare_idle = tf_prepare_idle,
#ifdef CONFIG_CACHE_L2X0
.l2x0_init = tf_init_cache,
#endif
};
void register_trusted_foundations(struct trusted_foundations_platform_data *pd)
{
/*
* we are not using version information for now since currently
* supported SMCs are compatible with all TF releases
*/
register_firmware_ops(&trusted_foundations_ops);
}
void of_register_trusted_foundations(void)
{
struct device_node *node;
struct trusted_foundations_platform_data pdata;
int err;
node = of_find_compatible_node(NULL, NULL, "tlm,trusted-foundations");
if (!node)
return;
err = of_property_read_u32(node, "tlm,version-major",
&pdata.version_major);
if (err != 0)
panic("Trusted Foundation: missing version-major property\n");
err = of_property_read_u32(node, "tlm,version-minor",
&pdata.version_minor);
if (err != 0)
panic("Trusted Foundation: missing version-minor property\n");
register_trusted_foundations(&pdata);
}
bool trusted_foundations_registered(void)
{
return firmware_ops == &trusted_foundations_ops;
}
| linux-master | drivers/firmware/trusted_foundations.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Parse the EFI PCDP table to locate the console device.
*
* (c) Copyright 2002, 2003, 2004 Hewlett-Packard Development Company, L.P.
* Khalid Aziz <[email protected]>
* Alex Williamson <[email protected]>
* Bjorn Helgaas <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/console.h>
#include <linux/efi.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <asm/vga.h>
#include "pcdp.h"
static int __init
setup_serial_console(struct pcdp_uart *uart)
{
#ifdef CONFIG_SERIAL_8250_CONSOLE
int mmio;
static char options[64], *p = options;
char parity;
mmio = (uart->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY);
p += sprintf(p, "uart8250,%s,0x%llx",
mmio ? "mmio" : "io", uart->addr.address);
if (uart->baud) {
p += sprintf(p, ",%llu", uart->baud);
if (uart->bits) {
switch (uart->parity) {
case 0x2: parity = 'e'; break;
case 0x3: parity = 'o'; break;
default: parity = 'n';
}
p += sprintf(p, "%c%d", parity, uart->bits);
}
}
add_preferred_console("uart", 8250, &options[9]);
return setup_earlycon(options);
#else
return -ENODEV;
#endif
}
static int __init
setup_vga_console(struct pcdp_device *dev)
{
#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
u8 *if_ptr;
if_ptr = ((u8 *)dev + sizeof(struct pcdp_device));
if (if_ptr[0] == PCDP_IF_PCI) {
struct pcdp_if_pci if_pci;
/* struct copy since ifptr might not be correctly aligned */
memcpy(&if_pci, if_ptr, sizeof(if_pci));
if (if_pci.trans & PCDP_PCI_TRANS_IOPORT)
vga_console_iobase = if_pci.ioport_tra;
if (if_pci.trans & PCDP_PCI_TRANS_MMIO)
vga_console_membase = if_pci.mmio_tra;
}
if (efi_mem_type(vga_console_membase + 0xA0000) == EFI_CONVENTIONAL_MEMORY) {
printk(KERN_ERR "PCDP: VGA selected, but frame buffer is not MMIO!\n");
return -ENODEV;
}
conswitchp = &vga_con;
printk(KERN_INFO "PCDP: VGA console\n");
return 0;
#else
return -ENODEV;
#endif
}
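/*
 * Physical address of the HCDP/PCDP table, recorded by the EFI boot code
 * (EFI_INVALID_TABLE_ADDR if the table is absent).
 */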
extern unsigned long hcdp_phys;
int __init
efi_setup_pcdp_console(char *cmdline)
{
struct pcdp *pcdp;
struct pcdp_uart *uart;
struct pcdp_device *dev, *end;
int i, serial = 0;
int rc = -ENODEV;
if (hcdp_phys == EFI_INVALID_TABLE_ADDR)
return -ENODEV;
pcdp = early_memremap(hcdp_phys, 4096);
printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, hcdp_phys);
if (strstr(cmdline, "console=hcdp")) {
if (pcdp->rev < 3)
serial = 1;
} else if (strstr(cmdline, "console=")) {
printk(KERN_INFO "Explicit \"console=\"; ignoring PCDP\n");
goto out;
}
if (pcdp->rev < 3 && efi_uart_console_only())
serial = 1;
for (i = 0, uart = pcdp->uart; i < pcdp->num_uarts; i++, uart++) {
if (uart->flags & PCDP_UART_PRIMARY_CONSOLE || serial) {
if (uart->type == PCDP_CONSOLE_UART) {
rc = setup_serial_console(uart);
goto out;
}
}
}
end = (struct pcdp_device *) ((u8 *) pcdp + pcdp->length);
for (dev = (struct pcdp_device *) (pcdp->uart + pcdp->num_uarts);
dev < end;
dev = (struct pcdp_device *) ((u8 *) dev + dev->length)) {
if (dev->flags & PCDP_PRIMARY_CONSOLE) {
if (dev->type == PCDP_CONSOLE_VGA) {
rc = setup_vga_console(dev);
goto out;
}
}
}
out:
early_memunmap(pcdp, 4096);
return rc;
}
| linux-master | drivers/firmware/pcdp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/drivers/firmware/memmap.c
* Copyright (C) 2008 SUSE LINUX Products GmbH
* by Bernhard Walle <[email protected]>
*/
#include <linux/string.h>
#include <linux/firmware-map.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/mm.h>
/*
* Data types ------------------------------------------------------------------
*/
/*
* Firmware map entry. Because firmware memory maps are flat and not
* hierarchical, it's ok to organise them in a linked list. No parent
* information is necessary as for the resource tree.
*/
struct firmware_map_entry {
/*
* start and end must be u64 rather than resource_size_t, because e820
* resources can lie at addresses above 4G.
*/
u64 start; /* start of the memory range */
u64 end; /* end of the memory range (incl.) */
const char *type; /* type of the memory range */
struct list_head list; /* entry for the linked list */
struct kobject kobj; /* kobject for each entry */
};
/*
* Forward declarations --------------------------------------------------------
*/
static ssize_t memmap_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf);
static ssize_t start_show(struct firmware_map_entry *entry, char *buf);
static ssize_t end_show(struct firmware_map_entry *entry, char *buf);
static ssize_t type_show(struct firmware_map_entry *entry, char *buf);
static struct firmware_map_entry * __meminit
firmware_map_find_entry(u64 start, u64 end, const char *type);
/*
* Static data -----------------------------------------------------------------
*/
struct memmap_attribute {
struct attribute attr;
ssize_t (*show)(struct firmware_map_entry *entry, char *buf);
};
static struct memmap_attribute memmap_start_attr = __ATTR_RO(start);
static struct memmap_attribute memmap_end_attr = __ATTR_RO(end);
static struct memmap_attribute memmap_type_attr = __ATTR_RO(type);
/*
* These are default attributes that are added for every memmap entry.
*/
static struct attribute *def_attrs[] = {
&memmap_start_attr.attr,
&memmap_end_attr.attr,
&memmap_type_attr.attr,
NULL
};
ATTRIBUTE_GROUPS(def);
static const struct sysfs_ops memmap_attr_ops = {
.show = memmap_attr_show,
};
/* Firmware memory map entries. */
static LIST_HEAD(map_entries);
static DEFINE_SPINLOCK(map_entries_lock);
/*
* For memory hotplug, there is no way to free memory map entries allocated
* by boot mem after the system is up. So when we hot-remove memory whose
* map entry is allocated by bootmem, we need to remember the storage and
* reuse it when the memory is hot-added again.
*/
static LIST_HEAD(map_entries_bootmem);
static DEFINE_SPINLOCK(map_entries_bootmem_lock);
static inline struct firmware_map_entry *
to_memmap_entry(struct kobject *kobj)
{
return container_of(kobj, struct firmware_map_entry, kobj);
}
static void __meminit release_firmware_map_entry(struct kobject *kobj)
{
struct firmware_map_entry *entry = to_memmap_entry(kobj);
if (PageReserved(virt_to_page(entry))) {
/*
* Remember the storage allocated by bootmem, and reuse it when
* the memory is hot-added again. The entry will be added to
* map_entries_bootmem here, and deleted from &map_entries in
* firmware_map_remove_entry().
*/
spin_lock(&map_entries_bootmem_lock);
list_add(&entry->list, &map_entries_bootmem);
spin_unlock(&map_entries_bootmem_lock);
return;
}
kfree(entry);
}
static struct kobj_type __refdata memmap_ktype = {
.release = release_firmware_map_entry,
.sysfs_ops = &memmap_attr_ops,
.default_groups = def_groups,
};
/*
* Registration functions ------------------------------------------------------
*/
/**
* firmware_map_add_entry() - Does the real work to add a firmware memmap entry.
* @start: Start of the memory range.
* @end: End of the memory range (exclusive).
* @type: Type of the memory range.
* @entry: Pre-allocated (either kmalloc() or bootmem allocator), uninitialised
* entry.
*
* Common implementation of firmware_map_add() and firmware_map_add_early()
* which expects a pre-allocated struct firmware_map_entry.
*
* Return: 0 always
*/
static int firmware_map_add_entry(u64 start, u64 end,
const char *type,
struct firmware_map_entry *entry)
{
BUG_ON(start > end);
entry->start = start;
entry->end = end - 1;
entry->type = type;
INIT_LIST_HEAD(&entry->list);
kobject_init(&entry->kobj, &memmap_ktype);
spin_lock(&map_entries_lock);
list_add_tail(&entry->list, &map_entries);
spin_unlock(&map_entries_lock);
return 0;
}
/**
* firmware_map_remove_entry() - Does the real work to remove a firmware
* memmap entry.
* @entry: removed entry.
*
* The caller must hold map_entries_lock, and release it properly.
*/
static inline void firmware_map_remove_entry(struct firmware_map_entry *entry)
{
list_del(&entry->list);
}
/*
* Add memmap entry on sysfs
*/
static int add_sysfs_fw_map_entry(struct firmware_map_entry *entry)
{
static int map_entries_nr;
static struct kset *mmap_kset;
if (entry->kobj.state_in_sysfs)
return -EEXIST;
if (!mmap_kset) {
mmap_kset = kset_create_and_add("memmap", NULL, firmware_kobj);
if (!mmap_kset)
return -ENOMEM;
}
entry->kobj.kset = mmap_kset;
if (kobject_add(&entry->kobj, NULL, "%d", map_entries_nr++))
kobject_put(&entry->kobj);
return 0;
}
/*
* Remove memmap entry on sysfs
*/
static inline void remove_sysfs_fw_map_entry(struct firmware_map_entry *entry)
{
kobject_put(&entry->kobj);
}
/**
* firmware_map_find_entry_in_list() - Search memmap entry in a given list.
* @start: Start of the memory range.
* @end: End of the memory range (exclusive).
* @type: Type of the memory range.
 * @list: The list in which to find the entry.
*
 * This function is to find the memmap entry of a given memory range in a
* given list. The caller must hold map_entries_lock, and must not release
* the lock until the processing of the returned entry has completed.
*
* Return: Pointer to the entry to be found on success, or NULL on failure.
*/
static struct firmware_map_entry * __meminit
firmware_map_find_entry_in_list(u64 start, u64 end, const char *type,
struct list_head *list)
{
struct firmware_map_entry *entry;
list_for_each_entry(entry, list, list)
if ((entry->start == start) && (entry->end == end) &&
(!strcmp(entry->type, type))) {
return entry;
}
return NULL;
}
/**
* firmware_map_find_entry() - Search memmap entry in map_entries.
* @start: Start of the memory range.
* @end: End of the memory range (exclusive).
* @type: Type of the memory range.
*
 * This function is to find the memmap entry of a given memory range.
* The caller must hold map_entries_lock, and must not release the lock
* until the processing of the returned entry has completed.
*
* Return: Pointer to the entry to be found on success, or NULL on failure.
*/
static struct firmware_map_entry * __meminit
firmware_map_find_entry(u64 start, u64 end, const char *type)
{
return firmware_map_find_entry_in_list(start, end, type, &map_entries);
}
/**
* firmware_map_find_entry_bootmem() - Search memmap entry in map_entries_bootmem.
* @start: Start of the memory range.
* @end: End of the memory range (exclusive).
* @type: Type of the memory range.
*
 * This function is similar to firmware_map_find_entry except that it finds the
* given entry in map_entries_bootmem.
*
* Return: Pointer to the entry to be found on success, or NULL on failure.
*/
static struct firmware_map_entry * __meminit
firmware_map_find_entry_bootmem(u64 start, u64 end, const char *type)
{
return firmware_map_find_entry_in_list(start, end, type,
&map_entries_bootmem);
}
/**
* firmware_map_add_hotplug() - Adds a firmware mapping entry when we do
* memory hotplug.
* @start: Start of the memory range.
* @end: End of the memory range (exclusive)
* @type: Type of the memory range.
*
 * Adds a firmware mapping entry. This function is for memory hotplug; it is
 * similar to firmware_map_add_early(). The only difference is that
 * it will create the sysfs entry dynamically.
*
* Return: 0 on success, or -ENOMEM if no memory could be allocated.
*/
int __meminit firmware_map_add_hotplug(u64 start, u64 end, const char *type)
{
struct firmware_map_entry *entry;
entry = firmware_map_find_entry(start, end - 1, type);
if (entry)
return 0;
entry = firmware_map_find_entry_bootmem(start, end - 1, type);
if (!entry) {
entry = kzalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC);
if (!entry)
return -ENOMEM;
} else {
/* Reuse storage allocated by bootmem. */
spin_lock(&map_entries_bootmem_lock);
list_del(&entry->list);
spin_unlock(&map_entries_bootmem_lock);
memset(entry, 0, sizeof(*entry));
}
firmware_map_add_entry(start, end, type, entry);
/* create the memmap entry */
add_sysfs_fw_map_entry(entry);
return 0;
}
/**
* firmware_map_add_early() - Adds a firmware mapping entry.
* @start: Start of the memory range.
* @end: End of the memory range.
* @type: Type of the memory range.
*
* Adds a firmware mapping entry. This function uses the bootmem allocator
* for memory allocation.
*
 * This function must be called before late_initcall().
*
* Return: 0 on success, or -ENOMEM if no memory could be allocated.
*/
int __init firmware_map_add_early(u64 start, u64 end, const char *type)
{
struct firmware_map_entry *entry;
entry = memblock_alloc(sizeof(struct firmware_map_entry),
SMP_CACHE_BYTES);
if (WARN_ON(!entry))
return -ENOMEM;
return firmware_map_add_entry(start, end, type, entry);
}
/**
* firmware_map_remove() - remove a firmware mapping entry
* @start: Start of the memory range.
* @end: End of the memory range.
* @type: Type of the memory range.
*
 * Removes a firmware mapping entry.
*
* Return: 0 on success, or -EINVAL if no entry.
*/
int __meminit firmware_map_remove(u64 start, u64 end, const char *type)
{
struct firmware_map_entry *entry;
spin_lock(&map_entries_lock);
entry = firmware_map_find_entry(start, end - 1, type);
if (!entry) {
spin_unlock(&map_entries_lock);
return -EINVAL;
}
firmware_map_remove_entry(entry);
spin_unlock(&map_entries_lock);
/* remove the memmap entry */
remove_sysfs_fw_map_entry(entry);
return 0;
}
/*
* Sysfs functions -------------------------------------------------------------
*/
static ssize_t start_show(struct firmware_map_entry *entry, char *buf)
{
return snprintf(buf, PAGE_SIZE, "0x%llx\n",
(unsigned long long)entry->start);
}
static ssize_t end_show(struct firmware_map_entry *entry, char *buf)
{
return snprintf(buf, PAGE_SIZE, "0x%llx\n",
(unsigned long long)entry->end);
}
static ssize_t type_show(struct firmware_map_entry *entry, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%s\n", entry->type);
}
static inline struct memmap_attribute *to_memmap_attr(struct attribute *attr)
{
return container_of(attr, struct memmap_attribute, attr);
}
static ssize_t memmap_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
struct firmware_map_entry *entry = to_memmap_entry(kobj);
struct memmap_attribute *memmap_attr = to_memmap_attr(attr);
return memmap_attr->show(entry, buf);
}
/*
 * Adds the entries in the map_entries list to sysfs. It is important that
 * firmware_map_add() and firmware_map_add_early() are called before
 * late_initcall, because this function itself runs as a late_initcall(),
 * which means that entries added afterwards are not added to sysfs.
*/
static int __init firmware_memmap_init(void)
{
struct firmware_map_entry *entry;
list_for_each_entry(entry, &map_entries, list)
add_sysfs_fw_map_entry(entry);
return 0;
}
late_initcall(firmware_memmap_init);
| linux-master | drivers/firmware/memmap.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2022 MediaTek Corporation. All rights reserved.
* Author: Allen-KH Cheng <[email protected]>
*/
#include <linux/firmware/mediatek/mtk-adsp-ipc.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
static const char * const adsp_mbox_ch_names[MTK_ADSP_MBOX_NUM] = { "rx", "tx" };
/*
* mtk_adsp_ipc_send - send ipc cmd to MTK ADSP
*
* @ipc: ADSP IPC handle
* @idx: index of the mailbox channel
* @msg: IPC cmd (reply or request)
*
 * Return: 0 on success (as reported by mbox_send_message()),
 * a negative value on error
*/
int mtk_adsp_ipc_send(struct mtk_adsp_ipc *ipc, unsigned int idx, uint32_t msg)
{
struct mtk_adsp_chan *adsp_chan;
int ret;
if (idx >= MTK_ADSP_MBOX_NUM)
return -EINVAL;
adsp_chan = &ipc->chans[idx];
ret = mbox_send_message(adsp_chan->ch, &msg);
if (ret < 0)
return ret;
return 0;
}
EXPORT_SYMBOL_GPL(mtk_adsp_ipc_send);
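/*
 * Illustrative usage (a sketch; reply_opcode is a placeholder for whatever
 * reply value the client protocol defines, not a constant from this driver):
 *
 *	mtk_adsp_ipc_send(ipc, MTK_ADSP_MBOX_REPLY, reply_opcode);
 */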
/*
* mtk_adsp_ipc_recv - recv callback used by MTK ADSP mailbox
*
* @c: mbox client
* @msg: message received
*
 * Users of ADSP IPC will need to provide handle_reply and handle_request
* callbacks.
*/
static void mtk_adsp_ipc_recv(struct mbox_client *c, void *msg)
{
struct mtk_adsp_chan *chan = container_of(c, struct mtk_adsp_chan, cl);
struct device *dev = c->dev;
switch (chan->idx) {
case MTK_ADSP_MBOX_REPLY:
chan->ipc->ops->handle_reply(chan->ipc);
break;
case MTK_ADSP_MBOX_REQUEST:
chan->ipc->ops->handle_request(chan->ipc);
break;
default:
dev_err(dev, "wrong mbox chan %d\n", chan->idx);
break;
}
}
static int mtk_adsp_ipc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_adsp_ipc *adsp_ipc;
struct mtk_adsp_chan *adsp_chan;
struct mbox_client *cl;
int ret;
int i, j;
device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
adsp_ipc = devm_kzalloc(dev, sizeof(*adsp_ipc), GFP_KERNEL);
if (!adsp_ipc)
return -ENOMEM;
for (i = 0; i < MTK_ADSP_MBOX_NUM; i++) {
adsp_chan = &adsp_ipc->chans[i];
cl = &adsp_chan->cl;
cl->dev = dev->parent;
cl->tx_block = false;
cl->knows_txdone = false;
cl->tx_prepare = NULL;
cl->rx_callback = mtk_adsp_ipc_recv;
adsp_chan->ipc = adsp_ipc;
adsp_chan->idx = i;
adsp_chan->ch = mbox_request_channel_byname(cl, adsp_mbox_ch_names[i]);
if (IS_ERR(adsp_chan->ch)) {
ret = PTR_ERR(adsp_chan->ch);
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to request mbox chan %s ret %d\n",
adsp_mbox_ch_names[i], ret);
for (j = 0; j < i; j++) {
adsp_chan = &adsp_ipc->chans[j];
mbox_free_channel(adsp_chan->ch);
}
return ret;
}
}
adsp_ipc->dev = dev;
dev_set_drvdata(dev, adsp_ipc);
dev_dbg(dev, "MTK ADSP IPC initialized\n");
return 0;
}
static int mtk_adsp_ipc_remove(struct platform_device *pdev)
{
struct mtk_adsp_ipc *adsp_ipc = dev_get_drvdata(&pdev->dev);
struct mtk_adsp_chan *adsp_chan;
int i;
for (i = 0; i < MTK_ADSP_MBOX_NUM; i++) {
adsp_chan = &adsp_ipc->chans[i];
mbox_free_channel(adsp_chan->ch);
}
return 0;
}
static struct platform_driver mtk_adsp_ipc_driver = {
.driver = {
.name = "mtk-adsp-ipc",
},
.probe = mtk_adsp_ipc_probe,
.remove = mtk_adsp_ipc_remove,
};
builtin_platform_driver(mtk_adsp_ipc_driver);
MODULE_AUTHOR("Allen-KH Cheng <[email protected]>");
MODULE_DESCRIPTION("MTK ADSP IPC Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/firmware/mtk-adsp-ipc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Texas Instruments System Control Interface Protocol Driver
*
* Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com/
* Nishanth Menon
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/soc/ti/ti-msgmgr.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/reboot.h>
#include "ti_sci.h"
/* List of all TI SCI devices active in system */
static LIST_HEAD(ti_sci_list);
/* Protection for the entire list */
static DEFINE_MUTEX(ti_sci_list_mutex);
/**
* struct ti_sci_xfer - Structure representing a message flow
* @tx_message: Transmit message
* @rx_len: Receive message length
* @xfer_buf: Preallocated buffer to store receive message
* Since we work with request-ACK protocol, we can
* reuse the same buffer for the rx path as we
* use for the tx path.
* @done: completion event
*/
struct ti_sci_xfer {
struct ti_msgmgr_message tx_message;
u8 rx_len;
u8 *xfer_buf;
struct completion done;
};
/**
* struct ti_sci_xfers_info - Structure to manage transfer information
* @sem_xfer_count: Counting Semaphore for managing max simultaneous
* Messages.
* @xfer_block: Preallocated Message array
* @xfer_alloc_table: Bitmap table for allocated messages.
* Index of this bitmap table is also used for message
* sequence identifier.
* @xfer_lock: Protection for message allocation
*/
struct ti_sci_xfers_info {
struct semaphore sem_xfer_count;
struct ti_sci_xfer *xfer_block;
unsigned long *xfer_alloc_table;
/* protect transfer allocation */
spinlock_t xfer_lock;
};
/**
* struct ti_sci_desc - Description of SoC integration
* @default_host_id: Host identifier representing the compute entity
* @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
* @max_msgs: Maximum number of messages that can be pending
* simultaneously in the system
* @max_msg_size: Maximum size of data per message that can be handled.
*/
struct ti_sci_desc {
u8 default_host_id;
int max_rx_timeout_ms;
int max_msgs;
int max_msg_size;
};
/**
* struct ti_sci_info - Structure representing a TI SCI instance
* @dev: Device pointer
* @desc: SoC description for this instance
* @nb: Reboot Notifier block
* @d: Debugfs file entry
* @debug_region: Memory region where the debug message are available
* @debug_region_size: Debug region size
* @debug_buffer: Buffer allocated to copy debug messages.
* @handle: Instance of TI SCI handle to send to clients.
* @cl: Mailbox Client
* @chan_tx: Transmit mailbox channel
* @chan_rx: Receive mailbox channel
* @minfo: Message info
* @node: list head
* @host_id: Host ID
* @users: Number of users of this instance
*/
struct ti_sci_info {
struct device *dev;
struct notifier_block nb;
const struct ti_sci_desc *desc;
struct dentry *d;
void __iomem *debug_region;
char *debug_buffer;
size_t debug_region_size;
struct ti_sci_handle handle;
struct mbox_client cl;
struct mbox_chan *chan_tx;
struct mbox_chan *chan_rx;
struct ti_sci_xfers_info minfo;
struct list_head node;
u8 host_id;
/* protected by ti_sci_list_mutex */
int users;
};
#define cl_to_ti_sci_info(c) container_of(c, struct ti_sci_info, cl)
#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
#define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb)
#ifdef CONFIG_DEBUG_FS
/**
* ti_sci_debug_show() - Helper to dump the debug log
* @s: sequence file pointer
* @unused: unused.
*
* Return: 0
*/
static int ti_sci_debug_show(struct seq_file *s, void *unused)
{
struct ti_sci_info *info = s->private;
memcpy_fromio(info->debug_buffer, info->debug_region,
info->debug_region_size);
/*
 * We don't trust the firmware to NUL-terminate the buffer (hence
 * we have allocated 1 extra 0 byte). Since we cannot guarantee any
 * specific data format for debug messages, we just present the data
 * in the buffer as is - we expect the messages to be self-explanatory.
*/
seq_puts(s, info->debug_buffer);
return 0;
}
/* Provide the log file operations interface */
DEFINE_SHOW_ATTRIBUTE(ti_sci_debug);
/**
* ti_sci_debugfs_create() - Create log debug file
* @pdev: platform device pointer
* @info: Pointer to SCI entity information
*
* Return: 0 if all went fine, else corresponding error.
*/
static int ti_sci_debugfs_create(struct platform_device *pdev,
struct ti_sci_info *info)
{
struct device *dev = &pdev->dev;
struct resource *res;
char debug_name[50] = "ti_sci_debug@";
/* Debug region is optional */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"debug_messages");
info->debug_region = devm_ioremap_resource(dev, res);
if (IS_ERR(info->debug_region))
return 0;
info->debug_region_size = resource_size(res);
info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1,
sizeof(char), GFP_KERNEL);
if (!info->debug_buffer)
return -ENOMEM;
/* Setup NULL termination */
info->debug_buffer[info->debug_region_size] = 0;
info->d = debugfs_create_file(strncat(debug_name, dev_name(dev),
sizeof(debug_name) -
sizeof("ti_sci_debug@")),
0444, NULL, info, &ti_sci_debug_fops);
if (IS_ERR(info->d))
return PTR_ERR(info->d);
dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n",
info->debug_region, info->debug_region_size, res);
return 0;
}
/**
* ti_sci_debugfs_destroy() - clean up log debug file
* @pdev: platform device pointer
* @info: Pointer to SCI entity information
*/
static void ti_sci_debugfs_destroy(struct platform_device *pdev,
struct ti_sci_info *info)
{
if (IS_ERR(info->debug_region))
return;
debugfs_remove(info->d);
}
#else /* CONFIG_DEBUG_FS */
static inline int ti_sci_debugfs_create(struct platform_device *dev,
struct ti_sci_info *info)
{
return 0;
}
static inline void ti_sci_debugfs_destroy(struct platform_device *dev,
struct ti_sci_info *info)
{
}
#endif /* CONFIG_DEBUG_FS */
/**
* ti_sci_dump_header_dbg() - Helper to dump a message header.
* @dev: Device pointer corresponding to the SCI entity
* @hdr: pointer to header.
*/
static inline void ti_sci_dump_header_dbg(struct device *dev,
struct ti_sci_msg_hdr *hdr)
{
dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n",
hdr->type, hdr->host, hdr->seq, hdr->flags);
}
/**
* ti_sci_rx_callback() - mailbox client callback for receive messages
* @cl: client pointer
* @m: mailbox message
*
* Processes one received message to appropriate transfer information and
* signals completion of the transfer.
*
* NOTE: This function will be invoked in IRQ context, hence should be
* as optimal as possible.
*/
static void ti_sci_rx_callback(struct mbox_client *cl, void *m)
{
struct ti_sci_info *info = cl_to_ti_sci_info(cl);
struct device *dev = info->dev;
struct ti_sci_xfers_info *minfo = &info->minfo;
struct ti_msgmgr_message *mbox_msg = m;
struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf;
struct ti_sci_xfer *xfer;
u8 xfer_id;
xfer_id = hdr->seq;
/*
* Are we even expecting this?
* NOTE: barriers were implicit in locks used for modifying the bitmap
*/
if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
dev_err(dev, "Message for %d is not expected!\n", xfer_id);
return;
}
xfer = &minfo->xfer_block[xfer_id];
/* Is the message of valid length? */
if (mbox_msg->len > info->desc->max_msg_size) {
dev_err(dev, "Unable to handle %zu xfer(max %d)\n",
mbox_msg->len, info->desc->max_msg_size);
ti_sci_dump_header_dbg(dev, hdr);
return;
}
if (mbox_msg->len < xfer->rx_len) {
dev_err(dev, "Recv xfer %zu < expected %d length\n",
mbox_msg->len, xfer->rx_len);
ti_sci_dump_header_dbg(dev, hdr);
return;
}
ti_sci_dump_header_dbg(dev, hdr);
/* Take a copy to the rx buffer.. */
memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len);
complete(&xfer->done);
}
/**
* ti_sci_get_one_xfer() - Allocate one message
* @info: Pointer to SCI entity information
* @msg_type: Message type
* @msg_flags: Flag to set for the message
* @tx_message_size: transmit message size
* @rx_message_size: receive message size
*
* Helper function which is used by various command functions that are
* exposed to clients of this driver for allocating a message traffic event.
*
* This function can sleep depending on pending requests already in the system
* for the SCI entity. Further, this also holds a spinlock to maintain integrity
* of internal data structures.
*
 * Return: Valid ti_sci_xfer pointer if all went fine, else a corresponding
 * error pointer (ERR_PTR).
*/
static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
u16 msg_type, u32 msg_flags,
size_t tx_message_size,
size_t rx_message_size)
{
struct ti_sci_xfers_info *minfo = &info->minfo;
struct ti_sci_xfer *xfer;
struct ti_sci_msg_hdr *hdr;
unsigned long flags;
unsigned long bit_pos;
u8 xfer_id;
int ret;
int timeout;
/* Ensure we have sane transfer sizes */
if (rx_message_size > info->desc->max_msg_size ||
tx_message_size > info->desc->max_msg_size ||
rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
return ERR_PTR(-ERANGE);
/*
 * Ensure we have only a controlled number of pending messages.
 * Ideally, we might just have to wait for a single message; be
 * conservative and wait 5 times that..
*/
timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5;
ret = down_timeout(&minfo->sem_xfer_count, timeout);
if (ret < 0)
return ERR_PTR(ret);
/* Keep the locked section as small as possible */
spin_lock_irqsave(&minfo->xfer_lock, flags);
bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
info->desc->max_msgs);
set_bit(bit_pos, minfo->xfer_alloc_table);
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
/*
* We already ensured in probe that we can have max messages that can
* fit in hdr.seq - NOTE: this improves access latencies
* to predictable O(1) access, BUT, it opens us to risk if
* remote misbehaves with corrupted message sequence responses.
* If that happens, we are going to be messed up anyways..
*/
xfer_id = (u8)bit_pos;
xfer = &minfo->xfer_block[xfer_id];
hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
xfer->tx_message.len = tx_message_size;
xfer->tx_message.chan_rx = info->chan_rx;
xfer->tx_message.timeout_rx_ms = info->desc->max_rx_timeout_ms;
xfer->rx_len = (u8)rx_message_size;
reinit_completion(&xfer->done);
hdr->seq = xfer_id;
hdr->type = msg_type;
hdr->host = info->host_id;
hdr->flags = msg_flags;
return xfer;
}
/**
* ti_sci_put_one_xfer() - Release a message
* @minfo: transfer info pointer
* @xfer: message that was reserved by ti_sci_get_one_xfer
*
* This holds a spinlock to maintain integrity of internal data structures.
*/
static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
struct ti_sci_xfer *xfer)
{
unsigned long flags;
struct ti_sci_msg_hdr *hdr;
u8 xfer_id;
hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
xfer_id = hdr->seq;
/*
* Keep the locked section as small as possible
* NOTE: we might escape with smp_mb and no lock here..
* but just be conservative and symmetric.
*/
spin_lock_irqsave(&minfo->xfer_lock, flags);
clear_bit(xfer_id, minfo->xfer_alloc_table);
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
/* Increment the count for the next user to get through */
up(&minfo->sem_xfer_count);
}
/**
* ti_sci_do_xfer() - Do one transfer
* @info: Pointer to SCI entity information
* @xfer: Transfer to initiate and wait for response
*
 * Return: 0 if all went well, -ETIMEDOUT if no response was received,
 *	   else the corresponding error if transmission failed.
*/
static inline int ti_sci_do_xfer(struct ti_sci_info *info,
struct ti_sci_xfer *xfer)
{
int ret;
int timeout;
struct device *dev = info->dev;
bool done_state = true;
ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
if (ret < 0)
return ret;
ret = 0;
if (system_state <= SYSTEM_RUNNING) {
/* And we wait for the response. */
timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
if (!wait_for_completion_timeout(&xfer->done, timeout))
ret = -ETIMEDOUT;
} else {
/*
* If we are !running, we cannot use wait_for_completion_timeout
* during noirq phase, so we must manually poll the completion.
*/
ret = read_poll_timeout_atomic(try_wait_for_completion, done_state,
done_state, 1,
info->desc->max_rx_timeout_ms * 1000,
false, &xfer->done);
}
if (ret == -ETIMEDOUT)
dev_err(dev, "Mbox timedout in resp(caller: %pS)\n",
(void *)_RET_IP_);
/*
* NOTE: we might prefer not to need the mailbox ticker to manage the
* transfer queueing since the protocol layer queues things by itself.
* Unfortunately, we have to kick the mailbox framework after we have
* received our message.
*/
mbox_client_txdone(info->chan_tx, ret);
return ret;
}
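/*
 * Illustrative sketch, not part of the driver: the transfer lifecycle that
 * every command helper below follows. The bare-header payload here is a
 * simplification; real commands cast xfer->xfer_buf to their specific
 * request/response structures, as the helpers further down do.
 */
static int __maybe_unused ti_sci_example_simple_cmd(struct ti_sci_info *info,
						    u16 msg_type)
{
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	int ret;

	/* 1) Reserve a transfer slot; the header comes back pre-filled */
	xfer = ti_sci_get_one_xfer(info, msg_type,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(struct ti_sci_msg_hdr),
				   sizeof(struct ti_sci_msg_hdr));
	if (IS_ERR(xfer))
		return PTR_ERR(xfer);

	/* 2) Send and wait for (or poll) the firmware response */
	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		goto put;

	/* 3) ACK/NACK is carried in the response header flags */
	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
	ret = (resp->flags & TI_SCI_FLAG_RESP_GENERIC_ACK) ? 0 : -ENODEV;
put:
	/* 4) Always release the slot so other requesters can proceed */
	ti_sci_put_one_xfer(&info->minfo, xfer);
	return ret;
}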
/**
* ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
* @info: Pointer to SCI entity information
*
* Updates the SCI information in the internal data structure.
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
{
struct device *dev = info->dev;
struct ti_sci_handle *handle = &info->handle;
struct ti_sci_version_info *ver = &handle->version;
struct ti_sci_msg_resp_version *rev_info;
struct ti_sci_xfer *xfer;
int ret;
xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
sizeof(struct ti_sci_msg_hdr),
sizeof(*rev_info));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "Message alloc failed(%d)\n", ret);
return ret;
}
rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf;
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
dev_err(dev, "Mbox send fail %d\n", ret);
goto fail;
}
ver->abi_major = rev_info->abi_major;
ver->abi_minor = rev_info->abi_minor;
ver->firmware_revision = rev_info->firmware_revision;
strncpy(ver->firmware_description, rev_info->firmware_description,
sizeof(ver->firmware_description));
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
return ret;
}
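/*
 * Illustrative client-side sketch (assumes the public ti_sci_protocol.h API,
 * in particular devm_ti_sci_get_handle() and the ti_sci_version_info layout
 * filled in above): once a client has a handle, the revision information
 * gathered at probe time is available directly from handle->version.
 */
static int __maybe_unused ti_sci_example_show_version(struct device *client_dev)
{
	const struct ti_sci_handle *handle;

	handle = devm_ti_sci_get_handle(client_dev);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	dev_info(client_dev, "TI-SCI ABI %u.%u, firmware rev 0x%x '%s'\n",
		 handle->version.abi_major, handle->version.abi_minor,
		 handle->version.firmware_revision,
		 handle->version.firmware_description);
	return 0;
}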
/**
* ti_sci_is_response_ack() - Generic ACK/NACK message checkup
* @r: pointer to response buffer
*
* Return: true if the response was an ACK, else returns false.
*/
static inline bool ti_sci_is_response_ack(void *r)
{
struct ti_sci_msg_hdr *hdr = r;
return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
}
/**
* ti_sci_set_device_state() - Set device state helper
* @handle: pointer to TI SCI handle
* @id: Device identifier
* @flags: flags to setup for the device
* @state: State to move the device to
*
* Return: 0 if all went well, else returns appropriate error value.
*/
static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
u32 id, u32 flags, u8 state)
{
struct ti_sci_info *info;
struct ti_sci_msg_req_set_device_state *req;
struct ti_sci_msg_hdr *resp;
struct ti_sci_xfer *xfer;
struct device *dev;
int ret = 0;
if (IS_ERR(handle))
return PTR_ERR(handle);
if (!handle)
return -EINVAL;
info = handle_to_ti_sci_info(handle);
dev = info->dev;
xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "Message alloc failed(%d)\n", ret);
return ret;
}
req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf;
req->id = id;
req->state = state;
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
dev_err(dev, "Mbox send fail %d\n", ret);
goto fail;
}
resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
return ret;
}
/**
* ti_sci_get_device_state() - Get device state helper
* @handle: Handle to the device
* @id: Device Identifier
* @clcnt: Pointer to Context Loss Count
* @resets: pointer to resets
* @p_state: pointer to p_state
* @c_state: pointer to c_state
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
u32 id, u32 *clcnt, u32 *resets,
u8 *p_state, u8 *c_state)
{
struct ti_sci_info *info;
struct ti_sci_msg_req_get_device_state *req;
struct ti_sci_msg_resp_get_device_state *resp;
struct ti_sci_xfer *xfer;
struct device *dev;
int ret = 0;
if (IS_ERR(handle))
return PTR_ERR(handle);
if (!handle)
return -EINVAL;
if (!clcnt && !resets && !p_state && !c_state)
return -EINVAL;
info = handle_to_ti_sci_info(handle);
dev = info->dev;
xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "Message alloc failed(%d)\n", ret);
return ret;
}
req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf;
req->id = id;
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
dev_err(dev, "Mbox send fail %d\n", ret);
goto fail;
}
resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf;
if (!ti_sci_is_response_ack(resp)) {
ret = -ENODEV;
goto fail;
}
if (clcnt)
*clcnt = resp->context_loss_count;
if (resets)
*resets = resp->resets;
if (p_state)
*p_state = resp->programmed_state;
if (c_state)
*c_state = resp->current_state;
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
return ret;
}
/**
 * ti_sci_cmd_get_device() - command to request a device managed by TISCI
* that can be shared with other hosts.
* @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
* @id: Device Identifier
*
* Request for the device - NOTE: the client MUST maintain integrity of
* usage count by balancing get_device with put_device. No refcounting is
 * managed by the driver for that purpose.
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
{
return ti_sci_set_device_state(handle, id, 0,
MSG_DEVICE_SW_STATE_ON);
}
/**
 * ti_sci_cmd_get_device_exclusive() - command to request a device managed by
* TISCI that is exclusively owned by the
* requesting host.
* @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
* @id: Device Identifier
*
* Request for the device - NOTE: the client MUST maintain integrity of
* usage count by balancing get_device with put_device. No refcounting is
 * managed by the driver for that purpose.
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
u32 id)
{
return ti_sci_set_device_state(handle, id,
MSG_FLAG_DEVICE_EXCLUSIVE,
MSG_DEVICE_SW_STATE_ON);
}
/**
* ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
* @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
* @id: Device Identifier
*
* Request for the device - NOTE: the client MUST maintain integrity of
* usage count by balancing get_device with put_device. No refcounting is
 * managed by the driver for that purpose.
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
{
return ti_sci_set_device_state(handle, id, 0,
MSG_DEVICE_SW_STATE_RETENTION);
}
/**
* ti_sci_cmd_idle_device_exclusive() - Command to idle a device managed by
* TISCI that is exclusively owned by
* requesting host.
* @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
* @id: Device Identifier
*
* Request for the device - NOTE: the client MUST maintain integrity of
* usage count by balancing get_device with put_device. No refcounting is
 * managed by the driver for that purpose.
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
u32 id)
{
return ti_sci_set_device_state(handle, id,
MSG_FLAG_DEVICE_EXCLUSIVE,
MSG_DEVICE_SW_STATE_RETENTION);
}
/**
* ti_sci_cmd_put_device() - command to release a device managed by TISCI
* @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
* @id: Device Identifier
*
* Request for the device - NOTE: the client MUST maintain integrity of
* usage count by balancing get_device with put_device. No refcounting is
 * managed by the driver for that purpose.
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
{
return ti_sci_set_device_state(handle, id,
0, MSG_DEVICE_SW_STATE_AUTO_OFF);
}
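/*
 * Illustrative client-side sketch: the kernel-doc above insists that
 * get_device/put_device calls stay balanced because no refcounting is done
 * on behalf of the client. Assuming the public ti_sci_protocol.h dev_ops
 * table (.get_device/.put_device), a consumer would bracket its use of a
 * hardware block like this:
 */
static int __maybe_unused ti_sci_example_use_device(const struct ti_sci_handle *handle,
						    u32 dev_id)
{
	int ret;

	ret = handle->ops.dev_ops.get_device(handle, dev_id);
	if (ret)
		return ret;

	/* ... program and use the hardware block identified by dev_id ... */

	return handle->ops.dev_ops.put_device(handle, dev_id);
}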
/**
* ti_sci_cmd_dev_is_valid() - Is the device valid
* @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
* @id: Device Identifier
*
* Return: 0 if all went fine and the device ID is valid, else return
* appropriate error.
*/
static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
{
u8 unused;
/* check the device state which will also tell us if the ID is valid */
return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
}
/**
* ti_sci_cmd_dev_get_clcnt() - Get context loss counter
* @handle: Pointer to TISCI handle
* @id: Device Identifier
* @count: Pointer to Context Loss counter to populate
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
u32 *count)
{
return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
}
/**
* ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
* @handle: Pointer to TISCI handle
* @id: Device Identifier
* @r_state: true if requested to be idle
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
bool *r_state)
{
int ret;
u8 state;
if (!r_state)
return -EINVAL;
ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
if (ret)
return ret;
*r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);
return 0;
}
/**
* ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
* @handle: Pointer to TISCI handle
* @id: Device Identifier
* @r_state: true if requested to be stopped
* @curr_state: true if currently stopped.
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
bool *r_state, bool *curr_state)
{
int ret;
u8 p_state, c_state;
if (!r_state && !curr_state)
return -EINVAL;
ret =
ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
if (ret)
return ret;
if (r_state)
*r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
if (curr_state)
*curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
return 0;
}
/**
* ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
* @handle: Pointer to TISCI handle
* @id: Device Identifier
* @r_state: true if requested to be ON
* @curr_state: true if currently ON and active
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
bool *r_state, bool *curr_state)
{
int ret;
u8 p_state, c_state;
if (!r_state && !curr_state)
return -EINVAL;
ret =
ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
if (ret)
return ret;
if (r_state)
*r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
if (curr_state)
*curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
return 0;
}
/**
* ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
* @handle: Pointer to TISCI handle
* @id: Device Identifier
* @curr_state: true if currently transitioning.
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
bool *curr_state)
{
int ret;
u8 state;
if (!curr_state)
return -EINVAL;
ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
if (ret)
return ret;
*curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
return 0;
}
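/*
 * Illustrative sketch, assuming the public dev_ops table exposes the helper
 * above as .is_transitioning and that usleep_range() (linux/delay.h) is
 * available: poll until the firmware reports the device has settled out of
 * MSG_DEVICE_HW_STATE_TRANS before acting on its state.
 */
static int __maybe_unused ti_sci_example_wait_settled(const struct ti_sci_handle *handle,
						      u32 dev_id, int max_tries)
{
	bool trans = true;
	int ret;

	while (max_tries--) {
		ret = handle->ops.dev_ops.is_transitioning(handle, dev_id,
							   &trans);
		if (ret)
			return ret;
		if (!trans)
			return 0;
		usleep_range(100, 200);
	}
	return -ETIMEDOUT;
}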
/**
* ti_sci_cmd_set_device_resets() - command to set resets for device managed
* by TISCI
* @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
* @id: Device Identifier
* @reset_state: Device specific reset bit field
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
u32 id, u32 reset_state)
{
struct ti_sci_info *info;
struct ti_sci_msg_req_set_device_resets *req;
struct ti_sci_msg_hdr *resp;
struct ti_sci_xfer *xfer;
struct device *dev;
int ret = 0;
if (IS_ERR(handle))
return PTR_ERR(handle);
if (!handle)
return -EINVAL;
info = handle_to_ti_sci_info(handle);
dev = info->dev;
xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "Message alloc failed(%d)\n", ret);
return ret;
}
req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf;
req->id = id;
req->resets = reset_state;
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
dev_err(dev, "Mbox send fail %d\n", ret);
goto fail;
}
resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
return ret;
}
/**
* ti_sci_cmd_get_device_resets() - Get reset state for device managed
* by TISCI
* @handle: Pointer to TISCI handle
* @id: Device Identifier
* @reset_state: Pointer to reset state to populate
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
u32 id, u32 *reset_state)
{
return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
NULL);
}
/**
* ti_sci_set_clock_state() - Set clock state helper
* @handle: pointer to TI SCI handle
* @dev_id: Device identifier this request is for
* @clk_id: Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
* which clock input to modify.
* @flags: Header flags as needed
* @state: State to request for the clock.
*
* Return: 0 if all went well, else returns appropriate error value.
*/
static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
u32 dev_id, u32 clk_id,
u32 flags, u8 state)
{
struct ti_sci_info *info;
struct ti_sci_msg_req_set_clock_state *req;
struct ti_sci_msg_hdr *resp;
struct ti_sci_xfer *xfer;
struct device *dev;
int ret = 0;
if (IS_ERR(handle))
return PTR_ERR(handle);
if (!handle)
return -EINVAL;
info = handle_to_ti_sci_info(handle);
dev = info->dev;
xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "Message alloc failed(%d)\n", ret);
return ret;
}
req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf;
req->dev_id = dev_id;
if (clk_id < 255) {
req->clk_id = clk_id;
} else {
req->clk_id = 255;
req->clk_id_32 = clk_id;
}
req->request_state = state;
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
dev_err(dev, "Mbox send fail %d\n", ret);
goto fail;
}
resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
return ret;
}
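/*
 * Illustrative sketch of the clk_id encoding repeated in the clock commands
 * above and below: identifiers below 255 travel in the legacy 8-bit field,
 * anything larger sets the 8-bit field to 255 as an escape marker and uses
 * the 32-bit field instead. The structure here is a hypothetical stand-in
 * carrying just those two members.
 */
struct ti_sci_example_clk_id_fields {
	u8 clk_id;
	u32 clk_id_32;
};

static void __maybe_unused ti_sci_example_encode_clk_id(struct ti_sci_example_clk_id_fields *f,
							u32 clk_id)
{
	if (clk_id < 255) {
		f->clk_id = clk_id;
	} else {
		f->clk_id = 255;	/* escape marker */
		f->clk_id_32 = clk_id;	/* full 32-bit identifier */
	}
}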
/**
* ti_sci_cmd_get_clock_state() - Get clock state helper
* @handle: pointer to TI SCI handle
* @dev_id: Device identifier this request is for
* @clk_id: Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
* which clock input to modify.
* @programmed_state: State requested for clock to move to
* @current_state: State that the clock is currently in
*
* Return: 0 if all went well, else returns appropriate error value.
*/
static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
u32 dev_id, u32 clk_id,
u8 *programmed_state, u8 *current_state)
{
struct ti_sci_info *info;
struct ti_sci_msg_req_get_clock_state *req;
struct ti_sci_msg_resp_get_clock_state *resp;
struct ti_sci_xfer *xfer;
struct device *dev;
int ret = 0;
if (IS_ERR(handle))
return PTR_ERR(handle);
if (!handle)
return -EINVAL;
if (!programmed_state && !current_state)
return -EINVAL;
info = handle_to_ti_sci_info(handle);
dev = info->dev;
xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "Message alloc failed(%d)\n", ret);
return ret;
}
req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf;
req->dev_id = dev_id;
if (clk_id < 255) {
req->clk_id = clk_id;
} else {
req->clk_id = 255;
req->clk_id_32 = clk_id;
}
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
dev_err(dev, "Mbox send fail %d\n", ret);
goto fail;
}
resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf;
if (!ti_sci_is_response_ack(resp)) {
ret = -ENODEV;
goto fail;
}
if (programmed_state)
*programmed_state = resp->programmed_state;
if (current_state)
*current_state = resp->current_state;
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
return ret;
}
/**
* ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
* @handle: pointer to TI SCI handle
* @dev_id: Device identifier this request is for
* @clk_id: Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
* which clock input to modify.
* @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
* @can_change_freq: 'true' if frequency change is desired, else 'false'
* @enable_input_term: 'true' if input termination is desired, else 'false'
*
* Return: 0 if all went well, else returns appropriate error value.
*/
static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
u32 clk_id, bool needs_ssc,
bool can_change_freq, bool enable_input_term)
{
u32 flags = 0;
flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
MSG_CLOCK_SW_STATE_REQ);
}
/**
* ti_sci_cmd_idle_clock() - Idle a clock which is in our control
* @handle: pointer to TI SCI handle
* @dev_id: Device identifier this request is for
* @clk_id: Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
* which clock input to modify.
*
* NOTE: This clock must have been requested by get_clock previously.
*
* Return: 0 if all went well, else returns appropriate error value.
*/
static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
u32 dev_id, u32 clk_id)
{
return ti_sci_set_clock_state(handle, dev_id, clk_id,
MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
MSG_CLOCK_SW_STATE_UNREQ);
}
/**
* ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
* @handle: pointer to TI SCI handle
* @dev_id: Device identifier this request is for
* @clk_id: Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
* which clock input to modify.
*
* NOTE: This clock must have been requested by get_clock previously.
*
* Return: 0 if all went well, else returns appropriate error value.
*/
static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
u32 dev_id, u32 clk_id)
{
return ti_sci_set_clock_state(handle, dev_id, clk_id,
MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
MSG_CLOCK_SW_STATE_AUTO);
}
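/*
 * Illustrative client-side sketch (assumes the public clk_ops table exposes
 * the helpers above as .get_clock and .put_clock): request a clock, run with
 * it, then hand it back to firmware control.
 */
static int __maybe_unused ti_sci_example_use_clock(const struct ti_sci_handle *handle,
						   u32 dev_id, u32 clk_id)
{
	int ret;

	/* No SSC, allow frequency changes, no input termination */
	ret = handle->ops.clk_ops.get_clock(handle, dev_id, clk_id,
					    false, true, false);
	if (ret)
		return ret;

	/* ... the clock is now requested and under our control ... */

	return handle->ops.clk_ops.put_clock(handle, dev_id, clk_id);
}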
/**
* ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
* @handle: pointer to TI SCI handle
* @dev_id: Device identifier this request is for
* @clk_id: Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
* which clock input to modify.
* @req_state: state indicating if the clock is auto managed
*
* Return: 0 if all went well, else returns appropriate error value.
*/
static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
u32 dev_id, u32 clk_id, bool *req_state)
{
u8 state = 0;
int ret;
if (!req_state)
return -EINVAL;
ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
if (ret)
return ret;
*req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
return 0;
}
/**
* ti_sci_cmd_clk_is_on() - Is the clock ON
* @handle: pointer to TI SCI handle
* @dev_id: Device identifier this request is for
* @clk_id: Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
* which clock input to modify.
* @req_state: state indicating if the clock is managed by us and enabled
* @curr_state: state indicating if the clock is ready for operation
*
* Return: 0 if all went well, else returns appropriate error value.
*/
static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
u32 clk_id, bool *req_state, bool *curr_state)
{
u8 c_state = 0, r_state = 0;
int ret;
if (!req_state && !curr_state)
return -EINVAL;
ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
&r_state, &c_state);
if (ret)
return ret;
if (req_state)
*req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
if (curr_state)
*curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
return 0;
}
/**
* ti_sci_cmd_clk_is_off() - Is the clock OFF
* @handle: pointer to TI SCI handle
* @dev_id: Device identifier this request is for
* @clk_id: Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
* which clock input to modify.
* @req_state: state indicating if the clock is managed by us and disabled
* @curr_state: state indicating if the clock is NOT ready for operation
*
* Return: 0 if all went well, else returns appropriate error value.
*/
static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
u32 clk_id, bool *req_state, bool *curr_state)
{
u8 c_state = 0, r_state = 0;
int ret;
if (!req_state && !curr_state)
return -EINVAL;
ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
&r_state, &c_state);
if (ret)
return ret;
if (req_state)
*req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
if (curr_state)
*curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
return 0;
}
/**
* ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
* @handle: pointer to TI SCI handle
* @dev_id: Device identifier this request is for
* @clk_id: Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
* which clock input to modify.
* @parent_id: Parent clock identifier to set
*
* Return: 0 if all went well, else returns appropriate error value.
*/
static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
u32 dev_id, u32 clk_id, u32 parent_id)
{
struct ti_sci_info *info;
struct ti_sci_msg_req_set_clock_parent *req;
struct ti_sci_msg_hdr *resp;
struct ti_sci_xfer *xfer;
struct device *dev;
int ret = 0;
if (IS_ERR(handle))
return PTR_ERR(handle);
if (!handle)
return -EINVAL;
info = handle_to_ti_sci_info(handle);
dev = info->dev;
xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "Message alloc failed(%d)\n", ret);
return ret;
}
req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
req->dev_id = dev_id;
if (clk_id < 255) {
req->clk_id = clk_id;
} else {
req->clk_id = 255;
req->clk_id_32 = clk_id;
}
if (parent_id < 255) {
req->parent_id = parent_id;
} else {
req->parent_id = 255;
req->parent_id_32 = parent_id;
}
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
dev_err(dev, "Mbox send fail %d\n", ret);
goto fail;
}
resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
return ret;
}
/**
* ti_sci_cmd_clk_get_parent() - Get current parent clock source
* @handle: pointer to TI SCI handle
* @dev_id: Device identifier this request is for
* @clk_id: Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
* which clock input to modify.
* @parent_id: Current clock parent
*
* Return: 0 if all went well, else returns appropriate error value.
*/
static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
u32 dev_id, u32 clk_id, u32 *parent_id)
{
struct ti_sci_info *info;
struct ti_sci_msg_req_get_clock_parent *req;
struct ti_sci_msg_resp_get_clock_parent *resp;
struct ti_sci_xfer *xfer;
struct device *dev;
int ret = 0;
if (IS_ERR(handle))
return PTR_ERR(handle);
if (!handle || !parent_id)
return -EINVAL;
info = handle_to_ti_sci_info(handle);
dev = info->dev;
xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "Message alloc failed(%d)\n", ret);
return ret;
}
req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
req->dev_id = dev_id;
if (clk_id < 255) {
req->clk_id = clk_id;
} else {
req->clk_id = 255;
req->clk_id_32 = clk_id;
}
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
dev_err(dev, "Mbox send fail %d\n", ret);
goto fail;
}
resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf;
if (!ti_sci_is_response_ack(resp)) {
ret = -ENODEV;
} else {
if (resp->parent_id < 255)
*parent_id = resp->parent_id;
else
*parent_id = resp->parent_id_32;
}
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
return ret;
}
/**
* ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
* @handle: pointer to TI SCI handle
* @dev_id: Device identifier this request is for
* @clk_id: Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
* which clock input to modify.
 * @num_parents:	Returns the number of parents of the current clock.
*
* Return: 0 if all went well, else returns appropriate error value.
*/
static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
u32 dev_id, u32 clk_id,
u32 *num_parents)
{
struct ti_sci_info *info;
struct ti_sci_msg_req_get_clock_num_parents *req;
struct ti_sci_msg_resp_get_clock_num_parents *resp;
struct ti_sci_xfer *xfer;
struct device *dev;
int ret = 0;
if (IS_ERR(handle))
return PTR_ERR(handle);
if (!handle || !num_parents)
return -EINVAL;
info = handle_to_ti_sci_info(handle);
dev = info->dev;
xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "Message alloc failed(%d)\n", ret);
return ret;
}
req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
req->dev_id = dev_id;
if (clk_id < 255) {
req->clk_id = clk_id;
} else {
req->clk_id = 255;
req->clk_id_32 = clk_id;
}
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
dev_err(dev, "Mbox send fail %d\n", ret);
goto fail;
}
resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf;
if (!ti_sci_is_response_ack(resp)) {
ret = -ENODEV;
} else {
if (resp->num_parents < 255)
*num_parents = resp->num_parents;
else
*num_parents = resp->num_parents_32;
}
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
return ret;
}
/**
* ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
* @handle: pointer to TI SCI handle
* @dev_id: Device identifier this request is for
* @clk_id: Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
* which clock input to modify.
* @min_freq: The minimum allowable frequency in Hz. This is the minimum
* allowable programmed frequency and does not account for clock
* tolerances and jitter.
* @target_freq: The target clock frequency in Hz. A frequency will be
 *		programmed as close to this target frequency as possible.
* @max_freq: The maximum allowable frequency in Hz. This is the maximum
* allowable programmed frequency and does not account for clock
* tolerances and jitter.
* @match_freq: Frequency match in Hz response.
*
* Return: 0 if all went well, else returns appropriate error value.
*/
static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
u32 dev_id, u32 clk_id, u64 min_freq,
u64 target_freq, u64 max_freq,
u64 *match_freq)
{
struct ti_sci_info *info;
struct ti_sci_msg_req_query_clock_freq *req;
struct ti_sci_msg_resp_query_clock_freq *resp;
struct ti_sci_xfer *xfer;
struct device *dev;
int ret = 0;
if (IS_ERR(handle))
return PTR_ERR(handle);
if (!handle || !match_freq)
return -EINVAL;
info = handle_to_ti_sci_info(handle);
dev = info->dev;
xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "Message alloc failed(%d)\n", ret);
return ret;
}
req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
req->dev_id = dev_id;
if (clk_id < 255) {
req->clk_id = clk_id;
} else {
req->clk_id = 255;
req->clk_id_32 = clk_id;
}
req->min_freq_hz = min_freq;
req->target_freq_hz = target_freq;
req->max_freq_hz = max_freq;
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
dev_err(dev, "Mbox send fail %d\n", ret);
goto fail;
}
resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf;
if (!ti_sci_is_response_ack(resp))
ret = -ENODEV;
else
*match_freq = resp->freq_hz;
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
return ret;
}
/**
* ti_sci_cmd_clk_set_freq() - Set a frequency for clock
* @handle: pointer to TI SCI handle
* @dev_id: Device identifier this request is for
* @clk_id: Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
* which clock input to modify.
* @min_freq: The minimum allowable frequency in Hz. This is the minimum
* allowable programmed frequency and does not account for clock
* tolerances and jitter.
* @target_freq: The target clock frequency in Hz. A frequency will be
 *		programmed as close to this target frequency as possible.
* @max_freq: The maximum allowable frequency in Hz. This is the maximum
* allowable programmed frequency and does not account for clock
* tolerances and jitter.
*
* Return: 0 if all went well, else returns appropriate error value.
*/
static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
u32 dev_id, u32 clk_id, u64 min_freq,
u64 target_freq, u64 max_freq)
{
struct ti_sci_info *info;
struct ti_sci_msg_req_set_clock_freq *req;
struct ti_sci_msg_hdr *resp;
struct ti_sci_xfer *xfer;
struct device *dev;
int ret = 0;
if (IS_ERR(handle))
return PTR_ERR(handle);
if (!handle)
return -EINVAL;
info = handle_to_ti_sci_info(handle);
dev = info->dev;
xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "Message alloc failed(%d)\n", ret);
return ret;
}
req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
req->dev_id = dev_id;
if (clk_id < 255) {
req->clk_id = clk_id;
} else {
req->clk_id = 255;
req->clk_id_32 = clk_id;
}
req->min_freq_hz = min_freq;
req->target_freq_hz = target_freq;
req->max_freq_hz = max_freq;
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
dev_err(dev, "Mbox send fail %d\n", ret);
goto fail;
}
resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
return ret;
}
/**
* ti_sci_cmd_clk_get_freq() - Get current frequency
* @handle: pointer to TI SCI handle
* @dev_id: Device identifier this request is for
* @clk_id: Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
* which clock input to modify.
 * @freq:	Current frequency in Hz
*
* Return: 0 if all went well, else returns appropriate error value.
*/
static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
u32 dev_id, u32 clk_id, u64 *freq)
{
struct ti_sci_info *info;
struct ti_sci_msg_req_get_clock_freq *req;
struct ti_sci_msg_resp_get_clock_freq *resp;
struct ti_sci_xfer *xfer;
struct device *dev;
int ret = 0;
if (IS_ERR(handle))
return PTR_ERR(handle);
if (!handle || !freq)
return -EINVAL;
info = handle_to_ti_sci_info(handle);
dev = info->dev;
xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "Message alloc failed(%d)\n", ret);
return ret;
}
req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
req->dev_id = dev_id;
if (clk_id < 255) {
req->clk_id = clk_id;
} else {
req->clk_id = 255;
req->clk_id_32 = clk_id;
}
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
dev_err(dev, "Mbox send fail %d\n", ret);
goto fail;
}
resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf;
if (!ti_sci_is_response_ack(resp))
ret = -ENODEV;
else
*freq = resp->freq_hz;
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
return ret;
}
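/*
 * Illustrative client-side sketch, assuming the public clk_ops table exposes
 * the helpers above as .get_best_match_freq, .set_freq and .get_freq: ask
 * firmware for the closest programmable rate within a window, program it,
 * then read back what was actually achieved.
 */
static int __maybe_unused ti_sci_example_retune_clock(const struct ti_sci_handle *handle,
						      u32 dev_id, u32 clk_id,
						      u64 min_hz, u64 target_hz,
						      u64 max_hz, u64 *actual_hz)
{
	u64 match_hz;
	int ret;

	ret = handle->ops.clk_ops.get_best_match_freq(handle, dev_id, clk_id,
						      min_hz, target_hz,
						      max_hz, &match_hz);
	if (ret)
		return ret;

	ret = handle->ops.clk_ops.set_freq(handle, dev_id, clk_id,
					   min_hz, match_hz, max_hz);
	if (ret)
		return ret;

	return handle->ops.clk_ops.get_freq(handle, dev_id, clk_id, actual_hz);
}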
static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
{
struct ti_sci_info *info;
struct ti_sci_msg_req_reboot *req;
struct ti_sci_msg_hdr *resp;
struct ti_sci_xfer *xfer;
struct device *dev;
int ret = 0;
if (IS_ERR(handle))
return PTR_ERR(handle);
if (!handle)
return -EINVAL;
info = handle_to_ti_sci_info(handle);
dev = info->dev;
xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET,
TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "Message alloc failed(%d)\n", ret);
return ret;
}
req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf;
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
dev_err(dev, "Mbox send fail %d\n", ret);
goto fail;
}
resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
if (!ti_sci_is_response_ack(resp))
ret = -ENODEV;
else
ret = 0;
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
return ret;
}
/**
* ti_sci_get_resource_range - Helper to get a range of resources assigned
* to a host. Resource is uniquely identified by
* type and subtype.
* @handle: Pointer to TISCI handle.
* @dev_id: TISCI device ID.
* @subtype: Resource assignment subtype that is being requested
* from the given device.
* @s_host: Host processor ID to which the resources are allocated
* @desc: Pointer to ti_sci_resource_desc to be updated with the
* resource range start index and number of resources
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
u32 dev_id, u8 subtype, u8 s_host,
struct ti_sci_resource_desc *desc)
{
struct ti_sci_msg_resp_get_resource_range *resp;
struct ti_sci_msg_req_get_resource_range *req;
struct ti_sci_xfer *xfer;
struct ti_sci_info *info;
struct device *dev;
int ret = 0;
if (IS_ERR(handle))
return PTR_ERR(handle);
if (!handle || !desc)
return -EINVAL;
info = handle_to_ti_sci_info(handle);
dev = info->dev;
xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "Message alloc failed(%d)\n", ret);
return ret;
}
req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf;
req->secondary_host = s_host;
req->type = dev_id & MSG_RM_RESOURCE_TYPE_MASK;
req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
dev_err(dev, "Mbox send fail %d\n", ret);
goto fail;
}
resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->xfer_buf;
if (!ti_sci_is_response_ack(resp)) {
ret = -ENODEV;
} else if (!resp->range_num && !resp->range_num_sec) {
		/* Neither of the two resource ranges is valid */
ret = -ENODEV;
} else {
desc->start = resp->range_start;
desc->num = resp->range_num;
desc->start_sec = resp->range_start_sec;
desc->num_sec = resp->range_num_sec;
}
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
return ret;
}
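/*
 * Illustrative sketch: walk both ranges returned in a ti_sci_resource_desc
 * (the primary start/num pair plus the secondary start_sec/num_sec pair, as
 * filled in above) and hand every resource index to a caller-supplied
 * hypothetical callback.
 */
static void __maybe_unused ti_sci_example_for_each_res(const struct ti_sci_resource_desc *desc,
						       void (*fn)(u16 index, void *data),
						       void *data)
{
	u16 i;

	for (i = 0; i < desc->num; i++)
		fn(desc->start + i, data);
	for (i = 0; i < desc->num_sec; i++)
		fn(desc->start_sec + i, data);
}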
/**
 * ti_sci_cmd_get_resource_range - Get a range of resources assigned to the
 *				   host that is the same as the TI SCI
 *				   interface host.
* @handle: Pointer to TISCI handle.
* @dev_id: TISCI device ID.
* @subtype: Resource assignment subtype that is being requested
* from the given device.
* @desc: Pointer to ti_sci_resource_desc to be updated with the
* resource range start index and number of resources
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
u32 dev_id, u8 subtype,
struct ti_sci_resource_desc *desc)
{
return ti_sci_get_resource_range(handle, dev_id, subtype,
TI_SCI_IRQ_SECONDARY_HOST_INVALID,
desc);
}
/**
* ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
* assigned to a specified host.
* @handle: Pointer to TISCI handle.
* @dev_id: TISCI device ID.
* @subtype: Resource assignment subtype that is being requested
* from the given device.
* @s_host: Host processor ID to which the resources are allocated
* @desc: Pointer to ti_sci_resource_desc to be updated with the
* resource range start index and number of resources
*
* Return: 0 if all went fine, else return appropriate error.
*/
static
int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
u32 dev_id, u8 subtype, u8 s_host,
struct ti_sci_resource_desc *desc)
{
return ti_sci_get_resource_range(handle, dev_id, subtype, s_host, desc);
}
/**
* ti_sci_manage_irq() - Helper api to configure/release the irq route between
* the requested source and destination
* @handle: Pointer to TISCI handle.
* @valid_params: Bit fields defining the validity of certain params
* @src_id: Device ID of the IRQ source
* @src_index: IRQ source index within the source device
* @dst_id: Device ID of the IRQ destination
* @dst_host_irq: IRQ number of the destination device
* @ia_id: Device ID of the IA, if the IRQ flows through this IA
* @vint: Virtual interrupt to be used within the IA
* @global_event: Global event number to be used for the requesting event
* @vint_status_bit: Virtual interrupt status bit to be used for the event
 * @s_host:		Secondary host ID for which the irq/event is being
 *			requested.
* @type: Request type irq set or release.
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_manage_irq(const struct ti_sci_handle *handle,
u32 valid_params, u16 src_id, u16 src_index,
u16 dst_id, u16 dst_host_irq, u16 ia_id, u16 vint,
u16 global_event, u8 vint_status_bit, u8 s_host,
u16 type)
{
struct ti_sci_msg_req_manage_irq *req;
struct ti_sci_msg_hdr *resp;
struct ti_sci_xfer *xfer;
struct ti_sci_info *info;
struct device *dev;
int ret = 0;
if (IS_ERR(handle))
return PTR_ERR(handle);
if (!handle)
return -EINVAL;
info = handle_to_ti_sci_info(handle);
dev = info->dev;
xfer = ti_sci_get_one_xfer(info, type, TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "Message alloc failed(%d)\n", ret);
return ret;
}
req = (struct ti_sci_msg_req_manage_irq *)xfer->xfer_buf;
req->valid_params = valid_params;
req->src_id = src_id;
req->src_index = src_index;
req->dst_id = dst_id;
req->dst_host_irq = dst_host_irq;
req->ia_id = ia_id;
req->vint = vint;
req->global_event = global_event;
req->vint_status_bit = vint_status_bit;
req->secondary_host = s_host;
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
dev_err(dev, "Mbox send fail %d\n", ret);
goto fail;
}
resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
return ret;
}
/**
* ti_sci_set_irq() - Helper api to configure the irq route between the
* requested source and destination
* @handle: Pointer to TISCI handle.
* @valid_params: Bit fields defining the validity of certain params
* @src_id: Device ID of the IRQ source
* @src_index: IRQ source index within the source device
* @dst_id: Device ID of the IRQ destination
* @dst_host_irq: IRQ number of the destination device
* @ia_id: Device ID of the IA, if the IRQ flows through this IA
* @vint: Virtual interrupt to be used within the IA
* @global_event: Global event number to be used for the requesting event
* @vint_status_bit: Virtual interrupt status bit to be used for the event
 * @s_host:		Secondary host ID for which the irq/event is being
 *			requested.
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_set_irq(const struct ti_sci_handle *handle, u32 valid_params,
u16 src_id, u16 src_index, u16 dst_id,
u16 dst_host_irq, u16 ia_id, u16 vint,
u16 global_event, u8 vint_status_bit, u8 s_host)
{
pr_debug("%s: IRQ set with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d,via ia_id = %d, vint = %d, global event = %d,status_bit = %d\n",
__func__, valid_params, src_id, src_index,
dst_id, dst_host_irq, ia_id, vint, global_event,
vint_status_bit);
return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
dst_id, dst_host_irq, ia_id, vint,
global_event, vint_status_bit, s_host,
TI_SCI_MSG_SET_IRQ);
}
/**
* ti_sci_free_irq() - Helper api to free the irq route between the
* requested source and destination
* @handle: Pointer to TISCI handle.
* @valid_params: Bit fields defining the validity of certain params
* @src_id: Device ID of the IRQ source
* @src_index: IRQ source index within the source device
* @dst_id: Device ID of the IRQ destination
* @dst_host_irq: IRQ number of the destination device
* @ia_id: Device ID of the IA, if the IRQ flows through this IA
* @vint: Virtual interrupt to be used within the IA
* @global_event: Global event number to be used for the requesting event
* @vint_status_bit: Virtual interrupt status bit to be used for the event
 * @s_host:		Secondary host ID for which the irq/event is being
 *			requested.
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_free_irq(const struct ti_sci_handle *handle, u32 valid_params,
u16 src_id, u16 src_index, u16 dst_id,
u16 dst_host_irq, u16 ia_id, u16 vint,
u16 global_event, u8 vint_status_bit, u8 s_host)
{
pr_debug("%s: IRQ release with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d,via ia_id = %d, vint = %d, global event = %d,status_bit = %d\n",
__func__, valid_params, src_id, src_index,
dst_id, dst_host_irq, ia_id, vint, global_event,
vint_status_bit);
return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
dst_id, dst_host_irq, ia_id, vint,
global_event, vint_status_bit, s_host,
TI_SCI_MSG_FREE_IRQ);
}
/**
* ti_sci_cmd_set_irq() - Configure a host irq route between the requested
* source and destination.
* @handle: Pointer to TISCI handle.
* @src_id: Device ID of the IRQ source
* @src_index: IRQ source index within the source device
* @dst_id: Device ID of the IRQ destination
* @dst_host_irq: IRQ number of the destination device
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_cmd_set_irq(const struct ti_sci_handle *handle, u16 src_id,
u16 src_index, u16 dst_id, u16 dst_host_irq)
{
u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
return ti_sci_set_irq(handle, valid_params, src_id, src_index, dst_id,
dst_host_irq, 0, 0, 0, 0, 0);
}
/**
* ti_sci_cmd_set_event_map() - Configure an event based irq route between the
* requested source and Interrupt Aggregator.
* @handle: Pointer to TISCI handle.
* @src_id: Device ID of the IRQ source
* @src_index: IRQ source index within the source device
* @ia_id: Device ID of the IA, if the IRQ flows through this IA
* @vint: Virtual interrupt to be used within the IA
* @global_event: Global event number to be used for the requesting event
* @vint_status_bit: Virtual interrupt status bit to be used for the event
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_cmd_set_event_map(const struct ti_sci_handle *handle,
u16 src_id, u16 src_index, u16 ia_id,
u16 vint, u16 global_event,
u8 vint_status_bit)
{
u32 valid_params = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID |
MSG_FLAG_GLB_EVNT_VALID |
MSG_FLAG_VINT_STS_BIT_VALID;
return ti_sci_set_irq(handle, valid_params, src_id, src_index, 0, 0,
ia_id, vint, global_event, vint_status_bit, 0);
}
/**
 * ti_sci_cmd_free_irq() - Free a host irq route between the requested
 *			    source and destination.
* @handle: Pointer to TISCI handle.
* @src_id: Device ID of the IRQ source
* @src_index: IRQ source index within the source device
* @dst_id: Device ID of the IRQ destination
* @dst_host_irq: IRQ number of the destination device
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_cmd_free_irq(const struct ti_sci_handle *handle, u16 src_id,
u16 src_index, u16 dst_id, u16 dst_host_irq)
{
u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
return ti_sci_free_irq(handle, valid_params, src_id, src_index, dst_id,
dst_host_irq, 0, 0, 0, 0, 0);
}
/**
* ti_sci_cmd_free_event_map() - Free an event map between the requested source
* and Interrupt Aggregator.
* @handle: Pointer to TISCI handle.
* @src_id: Device ID of the IRQ source
* @src_index: IRQ source index within the source device
* @ia_id: Device ID of the IA, if the IRQ flows through this IA
* @vint: Virtual interrupt to be used within the IA
* @global_event: Global event number to be used for the requesting event
* @vint_status_bit: Virtual interrupt status bit to be used for the event
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle,
u16 src_id, u16 src_index, u16 ia_id,
u16 vint, u16 global_event,
u8 vint_status_bit)
{
u32 valid_params = MSG_FLAG_IA_ID_VALID |
MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID |
MSG_FLAG_VINT_STS_BIT_VALID;
return ti_sci_free_irq(handle, valid_params, src_id, src_index, 0, 0,
ia_id, vint, global_event, vint_status_bit, 0);
}
/**
* ti_sci_cmd_rm_ring_cfg() - Configure a NAVSS ring
* @handle: Pointer to TI SCI handle.
* @params: Pointer to ti_sci_msg_rm_ring_cfg ring config structure
*
* Return: 0 if all went well, else returns appropriate error value.
*
* See @ti_sci_msg_rm_ring_cfg and @ti_sci_msg_rm_ring_cfg_req for
* more info.
*/
static int ti_sci_cmd_rm_ring_cfg(const struct ti_sci_handle *handle,
const struct ti_sci_msg_rm_ring_cfg *params)
{
struct ti_sci_msg_rm_ring_cfg_req *req;
struct ti_sci_msg_hdr *resp;
struct ti_sci_xfer *xfer;
struct ti_sci_info *info;
struct device *dev;
int ret = 0;
if (IS_ERR_OR_NULL(handle))
return -EINVAL;
info = handle_to_ti_sci_info(handle);
dev = info->dev;
xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "RM_RA:Message config failed(%d)\n", ret);
return ret;
}
req = (struct ti_sci_msg_rm_ring_cfg_req *)xfer->xfer_buf;
req->valid_params = params->valid_params;
req->nav_id = params->nav_id;
req->index = params->index;
req->addr_lo = params->addr_lo;
req->addr_hi = params->addr_hi;
req->count = params->count;
req->mode = params->mode;
req->size = params->size;
req->order_id = params->order_id;
req->virtid = params->virtid;
req->asel = params->asel;
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
dev_err(dev, "RM_RA:Mbox config send fail %d\n", ret);
goto fail;
}
resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", params->index, ret);
return ret;
}
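/*
 * Illustrative sketch: build a minimal ti_sci_msg_rm_ring_cfg and hand it to
 * the helper above. Which valid_params bits correspond to the fields filled
 * in below is defined in the public header, so it is left to the caller.
 */
static int __maybe_unused ti_sci_example_ring_cfg(const struct ti_sci_handle *handle,
						  u32 valid_params, u16 nav_id,
						  u16 index, u32 count, u8 mode)
{
	struct ti_sci_msg_rm_ring_cfg params = { 0 };

	params.valid_params = valid_params;	/* bits for the fields below */
	params.nav_id = nav_id;			/* Navigator Subsystem dev ID */
	params.index = index;			/* ring index to configure */
	params.count = count;			/* number of ring elements */
	params.mode = mode;			/* ring mode */

	return ti_sci_cmd_rm_ring_cfg(handle, &params);
}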
/**
* ti_sci_cmd_rm_psil_pair() - Pair PSI-L source to destination thread
* @handle: Pointer to TI SCI handle.
* @nav_id: Device ID of Navigator Subsystem which should be used for
* pairing
* @src_thread: Source PSI-L thread ID
* @dst_thread: Destination PSI-L thread ID
*
* Return: 0 if all went well, else returns appropriate error value.
*/
static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
u32 nav_id, u32 src_thread, u32 dst_thread)
{
struct ti_sci_msg_psil_pair *req;
struct ti_sci_msg_hdr *resp;
struct ti_sci_xfer *xfer;
struct ti_sci_info *info;
struct device *dev;
int ret = 0;
if (IS_ERR(handle))
return PTR_ERR(handle);
if (!handle)
return -EINVAL;
info = handle_to_ti_sci_info(handle);
dev = info->dev;
xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
return ret;
}
req = (struct ti_sci_msg_psil_pair *)xfer->xfer_buf;
req->nav_id = nav_id;
req->src_thread = src_thread;
req->dst_thread = dst_thread;
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
goto fail;
}
resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
return ret;
}
/**
* ti_sci_cmd_rm_psil_unpair() - Unpair PSI-L source from destination thread
* @handle: Pointer to TI SCI handle.
* @nav_id: Device ID of Navigator Subsystem which should be used for
* unpairing
* @src_thread: Source PSI-L thread ID
* @dst_thread: Destination PSI-L thread ID
*
* Return: 0 if all went well, else returns appropriate error value.
*/
static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
u32 nav_id, u32 src_thread, u32 dst_thread)
{
struct ti_sci_msg_psil_unpair *req;
struct ti_sci_msg_hdr *resp;
struct ti_sci_xfer *xfer;
struct ti_sci_info *info;
struct device *dev;
int ret = 0;
if (IS_ERR(handle))
return PTR_ERR(handle);
if (!handle)
return -EINVAL;
info = handle_to_ti_sci_info(handle);
dev = info->dev;
xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
return ret;
}
req = (struct ti_sci_msg_psil_unpair *)xfer->xfer_buf;
req->nav_id = nav_id;
req->src_thread = src_thread;
req->dst_thread = dst_thread;
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
goto fail;
}
resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
return ret;
}
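/*
 * Illustrative sketch: PSI-L threads are paired for the lifetime of a DMA
 * channel and must be unpaired again on teardown, using the two helpers
 * above back to back.
 */
static int __maybe_unused ti_sci_example_psil_session(const struct ti_sci_handle *handle,
						      u32 nav_id, u32 src_thread,
						      u32 dst_thread)
{
	int ret;

	ret = ti_sci_cmd_rm_psil_pair(handle, nav_id, src_thread, dst_thread);
	if (ret)
		return ret;

	/* ... DMA traffic flows between the paired threads here ... */

	return ti_sci_cmd_rm_psil_unpair(handle, nav_id, src_thread, dst_thread);
}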
/**
* ti_sci_cmd_rm_udmap_tx_ch_cfg() - Configure a UDMAP TX channel
* @handle: Pointer to TI SCI handle.
* @params: Pointer to ti_sci_msg_rm_udmap_tx_ch_cfg TX channel config
* structure
*
* Return: 0 if all went well, else returns appropriate error value.
*
* See @ti_sci_msg_rm_udmap_tx_ch_cfg and @ti_sci_msg_rm_udmap_tx_ch_cfg_req for
* more info.
*/
static int ti_sci_cmd_rm_udmap_tx_ch_cfg(const struct ti_sci_handle *handle,
const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
{
struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *req;
struct ti_sci_msg_hdr *resp;
struct ti_sci_xfer *xfer;
struct ti_sci_info *info;
struct device *dev;
int ret = 0;
if (IS_ERR_OR_NULL(handle))
return -EINVAL;
info = handle_to_ti_sci_info(handle);
dev = info->dev;
xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "Message TX_CH_CFG alloc failed(%d)\n", ret);
return ret;
}
req = (struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *)xfer->xfer_buf;
req->valid_params = params->valid_params;
req->nav_id = params->nav_id;
req->index = params->index;
req->tx_pause_on_err = params->tx_pause_on_err;
req->tx_filt_einfo = params->tx_filt_einfo;
req->tx_filt_pswords = params->tx_filt_pswords;
req->tx_atype = params->tx_atype;
req->tx_chan_type = params->tx_chan_type;
req->tx_supr_tdpkt = params->tx_supr_tdpkt;
req->tx_fetch_size = params->tx_fetch_size;
req->tx_credit_count = params->tx_credit_count;
req->txcq_qnum = params->txcq_qnum;
req->tx_priority = params->tx_priority;
req->tx_qos = params->tx_qos;
req->tx_orderid = params->tx_orderid;
req->fdepth = params->fdepth;
req->tx_sched_priority = params->tx_sched_priority;
req->tx_burst_size = params->tx_burst_size;
req->tx_tdtype = params->tx_tdtype;
req->extended_ch_type = params->extended_ch_type;
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
dev_err(dev, "Mbox send TX_CH_CFG fail %d\n", ret);
goto fail;
}
resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
dev_dbg(dev, "TX_CH_CFG: chn %u ret:%u\n", params->index, ret);
return ret;
}
/**
* ti_sci_cmd_rm_udmap_rx_ch_cfg() - Configure a UDMAP RX channel
* @handle: Pointer to TI SCI handle.
* @params: Pointer to ti_sci_msg_rm_udmap_rx_ch_cfg RX channel config
* structure
*
* Return: 0 if all went well, else returns appropriate error value.
*
* See @ti_sci_msg_rm_udmap_rx_ch_cfg and @ti_sci_msg_rm_udmap_rx_ch_cfg_req for
* more info.
*/
static int ti_sci_cmd_rm_udmap_rx_ch_cfg(const struct ti_sci_handle *handle,
const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
{
struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *req;
struct ti_sci_msg_hdr *resp;
struct ti_sci_xfer *xfer;
struct ti_sci_info *info;
struct device *dev;
int ret = 0;
if (IS_ERR_OR_NULL(handle))
return -EINVAL;
info = handle_to_ti_sci_info(handle);
dev = info->dev;
xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
return ret;
}
req = (struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *)xfer->xfer_buf;
req->valid_params = params->valid_params;
req->nav_id = params->nav_id;
req->index = params->index;
req->rx_fetch_size = params->rx_fetch_size;
req->rxcq_qnum = params->rxcq_qnum;
req->rx_priority = params->rx_priority;
req->rx_qos = params->rx_qos;
req->rx_orderid = params->rx_orderid;
req->rx_sched_priority = params->rx_sched_priority;
req->flowid_start = params->flowid_start;
req->flowid_cnt = params->flowid_cnt;
req->rx_pause_on_err = params->rx_pause_on_err;
req->rx_atype = params->rx_atype;
req->rx_chan_type = params->rx_chan_type;
req->rx_ignore_short = params->rx_ignore_short;
req->rx_ignore_long = params->rx_ignore_long;
req->rx_burst_size = params->rx_burst_size;
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
dev_err(dev, "Mbox send RX_CH_CFG fail %d\n", ret);
goto fail;
}
resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
dev_dbg(dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
return ret;
}
/**
* ti_sci_cmd_rm_udmap_rx_flow_cfg() - Configure UDMAP RX FLOW
* @handle: Pointer to TI SCI handle.
* @params: Pointer to ti_sci_msg_rm_udmap_flow_cfg RX FLOW config
* structure
*
* Return: 0 if all went well, else returns appropriate error value.
*
* See @ti_sci_msg_rm_udmap_flow_cfg and @ti_sci_msg_rm_udmap_flow_cfg_req for
* more info.
*/
static int ti_sci_cmd_rm_udmap_rx_flow_cfg(const struct ti_sci_handle *handle,
const struct ti_sci_msg_rm_udmap_flow_cfg *params)
{
struct ti_sci_msg_rm_udmap_flow_cfg_req *req;
struct ti_sci_msg_hdr *resp;
struct ti_sci_xfer *xfer;
struct ti_sci_info *info;
struct device *dev;
int ret = 0;
if (IS_ERR_OR_NULL(handle))
return -EINVAL;
info = handle_to_ti_sci_info(handle);
dev = info->dev;
xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "RX_FL_CFG: Message alloc failed(%d)\n", ret);
return ret;
}
req = (struct ti_sci_msg_rm_udmap_flow_cfg_req *)xfer->xfer_buf;
req->valid_params = params->valid_params;
req->nav_id = params->nav_id;
req->flow_index = params->flow_index;
req->rx_einfo_present = params->rx_einfo_present;
req->rx_psinfo_present = params->rx_psinfo_present;
req->rx_error_handling = params->rx_error_handling;
req->rx_desc_type = params->rx_desc_type;
req->rx_sop_offset = params->rx_sop_offset;
req->rx_dest_qnum = params->rx_dest_qnum;
req->rx_src_tag_hi = params->rx_src_tag_hi;
req->rx_src_tag_lo = params->rx_src_tag_lo;
req->rx_dest_tag_hi = params->rx_dest_tag_hi;
req->rx_dest_tag_lo = params->rx_dest_tag_lo;
req->rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
req->rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
req->rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
req->rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
req->rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
req->rx_fdq1_qnum = params->rx_fdq1_qnum;
req->rx_fdq2_qnum = params->rx_fdq2_qnum;
req->rx_fdq3_qnum = params->rx_fdq3_qnum;
req->rx_ps_location = params->rx_ps_location;
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
dev_err(dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
goto fail;
}
resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
return ret;
}
/**
* ti_sci_cmd_proc_request() - Command to request a physical processor control
* @handle: Pointer to TI SCI handle
* @proc_id: Processor ID this request is for
*
* Return: 0 if all went well, else returns appropriate error value.
*/
static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
u8 proc_id)
{
struct ti_sci_msg_req_proc_request *req;
struct ti_sci_msg_hdr *resp;
struct ti_sci_info *info;
struct ti_sci_xfer *xfer;
struct device *dev;
int ret = 0;
if (!handle)
return -EINVAL;
if (IS_ERR(handle))
return PTR_ERR(handle);
info = handle_to_ti_sci_info(handle);
dev = info->dev;
xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_REQUEST,
TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "Message alloc failed(%d)\n", ret);
return ret;
}
req = (struct ti_sci_msg_req_proc_request *)xfer->xfer_buf;
req->processor_id = proc_id;
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
dev_err(dev, "Mbox send fail %d\n", ret);
goto fail;
}
resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
return ret;
}
/**
* ti_sci_cmd_proc_release() - Command to release a physical processor control
* @handle: Pointer to TI SCI handle
* @proc_id: Processor ID this request is for
*
* Return: 0 if all went well, else returns appropriate error value.
*/
static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
u8 proc_id)
{
struct ti_sci_msg_req_proc_release *req;
struct ti_sci_msg_hdr *resp;
struct ti_sci_info *info;
struct ti_sci_xfer *xfer;
struct device *dev;
int ret = 0;
if (!handle)
return -EINVAL;
if (IS_ERR(handle))
return PTR_ERR(handle);
info = handle_to_ti_sci_info(handle);
dev = info->dev;
xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_RELEASE,
TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "Message alloc failed(%d)\n", ret);
return ret;
}
req = (struct ti_sci_msg_req_proc_release *)xfer->xfer_buf;
req->processor_id = proc_id;
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
dev_err(dev, "Mbox send fail %d\n", ret);
goto fail;
}
resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
return ret;
}
/**
* ti_sci_cmd_proc_handover() - Command to handover a physical processor
* control to a host in the processor's access
* control list.
* @handle: Pointer to TI SCI handle
* @proc_id: Processor ID this request is for
* @host_id: Host ID to get the control of the processor
*
* Return: 0 if all went well, else returns appropriate error value.
*/
static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
u8 proc_id, u8 host_id)
{
struct ti_sci_msg_req_proc_handover *req;
struct ti_sci_msg_hdr *resp;
struct ti_sci_info *info;
struct ti_sci_xfer *xfer;
struct device *dev;
int ret = 0;
if (!handle)
return -EINVAL;
if (IS_ERR(handle))
return PTR_ERR(handle);
info = handle_to_ti_sci_info(handle);
dev = info->dev;
xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_HANDOVER,
TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "Message alloc failed(%d)\n", ret);
return ret;
}
req = (struct ti_sci_msg_req_proc_handover *)xfer->xfer_buf;
req->processor_id = proc_id;
req->host_id = host_id;
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
dev_err(dev, "Mbox send fail %d\n", ret);
goto fail;
}
resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
return ret;
}
/**
* ti_sci_cmd_proc_set_config() - Command to set the processor boot
* configuration flags
* @handle: Pointer to TI SCI handle
* @proc_id: Processor ID this request is for
* @bootvector: Processor Boot vector (start address)
* @config_flags_set: Configuration flags to be set
* @config_flags_clear: Configuration flags to be cleared.
*
* Return: 0 if all went well, else returns appropriate error value.
*/
static int ti_sci_cmd_proc_set_config(const struct ti_sci_handle *handle,
u8 proc_id, u64 bootvector,
u32 config_flags_set,
u32 config_flags_clear)
{
struct ti_sci_msg_req_set_config *req;
struct ti_sci_msg_hdr *resp;
struct ti_sci_info *info;
struct ti_sci_xfer *xfer;
struct device *dev;
int ret = 0;
if (!handle)
return -EINVAL;
if (IS_ERR(handle))
return PTR_ERR(handle);
info = handle_to_ti_sci_info(handle);
dev = info->dev;
xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CONFIG,
TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "Message alloc failed(%d)\n", ret);
return ret;
}
req = (struct ti_sci_msg_req_set_config *)xfer->xfer_buf;
req->processor_id = proc_id;
req->bootvector_low = bootvector & TI_SCI_ADDR_LOW_MASK;
req->bootvector_high = (bootvector & TI_SCI_ADDR_HIGH_MASK) >>
TI_SCI_ADDR_HIGH_SHIFT;
req->config_flags_set = config_flags_set;
req->config_flags_clear = config_flags_clear;
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
dev_err(dev, "Mbox send fail %d\n", ret);
goto fail;
}
resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
return ret;
}
/**
* ti_sci_cmd_proc_set_control() - Command to set the processor boot
* control flags
* @handle: Pointer to TI SCI handle
* @proc_id: Processor ID this request is for
* @control_flags_set: Control flags to be set
* @control_flags_clear: Control flags to be cleared
*
* Return: 0 if all went well, else returns appropriate error value.
*/
static int ti_sci_cmd_proc_set_control(const struct ti_sci_handle *handle,
u8 proc_id, u32 control_flags_set,
u32 control_flags_clear)
{
struct ti_sci_msg_req_set_ctrl *req;
struct ti_sci_msg_hdr *resp;
struct ti_sci_info *info;
struct ti_sci_xfer *xfer;
struct device *dev;
int ret = 0;
if (!handle)
return -EINVAL;
if (IS_ERR(handle))
return PTR_ERR(handle);
info = handle_to_ti_sci_info(handle);
dev = info->dev;
xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CTRL,
TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "Message alloc failed(%d)\n", ret);
return ret;
}
req = (struct ti_sci_msg_req_set_ctrl *)xfer->xfer_buf;
req->processor_id = proc_id;
req->control_flags_set = control_flags_set;
req->control_flags_clear = control_flags_clear;
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
dev_err(dev, "Mbox send fail %d\n", ret);
goto fail;
}
resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
return ret;
}
/**
* ti_sci_cmd_proc_get_status() - Command to get the processor boot status
* @handle: Pointer to TI SCI handle
* @proc_id: Processor ID this request is for
* @bv: Processor Boot vector (start address)
* @cfg_flags: Processor specific configuration flags
* @ctrl_flags: Processor specific control flags
* @sts_flags: Processor specific status flags
*
* Return: 0 if all went well, else returns appropriate error value.
*/
static int ti_sci_cmd_proc_get_status(const struct ti_sci_handle *handle,
u8 proc_id, u64 *bv, u32 *cfg_flags,
u32 *ctrl_flags, u32 *sts_flags)
{
struct ti_sci_msg_resp_get_status *resp;
struct ti_sci_msg_req_get_status *req;
struct ti_sci_info *info;
struct ti_sci_xfer *xfer;
struct device *dev;
int ret = 0;
if (!handle)
return -EINVAL;
if (IS_ERR(handle))
return PTR_ERR(handle);
info = handle_to_ti_sci_info(handle);
dev = info->dev;
xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_STATUS,
TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "Message alloc failed(%d)\n", ret);
return ret;
}
req = (struct ti_sci_msg_req_get_status *)xfer->xfer_buf;
req->processor_id = proc_id;
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
dev_err(dev, "Mbox send fail %d\n", ret);
goto fail;
}
resp = (struct ti_sci_msg_resp_get_status *)xfer->tx_message.buf;
if (!ti_sci_is_response_ack(resp)) {
ret = -ENODEV;
} else {
*bv = (resp->bootvector_low & TI_SCI_ADDR_LOW_MASK) |
(((u64)resp->bootvector_high << TI_SCI_ADDR_HIGH_SHIFT) &
TI_SCI_ADDR_HIGH_MASK);
*cfg_flags = resp->config_flags;
*ctrl_flags = resp->control_flags;
*sts_flags = resp->status_flags;
}
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
return ret;
}
/*
* ti_sci_setup_ops() - Setup the operations structures
 * @info: pointer to TISCI info structure
*/
static void ti_sci_setup_ops(struct ti_sci_info *info)
{
struct ti_sci_ops *ops = &info->handle.ops;
struct ti_sci_core_ops *core_ops = &ops->core_ops;
struct ti_sci_dev_ops *dops = &ops->dev_ops;
struct ti_sci_clk_ops *cops = &ops->clk_ops;
struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops;
struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
struct ti_sci_proc_ops *pops = &ops->proc_ops;
core_ops->reboot_device = ti_sci_cmd_core_reboot;
dops->get_device = ti_sci_cmd_get_device;
dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive;
dops->idle_device = ti_sci_cmd_idle_device;
dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive;
dops->put_device = ti_sci_cmd_put_device;
dops->is_valid = ti_sci_cmd_dev_is_valid;
dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
dops->is_idle = ti_sci_cmd_dev_is_idle;
dops->is_stop = ti_sci_cmd_dev_is_stop;
dops->is_on = ti_sci_cmd_dev_is_on;
dops->is_transitioning = ti_sci_cmd_dev_is_trans;
dops->set_device_resets = ti_sci_cmd_set_device_resets;
dops->get_device_resets = ti_sci_cmd_get_device_resets;
cops->get_clock = ti_sci_cmd_get_clock;
cops->idle_clock = ti_sci_cmd_idle_clock;
cops->put_clock = ti_sci_cmd_put_clock;
cops->is_auto = ti_sci_cmd_clk_is_auto;
cops->is_on = ti_sci_cmd_clk_is_on;
cops->is_off = ti_sci_cmd_clk_is_off;
cops->set_parent = ti_sci_cmd_clk_set_parent;
cops->get_parent = ti_sci_cmd_clk_get_parent;
cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
cops->set_freq = ti_sci_cmd_clk_set_freq;
cops->get_freq = ti_sci_cmd_clk_get_freq;
rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
rm_core_ops->get_range_from_shost =
ti_sci_cmd_get_resource_range_from_shost;
iops->set_irq = ti_sci_cmd_set_irq;
iops->set_event_map = ti_sci_cmd_set_event_map;
iops->free_irq = ti_sci_cmd_free_irq;
iops->free_event_map = ti_sci_cmd_free_event_map;
rops->set_cfg = ti_sci_cmd_rm_ring_cfg;
psilops->pair = ti_sci_cmd_rm_psil_pair;
psilops->unpair = ti_sci_cmd_rm_psil_unpair;
udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
pops->request = ti_sci_cmd_proc_request;
pops->release = ti_sci_cmd_proc_release;
pops->handover = ti_sci_cmd_proc_handover;
pops->set_config = ti_sci_cmd_proc_set_config;
pops->set_control = ti_sci_cmd_proc_set_control;
pops->get_status = ti_sci_cmd_proc_get_status;
}
/**
* ti_sci_get_handle() - Get the TI SCI handle for a device
* @dev: Pointer to device for which we want SCI handle
*
* NOTE: The function does not track individual clients of the framework
* and is expected to be maintained by caller of TI SCI protocol library.
* ti_sci_put_handle must be balanced with successful ti_sci_get_handle
* Return: pointer to handle if successful, else:
* -EPROBE_DEFER if the instance is not ready
* -ENODEV if the required node handler is missing
* -EINVAL if invalid conditions are encountered.
*/
const struct ti_sci_handle *ti_sci_get_handle(struct device *dev)
{
struct device_node *ti_sci_np;
struct list_head *p;
struct ti_sci_handle *handle = NULL;
struct ti_sci_info *info;
if (!dev) {
pr_err("I need a device pointer\n");
return ERR_PTR(-EINVAL);
}
ti_sci_np = of_get_parent(dev->of_node);
if (!ti_sci_np) {
dev_err(dev, "No OF information\n");
return ERR_PTR(-EINVAL);
}
mutex_lock(&ti_sci_list_mutex);
list_for_each(p, &ti_sci_list) {
info = list_entry(p, struct ti_sci_info, node);
if (ti_sci_np == info->dev->of_node) {
handle = &info->handle;
info->users++;
break;
}
}
mutex_unlock(&ti_sci_list_mutex);
of_node_put(ti_sci_np);
if (!handle)
return ERR_PTR(-EPROBE_DEFER);
return handle;
}
EXPORT_SYMBOL_GPL(ti_sci_get_handle);
/**
* ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle
* @handle: Handle acquired by ti_sci_get_handle
*
* NOTE: The function does not track individual clients of the framework
* and is expected to be maintained by caller of TI SCI protocol library.
* ti_sci_put_handle must be balanced with successful ti_sci_get_handle
*
 * Return: 0 if successfully released,
 * if an error pointer was passed, the error value is returned back,
 * if NULL was passed, -EINVAL is returned.
*/
int ti_sci_put_handle(const struct ti_sci_handle *handle)
{
struct ti_sci_info *info;
if (IS_ERR(handle))
return PTR_ERR(handle);
if (!handle)
return -EINVAL;
info = handle_to_ti_sci_info(handle);
mutex_lock(&ti_sci_list_mutex);
if (!WARN_ON(!info->users))
info->users--;
mutex_unlock(&ti_sci_list_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(ti_sci_put_handle);
static void devm_ti_sci_release(struct device *dev, void *res)
{
const struct ti_sci_handle **ptr = res;
const struct ti_sci_handle *handle = *ptr;
int ret;
ret = ti_sci_put_handle(handle);
if (ret)
dev_err(dev, "failed to put handle %d\n", ret);
}
/**
* devm_ti_sci_get_handle() - Managed get handle
* @dev: device for which we want SCI handle for.
*
* NOTE: This releases the handle once the device resources are
* no longer needed. MUST NOT BE released with ti_sci_put_handle.
* The function does not track individual clients of the framework
* and is expected to be maintained by caller of TI SCI protocol library.
*
 * Return: pointer to handle if all went fine, else corresponding error pointer.
*/
const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
{
const struct ti_sci_handle **ptr;
const struct ti_sci_handle *handle;
ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
handle = ti_sci_get_handle(dev);
if (!IS_ERR(handle)) {
*ptr = handle;
devres_add(dev, ptr);
} else {
devres_free(ptr);
}
return handle;
}
EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);
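/*
 * Illustrative usage sketch (not part of the original driver, kept under
 * "#if 0"): a hypothetical consumer platform driver obtains the managed
 * handle and asks the firmware to power up one of its devices. The
 * device ID (42) is a placeholder chosen purely for illustration.
 */
#if 0
static int foo_probe(struct platform_device *pdev)
{
	const struct ti_sci_handle *tsh;
	int ret;

	tsh = devm_ti_sci_get_handle(&pdev->dev);
	if (IS_ERR(tsh))
		return PTR_ERR(tsh);

	/* Request that TISCI device 42 (placeholder id) be powered on */
	ret = tsh->ops.dev_ops.get_device(tsh, 42);
	if (ret)
		dev_err(&pdev->dev, "failed to enable device: %d\n", ret);

	return ret;
}
#endif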
/**
* ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
* @np: device node
* @property: property name containing phandle on TISCI node
*
* NOTE: The function does not track individual clients of the framework
* and is expected to be maintained by caller of TI SCI protocol library.
* ti_sci_put_handle must be balanced with successful ti_sci_get_by_phandle
* Return: pointer to handle if successful, else:
* -EPROBE_DEFER if the instance is not ready
* -ENODEV if the required node handler is missing
* -EINVAL if invalid conditions are encountered.
*/
const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np,
const char *property)
{
struct ti_sci_handle *handle = NULL;
struct device_node *ti_sci_np;
struct ti_sci_info *info;
struct list_head *p;
if (!np) {
pr_err("I need a device pointer\n");
return ERR_PTR(-EINVAL);
}
ti_sci_np = of_parse_phandle(np, property, 0);
if (!ti_sci_np)
return ERR_PTR(-ENODEV);
mutex_lock(&ti_sci_list_mutex);
list_for_each(p, &ti_sci_list) {
info = list_entry(p, struct ti_sci_info, node);
if (ti_sci_np == info->dev->of_node) {
handle = &info->handle;
info->users++;
break;
}
}
mutex_unlock(&ti_sci_list_mutex);
of_node_put(ti_sci_np);
if (!handle)
return ERR_PTR(-EPROBE_DEFER);
return handle;
}
EXPORT_SYMBOL_GPL(ti_sci_get_by_phandle);
/**
* devm_ti_sci_get_by_phandle() - Managed get handle using phandle
* @dev: Device pointer requesting TISCI handle
* @property: property name containing phandle on TISCI node
*
* NOTE: This releases the handle once the device resources are
* no longer needed. MUST NOT BE released with ti_sci_put_handle.
* The function does not track individual clients of the framework
* and is expected to be maintained by caller of TI SCI protocol library.
*
 * Return: pointer to handle if all went fine, else corresponding error pointer.
*/
const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev,
const char *property)
{
const struct ti_sci_handle *handle;
const struct ti_sci_handle **ptr;
ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
handle = ti_sci_get_by_phandle(dev_of_node(dev), property);
if (!IS_ERR(handle)) {
*ptr = handle;
devres_add(dev, ptr);
} else {
devres_free(ptr);
}
return handle;
}
EXPORT_SYMBOL_GPL(devm_ti_sci_get_by_phandle);
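/*
 * Illustrative sketch (not part of the original driver, kept under
 * "#if 0"): with a consumer device tree node that carries a phandle to
 * the TISCI node, e.g.
 *
 *	foo@0 {
 *		compatible = "vendor,foo";	// hypothetical consumer
 *		ti,sci = <&dmsc>;		// phandle to the TISCI node
 *	};
 *
 * the managed handle can be looked up by property name as below.
 */
#if 0
static const struct ti_sci_handle *foo_get_tisci(struct device *dev)
{
	return devm_ti_sci_get_by_phandle(dev, "ti,sci");
}
#endif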
/**
* ti_sci_get_free_resource() - Get a free resource from TISCI resource.
* @res: Pointer to the TISCI resource
*
* Return: resource num if all went ok else TI_SCI_RESOURCE_NULL.
*/
u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
{
unsigned long flags;
u16 set, free_bit;
raw_spin_lock_irqsave(&res->lock, flags);
for (set = 0; set < res->sets; set++) {
struct ti_sci_resource_desc *desc = &res->desc[set];
int res_count = desc->num + desc->num_sec;
free_bit = find_first_zero_bit(desc->res_map, res_count);
if (free_bit != res_count) {
__set_bit(free_bit, desc->res_map);
raw_spin_unlock_irqrestore(&res->lock, flags);
if (desc->num && free_bit < desc->num)
return desc->start + free_bit;
else
return desc->start_sec + free_bit;
}
}
raw_spin_unlock_irqrestore(&res->lock, flags);
return TI_SCI_RESOURCE_NULL;
}
EXPORT_SYMBOL_GPL(ti_sci_get_free_resource);
/**
* ti_sci_release_resource() - Release a resource from TISCI resource.
* @res: Pointer to the TISCI resource
* @id: Resource id to be released.
*/
void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
{
unsigned long flags;
u16 set;
raw_spin_lock_irqsave(&res->lock, flags);
for (set = 0; set < res->sets; set++) {
struct ti_sci_resource_desc *desc = &res->desc[set];
if (desc->num && desc->start <= id &&
(desc->start + desc->num) > id)
__clear_bit(id - desc->start, desc->res_map);
else if (desc->num_sec && desc->start_sec <= id &&
(desc->start_sec + desc->num_sec) > id)
__clear_bit(id - desc->start_sec, desc->res_map);
}
raw_spin_unlock_irqrestore(&res->lock, flags);
}
EXPORT_SYMBOL_GPL(ti_sci_release_resource);
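/*
 * Illustrative sketch (not part of the original driver, kept under
 * "#if 0"): the typical allocate/program/release pattern for the two
 * helpers above, for a consumer that already holds a struct
 * ti_sci_resource (for instance from devm_ti_sci_get_of_resource()
 * below). The hardware programming step is elided.
 */
#if 0
static int foo_use_one_resource(struct ti_sci_resource *res)
{
	u16 idx;

	idx = ti_sci_get_free_resource(res);
	if (idx == TI_SCI_RESOURCE_NULL)
		return -ENOSPC;

	/* ... program the hardware with resource index 'idx' ... */

	ti_sci_release_resource(res, idx);
	return 0;
}
#endif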
/**
* ti_sci_get_num_resources() - Get the number of resources in TISCI resource
* @res: Pointer to the TISCI resource
*
* Return: Total number of available resources.
*/
u32 ti_sci_get_num_resources(struct ti_sci_resource *res)
{
u32 set, count = 0;
for (set = 0; set < res->sets; set++)
count += res->desc[set].num + res->desc[set].num_sec;
return count;
}
EXPORT_SYMBOL_GPL(ti_sci_get_num_resources);
/**
* devm_ti_sci_get_resource_sets() - Get a TISCI resources assigned to a device
* @handle: TISCI handle
* @dev: Device pointer to which the resource is assigned
* @dev_id: TISCI device id to which the resource is assigned
* @sub_types: Array of sub_types assigned corresponding to device
* @sets: Number of sub_types
*
* Return: Pointer to ti_sci_resource if all went well else appropriate
* error pointer.
*/
static struct ti_sci_resource *
devm_ti_sci_get_resource_sets(const struct ti_sci_handle *handle,
struct device *dev, u32 dev_id, u32 *sub_types,
u32 sets)
{
struct ti_sci_resource *res;
bool valid_set = false;
int i, ret, res_count;
res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
if (!res)
return ERR_PTR(-ENOMEM);
res->sets = sets;
res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
GFP_KERNEL);
if (!res->desc)
return ERR_PTR(-ENOMEM);
for (i = 0; i < res->sets; i++) {
ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
sub_types[i],
&res->desc[i]);
if (ret) {
dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n",
dev_id, sub_types[i]);
memset(&res->desc[i], 0, sizeof(res->desc[i]));
continue;
}
dev_dbg(dev, "dev/sub_type: %d/%d, start/num: %d/%d | %d/%d\n",
dev_id, sub_types[i], res->desc[i].start,
res->desc[i].num, res->desc[i].start_sec,
res->desc[i].num_sec);
valid_set = true;
res_count = res->desc[i].num + res->desc[i].num_sec;
res->desc[i].res_map = devm_bitmap_zalloc(dev, res_count,
GFP_KERNEL);
if (!res->desc[i].res_map)
return ERR_PTR(-ENOMEM);
}
raw_spin_lock_init(&res->lock);
if (valid_set)
return res;
return ERR_PTR(-EINVAL);
}
/**
* devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
* @handle: TISCI handle
* @dev: Device pointer to which the resource is assigned
* @dev_id: TISCI device id to which the resource is assigned
 * @of_prop: property name by which the resources are represented
*
* Return: Pointer to ti_sci_resource if all went well else appropriate
* error pointer.
*/
struct ti_sci_resource *
devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
struct device *dev, u32 dev_id, char *of_prop)
{
struct ti_sci_resource *res;
u32 *sub_types;
int sets;
sets = of_property_count_elems_of_size(dev_of_node(dev), of_prop,
sizeof(u32));
if (sets < 0) {
dev_err(dev, "%s resource type ids not available\n", of_prop);
return ERR_PTR(sets);
}
sub_types = kcalloc(sets, sizeof(*sub_types), GFP_KERNEL);
if (!sub_types)
return ERR_PTR(-ENOMEM);
of_property_read_u32_array(dev_of_node(dev), of_prop, sub_types, sets);
res = devm_ti_sci_get_resource_sets(handle, dev, dev_id, sub_types,
sets);
kfree(sub_types);
return res;
}
EXPORT_SYMBOL_GPL(devm_ti_sci_get_of_resource);
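/*
 * Illustrative sketch (not part of the original driver, kept under
 * "#if 0"): the consumer node is expected to carry a u32 array property
 * listing the resource sub-types, for example
 *
 *	ti,sci-rm-range-vint = <0x0a>;
 *
 * (property name and value are borrowed from existing TI bindings purely
 * as an example), which a hypothetical driver would request as below.
 */
#if 0
static struct ti_sci_resource *
foo_get_vint_resource(const struct ti_sci_handle *tisci, struct device *dev,
		      u32 dev_id)
{
	return devm_ti_sci_get_of_resource(tisci, dev, dev_id,
					   "ti,sci-rm-range-vint");
}
#endif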
/**
* devm_ti_sci_get_resource() - Get a resource range assigned to the device
* @handle: TISCI handle
* @dev: Device pointer to which the resource is assigned
* @dev_id: TISCI device id to which the resource is assigned
 * @sub_type: TISCI resource subtype representing the resource.
*
* Return: Pointer to ti_sci_resource if all went well else appropriate
* error pointer.
*/
struct ti_sci_resource *
devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
u32 dev_id, u32 sub_type)
{
return devm_ti_sci_get_resource_sets(handle, dev, dev_id, &sub_type, 1);
}
EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource);
static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
void *cmd)
{
struct ti_sci_info *info = reboot_to_ti_sci_info(nb);
const struct ti_sci_handle *handle = &info->handle;
ti_sci_cmd_core_reboot(handle);
/* Whether the call fails or passes, we should not be here in the first place */
return NOTIFY_BAD;
}
/* Description for K2G */
static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
.default_host_id = 2,
/* Conservative duration */
.max_rx_timeout_ms = 1000,
/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
.max_msgs = 20,
.max_msg_size = 64,
};
/* Description for AM654 */
static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
.default_host_id = 12,
/* Conservative duration */
.max_rx_timeout_ms = 10000,
/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
.max_msgs = 20,
.max_msg_size = 60,
};
static const struct of_device_id ti_sci_of_match[] = {
{.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
{.compatible = "ti,am654-sci", .data = &ti_sci_pmmc_am654_desc},
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, ti_sci_of_match);
static int ti_sci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct of_device_id *of_id;
const struct ti_sci_desc *desc;
struct ti_sci_xfer *xfer;
struct ti_sci_info *info = NULL;
struct ti_sci_xfers_info *minfo;
struct mbox_client *cl;
int ret = -EINVAL;
int i;
int reboot = 0;
u32 h_id;
of_id = of_match_device(ti_sci_of_match, dev);
if (!of_id) {
dev_err(dev, "OF data missing\n");
return -EINVAL;
}
desc = of_id->data;
info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->dev = dev;
info->desc = desc;
ret = of_property_read_u32(dev->of_node, "ti,host-id", &h_id);
/* if the property is not present in DT, use a default from desc */
if (ret < 0) {
info->host_id = info->desc->default_host_id;
} else {
if (!h_id) {
dev_warn(dev, "Host ID 0 is reserved for firmware\n");
info->host_id = info->desc->default_host_id;
} else {
info->host_id = h_id;
}
}
reboot = of_property_read_bool(dev->of_node,
"ti,system-reboot-controller");
INIT_LIST_HEAD(&info->node);
minfo = &info->minfo;
/*
 * Pre-allocate messages.
 * NEVER allocate more than what we can indicate in hdr.seq;
 * if we have a data description bug, force a fix.
*/
if (WARN_ON(desc->max_msgs >=
1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq)))
return -EINVAL;
minfo->xfer_block = devm_kcalloc(dev,
desc->max_msgs,
sizeof(*minfo->xfer_block),
GFP_KERNEL);
if (!minfo->xfer_block)
return -ENOMEM;
minfo->xfer_alloc_table = devm_bitmap_zalloc(dev,
desc->max_msgs,
GFP_KERNEL);
if (!minfo->xfer_alloc_table)
return -ENOMEM;
/* Pre-initialize the buffer pointer to pre-allocated buffers */
for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) {
xfer->xfer_buf = devm_kcalloc(dev, 1, desc->max_msg_size,
GFP_KERNEL);
if (!xfer->xfer_buf)
return -ENOMEM;
xfer->tx_message.buf = xfer->xfer_buf;
init_completion(&xfer->done);
}
ret = ti_sci_debugfs_create(pdev, info);
if (ret)
dev_warn(dev, "Failed to create debug file\n");
platform_set_drvdata(pdev, info);
cl = &info->cl;
cl->dev = dev;
cl->tx_block = false;
cl->rx_callback = ti_sci_rx_callback;
cl->knows_txdone = true;
spin_lock_init(&minfo->xfer_lock);
sema_init(&minfo->sem_xfer_count, desc->max_msgs);
info->chan_rx = mbox_request_channel_byname(cl, "rx");
if (IS_ERR(info->chan_rx)) {
ret = PTR_ERR(info->chan_rx);
goto out;
}
info->chan_tx = mbox_request_channel_byname(cl, "tx");
if (IS_ERR(info->chan_tx)) {
ret = PTR_ERR(info->chan_tx);
goto out;
}
ret = ti_sci_cmd_get_revision(info);
if (ret) {
dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret);
goto out;
}
ti_sci_setup_ops(info);
if (reboot) {
info->nb.notifier_call = tisci_reboot_handler;
info->nb.priority = 128;
ret = register_restart_handler(&info->nb);
if (ret) {
dev_err(dev, "reboot registration fail(%d)\n", ret);
goto out;
}
}
dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
info->handle.version.abi_major, info->handle.version.abi_minor,
info->handle.version.firmware_revision,
info->handle.version.firmware_description);
mutex_lock(&ti_sci_list_mutex);
list_add_tail(&info->node, &ti_sci_list);
mutex_unlock(&ti_sci_list_mutex);
return of_platform_populate(dev->of_node, NULL, NULL, dev);
out:
if (!IS_ERR(info->chan_tx))
mbox_free_channel(info->chan_tx);
if (!IS_ERR(info->chan_rx))
mbox_free_channel(info->chan_rx);
debugfs_remove(info->d);
return ret;
}
static int ti_sci_remove(struct platform_device *pdev)
{
struct ti_sci_info *info;
struct device *dev = &pdev->dev;
int ret = 0;
of_platform_depopulate(dev);
info = platform_get_drvdata(pdev);
if (info->nb.notifier_call)
unregister_restart_handler(&info->nb);
mutex_lock(&ti_sci_list_mutex);
if (info->users)
ret = -EBUSY;
else
list_del(&info->node);
mutex_unlock(&ti_sci_list_mutex);
if (!ret) {
ti_sci_debugfs_destroy(pdev, info);
/* Safe to free channels since no more users */
mbox_free_channel(info->chan_tx);
mbox_free_channel(info->chan_rx);
}
return ret;
}
static struct platform_driver ti_sci_driver = {
.probe = ti_sci_probe,
.remove = ti_sci_remove,
.driver = {
.name = "ti-sci",
.of_match_table = of_match_ptr(ti_sci_of_match),
},
};
module_platform_driver(ti_sci_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI System Control Interface (SCI) driver");
MODULE_AUTHOR("Nishanth Menon");
MODULE_ALIAS("platform:ti-sci");
| linux-master | drivers/firmware/ti_sci.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
* Copyright (C) 2015 Linaro Ltd.
*/
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/arm-smccc.h>
#include <linux/dma-mapping.h>
#include "qcom_scm.h"
static DEFINE_MUTEX(qcom_scm_lock);
/**
* struct arm_smccc_args
* @args: The array of values used in registers in smc instruction
*/
struct arm_smccc_args {
unsigned long args[8];
};
/**
* struct scm_legacy_command - one SCM command buffer
* @len: total available memory for command and response
* @buf_offset: start of command buffer
* @resp_hdr_offset: start of response buffer
* @id: command to be executed
* @buf: buffer returned from scm_legacy_get_command_buffer()
*
* An SCM command is laid out in memory as follows:
*
* ------------------- <--- struct scm_legacy_command
* | command header |
* ------------------- <--- scm_legacy_get_command_buffer()
* | command buffer |
* ------------------- <--- struct scm_legacy_response and
* | response header | scm_legacy_command_to_response()
* ------------------- <--- scm_legacy_get_response_buffer()
* | response buffer |
* -------------------
*
* There can be arbitrary padding between the headers and buffers so
* you should always use the appropriate scm_legacy_get_*_buffer() routines
* to access the buffers in a safe manner.
*/
struct scm_legacy_command {
__le32 len;
__le32 buf_offset;
__le32 resp_hdr_offset;
__le32 id;
__le32 buf[];
};
/**
* struct scm_legacy_response - one SCM response buffer
* @len: total available memory for response
* @buf_offset: start of response data relative to start of scm_legacy_response
* @is_complete: indicates if the command has finished processing
*/
struct scm_legacy_response {
__le32 len;
__le32 buf_offset;
__le32 is_complete;
};
/**
* scm_legacy_command_to_response() - Get a pointer to a scm_legacy_response
* @cmd: command
*
* Returns a pointer to a response for a command.
*/
static inline struct scm_legacy_response *scm_legacy_command_to_response(
const struct scm_legacy_command *cmd)
{
return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset);
}
/**
* scm_legacy_get_command_buffer() - Get a pointer to a command buffer
* @cmd: command
*
* Returns a pointer to the command buffer of a command.
*/
static inline void *scm_legacy_get_command_buffer(
const struct scm_legacy_command *cmd)
{
return (void *)cmd->buf;
}
/**
* scm_legacy_get_response_buffer() - Get a pointer to a response buffer
* @rsp: response
*
* Returns a pointer to a response buffer of a response.
*/
static inline void *scm_legacy_get_response_buffer(
const struct scm_legacy_response *rsp)
{
return (void *)rsp + le32_to_cpu(rsp->buf_offset);
}
static void __scm_legacy_do(const struct arm_smccc_args *smc,
struct arm_smccc_res *res)
{
do {
arm_smccc_smc(smc->args[0], smc->args[1], smc->args[2],
smc->args[3], smc->args[4], smc->args[5],
smc->args[6], smc->args[7], res);
} while (res->a0 == QCOM_SCM_INTERRUPTED);
}
/**
* scm_legacy_call() - Sends a command to the SCM and waits for the command to
* finish processing.
* @dev: device
* @desc: descriptor structure containing arguments and return values
* @res: results from SMC call
*
* A note on cache maintenance:
* Note that any buffers that are expected to be accessed by the secure world
* must be flushed before invoking qcom_scm_call and invalidated in the cache
* immediately after qcom_scm_call returns. Cache maintenance on the command
* and response buffers is taken care of by qcom_scm_call; however, callers are
* responsible for any other cached buffers passed over to the secure world.
*/
int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
struct qcom_scm_res *res)
{
u8 arglen = desc->arginfo & 0xf;
int ret = 0, context_id;
unsigned int i;
struct scm_legacy_command *cmd;
struct scm_legacy_response *rsp;
struct arm_smccc_args smc = {0};
struct arm_smccc_res smc_res;
const size_t cmd_len = arglen * sizeof(__le32);
const size_t resp_len = MAX_QCOM_SCM_RETS * sizeof(__le32);
size_t alloc_len = sizeof(*cmd) + cmd_len + sizeof(*rsp) + resp_len;
dma_addr_t cmd_phys;
__le32 *arg_buf;
const __le32 *res_buf;
cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->len = cpu_to_le32(alloc_len);
cmd->buf_offset = cpu_to_le32(sizeof(*cmd));
cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len);
cmd->id = cpu_to_le32(SCM_LEGACY_FNID(desc->svc, desc->cmd));
arg_buf = scm_legacy_get_command_buffer(cmd);
for (i = 0; i < arglen; i++)
arg_buf[i] = cpu_to_le32(desc->args[i]);
rsp = scm_legacy_command_to_response(cmd);
cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, cmd_phys)) {
kfree(cmd);
return -ENOMEM;
}
smc.args[0] = 1;
smc.args[1] = (unsigned long)&context_id;
smc.args[2] = cmd_phys;
mutex_lock(&qcom_scm_lock);
__scm_legacy_do(&smc, &smc_res);
if (smc_res.a0)
ret = qcom_scm_remap_error(smc_res.a0);
mutex_unlock(&qcom_scm_lock);
if (ret)
goto out;
do {
dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len,
sizeof(*rsp), DMA_FROM_DEVICE);
} while (!rsp->is_complete);
dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len +
le32_to_cpu(rsp->buf_offset),
resp_len, DMA_FROM_DEVICE);
if (res) {
res_buf = scm_legacy_get_response_buffer(rsp);
for (i = 0; i < MAX_QCOM_SCM_RETS; i++)
res->result[i] = le32_to_cpu(res_buf[i]);
}
out:
dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE);
kfree(cmd);
return ret;
}
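/*
 * Illustrative sketch (not part of the original file, kept under
 * "#if 0"): how a caller in qcom_scm.c might fill a descriptor and issue
 * a legacy call. It assumes the QCOM_SCM_ARGS() helper from qcom_scm.h;
 * the service/command IDs and the argument are placeholders, not real
 * SCM calls.
 */
#if 0
static int foo_legacy_call_example(struct device *dev)
{
	struct qcom_scm_desc desc = {
		.svc = 0x1,			/* placeholder service id */
		.cmd = 0x1,			/* placeholder command id */
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = 0,			/* placeholder argument */
	};
	struct qcom_scm_res res;

	return scm_legacy_call(dev, &desc, &res);
}
#endif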
#define SCM_LEGACY_ATOMIC_N_REG_ARGS 5
#define SCM_LEGACY_ATOMIC_FIRST_REG_IDX 2
#define SCM_LEGACY_CLASS_REGISTER (0x2 << 8)
#define SCM_LEGACY_MASK_IRQS BIT(5)
#define SCM_LEGACY_ATOMIC_ID(svc, cmd, n) \
((SCM_LEGACY_FNID(svc, cmd) << 12) | \
SCM_LEGACY_CLASS_REGISTER | \
SCM_LEGACY_MASK_IRQS | \
(n & 0xf))
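/*
 * For example, SCM_LEGACY_ATOMIC_ID(svc, cmd, 2) places
 * SCM_LEGACY_FNID(svc, cmd) in bits [31:12], sets the "register call"
 * class field (bits 9:8) and the mask-IRQs flag (bit 5), and encodes the
 * argument count (here 2) in the low nibble.
 */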
/**
* scm_legacy_call_atomic() - Send an atomic SCM command with up to 5 arguments
* and 3 return values
* @unused: device, legacy argument, not used, can be NULL
* @desc: SCM call descriptor containing arguments
* @res: SCM call return values
*
* This shall only be used with commands that are guaranteed to be
 * uninterruptible, atomic and SMP safe.
*/
int scm_legacy_call_atomic(struct device *unused,
const struct qcom_scm_desc *desc,
struct qcom_scm_res *res)
{
int context_id;
struct arm_smccc_res smc_res;
size_t arglen = desc->arginfo & 0xf;
BUG_ON(arglen > SCM_LEGACY_ATOMIC_N_REG_ARGS);
arm_smccc_smc(SCM_LEGACY_ATOMIC_ID(desc->svc, desc->cmd, arglen),
(unsigned long)&context_id,
desc->args[0], desc->args[1], desc->args[2],
desc->args[3], desc->args[4], 0, &smc_res);
if (res) {
res->result[0] = smc_res.a1;
res->result[1] = smc_res.a2;
res->result[2] = smc_res.a3;
}
return smc_res.a0;
}
| linux-master | drivers/firmware/qcom_scm-legacy.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2007-2010 Red Hat, Inc.
* by Peter Jones <[email protected]>
* Copyright 2008 IBM, Inc.
* by Konrad Rzeszutek <[email protected]>
* Copyright 2008
* by Konrad Rzeszutek <[email protected]>
*
* This code exposes the iSCSI Boot Format Table to userland via sysfs.
*
* Changelog:
*
* 06 Jan 2010 - Peter Jones <[email protected]>
* New changelog entries are in the git log from now on. Not here.
*
* 14 Mar 2008 - Konrad Rzeszutek <[email protected]>
* Updated comments and copyrights. (v0.4.9)
*
* 11 Feb 2008 - Konrad Rzeszutek <[email protected]>
* Converted to using ibft_addr. (v0.4.8)
*
* 8 Feb 2008 - Konrad Rzeszutek <[email protected]>
* Combined two functions in one: reserve_ibft_region. (v0.4.7)
*
* 30 Jan 2008 - Konrad Rzeszutek <[email protected]>
* Added logic to handle IPv6 addresses. (v0.4.6)
*
* 25 Jan 2008 - Konrad Rzeszutek <[email protected]>
* Added logic to handle badly not-to-spec iBFT. (v0.4.5)
*
* 4 Jan 2008 - Konrad Rzeszutek <[email protected]>
* Added __init to function declarations. (v0.4.4)
*
* 21 Dec 2007 - Konrad Rzeszutek <[email protected]>
* Updated kobject registration, combined unregister functions in one
* and code and style cleanup. (v0.4.3)
*
* 5 Dec 2007 - Konrad Rzeszutek <[email protected]>
* Added end-markers to enums and re-organized kobject registration. (v0.4.2)
*
* 4 Dec 2007 - Konrad Rzeszutek <[email protected]>
* Created 'device' sysfs link to the NIC and style cleanup. (v0.4.1)
*
* 28 Nov 2007 - Konrad Rzeszutek <[email protected]>
* Added sysfs-ibft documentation, moved 'find_ibft' function to
* in its own file and added text attributes for every struct field. (v0.4)
*
* 21 Nov 2007 - Konrad Rzeszutek <[email protected]>
* Added text attributes emulating OpenFirmware /proc/device-tree naming.
* Removed binary /sysfs interface (v0.3)
*
* 29 Aug 2007 - Konrad Rzeszutek <[email protected]>
* Added functionality in setup.c to reserve iBFT region. (v0.2)
*
* 27 Aug 2007 - Konrad Rzeszutek <[email protected]>
* First version exposing iBFT data via a binary /sysfs. (v0.1)
*/
#include <linux/blkdev.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/iscsi_ibft.h>
#include <linux/limits.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/acpi.h>
#include <linux/iscsi_boot_sysfs.h>
#define IBFT_ISCSI_VERSION "0.5.0"
#define IBFT_ISCSI_DATE "2010-Feb-25"
MODULE_AUTHOR("Peter Jones <[email protected]> and "
"Konrad Rzeszutek <[email protected]>");
MODULE_DESCRIPTION("sysfs interface to BIOS iBFT information");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBFT_ISCSI_VERSION);
static struct acpi_table_ibft *ibft_addr;
struct ibft_hdr {
u8 id;
u8 version;
u16 length;
u8 index;
u8 flags;
} __attribute__((__packed__));
struct ibft_control {
struct ibft_hdr hdr;
u16 extensions;
u16 initiator_off;
u16 nic0_off;
u16 tgt0_off;
u16 nic1_off;
u16 tgt1_off;
u16 expansion[];
} __attribute__((__packed__));
struct ibft_initiator {
struct ibft_hdr hdr;
char isns_server[16];
char slp_server[16];
char pri_radius_server[16];
char sec_radius_server[16];
u16 initiator_name_len;
u16 initiator_name_off;
} __attribute__((__packed__));
struct ibft_nic {
struct ibft_hdr hdr;
char ip_addr[16];
u8 subnet_mask_prefix;
u8 origin;
char gateway[16];
char primary_dns[16];
char secondary_dns[16];
char dhcp[16];
u16 vlan;
char mac[6];
u16 pci_bdf;
u16 hostname_len;
u16 hostname_off;
} __attribute__((__packed__));
struct ibft_tgt {
struct ibft_hdr hdr;
char ip_addr[16];
u16 port;
char lun[8];
u8 chap_type;
u8 nic_assoc;
u16 tgt_name_len;
u16 tgt_name_off;
u16 chap_name_len;
u16 chap_name_off;
u16 chap_secret_len;
u16 chap_secret_off;
u16 rev_chap_name_len;
u16 rev_chap_name_off;
u16 rev_chap_secret_len;
u16 rev_chap_secret_off;
} __attribute__((__packed__));
/*
* The kobject different types and its names.
*
*/
enum ibft_id {
id_reserved = 0, /* We don't support. */
id_control = 1, /* Should show up only once and is not exported. */
id_initiator = 2,
id_nic = 3,
id_target = 4,
id_extensions = 5, /* We don't support. */
id_end_marker,
};
/*
* The kobject and attribute structures.
*/
struct ibft_kobject {
struct acpi_table_ibft *header;
union {
struct ibft_initiator *initiator;
struct ibft_nic *nic;
struct ibft_tgt *tgt;
struct ibft_hdr *hdr;
};
};
static struct iscsi_boot_kset *boot_kset;
/* fully null address */
static const char nulls[16];
/* IPv4-mapped IPv6 ::ffff:0.0.0.0 */
static const char mapped_nulls[16] = { 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00 };
static int address_not_null(u8 *ip)
{
return (memcmp(ip, nulls, 16) && memcmp(ip, mapped_nulls, 16));
}
/*
* Helper functions to parse data properly.
*/
static ssize_t sprintf_ipaddr(char *buf, u8 *ip)
{
char *str = buf;
if (ip[0] == 0 && ip[1] == 0 && ip[2] == 0 && ip[3] == 0 &&
ip[4] == 0 && ip[5] == 0 && ip[6] == 0 && ip[7] == 0 &&
ip[8] == 0 && ip[9] == 0 && ip[10] == 0xff && ip[11] == 0xff) {
/*
* IPV4
*/
str += sprintf(buf, "%pI4", ip + 12);
} else {
/*
* IPv6
*/
str += sprintf(str, "%pI6", ip);
}
str += sprintf(str, "\n");
return str - buf;
}
static ssize_t sprintf_string(char *str, int len, char *buf)
{
return sprintf(str, "%.*s\n", len, buf);
}
/*
* Helper function to verify the IBFT header.
*/
static int ibft_verify_hdr(char *t, struct ibft_hdr *hdr, int id, int length)
{
if (hdr->id != id) {
printk(KERN_ERR "iBFT error: We expected the %s " \
"field header.id to have %d but " \
"found %d instead!\n", t, id, hdr->id);
return -ENODEV;
}
if (length && hdr->length != length) {
printk(KERN_ERR "iBFT error: We expected the %s " \
"field header.length to have %d but " \
"found %d instead!\n", t, length, hdr->length);
return -ENODEV;
}
return 0;
}
/*
* Routines for parsing the iBFT data to be human readable.
*/
static ssize_t ibft_attr_show_initiator(void *data, int type, char *buf)
{
struct ibft_kobject *entry = data;
struct ibft_initiator *initiator = entry->initiator;
void *ibft_loc = entry->header;
char *str = buf;
if (!initiator)
return 0;
switch (type) {
case ISCSI_BOOT_INI_INDEX:
str += sprintf(str, "%d\n", initiator->hdr.index);
break;
case ISCSI_BOOT_INI_FLAGS:
str += sprintf(str, "%d\n", initiator->hdr.flags);
break;
case ISCSI_BOOT_INI_ISNS_SERVER:
str += sprintf_ipaddr(str, initiator->isns_server);
break;
case ISCSI_BOOT_INI_SLP_SERVER:
str += sprintf_ipaddr(str, initiator->slp_server);
break;
case ISCSI_BOOT_INI_PRI_RADIUS_SERVER:
str += sprintf_ipaddr(str, initiator->pri_radius_server);
break;
case ISCSI_BOOT_INI_SEC_RADIUS_SERVER:
str += sprintf_ipaddr(str, initiator->sec_radius_server);
break;
case ISCSI_BOOT_INI_INITIATOR_NAME:
str += sprintf_string(str, initiator->initiator_name_len,
(char *)ibft_loc +
initiator->initiator_name_off);
break;
default:
break;
}
return str - buf;
}
static ssize_t ibft_attr_show_nic(void *data, int type, char *buf)
{
struct ibft_kobject *entry = data;
struct ibft_nic *nic = entry->nic;
void *ibft_loc = entry->header;
char *str = buf;
__be32 val;
if (!nic)
return 0;
switch (type) {
case ISCSI_BOOT_ETH_INDEX:
str += sprintf(str, "%d\n", nic->hdr.index);
break;
case ISCSI_BOOT_ETH_FLAGS:
str += sprintf(str, "%d\n", nic->hdr.flags);
break;
case ISCSI_BOOT_ETH_IP_ADDR:
str += sprintf_ipaddr(str, nic->ip_addr);
break;
case ISCSI_BOOT_ETH_SUBNET_MASK:
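		/*
		 * Derive a dotted-quad netmask from the prefix length:
		 * e.g. a /24 prefix gives ~((1 << 8) - 1) = 0xffffff00,
		 * i.e. 255.255.255.0.
		 */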
val = cpu_to_be32(~((1 << (32-nic->subnet_mask_prefix))-1));
str += sprintf(str, "%pI4", &val);
break;
case ISCSI_BOOT_ETH_PREFIX_LEN:
str += sprintf(str, "%d\n", nic->subnet_mask_prefix);
break;
case ISCSI_BOOT_ETH_ORIGIN:
str += sprintf(str, "%d\n", nic->origin);
break;
case ISCSI_BOOT_ETH_GATEWAY:
str += sprintf_ipaddr(str, nic->gateway);
break;
case ISCSI_BOOT_ETH_PRIMARY_DNS:
str += sprintf_ipaddr(str, nic->primary_dns);
break;
case ISCSI_BOOT_ETH_SECONDARY_DNS:
str += sprintf_ipaddr(str, nic->secondary_dns);
break;
case ISCSI_BOOT_ETH_DHCP:
str += sprintf_ipaddr(str, nic->dhcp);
break;
case ISCSI_BOOT_ETH_VLAN:
str += sprintf(str, "%d\n", nic->vlan);
break;
case ISCSI_BOOT_ETH_MAC:
str += sprintf(str, "%pM\n", nic->mac);
break;
case ISCSI_BOOT_ETH_HOSTNAME:
str += sprintf_string(str, nic->hostname_len,
(char *)ibft_loc + nic->hostname_off);
break;
default:
break;
}
return str - buf;
};
static ssize_t ibft_attr_show_target(void *data, int type, char *buf)
{
struct ibft_kobject *entry = data;
struct ibft_tgt *tgt = entry->tgt;
void *ibft_loc = entry->header;
char *str = buf;
int i;
if (!tgt)
return 0;
switch (type) {
case ISCSI_BOOT_TGT_INDEX:
str += sprintf(str, "%d\n", tgt->hdr.index);
break;
case ISCSI_BOOT_TGT_FLAGS:
str += sprintf(str, "%d\n", tgt->hdr.flags);
break;
case ISCSI_BOOT_TGT_IP_ADDR:
str += sprintf_ipaddr(str, tgt->ip_addr);
break;
case ISCSI_BOOT_TGT_PORT:
str += sprintf(str, "%d\n", tgt->port);
break;
case ISCSI_BOOT_TGT_LUN:
for (i = 0; i < 8; i++)
str += sprintf(str, "%x", (u8)tgt->lun[i]);
str += sprintf(str, "\n");
break;
case ISCSI_BOOT_TGT_NIC_ASSOC:
str += sprintf(str, "%d\n", tgt->nic_assoc);
break;
case ISCSI_BOOT_TGT_CHAP_TYPE:
str += sprintf(str, "%d\n", tgt->chap_type);
break;
case ISCSI_BOOT_TGT_NAME:
str += sprintf_string(str, tgt->tgt_name_len,
(char *)ibft_loc + tgt->tgt_name_off);
break;
case ISCSI_BOOT_TGT_CHAP_NAME:
str += sprintf_string(str, tgt->chap_name_len,
(char *)ibft_loc + tgt->chap_name_off);
break;
case ISCSI_BOOT_TGT_CHAP_SECRET:
str += sprintf_string(str, tgt->chap_secret_len,
(char *)ibft_loc + tgt->chap_secret_off);
break;
case ISCSI_BOOT_TGT_REV_CHAP_NAME:
str += sprintf_string(str, tgt->rev_chap_name_len,
(char *)ibft_loc +
tgt->rev_chap_name_off);
break;
case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
str += sprintf_string(str, tgt->rev_chap_secret_len,
(char *)ibft_loc +
tgt->rev_chap_secret_off);
break;
default:
break;
}
return str - buf;
}
static ssize_t ibft_attr_show_acpitbl(void *data, int type, char *buf)
{
struct ibft_kobject *entry = data;
char *str = buf;
switch (type) {
case ISCSI_BOOT_ACPITBL_SIGNATURE:
str += sprintf_string(str, ACPI_NAMESEG_SIZE,
entry->header->header.signature);
break;
case ISCSI_BOOT_ACPITBL_OEM_ID:
str += sprintf_string(str, ACPI_OEM_ID_SIZE,
entry->header->header.oem_id);
break;
case ISCSI_BOOT_ACPITBL_OEM_TABLE_ID:
str += sprintf_string(str, ACPI_OEM_TABLE_ID_SIZE,
entry->header->header.oem_table_id);
break;
default:
break;
}
return str - buf;
}
static int __init ibft_check_device(void)
{
int len;
u8 *pos;
u8 csum = 0;
len = ibft_addr->header.length;
/* Sanity checking of iBFT. */
if (ibft_addr->header.revision != 1) {
printk(KERN_ERR "iBFT module supports only revision 1, " \
"while this is %d.\n",
ibft_addr->header.revision);
return -ENOENT;
}
for (pos = (u8 *)ibft_addr; pos < (u8 *)ibft_addr + len; pos++)
csum += *pos;
if (csum) {
printk(KERN_ERR "iBFT has incorrect checksum (0x%x)!\n", csum);
return -ENOENT;
}
return 0;
}
/*
 * Helper routines to determine if the entry is valid
* in the proper iBFT structure.
*/
static umode_t ibft_check_nic_for(void *data, int type)
{
struct ibft_kobject *entry = data;
struct ibft_nic *nic = entry->nic;
umode_t rc = 0;
switch (type) {
case ISCSI_BOOT_ETH_INDEX:
case ISCSI_BOOT_ETH_FLAGS:
rc = S_IRUGO;
break;
case ISCSI_BOOT_ETH_IP_ADDR:
if (address_not_null(nic->ip_addr))
rc = S_IRUGO;
break;
case ISCSI_BOOT_ETH_PREFIX_LEN:
case ISCSI_BOOT_ETH_SUBNET_MASK:
if (nic->subnet_mask_prefix)
rc = S_IRUGO;
break;
case ISCSI_BOOT_ETH_ORIGIN:
rc = S_IRUGO;
break;
case ISCSI_BOOT_ETH_GATEWAY:
if (address_not_null(nic->gateway))
rc = S_IRUGO;
break;
case ISCSI_BOOT_ETH_PRIMARY_DNS:
if (address_not_null(nic->primary_dns))
rc = S_IRUGO;
break;
case ISCSI_BOOT_ETH_SECONDARY_DNS:
if (address_not_null(nic->secondary_dns))
rc = S_IRUGO;
break;
case ISCSI_BOOT_ETH_DHCP:
if (address_not_null(nic->dhcp))
rc = S_IRUGO;
break;
case ISCSI_BOOT_ETH_VLAN:
case ISCSI_BOOT_ETH_MAC:
rc = S_IRUGO;
break;
case ISCSI_BOOT_ETH_HOSTNAME:
if (nic->hostname_off)
rc = S_IRUGO;
break;
default:
break;
}
return rc;
}
static umode_t __init ibft_check_tgt_for(void *data, int type)
{
struct ibft_kobject *entry = data;
struct ibft_tgt *tgt = entry->tgt;
umode_t rc = 0;
switch (type) {
case ISCSI_BOOT_TGT_INDEX:
case ISCSI_BOOT_TGT_FLAGS:
case ISCSI_BOOT_TGT_IP_ADDR:
case ISCSI_BOOT_TGT_PORT:
case ISCSI_BOOT_TGT_LUN:
case ISCSI_BOOT_TGT_NIC_ASSOC:
case ISCSI_BOOT_TGT_CHAP_TYPE:
rc = S_IRUGO;
break;
case ISCSI_BOOT_TGT_NAME:
if (tgt->tgt_name_len)
rc = S_IRUGO;
break;
case ISCSI_BOOT_TGT_CHAP_NAME:
case ISCSI_BOOT_TGT_CHAP_SECRET:
if (tgt->chap_name_len)
rc = S_IRUGO;
break;
case ISCSI_BOOT_TGT_REV_CHAP_NAME:
case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
if (tgt->rev_chap_name_len)
rc = S_IRUGO;
break;
default:
break;
}
return rc;
}
static umode_t __init ibft_check_initiator_for(void *data, int type)
{
struct ibft_kobject *entry = data;
struct ibft_initiator *init = entry->initiator;
umode_t rc = 0;
switch (type) {
case ISCSI_BOOT_INI_INDEX:
case ISCSI_BOOT_INI_FLAGS:
rc = S_IRUGO;
break;
case ISCSI_BOOT_INI_ISNS_SERVER:
if (address_not_null(init->isns_server))
rc = S_IRUGO;
break;
case ISCSI_BOOT_INI_SLP_SERVER:
if (address_not_null(init->slp_server))
rc = S_IRUGO;
break;
case ISCSI_BOOT_INI_PRI_RADIUS_SERVER:
if (address_not_null(init->pri_radius_server))
rc = S_IRUGO;
break;
case ISCSI_BOOT_INI_SEC_RADIUS_SERVER:
if (address_not_null(init->sec_radius_server))
rc = S_IRUGO;
break;
case ISCSI_BOOT_INI_INITIATOR_NAME:
if (init->initiator_name_len)
rc = S_IRUGO;
break;
default:
break;
}
return rc;
}
static umode_t __init ibft_check_acpitbl_for(void *data, int type)
{
umode_t rc = 0;
switch (type) {
case ISCSI_BOOT_ACPITBL_SIGNATURE:
case ISCSI_BOOT_ACPITBL_OEM_ID:
case ISCSI_BOOT_ACPITBL_OEM_TABLE_ID:
rc = S_IRUGO;
break;
default:
break;
}
return rc;
}
static void ibft_kobj_release(void *data)
{
kfree(data);
}
/*
* Helper function for ibft_register_kobjects.
*/
static int __init ibft_create_kobject(struct acpi_table_ibft *header,
struct ibft_hdr *hdr)
{
struct iscsi_boot_kobj *boot_kobj = NULL;
struct ibft_kobject *ibft_kobj = NULL;
struct ibft_nic *nic = (struct ibft_nic *)hdr;
struct pci_dev *pci_dev;
int rc = 0;
ibft_kobj = kzalloc(sizeof(*ibft_kobj), GFP_KERNEL);
if (!ibft_kobj)
return -ENOMEM;
ibft_kobj->header = header;
ibft_kobj->hdr = hdr;
switch (hdr->id) {
case id_initiator:
rc = ibft_verify_hdr("initiator", hdr, id_initiator,
sizeof(*ibft_kobj->initiator));
if (rc)
break;
boot_kobj = iscsi_boot_create_initiator(boot_kset, hdr->index,
ibft_kobj,
ibft_attr_show_initiator,
ibft_check_initiator_for,
ibft_kobj_release);
if (!boot_kobj) {
rc = -ENOMEM;
goto free_ibft_obj;
}
break;
case id_nic:
rc = ibft_verify_hdr("ethernet", hdr, id_nic,
sizeof(*ibft_kobj->nic));
if (rc)
break;
boot_kobj = iscsi_boot_create_ethernet(boot_kset, hdr->index,
ibft_kobj,
ibft_attr_show_nic,
ibft_check_nic_for,
ibft_kobj_release);
if (!boot_kobj) {
rc = -ENOMEM;
goto free_ibft_obj;
}
break;
case id_target:
rc = ibft_verify_hdr("target", hdr, id_target,
sizeof(*ibft_kobj->tgt));
if (rc)
break;
boot_kobj = iscsi_boot_create_target(boot_kset, hdr->index,
ibft_kobj,
ibft_attr_show_target,
ibft_check_tgt_for,
ibft_kobj_release);
if (!boot_kobj) {
rc = -ENOMEM;
goto free_ibft_obj;
}
break;
case id_reserved:
case id_control:
case id_extensions:
/* Fields which we don't support. Ignore them */
rc = 1;
break;
default:
printk(KERN_ERR "iBFT has unknown structure type (%d). " \
"Report this bug to %.6s!\n", hdr->id,
header->header.oem_id);
rc = 1;
break;
}
if (rc) {
/* Skip adding this kobject, but exit with non-fatal error. */
rc = 0;
goto free_ibft_obj;
}
if (hdr->id == id_nic) {
/*
* We don't search for the device in other domains than
* zero. This is because on x86 platforms the BIOS
* executes only devices which are in domain 0. Furthermore, the
* iBFT spec doesn't have a domain id field :-(
*/
pci_dev = pci_get_domain_bus_and_slot(0,
(nic->pci_bdf & 0xff00) >> 8,
(nic->pci_bdf & 0xff));
if (pci_dev) {
rc = sysfs_create_link(&boot_kobj->kobj,
&pci_dev->dev.kobj, "device");
pci_dev_put(pci_dev);
}
}
return 0;
free_ibft_obj:
kfree(ibft_kobj);
return rc;
}
/*
* Scan the IBFT table structure for the NIC and Target fields. When
 * found, add them to the passed-in list. We do not support the other
* fields at this point, so they are skipped.
*/
static int __init ibft_register_kobjects(struct acpi_table_ibft *header)
{
struct ibft_control *control = NULL;
struct iscsi_boot_kobj *boot_kobj;
struct ibft_kobject *ibft_kobj;
void *ptr, *end;
int rc = 0;
u16 offset;
u16 eot_offset;
control = (void *)header + sizeof(*header);
end = (void *)control + control->hdr.length;
eot_offset = (void *)header + header->header.length - (void *)control;
rc = ibft_verify_hdr("control", (struct ibft_hdr *)control, id_control, 0);
/* iBFT table safety checking */
rc |= ((control->hdr.index) ? -ENODEV : 0);
rc |= ((control->hdr.length < sizeof(*control)) ? -ENODEV : 0);
if (rc) {
printk(KERN_ERR "iBFT error: Control header is invalid!\n");
return rc;
}
for (ptr = &control->initiator_off; ptr + sizeof(u16) <= end; ptr += sizeof(u16)) {
offset = *(u16 *)ptr;
if (offset && offset < header->header.length &&
offset < eot_offset) {
rc = ibft_create_kobject(header,
(void *)header + offset);
if (rc)
break;
}
}
if (rc)
return rc;
ibft_kobj = kzalloc(sizeof(*ibft_kobj), GFP_KERNEL);
if (!ibft_kobj)
return -ENOMEM;
ibft_kobj->header = header;
ibft_kobj->hdr = NULL; /*for ibft_unregister*/
boot_kobj = iscsi_boot_create_acpitbl(boot_kset, 0,
ibft_kobj,
ibft_attr_show_acpitbl,
ibft_check_acpitbl_for,
ibft_kobj_release);
if (!boot_kobj) {
kfree(ibft_kobj);
rc = -ENOMEM;
}
return rc;
}
static void ibft_unregister(void)
{
struct iscsi_boot_kobj *boot_kobj, *tmp_kobj;
struct ibft_kobject *ibft_kobj;
list_for_each_entry_safe(boot_kobj, tmp_kobj,
&boot_kset->kobj_list, list) {
ibft_kobj = boot_kobj->data;
if (ibft_kobj->hdr && ibft_kobj->hdr->id == id_nic)
sysfs_remove_link(&boot_kobj->kobj, "device");
};
}
static void ibft_cleanup(void)
{
if (boot_kset) {
ibft_unregister();
iscsi_boot_destroy_kset(boot_kset);
}
}
static void __exit ibft_exit(void)
{
ibft_cleanup();
}
#ifdef CONFIG_ACPI
static const struct {
char *sign;
} ibft_signs[] = {
/*
* One spec says "IBFT", the other says "iBFT". We have to check
* for both.
*/
{ ACPI_SIG_IBFT },
{ "iBFT" },
{ "BIFT" }, /* Broadcom iSCSI Offload */
};
static void __init acpi_find_ibft_region(void)
{
int i;
struct acpi_table_header *table = NULL;
if (acpi_disabled)
return;
for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++) {
acpi_get_table(ibft_signs[i].sign, 0, &table);
ibft_addr = (struct acpi_table_ibft *)table;
}
}
#else
static void __init acpi_find_ibft_region(void)
{
}
#endif
#ifdef CONFIG_ISCSI_IBFT_FIND
static int __init acpi_find_isa_region(void)
{
if (ibft_phys_addr) {
ibft_addr = isa_bus_to_virt(ibft_phys_addr);
return 0;
}
return -ENODEV;
}
#else
static int __init acpi_find_isa_region(void)
{
return -ENODEV;
}
#endif
/*
* ibft_init() - creates sysfs tree entries for the iBFT data.
*/
static int __init ibft_init(void)
{
int rc = 0;
	/*
	 * On UEFI systems setup_arch()/reserve_ibft_region() is called before
	 * the ACPI tables are parsed and it only does legacy (ISA region)
	 * finding, so fall back to scanning the ACPI tables here.
	 */
if (acpi_find_isa_region())
acpi_find_ibft_region();
if (ibft_addr) {
pr_info("iBFT detected.\n");
rc = ibft_check_device();
if (rc)
return rc;
boot_kset = iscsi_boot_create_kset("ibft");
if (!boot_kset)
return -ENOMEM;
/* Scan the IBFT for data and register the kobjects. */
rc = ibft_register_kobjects(ibft_addr);
if (rc)
goto out_free;
} else
printk(KERN_INFO "No iBFT detected.\n");
return 0;
out_free:
ibft_cleanup();
return rc;
}
module_init(ibft_init);
module_exit(ibft_exit);
| linux-master | drivers/firmware/iscsi_ibft.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Generic System Framebuffers
* Copyright (c) 2012-2013 David Herrmann <[email protected]>
*/
/*
* Simple-Framebuffer support
* Create a platform-device for any available boot framebuffer. The
* simple-framebuffer platform device is already available on DT systems, so
* this module parses the global "screen_info" object and creates a suitable
* platform device compatible with the "simple-framebuffer" DT object. If
* the framebuffer is incompatible, we instead create a legacy
* "vesa-framebuffer", "efi-framebuffer" or "platform-framebuffer" device and
* pass the screen_info as platform_data. This allows legacy drivers
* to pick these devices up without messing with simple-framebuffer drivers.
* The global "screen_info" is still valid at all times.
*
* If CONFIG_SYSFB_SIMPLEFB is not selected, never register "simple-framebuffer"
* platform devices, but only use legacy framebuffer devices for
* backwards compatibility.
*
* TODO: We set the dev_id field of all platform-devices to 0. This allows
* other OF/DT parsers to create such devices, too. However, they must
* start at offset 1 for this to work.
*/
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/platform_data/simplefb.h>
#include <linux/platform_device.h>
#include <linux/screen_info.h>
#include <linux/sysfb.h>
static struct platform_device *pd;
static DEFINE_MUTEX(disable_lock);
static bool disabled;
static bool sysfb_unregister(void)
{
if (IS_ERR_OR_NULL(pd))
return false;
platform_device_unregister(pd);
pd = NULL;
return true;
}
/**
* sysfb_disable() - disable the Generic System Framebuffers support
*
* This disables the registration of system framebuffer devices that match the
* generic drivers that make use of the system framebuffer set up by firmware.
*
* It also unregisters a device if this was already registered by sysfb_init().
*
* Context: The function can sleep. A @disable_lock mutex is acquired to serialize
* against sysfb_init(), that registers a system framebuffer device.
*/
void sysfb_disable(void)
{
mutex_lock(&disable_lock);
sysfb_unregister();
disabled = true;
mutex_unlock(&disable_lock);
}
EXPORT_SYMBOL_GPL(sysfb_disable);
static __init int sysfb_init(void)
{
struct screen_info *si = &screen_info;
struct simplefb_platform_data mode;
const char *name;
bool compatible;
int ret = 0;
mutex_lock(&disable_lock);
if (disabled)
goto unlock_mutex;
sysfb_apply_efi_quirks();
/* try to create a simple-framebuffer device */
compatible = sysfb_parse_mode(si, &mode);
if (compatible) {
pd = sysfb_create_simplefb(si, &mode);
if (!IS_ERR(pd))
goto unlock_mutex;
}
/* if the FB is incompatible, create a legacy framebuffer device */
if (si->orig_video_isVGA == VIDEO_TYPE_EFI)
name = "efi-framebuffer";
else if (si->orig_video_isVGA == VIDEO_TYPE_VLFB)
name = "vesa-framebuffer";
else if (si->orig_video_isVGA == VIDEO_TYPE_VGAC)
name = "vga-framebuffer";
else if (si->orig_video_isVGA == VIDEO_TYPE_EGAC)
name = "ega-framebuffer";
else
name = "platform-framebuffer";
pd = platform_device_alloc(name, 0);
if (!pd) {
ret = -ENOMEM;
goto unlock_mutex;
}
sysfb_set_efifb_fwnode(pd);
ret = platform_device_add_data(pd, si, sizeof(*si));
if (ret)
goto err;
ret = platform_device_add(pd);
if (ret)
goto err;
goto unlock_mutex;
err:
platform_device_put(pd);
unlock_mutex:
mutex_unlock(&disable_lock);
return ret;
}
/* must execute after PCI subsystem for EFI quirks */
subsys_initcall_sync(sysfb_init);
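/*
* Illustrative sketch, not part of this driver: a legacy framebuffer
* driver binds to one of the device names registered above and reads
* the screen_info passed as platform_data. The driver and function
* names below are hypothetical; only the platform_device/platform_data
* plumbing mirrors what sysfb_init() sets up.
*/
static int __maybe_unused example_efifb_probe(struct platform_device *dev)
{
struct screen_info *si = dev_get_platdata(&dev->dev);
if (!si)
return -ENODEV;
/* a real driver would map si->lfb_base and register a framebuffer */
return 0;
}
static struct platform_driver example_efifb_driver __maybe_unused = {
.driver = { .name = "efi-framebuffer" },
.probe = example_efifb_probe,
};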
| linux-master | drivers/firmware/sysfb.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/types.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/random.h>
#include <asm/dmi.h>
#include <asm/unaligned.h>
#ifndef SMBIOS_ENTRY_POINT_SCAN_START
#define SMBIOS_ENTRY_POINT_SCAN_START 0xF0000
#endif
struct kobject *dmi_kobj;
EXPORT_SYMBOL_GPL(dmi_kobj);
/*
* DMI stands for "Desktop Management Interface". It is part of,
* and an antecedent to, SMBIOS, which stands for System
* Management BIOS. See further: https://www.dmtf.org/standards
*/
static const char dmi_empty_string[] = "";
static u32 dmi_ver __initdata;
static u32 dmi_len;
static u16 dmi_num;
static u8 smbios_entry_point[32];
static int smbios_entry_point_size;
/* DMI system identification string used during boot */
static char dmi_ids_string[128] __initdata;
static struct dmi_memdev_info {
const char *device;
const char *bank;
u64 size; /* bytes */
u16 handle;
u8 type; /* DDR2, DDR3, DDR4 etc */
} *dmi_memdev;
static int dmi_memdev_nr;
static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s)
{
const u8 *bp = ((u8 *) dm) + dm->length;
const u8 *nsp;
if (s) {
while (--s > 0 && *bp)
bp += strlen(bp) + 1;
/* Strings containing only spaces are considered empty */
nsp = bp;
while (*nsp == ' ')
nsp++;
if (*nsp != '\0')
return bp;
}
return dmi_empty_string;
}
static const char * __init dmi_string(const struct dmi_header *dm, u8 s)
{
const char *bp = dmi_string_nosave(dm, s);
char *str;
size_t len;
if (bp == dmi_empty_string)
return dmi_empty_string;
len = strlen(bp) + 1;
str = dmi_alloc(len);
if (str != NULL)
strcpy(str, bp);
return str;
}
/*
* We have to be cautious here. We have seen BIOSes with DMI pointers
* pointing to completely the wrong place, for example.
*/
static void dmi_decode_table(u8 *buf,
void (*decode)(const struct dmi_header *, void *),
void *private_data)
{
u8 *data = buf;
int i = 0;
/*
* Stop when we have seen all the items the table claimed to have
* (SMBIOS < 3.0 only) OR we reach an end-of-table marker (SMBIOS
* >= 3.0 only) OR we run off the end of the table (should never
* happen but sometimes does on bogus implementations.)
*/
while ((!dmi_num || i < dmi_num) &&
(data - buf + sizeof(struct dmi_header)) <= dmi_len) {
const struct dmi_header *dm = (const struct dmi_header *)data;
/*
* We want to know the total length (formatted area and
* strings) before decoding to make sure we won't run off the
* table in dmi_decode or dmi_string
*/
data += dm->length;
while ((data - buf < dmi_len - 1) && (data[0] || data[1]))
data++;
if (data - buf < dmi_len - 1)
decode(dm, private_data);
data += 2;
i++;
/*
* 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0]
* For tables behind a 64-bit entry point, we have no item
* count and no exact table length, so stop on end-of-table
* marker. For tables behind a 32-bit entry point, we have
* seen OEM structures behind the end-of-table marker on
* some systems, so don't trust it.
*/
if (!dmi_num && dm->type == DMI_ENTRY_END_OF_TABLE)
break;
}
/* Trim DMI table length if needed */
if (dmi_len > data - buf)
dmi_len = data - buf;
}
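/*
* Illustrative record layout (not taken from any real table): a Type 1
* (System Information) entry with dm->length formatted bytes is
* followed by its string-set, each string NUL-terminated and the whole
* set closed by one extra NUL:
*
* <type> <length> <handle lo> <handle hi> ... formatted area ...
* "Vendor\0" "Product\0" "Version\0" "\0"
*
* The scan above skips dm->length bytes and then searches for that
* double NUL to find the start of the next record.
*/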
static phys_addr_t dmi_base;
static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
void *))
{
u8 *buf;
u32 orig_dmi_len = dmi_len;
buf = dmi_early_remap(dmi_base, orig_dmi_len);
if (buf == NULL)
return -ENOMEM;
dmi_decode_table(buf, decode, NULL);
add_device_randomness(buf, dmi_len);
dmi_early_unmap(buf, orig_dmi_len);
return 0;
}
static int __init dmi_checksum(const u8 *buf, u8 len)
{
u8 sum = 0;
int a;
for (a = 0; a < len; a++)
sum += buf[a];
return sum == 0;
}
static const char *dmi_ident[DMI_STRING_MAX];
static LIST_HEAD(dmi_devices);
int dmi_available;
EXPORT_SYMBOL_GPL(dmi_available);
/*
* Save a DMI string
*/
static void __init dmi_save_ident(const struct dmi_header *dm, int slot,
int string)
{
const char *d = (const char *) dm;
const char *p;
if (dmi_ident[slot] || dm->length <= string)
return;
p = dmi_string(dm, d[string]);
if (p == NULL)
return;
dmi_ident[slot] = p;
}
static void __init dmi_save_release(const struct dmi_header *dm, int slot,
int index)
{
const u8 *minor, *major;
char *s;
/* If the table doesn't have the field, let's return */
if (dmi_ident[slot] || dm->length < index)
return;
minor = (u8 *) dm + index;
major = (u8 *) dm + index - 1;
/* As per the spec, if the system doesn't support this field,
* the value is FF
*/
if (*major == 0xFF && *minor == 0xFF)
return;
s = dmi_alloc(8);
if (!s)
return;
sprintf(s, "%u.%u", *major, *minor);
dmi_ident[slot] = s;
}
static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
int index)
{
const u8 *d;
char *s;
int is_ff = 1, is_00 = 1, i;
if (dmi_ident[slot] || dm->length < index + 16)
return;
d = (u8 *) dm + index;
for (i = 0; i < 16 && (is_ff || is_00); i++) {
if (d[i] != 0x00)
is_00 = 0;
if (d[i] != 0xFF)
is_ff = 0;
}
if (is_ff || is_00)
return;
s = dmi_alloc(16*2+4+1);
if (!s)
return;
/*
* As of version 2.6 of the SMBIOS specification, the first 3 fields of
* the UUID are supposed to be little-endian encoded. The specification
* says that this is the de facto standard.
*/
if (dmi_ver >= 0x020600)
sprintf(s, "%pUl", d);
else
sprintf(s, "%pUb", d);
dmi_ident[slot] = s;
}
static void __init dmi_save_type(const struct dmi_header *dm, int slot,
int index)
{
const u8 *d;
char *s;
if (dmi_ident[slot] || dm->length <= index)
return;
s = dmi_alloc(4);
if (!s)
return;
d = (u8 *) dm + index;
sprintf(s, "%u", *d & 0x7F);
dmi_ident[slot] = s;
}
static void __init dmi_save_one_device(int type, const char *name)
{
struct dmi_device *dev;
/* No duplicate device */
if (dmi_find_device(type, name, NULL))
return;
dev = dmi_alloc(sizeof(*dev) + strlen(name) + 1);
if (!dev)
return;
dev->type = type;
strcpy((char *)(dev + 1), name);
dev->name = (char *)(dev + 1);
dev->device_data = NULL;
list_add(&dev->list, &dmi_devices);
}
static void __init dmi_save_devices(const struct dmi_header *dm)
{
int i, count = (dm->length - sizeof(struct dmi_header)) / 2;
for (i = 0; i < count; i++) {
const char *d = (char *)(dm + 1) + (i * 2);
/* Skip disabled device */
if ((*d & 0x80) == 0)
continue;
dmi_save_one_device(*d & 0x7f, dmi_string_nosave(dm, *(d + 1)));
}
}
static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm)
{
int i, count;
struct dmi_device *dev;
if (dm->length < 0x05)
return;
count = *(u8 *)(dm + 1);
for (i = 1; i <= count; i++) {
const char *devname = dmi_string(dm, i);
if (devname == dmi_empty_string)
continue;
dev = dmi_alloc(sizeof(*dev));
if (!dev)
break;
dev->type = DMI_DEV_TYPE_OEM_STRING;
dev->name = devname;
dev->device_data = NULL;
list_add(&dev->list, &dmi_devices);
}
}
static void __init dmi_save_ipmi_device(const struct dmi_header *dm)
{
struct dmi_device *dev;
void *data;
data = dmi_alloc(dm->length);
if (data == NULL)
return;
memcpy(data, dm, dm->length);
dev = dmi_alloc(sizeof(*dev));
if (!dev)
return;
dev->type = DMI_DEV_TYPE_IPMI;
dev->name = "IPMI controller";
dev->device_data = data;
list_add_tail(&dev->list, &dmi_devices);
}
static void __init dmi_save_dev_pciaddr(int instance, int segment, int bus,
int devfn, const char *name, int type)
{
struct dmi_dev_onboard *dev;
/* Ignore invalid values */
if (type == DMI_DEV_TYPE_DEV_SLOT &&
segment == 0xFFFF && bus == 0xFF && devfn == 0xFF)
return;
dev = dmi_alloc(sizeof(*dev) + strlen(name) + 1);
if (!dev)
return;
dev->instance = instance;
dev->segment = segment;
dev->bus = bus;
dev->devfn = devfn;
strcpy((char *)&dev[1], name);
dev->dev.type = type;
dev->dev.name = (char *)&dev[1];
dev->dev.device_data = dev;
list_add(&dev->dev.list, &dmi_devices);
}
static void __init dmi_save_extended_devices(const struct dmi_header *dm)
{
const char *name;
const u8 *d = (u8 *)dm;
if (dm->length < 0x0B)
return;
/* Skip disabled device */
if ((d[0x5] & 0x80) == 0)
return;
name = dmi_string_nosave(dm, d[0x4]);
dmi_save_dev_pciaddr(d[0x6], *(u16 *)(d + 0x7), d[0x9], d[0xA], name,
DMI_DEV_TYPE_DEV_ONBOARD);
dmi_save_one_device(d[0x5] & 0x7f, name);
}
static void __init dmi_save_system_slot(const struct dmi_header *dm)
{
const u8 *d = (u8 *)dm;
/* Need SMBIOS 2.6+ structure */
if (dm->length < 0x11)
return;
dmi_save_dev_pciaddr(*(u16 *)(d + 0x9), *(u16 *)(d + 0xD), d[0xF],
d[0x10], dmi_string_nosave(dm, d[0x4]),
DMI_DEV_TYPE_DEV_SLOT);
}
static void __init count_mem_devices(const struct dmi_header *dm, void *v)
{
if (dm->type != DMI_ENTRY_MEM_DEVICE)
return;
dmi_memdev_nr++;
}
static void __init save_mem_devices(const struct dmi_header *dm, void *v)
{
const char *d = (const char *)dm;
static int nr;
u64 bytes;
u16 size;
if (dm->type != DMI_ENTRY_MEM_DEVICE || dm->length < 0x13)
return;
if (nr >= dmi_memdev_nr) {
pr_warn(FW_BUG "Too many DIMM entries in SMBIOS table\n");
return;
}
dmi_memdev[nr].handle = get_unaligned(&dm->handle);
dmi_memdev[nr].device = dmi_string(dm, d[0x10]);
dmi_memdev[nr].bank = dmi_string(dm, d[0x11]);
dmi_memdev[nr].type = d[0x12];
size = get_unaligned((u16 *)&d[0xC]);
if (size == 0)		/* no memory device installed in this slot */
bytes = 0;
else if (size == 0xffff)	/* size unknown */
bytes = ~0ull;
else if (size & 0x8000)	/* bit 15 set: value is in kilobytes */
bytes = (u64)(size & 0x7fff) << 10;
else if (size != 0x7fff || dm->length < 0x20)	/* value in megabytes */
bytes = (u64)size << 20;
else	/* 0x7fff: real size is in the Extended Size field (megabytes) */
bytes = (u64)get_unaligned((u32 *)&d[0x1C]) << 20;
dmi_memdev[nr].size = bytes;
nr++;
}
static void __init dmi_memdev_walk(void)
{
if (dmi_walk_early(count_mem_devices) == 0 && dmi_memdev_nr) {
dmi_memdev = dmi_alloc(sizeof(*dmi_memdev) * dmi_memdev_nr);
if (dmi_memdev)
dmi_walk_early(save_mem_devices);
}
}
/*
* Process a DMI table entry. Right now all we care about are the BIOS
* and machine entries. For 2.5 we should pull the smbus controller info
* out of here.
*/
static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
{
switch (dm->type) {
case 0: /* BIOS Information */
dmi_save_ident(dm, DMI_BIOS_VENDOR, 4);
dmi_save_ident(dm, DMI_BIOS_VERSION, 5);
dmi_save_ident(dm, DMI_BIOS_DATE, 8);
dmi_save_release(dm, DMI_BIOS_RELEASE, 21);
dmi_save_release(dm, DMI_EC_FIRMWARE_RELEASE, 23);
break;
case 1: /* System Information */
dmi_save_ident(dm, DMI_SYS_VENDOR, 4);
dmi_save_ident(dm, DMI_PRODUCT_NAME, 5);
dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6);
dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7);
dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8);
dmi_save_ident(dm, DMI_PRODUCT_SKU, 25);
dmi_save_ident(dm, DMI_PRODUCT_FAMILY, 26);
break;
case 2: /* Base Board Information */
dmi_save_ident(dm, DMI_BOARD_VENDOR, 4);
dmi_save_ident(dm, DMI_BOARD_NAME, 5);
dmi_save_ident(dm, DMI_BOARD_VERSION, 6);
dmi_save_ident(dm, DMI_BOARD_SERIAL, 7);
dmi_save_ident(dm, DMI_BOARD_ASSET_TAG, 8);
break;
case 3: /* Chassis Information */
dmi_save_ident(dm, DMI_CHASSIS_VENDOR, 4);
dmi_save_type(dm, DMI_CHASSIS_TYPE, 5);
dmi_save_ident(dm, DMI_CHASSIS_VERSION, 6);
dmi_save_ident(dm, DMI_CHASSIS_SERIAL, 7);
dmi_save_ident(dm, DMI_CHASSIS_ASSET_TAG, 8);
break;
case 9: /* System Slots */
dmi_save_system_slot(dm);
break;
case 10: /* Onboard Devices Information */
dmi_save_devices(dm);
break;
case 11: /* OEM Strings */
dmi_save_oem_strings_devices(dm);
break;
case 38: /* IPMI Device Information */
dmi_save_ipmi_device(dm);
break;
case 41: /* Onboard Devices Extended Information */
dmi_save_extended_devices(dm);
}
}
static int __init print_filtered(char *buf, size_t len, const char *info)
{
int c = 0;
const char *p;
if (!info)
return c;
for (p = info; *p; p++)
if (isprint(*p))
c += scnprintf(buf + c, len - c, "%c", *p);
else
c += scnprintf(buf + c, len - c, "\\x%02x", *p & 0xff);
return c;
}
static void __init dmi_format_ids(char *buf, size_t len)
{
int c = 0;
const char *board; /* Board Name is optional */
c += print_filtered(buf + c, len - c,
dmi_get_system_info(DMI_SYS_VENDOR));
c += scnprintf(buf + c, len - c, " ");
c += print_filtered(buf + c, len - c,
dmi_get_system_info(DMI_PRODUCT_NAME));
board = dmi_get_system_info(DMI_BOARD_NAME);
if (board) {
c += scnprintf(buf + c, len - c, "/");
c += print_filtered(buf + c, len - c, board);
}
c += scnprintf(buf + c, len - c, ", BIOS ");
c += print_filtered(buf + c, len - c,
dmi_get_system_info(DMI_BIOS_VERSION));
c += scnprintf(buf + c, len - c, " ");
c += print_filtered(buf + c, len - c,
dmi_get_system_info(DMI_BIOS_DATE));
}
/*
* Check for DMI/SMBIOS headers in the system firmware image. Any
* SMBIOS header must start 16 bytes before the DMI header, so take a
* 32 byte buffer and check for DMI at offset 16 and SMBIOS at offset
* 0. If the DMI header is present, set dmi_ver accordingly (SMBIOS
* takes precedence) and return 0. Otherwise return 1.
*/
static int __init dmi_present(const u8 *buf)
{
u32 smbios_ver;
/*
* The size of this structure is 31 bytes, but we also accept value
* 30 due to a mistake in SMBIOS specification version 2.1.
*/
if (memcmp(buf, "_SM_", 4) == 0 &&
buf[5] >= 30 && buf[5] <= 32 &&
dmi_checksum(buf, buf[5])) {
smbios_ver = get_unaligned_be16(buf + 6);
smbios_entry_point_size = buf[5];
memcpy(smbios_entry_point, buf, smbios_entry_point_size);
/* Some BIOS report weird SMBIOS version, fix that up */
switch (smbios_ver) {
case 0x021F:
case 0x0221:
pr_debug("SMBIOS version fixup (2.%d->2.%d)\n",
smbios_ver & 0xFF, 3);
smbios_ver = 0x0203;
break;
case 0x0233:
pr_debug("SMBIOS version fixup (2.%d->2.%d)\n", 51, 6);
smbios_ver = 0x0206;
break;
}
} else {
smbios_ver = 0;
}
buf += 16;
if (memcmp(buf, "_DMI_", 5) == 0 && dmi_checksum(buf, 15)) {
if (smbios_ver)
dmi_ver = smbios_ver;
else
dmi_ver = (buf[14] & 0xF0) << 4 | (buf[14] & 0x0F);
dmi_ver <<= 8;
dmi_num = get_unaligned_le16(buf + 12);
dmi_len = get_unaligned_le16(buf + 6);
dmi_base = get_unaligned_le32(buf + 8);
if (dmi_walk_early(dmi_decode) == 0) {
if (smbios_ver) {
pr_info("SMBIOS %d.%d present.\n",
dmi_ver >> 16, (dmi_ver >> 8) & 0xFF);
} else {
smbios_entry_point_size = 15;
memcpy(smbios_entry_point, buf,
smbios_entry_point_size);
pr_info("Legacy DMI %d.%d present.\n",
dmi_ver >> 16, (dmi_ver >> 8) & 0xFF);
}
dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string));
pr_info("DMI: %s\n", dmi_ids_string);
return 0;
}
}
return 1;
}
/*
* Check for the SMBIOS 3.0 64-bit entry point signature. Unlike the legacy
* 32-bit entry point, there is no embedded DMI header (_DMI_) in here.
*/
static int __init dmi_smbios3_present(const u8 *buf)
{
if (memcmp(buf, "_SM3_", 5) == 0 &&
buf[6] >= 24 && buf[6] <= 32 &&
dmi_checksum(buf, buf[6])) {
dmi_ver = get_unaligned_be24(buf + 7);
dmi_num = 0; /* No longer specified */
dmi_len = get_unaligned_le32(buf + 12);
dmi_base = get_unaligned_le64(buf + 16);
smbios_entry_point_size = buf[6];
memcpy(smbios_entry_point, buf, smbios_entry_point_size);
if (dmi_walk_early(dmi_decode) == 0) {
pr_info("SMBIOS %d.%d.%d present.\n",
dmi_ver >> 16, (dmi_ver >> 8) & 0xFF,
dmi_ver & 0xFF);
dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string));
pr_info("DMI: %s\n", dmi_ids_string);
return 0;
}
}
return 1;
}
static void __init dmi_scan_machine(void)
{
char __iomem *p, *q;
char buf[32];
if (efi_enabled(EFI_CONFIG_TABLES)) {
/*
* According to the DMTF SMBIOS reference spec v3.0.0, it is
* allowed to define both the 64-bit entry point (smbios3) and
* the 32-bit entry point (smbios), in which case they should
* either both point to the same SMBIOS structure table, or the
* table pointed to by the 64-bit entry point should contain a
* superset of the table contents pointed to by the 32-bit entry
* point (section 5.2)
* This implies that the 64-bit entry point should have
* precedence if it is defined and supported by the OS. If we
* have the 64-bit entry point, but fail to decode it, fall
* back to the legacy one (if available)
*/
if (efi.smbios3 != EFI_INVALID_TABLE_ADDR) {
p = dmi_early_remap(efi.smbios3, 32);
if (p == NULL)
goto error;
memcpy_fromio(buf, p, 32);
dmi_early_unmap(p, 32);
if (!dmi_smbios3_present(buf)) {
dmi_available = 1;
return;
}
}
if (efi.smbios == EFI_INVALID_TABLE_ADDR)
goto error;
/* This is called as a core_initcall() because it isn't
* needed during early boot. This also means we can
* iounmap the space when we're done with it.
*/
p = dmi_early_remap(efi.smbios, 32);
if (p == NULL)
goto error;
memcpy_fromio(buf, p, 32);
dmi_early_unmap(p, 32);
if (!dmi_present(buf)) {
dmi_available = 1;
return;
}
} else if (IS_ENABLED(CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK)) {
p = dmi_early_remap(SMBIOS_ENTRY_POINT_SCAN_START, 0x10000);
if (p == NULL)
goto error;
/*
* Same logic as above, look for a 64-bit entry point
* first, and if not found, fall back to 32-bit entry point.
*/
memcpy_fromio(buf, p, 16);
for (q = p + 16; q < p + 0x10000; q += 16) {
memcpy_fromio(buf + 16, q, 16);
if (!dmi_smbios3_present(buf)) {
dmi_available = 1;
dmi_early_unmap(p, 0x10000);
return;
}
memcpy(buf, buf + 16, 16);
}
/*
* Iterate over all possible DMI header addresses q.
* Maintain the 32 bytes around q in buf. On the
* first iteration, substitute zero for the
* out-of-range bytes so there is no chance of falsely
* detecting an SMBIOS header.
*/
memset(buf, 0, 16);
for (q = p; q < p + 0x10000; q += 16) {
memcpy_fromio(buf + 16, q, 16);
if (!dmi_present(buf)) {
dmi_available = 1;
dmi_early_unmap(p, 0x10000);
return;
}
memcpy(buf, buf + 16, 16);
}
dmi_early_unmap(p, 0x10000);
}
error:
pr_info("DMI not present or invalid.\n");
}
static ssize_t raw_table_read(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t pos, size_t count)
{
memcpy(buf, attr->private + pos, count);
return count;
}
static BIN_ATTR(smbios_entry_point, S_IRUSR, raw_table_read, NULL, 0);
static BIN_ATTR(DMI, S_IRUSR, raw_table_read, NULL, 0);
static int __init dmi_init(void)
{
struct kobject *tables_kobj;
u8 *dmi_table;
int ret = -ENOMEM;
if (!dmi_available)
return 0;
/*
* Set up dmi directory at /sys/firmware/dmi. This entry should stay
* even after further errors, as it can be used by other modules like
* dmi-sysfs.
*/
dmi_kobj = kobject_create_and_add("dmi", firmware_kobj);
if (!dmi_kobj)
goto err;
tables_kobj = kobject_create_and_add("tables", dmi_kobj);
if (!tables_kobj)
goto err;
dmi_table = dmi_remap(dmi_base, dmi_len);
if (!dmi_table)
goto err_tables;
bin_attr_smbios_entry_point.size = smbios_entry_point_size;
bin_attr_smbios_entry_point.private = smbios_entry_point;
ret = sysfs_create_bin_file(tables_kobj, &bin_attr_smbios_entry_point);
if (ret)
goto err_unmap;
bin_attr_DMI.size = dmi_len;
bin_attr_DMI.private = dmi_table;
ret = sysfs_create_bin_file(tables_kobj, &bin_attr_DMI);
if (!ret)
return 0;
sysfs_remove_bin_file(tables_kobj,
&bin_attr_smbios_entry_point);
err_unmap:
dmi_unmap(dmi_table);
err_tables:
kobject_del(tables_kobj);
kobject_put(tables_kobj);
err:
pr_err("dmi: Firmware registration failed.\n");
return ret;
}
subsys_initcall(dmi_init);
/**
* dmi_setup - scan and set up DMI system information
*
* Scan the DMI system information. This sets up the DMI identifiers
* (dmi_system_id) for printing on task dumps and prepares DIMM entry
* information (dmi_memdev_info) from the SMBIOS table for use when
* reporting memory errors.
*/
void __init dmi_setup(void)
{
dmi_scan_machine();
if (!dmi_available)
return;
dmi_memdev_walk();
dump_stack_set_arch_desc("%s", dmi_ids_string);
}
/**
* dmi_matches - check if dmi_system_id structure matches system DMI data
* @dmi: pointer to the dmi_system_id structure to check
*/
static bool dmi_matches(const struct dmi_system_id *dmi)
{
int i;
for (i = 0; i < ARRAY_SIZE(dmi->matches); i++) {
int s = dmi->matches[i].slot;
if (s == DMI_NONE)
break;
if (s == DMI_OEM_STRING) {
/* DMI_OEM_STRING must be exact match */
const struct dmi_device *valid;
valid = dmi_find_device(DMI_DEV_TYPE_OEM_STRING,
dmi->matches[i].substr, NULL);
if (valid)
continue;
} else if (dmi_ident[s]) {
if (dmi->matches[i].exact_match) {
if (!strcmp(dmi_ident[s],
dmi->matches[i].substr))
continue;
} else {
if (strstr(dmi_ident[s],
dmi->matches[i].substr))
continue;
}
}
/* No match */
return false;
}
return true;
}
/**
* dmi_is_end_of_table - check for end-of-table marker
* @dmi: pointer to the dmi_system_id structure to check
*/
static bool dmi_is_end_of_table(const struct dmi_system_id *dmi)
{
return dmi->matches[0].slot == DMI_NONE;
}
/**
* dmi_check_system - check system DMI data
* @list: array of dmi_system_id structures to match against
* All non-null elements of the list must match
* their slot's (field index's) data (i.e., each
* list string must be a substring of the specified
* DMI slot's string data) to be considered a
* successful match.
*
* Walk the blacklist table running matching functions until someone
* returns non-zero or we hit the end. The callback function is called
* for each successful match. Returns the number of matches.
*
* dmi_setup must be called before this function is called.
*/
int dmi_check_system(const struct dmi_system_id *list)
{
int count = 0;
const struct dmi_system_id *d;
for (d = list; !dmi_is_end_of_table(d); d++)
if (dmi_matches(d)) {
count++;
if (d->callback && d->callback(d))
break;
}
return count;
}
EXPORT_SYMBOL(dmi_check_system);
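/*
* Illustrative sketch, not part of this file: the typical caller
* pattern for dmi_check_system(). The table contents, callback and
* names are hypothetical; DMI_MATCH() and struct dmi_system_id come
* from <linux/dmi.h>.
*/
static int __maybe_unused example_quirk_cb(const struct dmi_system_id *id)
{
pr_info("applying example quirk for %s\n", id->ident);
return 1; /* non-zero stops dmi_check_system() after this match */
}
static const struct dmi_system_id example_quirk_table[] __maybe_unused = {
{
.callback = example_quirk_cb,
.ident = "Example Vendor Example Board",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
DMI_MATCH(DMI_BOARD_NAME, "Example Board"),
},
},
{ } /* terminating entry: matches[0].slot == DMI_NONE */
};
/* usage would then be: dmi_check_system(example_quirk_table); */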
/**
* dmi_first_match - find dmi_system_id structure matching system DMI data
* @list: array of dmi_system_id structures to match against
* All non-null elements of the list must match
* their slot's (field index's) data (i.e., each
* list string must be a substring of the specified
* DMI slot's string data) to be considered a
* successful match.
*
* Walk the blacklist table until the first match is found. Return the
* pointer to the matching entry or NULL if there's no match.
*
* dmi_setup must be called before this function is called.
*/
const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list)
{
const struct dmi_system_id *d;
for (d = list; !dmi_is_end_of_table(d); d++)
if (dmi_matches(d))
return d;
return NULL;
}
EXPORT_SYMBOL(dmi_first_match);
/**
* dmi_get_system_info - return DMI data value
* @field: data index (see enum dmi_field)
*
* Returns one DMI data value, can be used to perform
* complex DMI data checks.
*/
const char *dmi_get_system_info(int field)
{
return dmi_ident[field];
}
EXPORT_SYMBOL(dmi_get_system_info);
/**
* dmi_name_in_serial - Check if string is in the DMI product serial information
* @str: string to check for
*/
int dmi_name_in_serial(const char *str)
{
int f = DMI_PRODUCT_SERIAL;
if (dmi_ident[f] && strstr(dmi_ident[f], str))
return 1;
return 0;
}
/**
* dmi_name_in_vendors - Check if string is in the DMI system or board vendor name
* @str: Case sensitive Name
*/
int dmi_name_in_vendors(const char *str)
{
static int fields[] = { DMI_SYS_VENDOR, DMI_BOARD_VENDOR, DMI_NONE };
int i;
for (i = 0; fields[i] != DMI_NONE; i++) {
int f = fields[i];
if (dmi_ident[f] && strstr(dmi_ident[f], str))
return 1;
}
return 0;
}
EXPORT_SYMBOL(dmi_name_in_vendors);
/**
* dmi_find_device - find onboard device by type/name
* @type: device type or %DMI_DEV_TYPE_ANY to match all device types
* @name: device name string or %NULL to match all
* @from: previous device found in search, or %NULL for new search.
*
* Iterates through the list of known onboard devices. If a device is
* found with a matching @type and @name, a pointer to its device
* structure is returned. Otherwise, %NULL is returned.
* A new search is initiated by passing %NULL as the @from argument.
* If @from is not %NULL, searches continue from next device.
*/
const struct dmi_device *dmi_find_device(int type, const char *name,
const struct dmi_device *from)
{
const struct list_head *head = from ? &from->list : &dmi_devices;
struct list_head *d;
for (d = head->next; d != &dmi_devices; d = d->next) {
const struct dmi_device *dev =
list_entry(d, struct dmi_device, list);
if (((type == DMI_DEV_TYPE_ANY) || (dev->type == type)) &&
((name == NULL) || (strcmp(dev->name, name) == 0)))
return dev;
}
return NULL;
}
EXPORT_SYMBOL(dmi_find_device);
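/*
* Illustrative sketch, not part of this file: walking all IPMI records
* saved by dmi_save_ipmi_device() above. The function name is
* hypothetical; passing the previous result back as @from continues
* the search.
*/
static void __maybe_unused example_walk_ipmi_devices(void)
{
const struct dmi_device *d = NULL;
while ((d = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, d))) {
const struct dmi_header *dm = d->device_data;
pr_info("IPMI record: type %u, length %u\n", dm->type, dm->length);
}
}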
/**
* dmi_get_date - parse a DMI date
* @field: data index (see enum dmi_field)
* @yearp: optional out parameter for the year
* @monthp: optional out parameter for the month
* @dayp: optional out parameter for the day
*
* The date field is assumed to be in the form resembling
* [mm[/dd]]/yy[yy] and the result is stored in the out
* parameters any or all of which can be omitted.
*
* If the field doesn't exist, all out parameters are set to zero
* and false is returned. Otherwise, true is returned with any
* invalid part of date set to zero.
*
* On return, year, month and day are guaranteed to be in the
* range of [0,9999], [0,12] and [0,31] respectively.
*/
bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp)
{
int year = 0, month = 0, day = 0;
bool exists;
const char *s, *y;
char *e;
s = dmi_get_system_info(field);
exists = s;
if (!exists)
goto out;
/*
* Determine year first. We assume the date string resembles
* mm/dd/yy[yy] but the original code extracted only the year
* from the end. Keep the behavior in the spirit of no
* surprises.
*/
y = strrchr(s, '/');
if (!y)
goto out;
y++;
year = simple_strtoul(y, &e, 10);
if (y != e && year < 100) { /* 2-digit year */
year += 1900;
if (year < 1996) /* no dates < spec 1.0 */
year += 100;
}
if (year > 9999) /* year should fit in %04d */
year = 0;
/* parse the mm and dd */
month = simple_strtoul(s, &e, 10);
if (s == e || *e != '/' || !month || month > 12) {
month = 0;
goto out;
}
s = e + 1;
day = simple_strtoul(s, &e, 10);
if (s == y || s == e || *e != '/' || day > 31)
day = 0;
out:
if (yearp)
*yearp = year;
if (monthp)
*monthp = month;
if (dayp)
*dayp = day;
return exists;
}
EXPORT_SYMBOL(dmi_get_date);
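/*
* Illustrative example, not part of this file: for a BIOS date string
* of "03/25/2019" dmi_get_date() fills year=2019, month=3, day=25; for
* "03/2019" (no day field) day is left at 0. The function name below
* is hypothetical.
*/
static void __maybe_unused example_print_bios_date(void)
{
int year, month, day;
if (dmi_get_date(DMI_BIOS_DATE, &year, &month, &day))
pr_info("BIOS date: %04d-%02d-%02d\n", year, month, day);
}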
/**
* dmi_get_bios_year - get a year out of DMI_BIOS_DATE field
*
* Returns year on success, -ENXIO if DMI is not selected,
* or a different negative error code if DMI field is not present
* or not parseable.
*/
int dmi_get_bios_year(void)
{
bool exists;
int year;
exists = dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL);
if (!exists)
return -ENODATA;
return year ? year : -ERANGE;
}
EXPORT_SYMBOL(dmi_get_bios_year);
/**
* dmi_walk - Walk the DMI table and get called back for every record
* @decode: Callback function
* @private_data: Private data to be passed to the callback function
*
* Returns 0 on success, -ENXIO if DMI is not selected or not present,
* or a different negative error code if DMI walking fails.
*/
int dmi_walk(void (*decode)(const struct dmi_header *, void *),
void *private_data)
{
u8 *buf;
if (!dmi_available)
return -ENXIO;
buf = dmi_remap(dmi_base, dmi_len);
if (buf == NULL)
return -ENOMEM;
dmi_decode_table(buf, decode, private_data);
dmi_unmap(buf);
return 0;
}
EXPORT_SYMBOL_GPL(dmi_walk);
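/*
* Illustrative sketch, not part of this file: a dmi_walk() callback
* that counts entries of one structure type. The names are
* hypothetical; the signature matches the @decode parameter above.
* Usage would be: int n = 0; dmi_walk(example_count_type4, &n);
*/
static void __maybe_unused example_count_type4(const struct dmi_header *dm,
void *private_data)
{
int *count = private_data;
if (dm->type == 4) /* Processor Information */
(*count)++;
}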
/**
* dmi_match - compare a string to the dmi field (if exists)
* @f: DMI field identifier
* @str: string to compare the DMI field to
*
* Returns true if the requested field equals the str (including NULL).
*/
bool dmi_match(enum dmi_field f, const char *str)
{
const char *info = dmi_get_system_info(f);
if (info == NULL || str == NULL)
return info == str;
return !strcmp(info, str);
}
EXPORT_SYMBOL_GPL(dmi_match);
void dmi_memdev_name(u16 handle, const char **bank, const char **device)
{
int n;
if (dmi_memdev == NULL)
return;
for (n = 0; n < dmi_memdev_nr; n++) {
if (handle == dmi_memdev[n].handle) {
*bank = dmi_memdev[n].bank;
*device = dmi_memdev[n].device;
break;
}
}
}
EXPORT_SYMBOL_GPL(dmi_memdev_name);
u64 dmi_memdev_size(u16 handle)
{
int n;
if (dmi_memdev) {
for (n = 0; n < dmi_memdev_nr; n++) {
if (handle == dmi_memdev[n].handle)
return dmi_memdev[n].size;
}
}
return ~0ull;
}
EXPORT_SYMBOL_GPL(dmi_memdev_size);
/**
* dmi_memdev_type - get the memory type
* @handle: DMI structure handle
*
* Return the DMI memory type of the module in the slot associated with the
* given DMI handle, or 0x0 if no such DMI handle exists.
*/
u8 dmi_memdev_type(u16 handle)
{
int n;
if (dmi_memdev) {
for (n = 0; n < dmi_memdev_nr; n++) {
if (handle == dmi_memdev[n].handle)
return dmi_memdev[n].type;
}
}
return 0x0; /* Not a valid value */
}
EXPORT_SYMBOL_GPL(dmi_memdev_type);
/**
* dmi_memdev_handle - get the DMI handle of a memory slot
* @slot: slot number
*
* Return the DMI handle associated with a given memory slot, or %0xFFFF
* if there is no such slot.
*/
u16 dmi_memdev_handle(int slot)
{
if (dmi_memdev && slot >= 0 && slot < dmi_memdev_nr)
return dmi_memdev[slot].handle;
return 0xffff; /* Not a valid value */
}
EXPORT_SYMBOL_GPL(dmi_memdev_handle);
| linux-master | drivers/firmware/dmi_scan.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* System Control and Power Interface (SCPI) Message Protocol driver
*
* The SCPI Message Protocol is used between the System Control Processor (SCP)
* and the Application Processors (AP). The Message Handling Unit (MHU)
* provides a mechanism for inter-processor communication between the SCP's
* Cortex-M3 and the AP.
*
* The SCP offers control and management of the core/cluster power states,
* DVFS for various power domains (including core/cluster), configuration
* of certain system clocks, thermal sensors and many others.
*
* Copyright (C) 2015 ARM Ltd.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitmap.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/printk.h>
#include <linux/pm_opp.h>
#include <linux/scpi_protocol.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/spinlock.h>
#define CMD_ID_MASK GENMASK(6, 0)
#define CMD_TOKEN_ID_MASK GENMASK(15, 8)
#define CMD_DATA_SIZE_MASK GENMASK(24, 16)
#define CMD_LEGACY_DATA_SIZE_MASK GENMASK(28, 20)
#define PACK_SCPI_CMD(cmd_id, tx_sz) \
(FIELD_PREP(CMD_ID_MASK, cmd_id) | \
FIELD_PREP(CMD_DATA_SIZE_MASK, tx_sz))
#define PACK_LEGACY_SCPI_CMD(cmd_id, tx_sz) \
(FIELD_PREP(CMD_ID_MASK, cmd_id) | \
FIELD_PREP(CMD_LEGACY_DATA_SIZE_MASK, tx_sz))
#define CMD_SIZE(cmd) FIELD_GET(CMD_DATA_SIZE_MASK, cmd)
#define CMD_UNIQ_MASK (CMD_TOKEN_ID_MASK | CMD_ID_MASK)
#define CMD_XTRACT_UNIQ(cmd) ((cmd) & CMD_UNIQ_MASK)
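/*
* Worked example (illustrative): PACK_SCPI_CMD(0x10, 2), i.e. command
* id 0x10 with a 2-byte payload, expands to
* FIELD_PREP(GENMASK(6, 0), 0x10) | FIELD_PREP(GENMASK(24, 16), 2)
* = 0x00000010 | 0x00020000 = 0x00020010,
* with the token OR-ed into bits 15:8 later by scpi_tx_prepare().
*/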
#define SCPI_SLOT 0
#define MAX_DVFS_DOMAINS 8
#define MAX_DVFS_OPPS 16
#define PROTO_REV_MAJOR_MASK GENMASK(31, 16)
#define PROTO_REV_MINOR_MASK GENMASK(15, 0)
#define FW_REV_MAJOR_MASK GENMASK(31, 24)
#define FW_REV_MINOR_MASK GENMASK(23, 16)
#define FW_REV_PATCH_MASK GENMASK(15, 0)
#define MAX_RX_TIMEOUT (msecs_to_jiffies(30))
enum scpi_error_codes {
SCPI_SUCCESS = 0, /* Success */
SCPI_ERR_PARAM = 1, /* Invalid parameter(s) */
SCPI_ERR_ALIGN = 2, /* Invalid alignment */
SCPI_ERR_SIZE = 3, /* Invalid size */
SCPI_ERR_HANDLER = 4, /* Invalid handler/callback */
SCPI_ERR_ACCESS = 5, /* Invalid access/permission denied */
SCPI_ERR_RANGE = 6, /* Value out of range */
SCPI_ERR_TIMEOUT = 7, /* Timeout has occurred */
SCPI_ERR_NOMEM = 8, /* Invalid memory area or pointer */
SCPI_ERR_PWRSTATE = 9, /* Invalid power state */
SCPI_ERR_SUPPORT = 10, /* Not supported or disabled */
SCPI_ERR_DEVICE = 11, /* Device error */
SCPI_ERR_BUSY = 12, /* Device busy */
SCPI_ERR_MAX
};
/* SCPI Standard commands */
enum scpi_std_cmd {
SCPI_CMD_INVALID = 0x00,
SCPI_CMD_SCPI_READY = 0x01,
SCPI_CMD_SCPI_CAPABILITIES = 0x02,
SCPI_CMD_SET_CSS_PWR_STATE = 0x03,
SCPI_CMD_GET_CSS_PWR_STATE = 0x04,
SCPI_CMD_SET_SYS_PWR_STATE = 0x05,
SCPI_CMD_SET_CPU_TIMER = 0x06,
SCPI_CMD_CANCEL_CPU_TIMER = 0x07,
SCPI_CMD_DVFS_CAPABILITIES = 0x08,
SCPI_CMD_GET_DVFS_INFO = 0x09,
SCPI_CMD_SET_DVFS = 0x0a,
SCPI_CMD_GET_DVFS = 0x0b,
SCPI_CMD_GET_DVFS_STAT = 0x0c,
SCPI_CMD_CLOCK_CAPABILITIES = 0x0d,
SCPI_CMD_GET_CLOCK_INFO = 0x0e,
SCPI_CMD_SET_CLOCK_VALUE = 0x0f,
SCPI_CMD_GET_CLOCK_VALUE = 0x10,
SCPI_CMD_PSU_CAPABILITIES = 0x11,
SCPI_CMD_GET_PSU_INFO = 0x12,
SCPI_CMD_SET_PSU = 0x13,
SCPI_CMD_GET_PSU = 0x14,
SCPI_CMD_SENSOR_CAPABILITIES = 0x15,
SCPI_CMD_SENSOR_INFO = 0x16,
SCPI_CMD_SENSOR_VALUE = 0x17,
SCPI_CMD_SENSOR_CFG_PERIODIC = 0x18,
SCPI_CMD_SENSOR_CFG_BOUNDS = 0x19,
SCPI_CMD_SENSOR_ASYNC_VALUE = 0x1a,
SCPI_CMD_SET_DEVICE_PWR_STATE = 0x1b,
SCPI_CMD_GET_DEVICE_PWR_STATE = 0x1c,
SCPI_CMD_COUNT
};
/* SCPI Legacy Commands */
enum legacy_scpi_std_cmd {
LEGACY_SCPI_CMD_INVALID = 0x00,
LEGACY_SCPI_CMD_SCPI_READY = 0x01,
LEGACY_SCPI_CMD_SCPI_CAPABILITIES = 0x02,
LEGACY_SCPI_CMD_EVENT = 0x03,
LEGACY_SCPI_CMD_SET_CSS_PWR_STATE = 0x04,
LEGACY_SCPI_CMD_GET_CSS_PWR_STATE = 0x05,
LEGACY_SCPI_CMD_CFG_PWR_STATE_STAT = 0x06,
LEGACY_SCPI_CMD_GET_PWR_STATE_STAT = 0x07,
LEGACY_SCPI_CMD_SYS_PWR_STATE = 0x08,
LEGACY_SCPI_CMD_L2_READY = 0x09,
LEGACY_SCPI_CMD_SET_AP_TIMER = 0x0a,
LEGACY_SCPI_CMD_CANCEL_AP_TIME = 0x0b,
LEGACY_SCPI_CMD_DVFS_CAPABILITIES = 0x0c,
LEGACY_SCPI_CMD_GET_DVFS_INFO = 0x0d,
LEGACY_SCPI_CMD_SET_DVFS = 0x0e,
LEGACY_SCPI_CMD_GET_DVFS = 0x0f,
LEGACY_SCPI_CMD_GET_DVFS_STAT = 0x10,
LEGACY_SCPI_CMD_SET_RTC = 0x11,
LEGACY_SCPI_CMD_GET_RTC = 0x12,
LEGACY_SCPI_CMD_CLOCK_CAPABILITIES = 0x13,
LEGACY_SCPI_CMD_SET_CLOCK_INDEX = 0x14,
LEGACY_SCPI_CMD_SET_CLOCK_VALUE = 0x15,
LEGACY_SCPI_CMD_GET_CLOCK_VALUE = 0x16,
LEGACY_SCPI_CMD_PSU_CAPABILITIES = 0x17,
LEGACY_SCPI_CMD_SET_PSU = 0x18,
LEGACY_SCPI_CMD_GET_PSU = 0x19,
LEGACY_SCPI_CMD_SENSOR_CAPABILITIES = 0x1a,
LEGACY_SCPI_CMD_SENSOR_INFO = 0x1b,
LEGACY_SCPI_CMD_SENSOR_VALUE = 0x1c,
LEGACY_SCPI_CMD_SENSOR_CFG_PERIODIC = 0x1d,
LEGACY_SCPI_CMD_SENSOR_CFG_BOUNDS = 0x1e,
LEGACY_SCPI_CMD_SENSOR_ASYNC_VALUE = 0x1f,
LEGACY_SCPI_CMD_COUNT
};
/* List all commands that are required to go through the high priority link */
static int legacy_hpriority_cmds[] = {
LEGACY_SCPI_CMD_GET_CSS_PWR_STATE,
LEGACY_SCPI_CMD_CFG_PWR_STATE_STAT,
LEGACY_SCPI_CMD_GET_PWR_STATE_STAT,
LEGACY_SCPI_CMD_SET_DVFS,
LEGACY_SCPI_CMD_GET_DVFS,
LEGACY_SCPI_CMD_SET_RTC,
LEGACY_SCPI_CMD_GET_RTC,
LEGACY_SCPI_CMD_SET_CLOCK_INDEX,
LEGACY_SCPI_CMD_SET_CLOCK_VALUE,
LEGACY_SCPI_CMD_GET_CLOCK_VALUE,
LEGACY_SCPI_CMD_SET_PSU,
LEGACY_SCPI_CMD_GET_PSU,
LEGACY_SCPI_CMD_SENSOR_CFG_PERIODIC,
LEGACY_SCPI_CMD_SENSOR_CFG_BOUNDS,
};
/* List all commands used by this driver, used as indexes */
enum scpi_drv_cmds {
CMD_SCPI_CAPABILITIES = 0,
CMD_GET_CLOCK_INFO,
CMD_GET_CLOCK_VALUE,
CMD_SET_CLOCK_VALUE,
CMD_GET_DVFS,
CMD_SET_DVFS,
CMD_GET_DVFS_INFO,
CMD_SENSOR_CAPABILITIES,
CMD_SENSOR_INFO,
CMD_SENSOR_VALUE,
CMD_SET_DEVICE_PWR_STATE,
CMD_GET_DEVICE_PWR_STATE,
CMD_MAX_COUNT,
};
static int scpi_std_commands[CMD_MAX_COUNT] = {
SCPI_CMD_SCPI_CAPABILITIES,
SCPI_CMD_GET_CLOCK_INFO,
SCPI_CMD_GET_CLOCK_VALUE,
SCPI_CMD_SET_CLOCK_VALUE,
SCPI_CMD_GET_DVFS,
SCPI_CMD_SET_DVFS,
SCPI_CMD_GET_DVFS_INFO,
SCPI_CMD_SENSOR_CAPABILITIES,
SCPI_CMD_SENSOR_INFO,
SCPI_CMD_SENSOR_VALUE,
SCPI_CMD_SET_DEVICE_PWR_STATE,
SCPI_CMD_GET_DEVICE_PWR_STATE,
};
static int scpi_legacy_commands[CMD_MAX_COUNT] = {
LEGACY_SCPI_CMD_SCPI_CAPABILITIES,
-1, /* GET_CLOCK_INFO */
LEGACY_SCPI_CMD_GET_CLOCK_VALUE,
LEGACY_SCPI_CMD_SET_CLOCK_VALUE,
LEGACY_SCPI_CMD_GET_DVFS,
LEGACY_SCPI_CMD_SET_DVFS,
LEGACY_SCPI_CMD_GET_DVFS_INFO,
LEGACY_SCPI_CMD_SENSOR_CAPABILITIES,
LEGACY_SCPI_CMD_SENSOR_INFO,
LEGACY_SCPI_CMD_SENSOR_VALUE,
-1, /* SET_DEVICE_PWR_STATE */
-1, /* GET_DEVICE_PWR_STATE */
};
struct scpi_xfer {
u32 slot; /* has to be first element */
u32 cmd;
u32 status;
const void *tx_buf;
void *rx_buf;
unsigned int tx_len;
unsigned int rx_len;
struct list_head node;
struct completion done;
};
struct scpi_chan {
struct mbox_client cl;
struct mbox_chan *chan;
void __iomem *tx_payload;
void __iomem *rx_payload;
struct list_head rx_pending;
struct list_head xfers_list;
struct scpi_xfer *xfers;
spinlock_t rx_lock; /* locking for the rx pending list */
struct mutex xfers_lock;
u8 token;
};
struct scpi_drvinfo {
u32 protocol_version;
u32 firmware_version;
bool is_legacy;
int num_chans;
int *commands;
DECLARE_BITMAP(cmd_priority, LEGACY_SCPI_CMD_COUNT);
atomic_t next_chan;
struct scpi_ops *scpi_ops;
struct scpi_chan *channels;
struct scpi_dvfs_info *dvfs[MAX_DVFS_DOMAINS];
};
/*
* The SCP firmware only executes in little-endian mode, so any buffers
* shared through SCPI should have their contents converted to little-endian
*/
struct scpi_shared_mem {
__le32 command;
__le32 status;
u8 payload[];
} __packed;
struct legacy_scpi_shared_mem {
__le32 status;
u8 payload[];
} __packed;
struct scp_capabilities {
__le32 protocol_version;
__le32 event_version;
__le32 platform_version;
__le32 commands[4];
} __packed;
struct clk_get_info {
__le16 id;
__le16 flags;
__le32 min_rate;
__le32 max_rate;
u8 name[20];
} __packed;
struct clk_set_value {
__le16 id;
__le16 reserved;
__le32 rate;
} __packed;
struct legacy_clk_set_value {
__le32 rate;
__le16 id;
__le16 reserved;
} __packed;
struct dvfs_info {
u8 domain;
u8 opp_count;
__le16 latency;
struct {
__le32 freq;
__le32 m_volt;
} opps[MAX_DVFS_OPPS];
} __packed;
struct dvfs_set {
u8 domain;
u8 index;
} __packed;
struct _scpi_sensor_info {
__le16 sensor_id;
u8 class;
u8 trigger_type;
char name[20];
};
struct dev_pstate_set {
__le16 dev_id;
u8 pstate;
} __packed;
static struct scpi_drvinfo *scpi_info;
static int scpi_linux_errmap[SCPI_ERR_MAX] = {
/* better than switch case as long as return value is continuous */
0, /* SCPI_SUCCESS */
-EINVAL, /* SCPI_ERR_PARAM */
-ENOEXEC, /* SCPI_ERR_ALIGN */
-EMSGSIZE, /* SCPI_ERR_SIZE */
-EINVAL, /* SCPI_ERR_HANDLER */
-EACCES, /* SCPI_ERR_ACCESS */
-ERANGE, /* SCPI_ERR_RANGE */
-ETIMEDOUT, /* SCPI_ERR_TIMEOUT */
-ENOMEM, /* SCPI_ERR_NOMEM */
-EINVAL, /* SCPI_ERR_PWRSTATE */
-EOPNOTSUPP, /* SCPI_ERR_SUPPORT */
-EIO, /* SCPI_ERR_DEVICE */
-EBUSY, /* SCPI_ERR_BUSY */
};
static inline int scpi_to_linux_errno(int errno)
{
if (errno >= SCPI_SUCCESS && errno < SCPI_ERR_MAX)
return scpi_linux_errmap[errno];
return -EIO;
}
static void scpi_process_cmd(struct scpi_chan *ch, u32 cmd)
{
unsigned long flags;
struct scpi_xfer *t, *match = NULL;
spin_lock_irqsave(&ch->rx_lock, flags);
if (list_empty(&ch->rx_pending)) {
spin_unlock_irqrestore(&ch->rx_lock, flags);
return;
}
/* The command type is not echoed back by the SCP firmware in legacy
* mode, so assume the reply belongs to the command at the head of the
* pending RX list, if the list is not empty. In TX-only mode the list
* would be empty.
*/
if (scpi_info->is_legacy) {
match = list_first_entry(&ch->rx_pending, struct scpi_xfer,
node);
list_del(&match->node);
} else {
list_for_each_entry(t, &ch->rx_pending, node)
if (CMD_XTRACT_UNIQ(t->cmd) == CMD_XTRACT_UNIQ(cmd)) {
list_del(&t->node);
match = t;
break;
}
}
/* check if wait_for_completion is in progress or timed-out */
if (match && !completion_done(&match->done)) {
unsigned int len;
if (scpi_info->is_legacy) {
struct legacy_scpi_shared_mem __iomem *mem =
ch->rx_payload;
/* RX Length is not replied by the legacy Firmware */
len = match->rx_len;
match->status = ioread32(&mem->status);
memcpy_fromio(match->rx_buf, mem->payload, len);
} else {
struct scpi_shared_mem __iomem *mem = ch->rx_payload;
len = min_t(unsigned int, match->rx_len, CMD_SIZE(cmd));
match->status = ioread32(&mem->status);
memcpy_fromio(match->rx_buf, mem->payload, len);
}
if (match->rx_len > len)
memset(match->rx_buf + len, 0, match->rx_len - len);
complete(&match->done);
}
spin_unlock_irqrestore(&ch->rx_lock, flags);
}
static void scpi_handle_remote_msg(struct mbox_client *c, void *msg)
{
struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
struct scpi_shared_mem __iomem *mem = ch->rx_payload;
u32 cmd = 0;
if (!scpi_info->is_legacy)
cmd = ioread32(&mem->command);
scpi_process_cmd(ch, cmd);
}
static void scpi_tx_prepare(struct mbox_client *c, void *msg)
{
unsigned long flags;
struct scpi_xfer *t = msg;
struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
struct scpi_shared_mem __iomem *mem = ch->tx_payload;
if (t->tx_buf) {
if (scpi_info->is_legacy)
memcpy_toio(ch->tx_payload, t->tx_buf, t->tx_len);
else
memcpy_toio(mem->payload, t->tx_buf, t->tx_len);
}
if (t->rx_buf) {
if (!(++ch->token))
++ch->token;
t->cmd |= FIELD_PREP(CMD_TOKEN_ID_MASK, ch->token);
spin_lock_irqsave(&ch->rx_lock, flags);
list_add_tail(&t->node, &ch->rx_pending);
spin_unlock_irqrestore(&ch->rx_lock, flags);
}
if (!scpi_info->is_legacy)
iowrite32(t->cmd, &mem->command);
}
static struct scpi_xfer *get_scpi_xfer(struct scpi_chan *ch)
{
struct scpi_xfer *t;
mutex_lock(&ch->xfers_lock);
if (list_empty(&ch->xfers_list)) {
mutex_unlock(&ch->xfers_lock);
return NULL;
}
t = list_first_entry(&ch->xfers_list, struct scpi_xfer, node);
list_del(&t->node);
mutex_unlock(&ch->xfers_lock);
return t;
}
static void put_scpi_xfer(struct scpi_xfer *t, struct scpi_chan *ch)
{
mutex_lock(&ch->xfers_lock);
list_add_tail(&t->node, &ch->xfers_list);
mutex_unlock(&ch->xfers_lock);
}
static int scpi_send_message(u8 idx, void *tx_buf, unsigned int tx_len,
void *rx_buf, unsigned int rx_len)
{
int ret;
u8 chan;
u8 cmd;
struct scpi_xfer *msg;
struct scpi_chan *scpi_chan;
if (scpi_info->commands[idx] < 0)
return -EOPNOTSUPP;
cmd = scpi_info->commands[idx];
if (scpi_info->is_legacy)
chan = test_bit(cmd, scpi_info->cmd_priority) ? 1 : 0;
else
chan = atomic_inc_return(&scpi_info->next_chan) %
scpi_info->num_chans;
scpi_chan = scpi_info->channels + chan;
msg = get_scpi_xfer(scpi_chan);
if (!msg)
return -ENOMEM;
if (scpi_info->is_legacy) {
msg->cmd = PACK_LEGACY_SCPI_CMD(cmd, tx_len);
msg->slot = msg->cmd;
} else {
msg->slot = BIT(SCPI_SLOT);
msg->cmd = PACK_SCPI_CMD(cmd, tx_len);
}
msg->tx_buf = tx_buf;
msg->tx_len = tx_len;
msg->rx_buf = rx_buf;
msg->rx_len = rx_len;
reinit_completion(&msg->done);
ret = mbox_send_message(scpi_chan->chan, msg);
if (ret < 0 || !rx_buf)
goto out;
if (!wait_for_completion_timeout(&msg->done, MAX_RX_TIMEOUT))
ret = -ETIMEDOUT;
else
/* first status word */
ret = msg->status;
out:
if (ret < 0 && rx_buf) /* remove entry from the list if timed-out */
scpi_process_cmd(scpi_chan, msg->cmd);
put_scpi_xfer(msg, scpi_chan);
/* SCPI error codes > 0, translate them to Linux scale */
return ret > 0 ? scpi_to_linux_errno(ret) : ret;
}
static u32 scpi_get_version(void)
{
return scpi_info->protocol_version;
}
static int
scpi_clk_get_range(u16 clk_id, unsigned long *min, unsigned long *max)
{
int ret;
struct clk_get_info clk;
__le16 le_clk_id = cpu_to_le16(clk_id);
ret = scpi_send_message(CMD_GET_CLOCK_INFO, &le_clk_id,
sizeof(le_clk_id), &clk, sizeof(clk));
if (!ret) {
*min = le32_to_cpu(clk.min_rate);
*max = le32_to_cpu(clk.max_rate);
}
return ret;
}
static unsigned long scpi_clk_get_val(u16 clk_id)
{
int ret;
__le32 rate;
__le16 le_clk_id = cpu_to_le16(clk_id);
ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id,
sizeof(le_clk_id), &rate, sizeof(rate));
if (ret)
return 0;
return le32_to_cpu(rate);
}
static int scpi_clk_set_val(u16 clk_id, unsigned long rate)
{
int stat;
struct clk_set_value clk = {
.id = cpu_to_le16(clk_id),
.rate = cpu_to_le32(rate)
};
return scpi_send_message(CMD_SET_CLOCK_VALUE, &clk, sizeof(clk),
&stat, sizeof(stat));
}
static int legacy_scpi_clk_set_val(u16 clk_id, unsigned long rate)
{
int stat;
struct legacy_clk_set_value clk = {
.id = cpu_to_le16(clk_id),
.rate = cpu_to_le32(rate)
};
return scpi_send_message(CMD_SET_CLOCK_VALUE, &clk, sizeof(clk),
&stat, sizeof(stat));
}
static int scpi_dvfs_get_idx(u8 domain)
{
int ret;
u8 dvfs_idx;
ret = scpi_send_message(CMD_GET_DVFS, &domain, sizeof(domain),
&dvfs_idx, sizeof(dvfs_idx));
return ret ? ret : dvfs_idx;
}
static int scpi_dvfs_set_idx(u8 domain, u8 index)
{
int stat;
struct dvfs_set dvfs = {domain, index};
return scpi_send_message(CMD_SET_DVFS, &dvfs, sizeof(dvfs),
&stat, sizeof(stat));
}
static int opp_cmp_func(const void *opp1, const void *opp2)
{
const struct scpi_opp *t1 = opp1, *t2 = opp2;
return t1->freq - t2->freq;
}
static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain)
{
struct scpi_dvfs_info *info;
struct scpi_opp *opp;
struct dvfs_info buf;
int ret, i;
if (domain >= MAX_DVFS_DOMAINS)
return ERR_PTR(-EINVAL);
if (scpi_info->dvfs[domain]) /* data already populated */
return scpi_info->dvfs[domain];
ret = scpi_send_message(CMD_GET_DVFS_INFO, &domain, sizeof(domain),
&buf, sizeof(buf));
if (ret)
return ERR_PTR(ret);
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return ERR_PTR(-ENOMEM);
info->count = buf.opp_count;
info->latency = le16_to_cpu(buf.latency) * 1000; /* uS to nS */
info->opps = kcalloc(info->count, sizeof(*opp), GFP_KERNEL);
if (!info->opps) {
kfree(info);
return ERR_PTR(-ENOMEM);
}
for (i = 0, opp = info->opps; i < info->count; i++, opp++) {
opp->freq = le32_to_cpu(buf.opps[i].freq);
opp->m_volt = le32_to_cpu(buf.opps[i].m_volt);
}
sort(info->opps, info->count, sizeof(*opp), opp_cmp_func, NULL);
scpi_info->dvfs[domain] = info;
return info;
}
static int scpi_dev_domain_id(struct device *dev)
{
struct of_phandle_args clkspec;
if (of_parse_phandle_with_args(dev->of_node, "clocks", "#clock-cells",
0, &clkspec))
return -EINVAL;
return clkspec.args[0];
}
static struct scpi_dvfs_info *scpi_dvfs_info(struct device *dev)
{
int domain = scpi_dev_domain_id(dev);
if (domain < 0)
return ERR_PTR(domain);
return scpi_dvfs_get_info(domain);
}
static int scpi_dvfs_get_transition_latency(struct device *dev)
{
struct scpi_dvfs_info *info = scpi_dvfs_info(dev);
if (IS_ERR(info))
return PTR_ERR(info);
return info->latency;
}
static int scpi_dvfs_add_opps_to_device(struct device *dev)
{
int idx, ret;
struct scpi_opp *opp;
struct scpi_dvfs_info *info = scpi_dvfs_info(dev);
if (IS_ERR(info))
return PTR_ERR(info);
if (!info->opps)
return -EIO;
for (opp = info->opps, idx = 0; idx < info->count; idx++, opp++) {
ret = dev_pm_opp_add(dev, opp->freq, opp->m_volt * 1000);
if (ret) {
dev_warn(dev, "failed to add opp %uHz %umV\n",
opp->freq, opp->m_volt);
while (idx-- > 0)
dev_pm_opp_remove(dev, (--opp)->freq);
return ret;
}
}
return 0;
}
static int scpi_sensor_get_capability(u16 *sensors)
{
__le16 cap;
int ret;
ret = scpi_send_message(CMD_SENSOR_CAPABILITIES, NULL, 0, &cap,
sizeof(cap));
if (!ret)
*sensors = le16_to_cpu(cap);
return ret;
}
static int scpi_sensor_get_info(u16 sensor_id, struct scpi_sensor_info *info)
{
__le16 id = cpu_to_le16(sensor_id);
struct _scpi_sensor_info _info;
int ret;
ret = scpi_send_message(CMD_SENSOR_INFO, &id, sizeof(id),
&_info, sizeof(_info));
if (!ret) {
memcpy(info, &_info, sizeof(*info));
info->sensor_id = le16_to_cpu(_info.sensor_id);
}
return ret;
}
static int scpi_sensor_get_value(u16 sensor, u64 *val)
{
__le16 id = cpu_to_le16(sensor);
__le64 value;
int ret;
ret = scpi_send_message(CMD_SENSOR_VALUE, &id, sizeof(id),
&value, sizeof(value));
if (ret)
return ret;
if (scpi_info->is_legacy)
/* only 32-bits supported, upper 32 bits can be junk */
*val = le32_to_cpup((__le32 *)&value);
else
*val = le64_to_cpu(value);
return 0;
}
static int scpi_device_get_power_state(u16 dev_id)
{
int ret;
u8 pstate;
__le16 id = cpu_to_le16(dev_id);
ret = scpi_send_message(CMD_GET_DEVICE_PWR_STATE, &id,
sizeof(id), &pstate, sizeof(pstate));
return ret ? ret : pstate;
}
static int scpi_device_set_power_state(u16 dev_id, u8 pstate)
{
int stat;
struct dev_pstate_set dev_set = {
.dev_id = cpu_to_le16(dev_id),
.pstate = pstate,
};
return scpi_send_message(CMD_SET_DEVICE_PWR_STATE, &dev_set,
sizeof(dev_set), &stat, sizeof(stat));
}
static struct scpi_ops scpi_ops = {
.get_version = scpi_get_version,
.clk_get_range = scpi_clk_get_range,
.clk_get_val = scpi_clk_get_val,
.clk_set_val = scpi_clk_set_val,
.dvfs_get_idx = scpi_dvfs_get_idx,
.dvfs_set_idx = scpi_dvfs_set_idx,
.dvfs_get_info = scpi_dvfs_get_info,
.device_domain_id = scpi_dev_domain_id,
.get_transition_latency = scpi_dvfs_get_transition_latency,
.add_opps_to_device = scpi_dvfs_add_opps_to_device,
.sensor_get_capability = scpi_sensor_get_capability,
.sensor_get_info = scpi_sensor_get_info,
.sensor_get_value = scpi_sensor_get_value,
.device_get_power_state = scpi_device_get_power_state,
.device_set_power_state = scpi_device_set_power_state,
};
struct scpi_ops *get_scpi_ops(void)
{
return scpi_info ? scpi_info->scpi_ops : NULL;
}
EXPORT_SYMBOL_GPL(get_scpi_ops);
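/*
* Illustrative sketch, not part of this file: how a consumer driver
* (e.g. the SCPI clock or hwmon driver) would use the exported ops.
* The function name is hypothetical; get_scpi_ops() returns NULL until
* this driver has probed.
*/
static int __maybe_unused example_read_sensor(u16 sensor_id, u64 *value)
{
struct scpi_ops *ops = get_scpi_ops();
if (!ops)
return -EPROBE_DEFER;
return ops->sensor_get_value(sensor_id, value);
}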
static int scpi_init_versions(struct scpi_drvinfo *info)
{
int ret;
struct scp_capabilities caps;
ret = scpi_send_message(CMD_SCPI_CAPABILITIES, NULL, 0,
&caps, sizeof(caps));
if (!ret) {
info->protocol_version = le32_to_cpu(caps.protocol_version);
info->firmware_version = le32_to_cpu(caps.platform_version);
}
/* Ignore error if not implemented */
if (info->is_legacy && ret == -EOPNOTSUPP)
return 0;
return ret;
}
static ssize_t protocol_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev);
return sprintf(buf, "%lu.%lu\n",
FIELD_GET(PROTO_REV_MAJOR_MASK, scpi_info->protocol_version),
FIELD_GET(PROTO_REV_MINOR_MASK, scpi_info->protocol_version));
}
static DEVICE_ATTR_RO(protocol_version);
static ssize_t firmware_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev);
return sprintf(buf, "%lu.%lu.%lu\n",
FIELD_GET(FW_REV_MAJOR_MASK, scpi_info->firmware_version),
FIELD_GET(FW_REV_MINOR_MASK, scpi_info->firmware_version),
FIELD_GET(FW_REV_PATCH_MASK, scpi_info->firmware_version));
}
static DEVICE_ATTR_RO(firmware_version);
static struct attribute *versions_attrs[] = {
&dev_attr_firmware_version.attr,
&dev_attr_protocol_version.attr,
NULL,
};
ATTRIBUTE_GROUPS(versions);
static void scpi_free_channels(void *data)
{
struct scpi_drvinfo *info = data;
int i;
for (i = 0; i < info->num_chans; i++)
mbox_free_channel(info->channels[i].chan);
}
static int scpi_remove(struct platform_device *pdev)
{
int i;
struct scpi_drvinfo *info = platform_get_drvdata(pdev);
scpi_info = NULL; /* stop exporting SCPI ops through get_scpi_ops */
for (i = 0; i < MAX_DVFS_DOMAINS && info->dvfs[i]; i++) {
kfree(info->dvfs[i]->opps);
kfree(info->dvfs[i]);
}
return 0;
}
#define MAX_SCPI_XFERS 10
static int scpi_alloc_xfer_list(struct device *dev, struct scpi_chan *ch)
{
int i;
struct scpi_xfer *xfers;
xfers = devm_kcalloc(dev, MAX_SCPI_XFERS, sizeof(*xfers), GFP_KERNEL);
if (!xfers)
return -ENOMEM;
ch->xfers = xfers;
for (i = 0; i < MAX_SCPI_XFERS; i++, xfers++) {
init_completion(&xfers->done);
list_add_tail(&xfers->node, &ch->xfers_list);
}
return 0;
}
static const struct of_device_id legacy_scpi_of_match[] = {
{.compatible = "arm,scpi-pre-1.0"},
{},
};
static const struct of_device_id shmem_of_match[] __maybe_unused = {
{ .compatible = "amlogic,meson-gxbb-scp-shmem", },
{ .compatible = "amlogic,meson-axg-scp-shmem", },
{ .compatible = "arm,juno-scp-shmem", },
{ .compatible = "arm,scp-shmem", },
{ }
};
static int scpi_probe(struct platform_device *pdev)
{
int count, idx, ret;
struct resource res;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct scpi_drvinfo *scpi_drvinfo;
scpi_drvinfo = devm_kzalloc(dev, sizeof(*scpi_drvinfo), GFP_KERNEL);
if (!scpi_drvinfo)
return -ENOMEM;
if (of_match_device(legacy_scpi_of_match, &pdev->dev))
scpi_drvinfo->is_legacy = true;
count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
if (count < 0) {
dev_err(dev, "no mboxes property in '%pOF'\n", np);
return -ENODEV;
}
scpi_drvinfo->channels =
devm_kcalloc(dev, count, sizeof(struct scpi_chan), GFP_KERNEL);
if (!scpi_drvinfo->channels)
return -ENOMEM;
ret = devm_add_action(dev, scpi_free_channels, scpi_drvinfo);
if (ret)
return ret;
for (; scpi_drvinfo->num_chans < count; scpi_drvinfo->num_chans++) {
resource_size_t size;
int idx = scpi_drvinfo->num_chans;
struct scpi_chan *pchan = scpi_drvinfo->channels + idx;
struct mbox_client *cl = &pchan->cl;
struct device_node *shmem = of_parse_phandle(np, "shmem", idx);
if (!of_match_node(shmem_of_match, shmem))
return -ENXIO;
ret = of_address_to_resource(shmem, 0, &res);
of_node_put(shmem);
if (ret) {
dev_err(dev, "failed to get SCPI payload mem resource\n");
return ret;
}
size = resource_size(&res);
pchan->rx_payload = devm_ioremap(dev, res.start, size);
if (!pchan->rx_payload) {
dev_err(dev, "failed to ioremap SCPI payload\n");
return -EADDRNOTAVAIL;
}
pchan->tx_payload = pchan->rx_payload + (size >> 1);
cl->dev = dev;
cl->rx_callback = scpi_handle_remote_msg;
cl->tx_prepare = scpi_tx_prepare;
cl->tx_block = true;
cl->tx_tout = 20;
cl->knows_txdone = false; /* controller can't ack */
INIT_LIST_HEAD(&pchan->rx_pending);
INIT_LIST_HEAD(&pchan->xfers_list);
spin_lock_init(&pchan->rx_lock);
mutex_init(&pchan->xfers_lock);
ret = scpi_alloc_xfer_list(dev, pchan);
if (!ret) {
pchan->chan = mbox_request_channel(cl, idx);
if (!IS_ERR(pchan->chan))
continue;
ret = PTR_ERR(pchan->chan);
if (ret != -EPROBE_DEFER)
dev_err(dev, "failed to get channel%d err %d\n",
idx, ret);
}
return ret;
}
scpi_drvinfo->commands = scpi_std_commands;
platform_set_drvdata(pdev, scpi_drvinfo);
if (scpi_drvinfo->is_legacy) {
/* Replace with legacy variants */
scpi_ops.clk_set_val = legacy_scpi_clk_set_val;
scpi_drvinfo->commands = scpi_legacy_commands;
/* Fill priority bitmap */
for (idx = 0; idx < ARRAY_SIZE(legacy_hpriority_cmds); idx++)
set_bit(legacy_hpriority_cmds[idx],
scpi_drvinfo->cmd_priority);
}
scpi_info = scpi_drvinfo;
ret = scpi_init_versions(scpi_drvinfo);
if (ret) {
dev_err(dev, "incorrect or no SCP firmware found\n");
scpi_info = NULL;
return ret;
}
if (scpi_drvinfo->is_legacy && !scpi_drvinfo->protocol_version &&
!scpi_drvinfo->firmware_version)
dev_info(dev, "SCP Protocol legacy pre-1.0 firmware\n");
else
dev_info(dev, "SCP Protocol %lu.%lu Firmware %lu.%lu.%lu version\n",
FIELD_GET(PROTO_REV_MAJOR_MASK,
scpi_drvinfo->protocol_version),
FIELD_GET(PROTO_REV_MINOR_MASK,
scpi_drvinfo->protocol_version),
FIELD_GET(FW_REV_MAJOR_MASK,
scpi_drvinfo->firmware_version),
FIELD_GET(FW_REV_MINOR_MASK,
scpi_drvinfo->firmware_version),
FIELD_GET(FW_REV_PATCH_MASK,
scpi_drvinfo->firmware_version));
scpi_drvinfo->scpi_ops = &scpi_ops;
ret = devm_of_platform_populate(dev);
if (ret)
scpi_info = NULL;
return ret;
}
static const struct of_device_id scpi_of_match[] = {
{.compatible = "arm,scpi"},
{.compatible = "arm,scpi-pre-1.0"},
{},
};
MODULE_DEVICE_TABLE(of, scpi_of_match);
static struct platform_driver scpi_driver = {
.driver = {
.name = "scpi_protocol",
.of_match_table = scpi_of_match,
.dev_groups = versions_groups,
},
.probe = scpi_probe,
.remove = scpi_remove,
};
module_platform_driver(scpi_driver);
MODULE_AUTHOR("Sudeep Holla <[email protected]>");
MODULE_DESCRIPTION("ARM SCPI mailbox protocol driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/firmware/arm_scpi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2007-2010 Red Hat, Inc.
* by Peter Jones <[email protected]>
* Copyright 2007 IBM, Inc.
* by Konrad Rzeszutek <[email protected]>
* Copyright 2008
* by Konrad Rzeszutek <[email protected]>
*
* This code finds the iSCSI Boot Format Table.
*/
#include <linux/memblock.h>
#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/device.h>
#include <linux/efi.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/limits.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/acpi.h>
#include <linux/iscsi_ibft.h>
#include <asm/mmzone.h>
/*
* Physical location of iSCSI Boot Format Table.
*/
phys_addr_t ibft_phys_addr;
EXPORT_SYMBOL_GPL(ibft_phys_addr);
static const struct {
char *sign;
} ibft_signs[] = {
{ "iBFT" },
{ "BIFT" }, /* Broadcom iSCSI Offload */
};
#define IBFT_SIGN_LEN 4
#define VGA_MEM 0xA0000 /* VGA buffer */
#define VGA_SIZE 0x20000 /* 128kB */
/*
* Routine used to find and reserve the iSCSI Boot Format Table
*/
void __init reserve_ibft_region(void)
{
unsigned long pos, virt_pos = 0;
unsigned int len = 0;
void *virt = NULL;
int i;
ibft_phys_addr = 0;
/* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
* only use ACPI for this
*/
if (efi_enabled(EFI_BOOT))
return;
for (pos = IBFT_START; pos < IBFT_END; pos += 16) {
/* The table can't be inside the VGA BIOS reserved space,
* so skip that area */
if (pos == VGA_MEM)
pos += VGA_SIZE;
/* Map page by page */
if (offset_in_page(pos) == 0) {
if (virt)
early_memunmap(virt, PAGE_SIZE);
virt = early_memremap_ro(pos, PAGE_SIZE);
virt_pos = pos;
}
for (i = 0; i < ARRAY_SIZE(ibft_signs); i++) {
if (memcmp(virt + (pos - virt_pos), ibft_signs[i].sign,
IBFT_SIGN_LEN) == 0) {
unsigned long *addr =
(unsigned long *)(virt + pos - virt_pos + 4);
len = *addr;
/* if the length of the table extends past 1M,
* the table cannot be valid. */
if (pos + len <= (IBFT_END-1)) {
ibft_phys_addr = pos;
memblock_reserve(ibft_phys_addr, PAGE_ALIGN(len));
pr_info("iBFT found at %pa.\n", &ibft_phys_addr);
goto out;
}
}
}
}
out:
early_memunmap(virt, PAGE_SIZE);
}
| linux-master | drivers/firmware/iscsi_ibft_find.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
* Copyright (C) 2015 Linaro Ltd.
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/reset-controller.h>
#include <linux/arm-smccc.h>
#include "qcom_scm.h"
static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
module_param(download_mode, bool, 0);
struct qcom_scm {
struct device *dev;
struct clk *core_clk;
struct clk *iface_clk;
struct clk *bus_clk;
struct icc_path *path;
struct completion waitq_comp;
struct reset_controller_dev reset;
/* control access to the interconnect path */
struct mutex scm_bw_lock;
int scm_vote_count;
u64 dload_mode_addr;
};
struct qcom_scm_current_perm_info {
__le32 vmid;
__le32 perm;
__le64 ctx;
__le32 ctx_size;
__le32 unused;
};
struct qcom_scm_mem_map_info {
__le64 mem_addr;
__le64 mem_size;
};
/* Each bit configures cold/warm boot address for one of the 4 CPUs */
static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
0, BIT(0), BIT(3), BIT(5)
};
static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
BIT(2), BIT(1), BIT(4), BIT(6)
};
#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE BIT(0)
#define QCOM_SMC_WAITQ_FLAG_WAKE_ALL BIT(1)
static const char * const qcom_scm_convention_names[] = {
[SMC_CONVENTION_UNKNOWN] = "unknown",
[SMC_CONVENTION_ARM_32] = "smc arm 32",
[SMC_CONVENTION_ARM_64] = "smc arm 64",
[SMC_CONVENTION_LEGACY] = "smc legacy",
};
static struct qcom_scm *__scm;
static int qcom_scm_clk_enable(void)
{
int ret;
ret = clk_prepare_enable(__scm->core_clk);
if (ret)
goto bail;
ret = clk_prepare_enable(__scm->iface_clk);
if (ret)
goto disable_core;
ret = clk_prepare_enable(__scm->bus_clk);
if (ret)
goto disable_iface;
return 0;
disable_iface:
clk_disable_unprepare(__scm->iface_clk);
disable_core:
clk_disable_unprepare(__scm->core_clk);
bail:
return ret;
}
static void qcom_scm_clk_disable(void)
{
clk_disable_unprepare(__scm->core_clk);
clk_disable_unprepare(__scm->iface_clk);
clk_disable_unprepare(__scm->bus_clk);
}
static int qcom_scm_bw_enable(void)
{
int ret = 0;
if (!__scm->path)
return 0;
if (IS_ERR(__scm->path))
return -EINVAL;
mutex_lock(&__scm->scm_bw_lock);
if (!__scm->scm_vote_count) {
ret = icc_set_bw(__scm->path, 0, UINT_MAX);
if (ret < 0) {
dev_err(__scm->dev, "failed to set bandwidth request\n");
goto err_bw;
}
}
__scm->scm_vote_count++;
err_bw:
mutex_unlock(&__scm->scm_bw_lock);
return ret;
}
static void qcom_scm_bw_disable(void)
{
if (IS_ERR_OR_NULL(__scm->path))
return;
mutex_lock(&__scm->scm_bw_lock);
if (__scm->scm_vote_count-- == 1)
icc_set_bw(__scm->path, 0, 0);
mutex_unlock(&__scm->scm_bw_lock);
}
enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
static DEFINE_SPINLOCK(scm_query_lock);
static enum qcom_scm_convention __get_convention(void)
{
unsigned long flags;
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_INFO,
.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
QCOM_SCM_INFO_IS_CALL_AVAIL) |
(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
.arginfo = QCOM_SCM_ARGS(1),
.owner = ARM_SMCCC_OWNER_SIP,
};
struct qcom_scm_res res;
enum qcom_scm_convention probed_convention;
int ret;
bool forced = false;
if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
return qcom_scm_convention;
/*
	 * A device isn't required here: the call has a single value argument,
	 * so nothing needs to be mapped with dma_map_single() to the secure world.
*/
probed_convention = SMC_CONVENTION_ARM_64;
ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
if (!ret && res.result[0] == 1)
goto found;
/*
* Some SC7180 firmwares didn't implement the
* QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fallback to forcing ARM_64
* calling conventions on these firmwares. Luckily we don't make any
* early calls into the firmware on these SoCs so the device pointer
* will be valid here to check if the compatible matches.
*/
if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
forced = true;
goto found;
}
probed_convention = SMC_CONVENTION_ARM_32;
ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
if (!ret && res.result[0] == 1)
goto found;
probed_convention = SMC_CONVENTION_LEGACY;
found:
spin_lock_irqsave(&scm_query_lock, flags);
if (probed_convention != qcom_scm_convention) {
qcom_scm_convention = probed_convention;
pr_info("qcom_scm: convention: %s%s\n",
qcom_scm_convention_names[qcom_scm_convention],
forced ? " (forced)" : "");
}
spin_unlock_irqrestore(&scm_query_lock, flags);
return qcom_scm_convention;
}
/**
* qcom_scm_call() - Invoke a syscall in the secure world
* @dev: device
* @desc: Descriptor structure containing arguments and return values
* @res: Structure containing results from SMC/HVC call
*
* Sends a command to the SCM and waits for the command to finish processing.
* This should *only* be called in pre-emptible context.
*/
static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
struct qcom_scm_res *res)
{
might_sleep();
switch (__get_convention()) {
case SMC_CONVENTION_ARM_32:
case SMC_CONVENTION_ARM_64:
return scm_smc_call(dev, desc, res, false);
case SMC_CONVENTION_LEGACY:
return scm_legacy_call(dev, desc, res);
default:
pr_err("Unknown current SCM calling convention.\n");
return -EINVAL;
}
}
/**
* qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
* @dev: device
* @desc: Descriptor structure containing arguments and return values
* @res: Structure containing results from SMC/HVC call
*
* Sends a command to the SCM and waits for the command to finish processing.
* This can be called in atomic context.
*/
static int qcom_scm_call_atomic(struct device *dev,
const struct qcom_scm_desc *desc,
struct qcom_scm_res *res)
{
switch (__get_convention()) {
case SMC_CONVENTION_ARM_32:
case SMC_CONVENTION_ARM_64:
return scm_smc_call(dev, desc, res, true);
case SMC_CONVENTION_LEGACY:
return scm_legacy_call_atomic(dev, desc, res);
default:
pr_err("Unknown current SCM calling convention.\n");
return -EINVAL;
}
}
static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
u32 cmd_id)
{
int ret;
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_INFO,
.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
.owner = ARM_SMCCC_OWNER_SIP,
};
struct qcom_scm_res res;
desc.arginfo = QCOM_SCM_ARGS(1);
switch (__get_convention()) {
case SMC_CONVENTION_ARM_32:
case SMC_CONVENTION_ARM_64:
desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
break;
case SMC_CONVENTION_LEGACY:
desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
break;
default:
pr_err("Unknown SMC convention being used\n");
return false;
}
ret = qcom_scm_call(dev, &desc, &res);
return ret ? false : !!res.result[0];
}
static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits)
{
int cpu;
unsigned int flags = 0;
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_BOOT,
.cmd = QCOM_SCM_BOOT_SET_ADDR,
.arginfo = QCOM_SCM_ARGS(2),
.owner = ARM_SMCCC_OWNER_SIP,
};
for_each_present_cpu(cpu) {
if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
return -EINVAL;
flags |= cpu_bits[cpu];
}
desc.args[0] = flags;
desc.args[1] = virt_to_phys(entry);
return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags)
{
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_BOOT,
.cmd = QCOM_SCM_BOOT_SET_ADDR_MC,
.owner = ARM_SMCCC_OWNER_SIP,
.arginfo = QCOM_SCM_ARGS(6),
.args = {
virt_to_phys(entry),
/* Apply to all CPUs in all affinity levels */
~0ULL, ~0ULL, ~0ULL, ~0ULL,
flags,
},
};
/* Need a device for DMA of the additional arguments */
if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY)
return -EOPNOTSUPP;
return qcom_scm_call(__scm->dev, &desc, NULL);
}
/**
* qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus
* @entry: Entry point function for the cpus
*
* Set the Linux entry point for the SCM to transfer control to when coming
* out of a power down. CPU power down may be executed on cpuidle or hotplug.
*/
int qcom_scm_set_warm_boot_addr(void *entry)
{
if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT))
/* Fallback to old SCM call */
return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_warm_boot_addr);
/**
* qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus
* @entry: Entry point function for the cpus
*/
int qcom_scm_set_cold_boot_addr(void *entry)
{
if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
/* Fallback to old SCM call */
return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_cold_boot_addr);
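/*
 * Illustrative usage sketch (not part of this driver): platform suspend or
 * hotplug code would typically register its resume entry point once at boot.
 * The cpu_resume symbol below is only a placeholder for whatever entry point
 * the architecture provides.
 *
 *	extern void cpu_resume(void);
 *
 *	if (qcom_scm_set_warm_boot_addr(cpu_resume))
 *		pr_warn("qcom_scm: failed to set warm boot address\n");
 */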
/**
* qcom_scm_cpu_power_down() - Power down the cpu
* @flags: Flags to flush cache
*
 * This is the end point for powering down a CPU. If there is a pending
 * interrupt, control returns from this function; otherwise the CPU jumps to
 * the warm boot entry point set for this CPU upon reset.
*/
void qcom_scm_cpu_power_down(u32 flags)
{
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_BOOT,
.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
.arginfo = QCOM_SCM_ARGS(1),
.owner = ARM_SMCCC_OWNER_SIP,
};
qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_cpu_power_down);
int qcom_scm_set_remote_state(u32 state, u32 id)
{
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_BOOT,
.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
.arginfo = QCOM_SCM_ARGS(2),
.args[0] = state,
.args[1] = id,
.owner = ARM_SMCCC_OWNER_SIP,
};
struct qcom_scm_res res;
int ret;
ret = qcom_scm_call(__scm->dev, &desc, &res);
return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_set_remote_state);
static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_BOOT,
.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
.arginfo = QCOM_SCM_ARGS(2),
.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
.owner = ARM_SMCCC_OWNER_SIP,
};
desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;
return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
static void qcom_scm_set_download_mode(bool enable)
{
bool avail;
int ret = 0;
avail = __qcom_scm_is_call_available(__scm->dev,
QCOM_SCM_SVC_BOOT,
QCOM_SCM_BOOT_SET_DLOAD_MODE);
if (avail) {
ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
} else if (__scm->dload_mode_addr) {
ret = qcom_scm_io_writel(__scm->dload_mode_addr,
enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
} else {
dev_err(__scm->dev,
"No available mechanism for setting download mode\n");
}
if (ret)
dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}
/**
* qcom_scm_pas_init_image() - Initialize peripheral authentication service
* state machine for a given peripheral, using the
* metadata
* @peripheral: peripheral id
* @metadata: pointer to memory containing ELF header, program header table
* and optional blob of data used for authenticating the metadata
* and the rest of the firmware
* @size: size of the metadata
* @ctx: optional metadata context
*
* Return: 0 on success.
*
* Upon successful return, the PAS metadata context (@ctx) will be used to
* track the metadata allocation, this needs to be released by invoking
* qcom_scm_pas_metadata_release() by the caller.
*/
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
struct qcom_scm_pas_metadata *ctx)
{
dma_addr_t mdata_phys;
void *mdata_buf;
int ret;
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_PIL,
.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
.args[0] = peripheral,
.owner = ARM_SMCCC_OWNER_SIP,
};
struct qcom_scm_res res;
/*
	 * During the SCM call, memory protection will be enabled for the
	 * metadata blob, so make sure it is physically contiguous, 4K aligned
	 * and non-cacheable to avoid XPU violations.
*/
mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
GFP_KERNEL);
if (!mdata_buf) {
dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
return -ENOMEM;
}
memcpy(mdata_buf, metadata, size);
ret = qcom_scm_clk_enable();
if (ret)
goto out;
	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;
	desc.args[1] = mdata_phys;
	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();
disable_clk:
	qcom_scm_clk_disable();
out:
if (ret < 0 || !ctx) {
dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
	} else {
ctx->ptr = mdata_buf;
ctx->phys = mdata_phys;
ctx->size = size;
}
return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_init_image);
/**
* qcom_scm_pas_metadata_release() - release metadata context
* @ctx: metadata context
*/
void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx)
{
if (!ctx->ptr)
return;
dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys);
ctx->ptr = NULL;
ctx->phys = 0;
ctx->size = 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_metadata_release);
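/*
 * Illustrative usage sketch (not part of this driver): a remoteproc driver
 * loading firmware through PAS would pair qcom_scm_pas_init_image() with
 * qcom_scm_pas_metadata_release() once authentication is complete. The
 * peripheral id and the firmware data pointer/size are placeholders.
 *
 *	struct qcom_scm_pas_metadata md_ctx = {};
 *	int ret;
 *
 *	ret = qcom_scm_pas_init_image(pas_id, fw_data, fw_size, &md_ctx);
 *	if (ret)
 *		return ret;
 *	// ... qcom_scm_pas_mem_setup(), load segments, auth_and_reset() ...
 *	qcom_scm_pas_metadata_release(&md_ctx);
 */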
/**
* qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
* for firmware loading
* @peripheral: peripheral id
* @addr: start address of memory area to prepare
* @size: size of the memory area to prepare
*
* Returns 0 on success.
*/
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
int ret;
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_PIL,
.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
.arginfo = QCOM_SCM_ARGS(3),
.args[0] = peripheral,
.args[1] = addr,
.args[2] = size,
.owner = ARM_SMCCC_OWNER_SIP,
};
struct qcom_scm_res res;
ret = qcom_scm_clk_enable();
if (ret)
return ret;
	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;
	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();
disable_clk:
	qcom_scm_clk_disable();
return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_mem_setup);
/**
* qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
* and reset the remote processor
* @peripheral: peripheral id
*
* Return 0 on success.
*/
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
int ret;
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_PIL,
.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
.arginfo = QCOM_SCM_ARGS(1),
.args[0] = peripheral,
.owner = ARM_SMCCC_OWNER_SIP,
};
struct qcom_scm_res res;
ret = qcom_scm_clk_enable();
if (ret)
return ret;
	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;
	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();
disable_clk:
	qcom_scm_clk_disable();
return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_auth_and_reset);
/**
* qcom_scm_pas_shutdown() - Shut down the remote processor
* @peripheral: peripheral id
*
* Returns 0 on success.
*/
int qcom_scm_pas_shutdown(u32 peripheral)
{
int ret;
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_PIL,
.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
.arginfo = QCOM_SCM_ARGS(1),
.args[0] = peripheral,
.owner = ARM_SMCCC_OWNER_SIP,
};
struct qcom_scm_res res;
ret = qcom_scm_clk_enable();
if (ret)
return ret;
	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;
	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();
disable_clk:
	qcom_scm_clk_disable();
return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_shutdown);
/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripheral
* @peripheral: peripheral id
*
* Returns true if PAS is supported for this peripheral, otherwise false.
*/
bool qcom_scm_pas_supported(u32 peripheral)
{
int ret;
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_PIL,
.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
.arginfo = QCOM_SCM_ARGS(1),
.args[0] = peripheral,
.owner = ARM_SMCCC_OWNER_SIP,
};
struct qcom_scm_res res;
if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
QCOM_SCM_PIL_PAS_IS_SUPPORTED))
return false;
ret = qcom_scm_call(__scm->dev, &desc, &res);
return ret ? false : !!res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_supported);
static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_PIL,
.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
.arginfo = QCOM_SCM_ARGS(2),
.args[0] = reset,
.args[1] = 0,
.owner = ARM_SMCCC_OWNER_SIP,
};
struct qcom_scm_res res;
int ret;
ret = qcom_scm_call(__scm->dev, &desc, &res);
return ret ? : res.result[0];
}
static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
unsigned long idx)
{
if (idx != 0)
return -EINVAL;
return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}
static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long idx)
{
if (idx != 0)
return -EINVAL;
return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}
static const struct reset_control_ops qcom_scm_pas_reset_ops = {
.assert = qcom_scm_pas_reset_assert,
.deassert = qcom_scm_pas_reset_deassert,
};
int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_IO,
.cmd = QCOM_SCM_IO_READ,
.arginfo = QCOM_SCM_ARGS(1),
.args[0] = addr,
.owner = ARM_SMCCC_OWNER_SIP,
};
struct qcom_scm_res res;
int ret;
ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
if (ret >= 0)
*val = res.result[0];
return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_io_readl);
int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_IO,
.cmd = QCOM_SCM_IO_WRITE,
.arginfo = QCOM_SCM_ARGS(2),
.args[0] = addr,
.args[1] = val,
.owner = ARM_SMCCC_OWNER_SIP,
};
return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_io_writel);
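/*
 * Illustrative usage sketch (not part of this driver): a read-modify-write of
 * a register that is only accessible through the secure world combines the
 * two helpers above. The register address and bit are placeholders.
 *
 *	unsigned int val;
 *
 *	if (!qcom_scm_io_readl(secure_reg_addr, &val))
 *		qcom_scm_io_writel(secure_reg_addr, val | BIT(0));
 */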
/**
* qcom_scm_restore_sec_cfg_available() - Check if secure environment
* supports restore security config interface.
*
* Return true if restore-cfg interface is supported, false if not.
*/
bool qcom_scm_restore_sec_cfg_available(void)
{
return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
QCOM_SCM_MP_RESTORE_SEC_CFG);
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg_available);
int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_MP,
.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
.arginfo = QCOM_SCM_ARGS(2),
.args[0] = device_id,
.args[1] = spare,
.owner = ARM_SMCCC_OWNER_SIP,
};
struct qcom_scm_res res;
int ret;
ret = qcom_scm_call(__scm->dev, &desc, &res);
return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg);
int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_MP,
.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
.arginfo = QCOM_SCM_ARGS(1),
.args[0] = spare,
.owner = ARM_SMCCC_OWNER_SIP,
};
struct qcom_scm_res res;
int ret;
ret = qcom_scm_call(__scm->dev, &desc, &res);
if (size)
*size = res.result[0];
return ret ? : res.result[1];
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_size);
int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_MP,
.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
QCOM_SCM_VAL),
.args[0] = addr,
.args[1] = size,
.args[2] = spare,
.owner = ARM_SMCCC_OWNER_SIP,
};
int ret;
ret = qcom_scm_call(__scm->dev, &desc, NULL);
/* the pg table has been initialized already, ignore the error */
if (ret == -EPERM)
ret = 0;
return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_init);
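/*
 * Illustrative usage sketch (not part of this driver): IOMMU code would first
 * query the required pagetable pool size and then hand a physically
 * contiguous allocation of that size over to the secure world. The allocation
 * below is only a placeholder for however the caller obtains such memory.
 *
 *	size_t psize;
 *
 *	if (!qcom_scm_iommu_secure_ptbl_size(0, &psize)) {
 *		void *cpu_addr = alloc_pages_exact(psize, GFP_KERNEL);
 *
 *		if (cpu_addr)
 *			qcom_scm_iommu_secure_ptbl_init(virt_to_phys(cpu_addr),
 *							psize, 0);
 *	}
 */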
int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size)
{
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_MP,
.cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE,
.arginfo = QCOM_SCM_ARGS(2),
.args[0] = size,
.args[1] = spare,
.owner = ARM_SMCCC_OWNER_SIP,
};
return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_cp_pool_size);
int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
u32 cp_nonpixel_start,
u32 cp_nonpixel_size)
{
int ret;
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_MP,
.cmd = QCOM_SCM_MP_VIDEO_VAR,
.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
QCOM_SCM_VAL, QCOM_SCM_VAL),
.args[0] = cp_start,
.args[1] = cp_size,
.args[2] = cp_nonpixel_start,
.args[3] = cp_nonpixel_size,
.owner = ARM_SMCCC_OWNER_SIP,
};
struct qcom_scm_res res;
ret = qcom_scm_call(__scm->dev, &desc, &res);
return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_mem_protect_video_var);
static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
size_t mem_sz, phys_addr_t src, size_t src_sz,
phys_addr_t dest, size_t dest_sz)
{
int ret;
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_MP,
.cmd = QCOM_SCM_MP_ASSIGN,
.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
QCOM_SCM_VAL, QCOM_SCM_VAL),
.args[0] = mem_region,
.args[1] = mem_sz,
.args[2] = src,
.args[3] = src_sz,
.args[4] = dest,
.args[5] = dest_sz,
.args[6] = 0,
.owner = ARM_SMCCC_OWNER_SIP,
};
struct qcom_scm_res res;
ret = qcom_scm_call(dev, &desc, &res);
return ret ? : res.result[0];
}
/**
* qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
* @mem_addr: mem region whose ownership need to be reassigned
* @mem_sz: size of the region.
 * @srcvm: vmid bitmap for the current set of owners; each set bit
 *	   indicates a unique owner
* @newvm: array having new owners and corresponding permission
* flags
* @dest_cnt: number of owners in next set.
*
* Return negative errno on failure or 0 on success with @srcvm updated.
*/
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
u64 *srcvm,
const struct qcom_scm_vmperm *newvm,
unsigned int dest_cnt)
{
struct qcom_scm_current_perm_info *destvm;
struct qcom_scm_mem_map_info *mem_to_map;
phys_addr_t mem_to_map_phys;
phys_addr_t dest_phys;
dma_addr_t ptr_phys;
size_t mem_to_map_sz;
size_t dest_sz;
size_t src_sz;
size_t ptr_sz;
int next_vm;
__le32 *src;
void *ptr;
int ret, i, b;
u64 srcvm_bits = *srcvm;
src_sz = hweight64(srcvm_bits) * sizeof(*src);
mem_to_map_sz = sizeof(*mem_to_map);
dest_sz = dest_cnt * sizeof(*destvm);
ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
ALIGN(dest_sz, SZ_64);
ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
if (!ptr)
return -ENOMEM;
/* Fill source vmid detail */
src = ptr;
i = 0;
for (b = 0; b < BITS_PER_TYPE(u64); b++) {
if (srcvm_bits & BIT(b))
src[i++] = cpu_to_le32(b);
}
/* Fill details of mem buff to map */
mem_to_map = ptr + ALIGN(src_sz, SZ_64);
mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
mem_to_map->mem_addr = cpu_to_le64(mem_addr);
mem_to_map->mem_size = cpu_to_le64(mem_sz);
next_vm = 0;
/* Fill details of next vmid detail */
destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
destvm->vmid = cpu_to_le32(newvm->vmid);
destvm->perm = cpu_to_le32(newvm->perm);
destvm->ctx = 0;
destvm->ctx_size = 0;
next_vm |= BIT(newvm->vmid);
}
ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
ptr_phys, src_sz, dest_phys, dest_sz);
dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
if (ret) {
dev_err(__scm->dev,
"Assign memory protection call failed %d\n", ret);
return -EINVAL;
}
*srcvm = next_vm;
return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_assign_mem);
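/*
 * Illustrative usage sketch (not part of this driver): handing a buffer from
 * HLOS to another VM and keeping the updated owner bitmap for the reverse
 * call later. The vmid/permission names follow the SCM header; the buffer
 * address and size are placeholders.
 *
 *	u64 srcvm = BIT(QCOM_SCM_VMID_HLOS);
 *	struct qcom_scm_vmperm newvm = {
 *		.vmid = QCOM_SCM_VMID_MSS_MSA,
 *		.perm = QCOM_SCM_PERM_RW,
 *	};
 *	int ret;
 *
 *	ret = qcom_scm_assign_mem(buf_phys, buf_size, &srcvm, &newvm, 1);
 *	// on success, srcvm now describes the new owner set
 */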
/**
* qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
*/
bool qcom_scm_ocmem_lock_available(void)
{
return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
QCOM_SCM_OCMEM_LOCK_CMD);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock_available);
/**
* qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
* region to the specified initiator
*
* @id: tz initiator id
* @offset: OCMEM offset
* @size: OCMEM size
* @mode: access mode (WIDE/NARROW)
*/
int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
u32 mode)
{
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_OCMEM,
.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
.args[0] = id,
.args[1] = offset,
.args[2] = size,
.args[3] = mode,
.arginfo = QCOM_SCM_ARGS(4),
};
return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock);
/**
* qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
* region from the specified initiator
*
* @id: tz initiator id
* @offset: OCMEM offset
* @size: OCMEM size
*/
int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
{
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_OCMEM,
.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
.args[0] = id,
.args[1] = offset,
.args[2] = size,
.arginfo = QCOM_SCM_ARGS(3),
};
return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_unlock);
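/*
 * Illustrative usage sketch (not part of this driver): an OCMEM client
 * brackets its use of a region with the lock/unlock calls above. The
 * initiator id (an enum qcom_scm_ocmem_client value), offset, size and mode
 * are placeholders.
 *
 *	if (qcom_scm_ocmem_lock_available()) {
 *		qcom_scm_ocmem_lock(id, offset, size, mode);
 *		// ... use the OCMEM region ...
 *		qcom_scm_ocmem_unlock(id, offset, size);
 *	}
 */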
/**
* qcom_scm_ice_available() - Is the ICE key programming interface available?
*
* Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
* qcom_scm_ice_set_key() are available.
*/
bool qcom_scm_ice_available(void)
{
return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_available);
/**
* qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
* @index: the keyslot to invalidate
*
* The UFSHCI and eMMC standards define a standard way to do this, but it
* doesn't work on these SoCs; only this SCM call does.
*
* It is assumed that the SoC has only one ICE instance being used, as this SCM
* call doesn't specify which ICE instance the keyslot belongs to.
*
* Return: 0 on success; -errno on failure.
*/
int qcom_scm_ice_invalidate_key(u32 index)
{
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_ES,
.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
.arginfo = QCOM_SCM_ARGS(1),
.args[0] = index,
.owner = ARM_SMCCC_OWNER_SIP,
};
return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_invalidate_key);
/**
* qcom_scm_ice_set_key() - Set an inline encryption key
* @index: the keyslot into which to set the key
* @key: the key to program
* @key_size: the size of the key in bytes
* @cipher: the encryption algorithm the key is for
* @data_unit_size: the encryption data unit size, i.e. the size of each
* individual plaintext and ciphertext. Given in 512-byte
* units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
*
* Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
* can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
*
* The UFSHCI and eMMC standards define a standard way to do this, but it
* doesn't work on these SoCs; only this SCM call does.
*
* It is assumed that the SoC has only one ICE instance being used, as this SCM
* call doesn't specify which ICE instance the keyslot belongs to.
*
* Return: 0 on success; -errno on failure.
*/
int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
{
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_ES,
.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
QCOM_SCM_VAL, QCOM_SCM_VAL,
QCOM_SCM_VAL),
.args[0] = index,
.args[2] = key_size,
.args[3] = cipher,
.args[4] = data_unit_size,
.owner = ARM_SMCCC_OWNER_SIP,
};
void *keybuf;
dma_addr_t key_phys;
int ret;
/*
* 'key' may point to vmalloc()'ed memory, but we need to pass a
* physical address that's been properly flushed. The sanctioned way to
* do this is by using the DMA API. But as is best practice for crypto
* keys, we also must wipe the key after use. This makes kmemdup() +
* dma_map_single() not clearly correct, since the DMA API can use
* bounce buffers. Instead, just use dma_alloc_coherent(). Programming
* keys is normally rare and thus not performance-critical.
*/
keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys,
GFP_KERNEL);
if (!keybuf)
return -ENOMEM;
memcpy(keybuf, key, key_size);
desc.args[1] = key_phys;
ret = qcom_scm_call(__scm->dev, &desc, NULL);
memzero_explicit(keybuf, key_size);
dma_free_coherent(__scm->dev, key_size, keybuf, key_phys);
return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);
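/*
 * Illustrative usage sketch (not part of this driver): a storage host driver
 * programming and later evicting an AES-256-XTS key (64 key bytes) for a
 * 4096-byte data unit. The keyslot number and key buffer are placeholders.
 *
 *	if (qcom_scm_ice_available()) {
 *		qcom_scm_ice_set_key(slot, key, 64,
 *				     QCOM_SCM_ICE_CIPHER_AES_256_XTS, 8);
 *		// ... inline-encrypted I/O ...
 *		qcom_scm_ice_invalidate_key(slot);
 *	}
 */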
/**
* qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
*
* Return true if HDCP is supported, false if not.
*/
bool qcom_scm_hdcp_available(void)
{
bool avail;
int ret = qcom_scm_clk_enable();
	if (ret)
		return false;
avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
QCOM_SCM_HDCP_INVOKE);
qcom_scm_clk_disable();
return avail;
}
EXPORT_SYMBOL_GPL(qcom_scm_hdcp_available);
/**
* qcom_scm_hdcp_req() - Send HDCP request.
* @req: HDCP request array
* @req_cnt: HDCP request array count
* @resp: response buffer passed to SCM
*
* Write HDCP register(s) through SCM.
*/
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
int ret;
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_HDCP,
.cmd = QCOM_SCM_HDCP_INVOKE,
.arginfo = QCOM_SCM_ARGS(10),
.args = {
req[0].addr,
req[0].val,
req[1].addr,
req[1].val,
req[2].addr,
req[2].val,
req[3].addr,
req[3].val,
req[4].addr,
req[4].val
},
.owner = ARM_SMCCC_OWNER_SIP,
};
struct qcom_scm_res res;
if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
return -ERANGE;
ret = qcom_scm_clk_enable();
if (ret)
return ret;
ret = qcom_scm_call(__scm->dev, &desc, &res);
*resp = res.result[0];
qcom_scm_clk_disable();
return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_hdcp_req);
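/*
 * Illustrative usage sketch (not part of this driver): a display driver fills
 * up to QCOM_SCM_HDCP_MAX_REQ_CNT address/value pairs and passes them in one
 * call. The register address and value are placeholders.
 *
 *	struct qcom_scm_hdcp_req req[QCOM_SCM_HDCP_MAX_REQ_CNT] = {
 *		{ .addr = hdcp_reg, .val = hdcp_val },
 *	};
 *	u32 resp;
 *
 *	if (qcom_scm_hdcp_available())
 *		qcom_scm_hdcp_req(req, 1, &resp);
 */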
int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt)
{
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
.cmd = QCOM_SCM_SMMU_PT_FORMAT,
.arginfo = QCOM_SCM_ARGS(3),
.args[0] = sec_id,
.args[1] = ctx_num,
.args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */
.owner = ARM_SMCCC_OWNER_SIP,
};
return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_pt_format);
int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
{
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
.arginfo = QCOM_SCM_ARGS(2),
.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
.args[1] = en,
.owner = ARM_SMCCC_OWNER_SIP,
};
return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_qsmmu500_wait_safe_toggle);
bool qcom_scm_lmh_dcvsh_available(void)
{
return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);
int qcom_scm_lmh_profile_change(u32 profile_id)
{
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_LMH,
.cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
.args[0] = profile_id,
.owner = ARM_SMCCC_OWNER_SIP,
};
return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change);
int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
u64 limit_node, u32 node_id, u64 version)
{
dma_addr_t payload_phys;
u32 *payload_buf;
int ret, payload_size = 5 * sizeof(u32);
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_LMH,
.cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
QCOM_SCM_VAL, QCOM_SCM_VAL),
.args[1] = payload_size,
.args[2] = limit_node,
.args[3] = node_id,
.args[4] = version,
.owner = ARM_SMCCC_OWNER_SIP,
};
payload_buf = dma_alloc_coherent(__scm->dev, payload_size, &payload_phys, GFP_KERNEL);
if (!payload_buf)
return -ENOMEM;
payload_buf[0] = payload_fn;
payload_buf[1] = 0;
payload_buf[2] = payload_reg;
payload_buf[3] = 1;
payload_buf[4] = payload_val;
desc.args[0] = payload_phys;
ret = qcom_scm_call(__scm->dev, &desc, NULL);
dma_free_coherent(__scm->dev, payload_size, payload_buf, payload_phys);
return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh);
static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
struct device_node *tcsr;
struct device_node *np = dev->of_node;
struct resource res;
u32 offset;
int ret;
tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
if (!tcsr)
return 0;
ret = of_address_to_resource(tcsr, 0, &res);
of_node_put(tcsr);
if (ret)
return ret;
ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
if (ret < 0)
return ret;
*addr = res.start + offset;
return 0;
}
/**
* qcom_scm_is_available() - Checks if SCM is available
*/
bool qcom_scm_is_available(void)
{
return !!__scm;
}
EXPORT_SYMBOL_GPL(qcom_scm_is_available);
static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx)
{
/* FW currently only supports a single wq_ctx (zero).
* TODO: Update this logic to include dynamic allocation and lookup of
* completion structs when FW supports more wq_ctx values.
*/
if (wq_ctx != 0) {
dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n");
return -EINVAL;
}
return 0;
}
int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
{
int ret;
ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
if (ret)
return ret;
wait_for_completion(&__scm->waitq_comp);
return 0;
}
static int qcom_scm_waitq_wakeup(struct qcom_scm *scm, unsigned int wq_ctx)
{
int ret;
ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
if (ret)
return ret;
complete(&__scm->waitq_comp);
return 0;
}
static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
{
int ret;
struct qcom_scm *scm = data;
u32 wq_ctx, flags, more_pending = 0;
do {
ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending);
if (ret) {
dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret);
goto out;
}
if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE &&
flags != QCOM_SMC_WAITQ_FLAG_WAKE_ALL) {
dev_err(scm->dev, "Invalid flags found for wq_ctx: %u\n", flags);
goto out;
}
ret = qcom_scm_waitq_wakeup(scm, wq_ctx);
if (ret)
goto out;
} while (more_pending);
out:
return IRQ_HANDLED;
}
static int qcom_scm_probe(struct platform_device *pdev)
{
struct qcom_scm *scm;
int irq, ret;
scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
if (!scm)
return -ENOMEM;
ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
if (ret < 0)
return ret;
mutex_init(&scm->scm_bw_lock);
scm->path = devm_of_icc_get(&pdev->dev, NULL);
if (IS_ERR(scm->path))
return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
"failed to acquire interconnect path\n");
scm->core_clk = devm_clk_get_optional(&pdev->dev, "core");
if (IS_ERR(scm->core_clk))
return PTR_ERR(scm->core_clk);
scm->iface_clk = devm_clk_get_optional(&pdev->dev, "iface");
if (IS_ERR(scm->iface_clk))
return PTR_ERR(scm->iface_clk);
scm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
if (IS_ERR(scm->bus_clk))
return PTR_ERR(scm->bus_clk);
scm->reset.ops = &qcom_scm_pas_reset_ops;
scm->reset.nr_resets = 1;
scm->reset.of_node = pdev->dev.of_node;
ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
if (ret)
return ret;
/* vote for max clk rate for highest performance */
ret = clk_set_rate(scm->core_clk, INT_MAX);
if (ret)
return ret;
__scm = scm;
__scm->dev = &pdev->dev;
init_completion(&__scm->waitq_comp);
irq = platform_get_irq_optional(pdev, 0);
if (irq < 0) {
if (irq != -ENXIO)
return irq;
} else {
ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
IRQF_ONESHOT, "qcom-scm", __scm);
if (ret < 0)
return dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
}
__get_convention();
/*
	 * If requested, enable "download mode": from this point on a warm boot
	 * will cause the boot stages to enter download mode, unless it is
	 * disabled again by a clean shutdown/reboot.
*/
if (download_mode)
qcom_scm_set_download_mode(true);
return 0;
}
static void qcom_scm_shutdown(struct platform_device *pdev)
{
/* Clean shutdown, disable download mode to allow normal restart */
qcom_scm_set_download_mode(false);
}
static const struct of_device_id qcom_scm_dt_match[] = {
{ .compatible = "qcom,scm" },
/* Legacy entries kept for backwards compatibility */
{ .compatible = "qcom,scm-apq8064" },
{ .compatible = "qcom,scm-apq8084" },
{ .compatible = "qcom,scm-ipq4019" },
{ .compatible = "qcom,scm-msm8953" },
{ .compatible = "qcom,scm-msm8974" },
{ .compatible = "qcom,scm-msm8996" },
{}
};
MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);
static struct platform_driver qcom_scm_driver = {
.driver = {
.name = "qcom_scm",
.of_match_table = qcom_scm_dt_match,
.suppress_bind_attrs = true,
},
.probe = qcom_scm_probe,
.shutdown = qcom_scm_shutdown,
};
static int __init qcom_scm_init(void)
{
return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/firmware/qcom_scm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017-2018, Intel Corporation
*/
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/kfifo.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/firmware/intel/stratix10-smc.h>
#include <linux/firmware/intel/stratix10-svc-client.h>
#include <linux/types.h>
/**
* SVC_NUM_DATA_IN_FIFO - number of struct stratix10_svc_data in the FIFO
*
 * SVC_NUM_CHANNEL - number of channels supported by the service layer driver
 *
 * FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS - timeout for claiming back the submitted
 * buffer(s) from the secure world so the FPGA manager can reuse them, or for
 * freeing the buffer(s) once all bit-stream data has been sent.
*
* FPGA_CONFIG_STATUS_TIMEOUT_SEC - poll the FPGA configuration status,
* service layer will return error to FPGA manager when timeout occurs,
* timeout is set to 30 seconds (30 * 1000) at Intel Stratix10 SoC.
*/
#define SVC_NUM_DATA_IN_FIFO 32
#define SVC_NUM_CHANNEL 3
#define FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS 200
#define FPGA_CONFIG_STATUS_TIMEOUT_SEC 30
#define BYTE_TO_WORD_SIZE 4
/* stratix10 service layer clients */
#define STRATIX10_RSU "stratix10-rsu"
#define INTEL_FCS "intel-fcs"
typedef void (svc_invoke_fn)(unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long,
struct arm_smccc_res *);
struct stratix10_svc_chan;
/**
* struct stratix10_svc - svc private data
 * @stratix10_svc_rsu: pointer to stratix10 RSU device
 * @intel_svc_fcs: pointer to Intel FCS device
 */
struct stratix10_svc {
struct platform_device *stratix10_svc_rsu;
struct platform_device *intel_svc_fcs;
};
/**
* struct stratix10_svc_sh_memory - service shared memory structure
* @sync_complete: state for a completion
* @addr: physical address of shared memory block
* @size: size of shared memory block
* @invoke_fn: function to issue secure monitor or hypervisor call
*
 * This struct is used to save the physical address and size of the shared
 * memory block. The shared memory block is allocated by secure monitor
 * software in the secure world.
*
* Service layer driver uses the physical address and size to create a memory
* pool, then allocates data buffer from that memory pool for service client.
*/
struct stratix10_svc_sh_memory {
struct completion sync_complete;
unsigned long addr;
unsigned long size;
svc_invoke_fn *invoke_fn;
};
/**
* struct stratix10_svc_data_mem - service memory structure
* @vaddr: virtual address
* @paddr: physical address
* @size: size of memory
* @node: link list head node
*
* This struct is used in a list that keeps track of buffers which have
 * been allocated or freed from the memory pool. The service layer driver also
 * uses this struct to translate a physical address to its virtual address.
*/
struct stratix10_svc_data_mem {
void *vaddr;
phys_addr_t paddr;
size_t size;
struct list_head node;
};
/**
* struct stratix10_svc_data - service data structure
* @chan: service channel
 * @paddr: physical address of the payload to be processed
 * @size: size of the payload to be processed
* @paddr_output: physical address of processed payload
* @size_output: processed payload size
* @command: service command requested by client
* @flag: configuration type (full or partial)
* @arg: args to be passed via registers and not physically mapped buffers
*
* This struct is used in service FIFO for inter-process communication.
*/
struct stratix10_svc_data {
struct stratix10_svc_chan *chan;
phys_addr_t paddr;
size_t size;
phys_addr_t paddr_output;
size_t size_output;
u32 command;
u32 flag;
u64 arg[3];
};
/**
* struct stratix10_svc_controller - service controller
* @dev: device
* @chans: array of service channels
* @num_chans: number of channels in 'chans' array
 * @num_active_client: number of active service clients
* @node: list management
* @genpool: memory pool pointing to the memory region
* @task: pointer to the thread task which handles SMC or HVC call
* @svc_fifo: a queue for storing service message data
* @complete_status: state for completion
* @svc_fifo_lock: protect access to service message data queue
* @invoke_fn: function to issue secure monitor call or hypervisor call
*
 * This struct is used to create communication channels for service clients and
 * to handle secure monitor or hypervisor calls.
*/
struct stratix10_svc_controller {
struct device *dev;
struct stratix10_svc_chan *chans;
int num_chans;
int num_active_client;
struct list_head node;
struct gen_pool *genpool;
struct task_struct *task;
struct kfifo svc_fifo;
struct completion complete_status;
spinlock_t svc_fifo_lock;
svc_invoke_fn *invoke_fn;
};
/**
* struct stratix10_svc_chan - service communication channel
* @ctrl: pointer to service controller which is the provider of this channel
* @scl: pointer to service client which owns the channel
* @name: service client name associated with the channel
* @lock: protect access to the channel
*
 * This struct is used by a service client to communicate with the service
 * layer; each service client has its own channel created by the service
 * controller.
*/
struct stratix10_svc_chan {
struct stratix10_svc_controller *ctrl;
struct stratix10_svc_client *scl;
char *name;
spinlock_t lock;
};
static LIST_HEAD(svc_ctrl);
static LIST_HEAD(svc_data_mem);
/**
* svc_pa_to_va() - translate physical address to virtual address
* @addr: to be translated physical address
*
* Return: valid virtual address or NULL if the provided physical
* address doesn't exist.
*/
static void *svc_pa_to_va(unsigned long addr)
{
struct stratix10_svc_data_mem *pmem;
pr_debug("claim back P-addr=0x%016x\n", (unsigned int)addr);
list_for_each_entry(pmem, &svc_data_mem, node)
if (pmem->paddr == addr)
return pmem->vaddr;
/* physical address is not found */
return NULL;
}
/**
* svc_thread_cmd_data_claim() - claim back buffer from the secure world
* @ctrl: pointer to service layer controller
* @p_data: pointer to service data structure
* @cb_data: pointer to callback data structure to service client
*
* Claim back the submitted buffers from the secure world and pass buffer
* back to service client (FPGA manager, etc) for reuse.
*/
static void svc_thread_cmd_data_claim(struct stratix10_svc_controller *ctrl,
struct stratix10_svc_data *p_data,
struct stratix10_svc_cb_data *cb_data)
{
struct arm_smccc_res res;
unsigned long timeout;
reinit_completion(&ctrl->complete_status);
timeout = msecs_to_jiffies(FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS);
pr_debug("%s: claim back the submitted buffer\n", __func__);
do {
ctrl->invoke_fn(INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE,
0, 0, 0, 0, 0, 0, 0, &res);
if (res.a0 == INTEL_SIP_SMC_STATUS_OK) {
if (!res.a1) {
complete(&ctrl->complete_status);
break;
}
cb_data->status = BIT(SVC_STATUS_BUFFER_DONE);
cb_data->kaddr1 = svc_pa_to_va(res.a1);
cb_data->kaddr2 = (res.a2) ?
svc_pa_to_va(res.a2) : NULL;
cb_data->kaddr3 = (res.a3) ?
svc_pa_to_va(res.a3) : NULL;
p_data->chan->scl->receive_cb(p_data->chan->scl,
cb_data);
} else {
pr_debug("%s: secure world busy, polling again\n",
__func__);
}
} while (res.a0 == INTEL_SIP_SMC_STATUS_OK ||
res.a0 == INTEL_SIP_SMC_STATUS_BUSY ||
wait_for_completion_timeout(&ctrl->complete_status, timeout));
}
/**
* svc_thread_cmd_config_status() - check configuration status
* @ctrl: pointer to service layer controller
* @p_data: pointer to service data structure
* @cb_data: pointer to callback data structure to service client
*
* Check whether the secure firmware at secure world has finished the FPGA
* configuration, and then inform FPGA manager the configuration status.
*/
static void svc_thread_cmd_config_status(struct stratix10_svc_controller *ctrl,
struct stratix10_svc_data *p_data,
struct stratix10_svc_cb_data *cb_data)
{
struct arm_smccc_res res;
int count_in_sec;
unsigned long a0, a1, a2;
cb_data->kaddr1 = NULL;
cb_data->kaddr2 = NULL;
cb_data->kaddr3 = NULL;
cb_data->status = BIT(SVC_STATUS_ERROR);
pr_debug("%s: polling config status\n", __func__);
a0 = INTEL_SIP_SMC_FPGA_CONFIG_ISDONE;
a1 = (unsigned long)p_data->paddr;
a2 = (unsigned long)p_data->size;
if (p_data->command == COMMAND_POLL_SERVICE_STATUS)
a0 = INTEL_SIP_SMC_SERVICE_COMPLETED;
count_in_sec = FPGA_CONFIG_STATUS_TIMEOUT_SEC;
while (count_in_sec) {
ctrl->invoke_fn(a0, a1, a2, 0, 0, 0, 0, 0, &res);
if ((res.a0 == INTEL_SIP_SMC_STATUS_OK) ||
(res.a0 == INTEL_SIP_SMC_STATUS_ERROR) ||
(res.a0 == INTEL_SIP_SMC_STATUS_REJECTED))
break;
/*
* request is still in progress, wait one second then
* poll again
*/
msleep(1000);
count_in_sec--;
}
if (!count_in_sec) {
pr_err("%s: poll status timeout\n", __func__);
cb_data->status = BIT(SVC_STATUS_BUSY);
} else if (res.a0 == INTEL_SIP_SMC_STATUS_OK) {
cb_data->status = BIT(SVC_STATUS_COMPLETED);
cb_data->kaddr2 = (res.a2) ?
svc_pa_to_va(res.a2) : NULL;
cb_data->kaddr3 = (res.a3) ? &res.a3 : NULL;
} else {
pr_err("%s: poll status error\n", __func__);
cb_data->kaddr1 = &res.a1;
cb_data->kaddr2 = (res.a2) ?
svc_pa_to_va(res.a2) : NULL;
cb_data->kaddr3 = (res.a3) ? &res.a3 : NULL;
cb_data->status = BIT(SVC_STATUS_ERROR);
}
p_data->chan->scl->receive_cb(p_data->chan->scl, cb_data);
}
/**
* svc_thread_recv_status_ok() - handle the successful status
* @p_data: pointer to service data structure
* @cb_data: pointer to callback data structure to service client
* @res: result from SMC or HVC call
*
* Send back the correspond status to the service clients.
*/
static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data,
struct stratix10_svc_cb_data *cb_data,
struct arm_smccc_res res)
{
cb_data->kaddr1 = NULL;
cb_data->kaddr2 = NULL;
cb_data->kaddr3 = NULL;
switch (p_data->command) {
case COMMAND_RECONFIG:
case COMMAND_RSU_UPDATE:
case COMMAND_RSU_NOTIFY:
case COMMAND_FCS_REQUEST_SERVICE:
case COMMAND_FCS_SEND_CERTIFICATE:
case COMMAND_FCS_DATA_ENCRYPTION:
case COMMAND_FCS_DATA_DECRYPTION:
cb_data->status = BIT(SVC_STATUS_OK);
break;
case COMMAND_RECONFIG_DATA_SUBMIT:
cb_data->status = BIT(SVC_STATUS_BUFFER_SUBMITTED);
break;
case COMMAND_RECONFIG_STATUS:
cb_data->status = BIT(SVC_STATUS_COMPLETED);
break;
case COMMAND_RSU_RETRY:
case COMMAND_RSU_MAX_RETRY:
case COMMAND_RSU_DCMF_STATUS:
case COMMAND_FIRMWARE_VERSION:
cb_data->status = BIT(SVC_STATUS_OK);
cb_data->kaddr1 = &res.a1;
break;
case COMMAND_SMC_SVC_VERSION:
cb_data->status = BIT(SVC_STATUS_OK);
cb_data->kaddr1 = &res.a1;
cb_data->kaddr2 = &res.a2;
break;
case COMMAND_RSU_DCMF_VERSION:
cb_data->status = BIT(SVC_STATUS_OK);
cb_data->kaddr1 = &res.a1;
cb_data->kaddr2 = &res.a2;
break;
case COMMAND_FCS_RANDOM_NUMBER_GEN:
case COMMAND_FCS_GET_PROVISION_DATA:
case COMMAND_POLL_SERVICE_STATUS:
cb_data->status = BIT(SVC_STATUS_OK);
cb_data->kaddr1 = &res.a1;
cb_data->kaddr2 = svc_pa_to_va(res.a2);
cb_data->kaddr3 = &res.a3;
break;
case COMMAND_MBOX_SEND_CMD:
cb_data->status = BIT(SVC_STATUS_OK);
cb_data->kaddr1 = &res.a1;
/* SDM return size in u8. Convert size to u32 word */
res.a2 = res.a2 * BYTE_TO_WORD_SIZE;
cb_data->kaddr2 = &res.a2;
break;
default:
pr_warn("it shouldn't happen\n");
break;
}
pr_debug("%s: call receive_cb\n", __func__);
p_data->chan->scl->receive_cb(p_data->chan->scl, cb_data);
}
/**
* svc_normal_to_secure_thread() - the function to run in the kthread
* @data: data pointer for kthread function
*
 * The service layer driver creates the stratix10_svc_smc_hvc_call kthread on
 * CPU node 0; this thread function handles SMC or HVC calls between the
 * kernel driver and the secure monitor software.
*
* Return: 0 for success or -ENOMEM on error.
*/
static int svc_normal_to_secure_thread(void *data)
{
struct stratix10_svc_controller
*ctrl = (struct stratix10_svc_controller *)data;
struct stratix10_svc_data *pdata;
struct stratix10_svc_cb_data *cbdata;
struct arm_smccc_res res;
unsigned long a0, a1, a2, a3, a4, a5, a6, a7;
int ret_fifo = 0;
pdata = kmalloc(sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
cbdata = kmalloc(sizeof(*cbdata), GFP_KERNEL);
if (!cbdata) {
kfree(pdata);
return -ENOMEM;
}
/* default set, to remove build warning */
a0 = INTEL_SIP_SMC_FPGA_CONFIG_LOOPBACK;
a1 = 0;
a2 = 0;
a3 = 0;
a4 = 0;
a5 = 0;
a6 = 0;
a7 = 0;
pr_debug("smc_hvc_shm_thread is running\n");
while (!kthread_should_stop()) {
ret_fifo = kfifo_out_spinlocked(&ctrl->svc_fifo,
pdata, sizeof(*pdata),
&ctrl->svc_fifo_lock);
if (!ret_fifo)
continue;
pr_debug("get from FIFO pa=0x%016x, command=%u, size=%u\n",
(unsigned int)pdata->paddr, pdata->command,
(unsigned int)pdata->size);
switch (pdata->command) {
case COMMAND_RECONFIG_DATA_CLAIM:
svc_thread_cmd_data_claim(ctrl, pdata, cbdata);
continue;
case COMMAND_RECONFIG:
a0 = INTEL_SIP_SMC_FPGA_CONFIG_START;
pr_debug("conf_type=%u\n", (unsigned int)pdata->flag);
a1 = pdata->flag;
a2 = 0;
break;
case COMMAND_RECONFIG_DATA_SUBMIT:
a0 = INTEL_SIP_SMC_FPGA_CONFIG_WRITE;
a1 = (unsigned long)pdata->paddr;
a2 = (unsigned long)pdata->size;
break;
case COMMAND_RECONFIG_STATUS:
a0 = INTEL_SIP_SMC_FPGA_CONFIG_ISDONE;
a1 = 0;
a2 = 0;
break;
case COMMAND_RSU_STATUS:
a0 = INTEL_SIP_SMC_RSU_STATUS;
a1 = 0;
a2 = 0;
break;
case COMMAND_RSU_UPDATE:
a0 = INTEL_SIP_SMC_RSU_UPDATE;
a1 = pdata->arg[0];
a2 = 0;
break;
case COMMAND_RSU_NOTIFY:
a0 = INTEL_SIP_SMC_RSU_NOTIFY;
a1 = pdata->arg[0];
a2 = 0;
break;
case COMMAND_RSU_RETRY:
a0 = INTEL_SIP_SMC_RSU_RETRY_COUNTER;
a1 = 0;
a2 = 0;
break;
case COMMAND_RSU_MAX_RETRY:
a0 = INTEL_SIP_SMC_RSU_MAX_RETRY;
a1 = 0;
a2 = 0;
break;
case COMMAND_RSU_DCMF_VERSION:
a0 = INTEL_SIP_SMC_RSU_DCMF_VERSION;
a1 = 0;
a2 = 0;
break;
case COMMAND_FIRMWARE_VERSION:
a0 = INTEL_SIP_SMC_FIRMWARE_VERSION;
a1 = 0;
a2 = 0;
break;
/* for FCS */
case COMMAND_FCS_DATA_ENCRYPTION:
a0 = INTEL_SIP_SMC_FCS_CRYPTION;
a1 = 1;
a2 = (unsigned long)pdata->paddr;
a3 = (unsigned long)pdata->size;
a4 = (unsigned long)pdata->paddr_output;
a5 = (unsigned long)pdata->size_output;
break;
case COMMAND_FCS_DATA_DECRYPTION:
a0 = INTEL_SIP_SMC_FCS_CRYPTION;
a1 = 0;
a2 = (unsigned long)pdata->paddr;
a3 = (unsigned long)pdata->size;
a4 = (unsigned long)pdata->paddr_output;
a5 = (unsigned long)pdata->size_output;
break;
case COMMAND_FCS_RANDOM_NUMBER_GEN:
a0 = INTEL_SIP_SMC_FCS_RANDOM_NUMBER;
a1 = (unsigned long)pdata->paddr;
a2 = 0;
break;
case COMMAND_FCS_REQUEST_SERVICE:
a0 = INTEL_SIP_SMC_FCS_SERVICE_REQUEST;
a1 = (unsigned long)pdata->paddr;
a2 = (unsigned long)pdata->size;
break;
case COMMAND_FCS_SEND_CERTIFICATE:
a0 = INTEL_SIP_SMC_FCS_SEND_CERTIFICATE;
a1 = (unsigned long)pdata->paddr;
a2 = (unsigned long)pdata->size;
break;
case COMMAND_FCS_GET_PROVISION_DATA:
a0 = INTEL_SIP_SMC_FCS_GET_PROVISION_DATA;
a1 = (unsigned long)pdata->paddr;
a2 = 0;
break;
/* for polling */
case COMMAND_POLL_SERVICE_STATUS:
a0 = INTEL_SIP_SMC_SERVICE_COMPLETED;
a1 = (unsigned long)pdata->paddr;
a2 = (unsigned long)pdata->size;
break;
case COMMAND_RSU_DCMF_STATUS:
a0 = INTEL_SIP_SMC_RSU_DCMF_STATUS;
a1 = 0;
a2 = 0;
break;
case COMMAND_SMC_SVC_VERSION:
a0 = INTEL_SIP_SMC_SVC_VERSION;
a1 = 0;
a2 = 0;
break;
case COMMAND_MBOX_SEND_CMD:
a0 = INTEL_SIP_SMC_MBOX_SEND_CMD;
a1 = pdata->arg[0];
a2 = (unsigned long)pdata->paddr;
a3 = (unsigned long)pdata->size / BYTE_TO_WORD_SIZE;
a4 = pdata->arg[1];
a5 = (unsigned long)pdata->paddr_output;
a6 = (unsigned long)pdata->size_output / BYTE_TO_WORD_SIZE;
break;
default:
pr_warn("it shouldn't happen\n");
break;
}
pr_debug("%s: before SMC call -- a0=0x%016x a1=0x%016x",
__func__,
(unsigned int)a0,
(unsigned int)a1);
pr_debug(" a2=0x%016x\n", (unsigned int)a2);
pr_debug(" a3=0x%016x\n", (unsigned int)a3);
pr_debug(" a4=0x%016x\n", (unsigned int)a4);
pr_debug(" a5=0x%016x\n", (unsigned int)a5);
ctrl->invoke_fn(a0, a1, a2, a3, a4, a5, a6, a7, &res);
pr_debug("%s: after SMC call -- res.a0=0x%016x",
__func__, (unsigned int)res.a0);
pr_debug(" res.a1=0x%016x, res.a2=0x%016x",
(unsigned int)res.a1, (unsigned int)res.a2);
pr_debug(" res.a3=0x%016x\n", (unsigned int)res.a3);
if (pdata->command == COMMAND_RSU_STATUS) {
if (res.a0 == INTEL_SIP_SMC_RSU_ERROR)
cbdata->status = BIT(SVC_STATUS_ERROR);
else
cbdata->status = BIT(SVC_STATUS_OK);
cbdata->kaddr1 = &res;
cbdata->kaddr2 = NULL;
cbdata->kaddr3 = NULL;
pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata);
continue;
}
switch (res.a0) {
case INTEL_SIP_SMC_STATUS_OK:
svc_thread_recv_status_ok(pdata, cbdata, res);
break;
case INTEL_SIP_SMC_STATUS_BUSY:
switch (pdata->command) {
case COMMAND_RECONFIG_DATA_SUBMIT:
svc_thread_cmd_data_claim(ctrl,
pdata, cbdata);
break;
case COMMAND_RECONFIG_STATUS:
case COMMAND_POLL_SERVICE_STATUS:
svc_thread_cmd_config_status(ctrl,
pdata, cbdata);
break;
default:
pr_warn("it shouldn't happen\n");
break;
}
break;
case INTEL_SIP_SMC_STATUS_REJECTED:
pr_debug("%s: STATUS_REJECTED\n", __func__);
/* for FCS */
switch (pdata->command) {
case COMMAND_FCS_REQUEST_SERVICE:
case COMMAND_FCS_SEND_CERTIFICATE:
case COMMAND_FCS_GET_PROVISION_DATA:
case COMMAND_FCS_DATA_ENCRYPTION:
case COMMAND_FCS_DATA_DECRYPTION:
case COMMAND_FCS_RANDOM_NUMBER_GEN:
case COMMAND_MBOX_SEND_CMD:
cbdata->status = BIT(SVC_STATUS_INVALID_PARAM);
cbdata->kaddr1 = NULL;
cbdata->kaddr2 = NULL;
cbdata->kaddr3 = NULL;
pdata->chan->scl->receive_cb(pdata->chan->scl,
cbdata);
break;
}
break;
case INTEL_SIP_SMC_STATUS_ERROR:
case INTEL_SIP_SMC_RSU_ERROR:
pr_err("%s: STATUS_ERROR\n", __func__);
cbdata->status = BIT(SVC_STATUS_ERROR);
cbdata->kaddr1 = &res.a1;
cbdata->kaddr2 = (res.a2) ?
svc_pa_to_va(res.a2) : NULL;
cbdata->kaddr3 = (res.a3) ? &res.a3 : NULL;
pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata);
break;
default:
pr_warn("Secure firmware doesn't support...\n");
/*
* be compatible with older version firmware which
* doesn't support newer RSU commands
*/
if ((pdata->command != COMMAND_RSU_UPDATE) &&
(pdata->command != COMMAND_RSU_STATUS)) {
cbdata->status =
BIT(SVC_STATUS_NO_SUPPORT);
cbdata->kaddr1 = NULL;
cbdata->kaddr2 = NULL;
cbdata->kaddr3 = NULL;
pdata->chan->scl->receive_cb(
pdata->chan->scl, cbdata);
}
break;
}
}
kfree(cbdata);
kfree(pdata);
return 0;
}
/**
* svc_normal_to_secure_shm_thread() - the function to run in the kthread
* @data: data pointer for kthread function
*
 * The service layer driver creates the stratix10_svc_smc_hvc_shm kthread on
 * CPU node 0; this thread function queries the physical address of the memory
 * block reserved by secure monitor software in the secure world.
 *
 * svc_normal_to_secure_shm_thread() terminates directly since it is a
 * standalone thread for which no one will call kthread_stop(), nor does it
 * return based on 'kthread_should_stop()' being true.
*/
static int svc_normal_to_secure_shm_thread(void *data)
{
struct stratix10_svc_sh_memory
*sh_mem = (struct stratix10_svc_sh_memory *)data;
struct arm_smccc_res res;
/* SMC or HVC call to get shared memory info from secure world */
sh_mem->invoke_fn(INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM,
0, 0, 0, 0, 0, 0, 0, &res);
if (res.a0 == INTEL_SIP_SMC_STATUS_OK) {
sh_mem->addr = res.a1;
sh_mem->size = res.a2;
} else {
pr_err("%s: after SMC call -- res.a0=0x%016x", __func__,
(unsigned int)res.a0);
sh_mem->addr = 0;
sh_mem->size = 0;
}
complete(&sh_mem->sync_complete);
return 0;
}
/**
* svc_get_sh_memory() - get memory block reserved by secure monitor SW
* @pdev: pointer to service layer device
* @sh_memory: pointer to service shared memory structure
*
* Return: zero for successfully getting the physical address of memory block
* reserved by secure monitor software, or negative value on error.
*/
static int svc_get_sh_memory(struct platform_device *pdev,
struct stratix10_svc_sh_memory *sh_memory)
{
struct device *dev = &pdev->dev;
struct task_struct *sh_memory_task;
unsigned int cpu = 0;
init_completion(&sh_memory->sync_complete);
/* smc or hvc call happens on cpu 0 bound kthread */
sh_memory_task = kthread_create_on_node(svc_normal_to_secure_shm_thread,
(void *)sh_memory,
cpu_to_node(cpu),
"svc_smc_hvc_shm_thread");
if (IS_ERR(sh_memory_task)) {
dev_err(dev, "fail to create stratix10_svc_smc_shm_thread\n");
return -EINVAL;
}
wake_up_process(sh_memory_task);
if (!wait_for_completion_timeout(&sh_memory->sync_complete, 10 * HZ)) {
		dev_err(dev,
			"timed out getting shared memory parameters from secure world\n");
return -ETIMEDOUT;
}
if (!sh_memory->addr || !sh_memory->size) {
dev_err(dev,
"failed to get shared memory info from secure world\n");
return -ENOMEM;
}
dev_dbg(dev, "SM software provides paddr: 0x%016x, size: 0x%08x\n",
(unsigned int)sh_memory->addr,
(unsigned int)sh_memory->size);
return 0;
}
/**
* svc_create_memory_pool() - create a memory pool from reserved memory block
* @pdev: pointer to service layer device
* @sh_memory: pointer to service shared memory structure
*
* Return: pool allocated from reserved memory block or ERR_PTR() on error.
*/
static struct gen_pool *
svc_create_memory_pool(struct platform_device *pdev,
struct stratix10_svc_sh_memory *sh_memory)
{
struct device *dev = &pdev->dev;
struct gen_pool *genpool;
unsigned long vaddr;
phys_addr_t paddr;
size_t size;
phys_addr_t begin;
phys_addr_t end;
void *va;
size_t page_mask = PAGE_SIZE - 1;
int min_alloc_order = 3;
int ret;
begin = roundup(sh_memory->addr, PAGE_SIZE);
end = rounddown(sh_memory->addr + sh_memory->size, PAGE_SIZE);
paddr = begin;
size = end - begin;
va = devm_memremap(dev, paddr, size, MEMREMAP_WC);
if (IS_ERR(va)) {
dev_err(dev, "fail to remap shared memory\n");
return ERR_PTR(-EINVAL);
}
vaddr = (unsigned long)va;
	dev_dbg(dev,
		"reserved memory vaddr: %p, paddr: 0x%016x, size: 0x%08x\n",
va, (unsigned int)paddr, (unsigned int)size);
if ((vaddr & page_mask) || (paddr & page_mask) ||
(size & page_mask)) {
dev_err(dev, "page is not aligned\n");
return ERR_PTR(-EINVAL);
}
genpool = gen_pool_create(min_alloc_order, -1);
if (!genpool) {
dev_err(dev, "fail to create genpool\n");
return ERR_PTR(-ENOMEM);
}
gen_pool_set_algo(genpool, gen_pool_best_fit, NULL);
ret = gen_pool_add_virt(genpool, vaddr, paddr, size, -1);
if (ret) {
dev_err(dev, "fail to add memory chunk to the pool\n");
gen_pool_destroy(genpool);
return ERR_PTR(ret);
}
return genpool;
}
/**
* svc_smccc_smc() - secure monitor call between normal and secure world
* @a0: argument passed in registers 0
* @a1: argument passed in registers 1
* @a2: argument passed in registers 2
* @a3: argument passed in registers 3
* @a4: argument passed in registers 4
* @a5: argument passed in registers 5
* @a6: argument passed in registers 6
* @a7: argument passed in registers 7
* @res: result values from register 0 to 3
*/
static void svc_smccc_smc(unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3,
unsigned long a4, unsigned long a5,
unsigned long a6, unsigned long a7,
struct arm_smccc_res *res)
{
arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}
/**
* svc_smccc_hvc() - hypervisor call between normal and secure world
* @a0: argument passed in registers 0
* @a1: argument passed in registers 1
* @a2: argument passed in registers 2
* @a3: argument passed in registers 3
* @a4: argument passed in registers 4
* @a5: argument passed in registers 5
* @a6: argument passed in registers 6
* @a7: argument passed in registers 7
* @res: result values from register 0 to 3
*/
static void svc_smccc_hvc(unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3,
unsigned long a4, unsigned long a5,
unsigned long a6, unsigned long a7,
struct arm_smccc_res *res)
{
arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}
/**
* get_invoke_func() - invoke SMC or HVC call
* @dev: pointer to device
*
* Return: function pointer to svc_smccc_smc or svc_smccc_hvc.
*/
static svc_invoke_fn *get_invoke_func(struct device *dev)
{
const char *method;
if (of_property_read_string(dev->of_node, "method", &method)) {
dev_warn(dev, "missing \"method\" property\n");
return ERR_PTR(-ENXIO);
}
if (!strcmp(method, "smc"))
return svc_smccc_smc;
if (!strcmp(method, "hvc"))
return svc_smccc_hvc;
dev_warn(dev, "invalid \"method\" property: %s\n", method);
return ERR_PTR(-EINVAL);
}
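/*
 * Illustrative sketch (not part of the driver): get_invoke_func() selects the
 * SMC or HVC backend from the "method" property of the matching firmware
 * node. A device tree fragment along these lines is assumed; the exact node
 * name and layout on a given board may differ.
 *
 *	firmware {
 *		svc {
 *			compatible = "intel,stratix10-svc";
 *			method = "smc";
 *		};
 *	};
 */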
/**
* stratix10_svc_request_channel_byname() - request a service channel
* @client: pointer to service client
* @name: service client name
*
* This function is used by service client to request a service channel.
*
* Return: a pointer to channel assigned to the client on success,
* or ERR_PTR() on error.
*/
struct stratix10_svc_chan *stratix10_svc_request_channel_byname(
struct stratix10_svc_client *client, const char *name)
{
struct device *dev = client->dev;
struct stratix10_svc_controller *controller;
struct stratix10_svc_chan *chan = NULL;
unsigned long flag;
int i;
	/* defer if the controller has not been probed yet or its probe failed */
if (list_empty(&svc_ctrl))
return ERR_PTR(-EPROBE_DEFER);
controller = list_first_entry(&svc_ctrl,
struct stratix10_svc_controller, node);
for (i = 0; i < SVC_NUM_CHANNEL; i++) {
if (!strcmp(controller->chans[i].name, name)) {
chan = &controller->chans[i];
break;
}
}
/* if there was no channel match */
if (i == SVC_NUM_CHANNEL) {
dev_err(dev, "%s: channel not allocated\n", __func__);
return ERR_PTR(-EINVAL);
}
if (chan->scl || !try_module_get(controller->dev->driver->owner)) {
dev_dbg(dev, "%s: svc not free\n", __func__);
return ERR_PTR(-EBUSY);
}
spin_lock_irqsave(&chan->lock, flag);
chan->scl = client;
chan->ctrl->num_active_client++;
spin_unlock_irqrestore(&chan->lock, flag);
return chan;
}
EXPORT_SYMBOL_GPL(stratix10_svc_request_channel_byname);
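/*
 * Illustrative client sketch (not part of this file; my_priv, my_probe and
 * my_remove are hypothetical names): a service client embeds a
 * struct stratix10_svc_client, requests a channel by name in its probe
 * routine and releases it again on remove, following the pattern of the RSU
 * client driver. SVC_CLIENT_RSU is one of the channel names registered in
 * stratix10_svc_drv_probe() below.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct my_priv *priv = devm_kzalloc(&pdev->dev, sizeof(*priv),
 *						    GFP_KERNEL);
 *
 *		if (!priv)
 *			return -ENOMEM;
 *
 *		priv->client.dev = &pdev->dev;
 *		priv->client.receive_cb = NULL;
 *		priv->client.priv = priv;
 *
 *		priv->chan = stratix10_svc_request_channel_byname(&priv->client,
 *								  SVC_CLIENT_RSU);
 *		if (IS_ERR(priv->chan))
 *			return PTR_ERR(priv->chan);
 *
 *		platform_set_drvdata(pdev, priv);
 *		return 0;
 *	}
 *
 *	static int my_remove(struct platform_device *pdev)
 *	{
 *		struct my_priv *priv = platform_get_drvdata(pdev);
 *
 *		stratix10_svc_free_channel(priv->chan);
 *		return 0;
 *	}
 */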
/**
* stratix10_svc_free_channel() - free service channel
* @chan: service channel to be freed
*
* This function is used by service client to free a service channel.
*/
void stratix10_svc_free_channel(struct stratix10_svc_chan *chan)
{
unsigned long flag;
spin_lock_irqsave(&chan->lock, flag);
chan->scl = NULL;
chan->ctrl->num_active_client--;
module_put(chan->ctrl->dev->driver->owner);
spin_unlock_irqrestore(&chan->lock, flag);
}
EXPORT_SYMBOL_GPL(stratix10_svc_free_channel);
/**
* stratix10_svc_send() - send a message data to the remote
* @chan: service channel assigned to the client
* @msg: message data to be sent, in the format of
* "struct stratix10_svc_client_msg"
*
* This function is used by service client to add a message to the service
* layer driver's queue for being sent to the secure world.
*
* Return: 0 for success, -ENOMEM or -ENOBUFS on error.
*/
int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg)
{
struct stratix10_svc_client_msg
*p_msg = (struct stratix10_svc_client_msg *)msg;
struct stratix10_svc_data_mem *p_mem;
struct stratix10_svc_data *p_data;
int ret = 0;
unsigned int cpu = 0;
p_data = kzalloc(sizeof(*p_data), GFP_KERNEL);
if (!p_data)
return -ENOMEM;
/* first client will create kernel thread */
if (!chan->ctrl->task) {
chan->ctrl->task =
kthread_create_on_node(svc_normal_to_secure_thread,
(void *)chan->ctrl,
cpu_to_node(cpu),
"svc_smc_hvc_thread");
if (IS_ERR(chan->ctrl->task)) {
dev_err(chan->ctrl->dev,
"failed to create svc_smc_hvc_thread\n");
kfree(p_data);
return -EINVAL;
}
kthread_bind(chan->ctrl->task, cpu);
wake_up_process(chan->ctrl->task);
}
pr_debug("%s: sent P-va=%p, P-com=%x, P-size=%u\n", __func__,
p_msg->payload, p_msg->command,
(unsigned int)p_msg->payload_length);
if (list_empty(&svc_data_mem)) {
if (p_msg->command == COMMAND_RECONFIG) {
struct stratix10_svc_command_config_type *ct =
(struct stratix10_svc_command_config_type *)
p_msg->payload;
p_data->flag = ct->flags;
}
} else {
list_for_each_entry(p_mem, &svc_data_mem, node)
if (p_mem->vaddr == p_msg->payload) {
p_data->paddr = p_mem->paddr;
p_data->size = p_msg->payload_length;
break;
}
if (p_msg->payload_output) {
list_for_each_entry(p_mem, &svc_data_mem, node)
if (p_mem->vaddr == p_msg->payload_output) {
p_data->paddr_output =
p_mem->paddr;
p_data->size_output =
p_msg->payload_length_output;
break;
}
}
}
p_data->command = p_msg->command;
p_data->arg[0] = p_msg->arg[0];
p_data->arg[1] = p_msg->arg[1];
p_data->arg[2] = p_msg->arg[2];
p_data->size = p_msg->payload_length;
p_data->chan = chan;
pr_debug("%s: put to FIFO pa=0x%016x, cmd=%x, size=%u\n", __func__,
(unsigned int)p_data->paddr, p_data->command,
(unsigned int)p_data->size);
ret = kfifo_in_spinlocked(&chan->ctrl->svc_fifo, p_data,
sizeof(*p_data),
&chan->ctrl->svc_fifo_lock);
kfree(p_data);
if (!ret)
return -ENOBUFS;
return 0;
}
EXPORT_SYMBOL_GPL(stratix10_svc_send);
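/*
 * Illustrative sketch (not part of this file; my_callback and the one-second
 * timeout are hypothetical): a client typically registers a receive callback,
 * fills in a struct stratix10_svc_client_msg, sends it and waits for the
 * callback to fire before calling stratix10_svc_done(). This mirrors what
 * rsu_send_msg() in the RSU client driver does.
 *
 *	static void my_callback(struct stratix10_svc_client *client,
 *				struct stratix10_svc_cb_data *data)
 *	{
 *		struct my_priv *priv = client->priv;
 *
 *		// inspect data->status and data->kaddr1..kaddr3 here
 *		complete(&priv->completion);
 *	}
 *
 *	struct stratix10_svc_client_msg msg = {
 *		.command = COMMAND_RSU_STATUS,
 *	};
 *
 *	priv->client.receive_cb = my_callback;
 *	ret = stratix10_svc_send(priv->chan, &msg);
 *	if (!ret && !wait_for_completion_timeout(&priv->completion,
 *						 msecs_to_jiffies(1000)))
 *		ret = -ETIMEDOUT;
 *	stratix10_svc_done(priv->chan);
 */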
/**
* stratix10_svc_done() - complete service request transactions
* @chan: service channel assigned to the client
*
* This function should be called when the client has finished its request,
* or when an error occurs in the request process. It allows the service layer
* to stop the running thread and free kernel resources.
*/
void stratix10_svc_done(struct stratix10_svc_chan *chan)
{
/* stop thread when thread is running AND only one active client */
if (chan->ctrl->task && chan->ctrl->num_active_client <= 1) {
pr_debug("svc_smc_hvc_shm_thread is stopped\n");
kthread_stop(chan->ctrl->task);
chan->ctrl->task = NULL;
}
}
EXPORT_SYMBOL_GPL(stratix10_svc_done);
/**
* stratix10_svc_allocate_memory() - allocate memory
* @chan: service channel assigned to the client
* @size: memory size requested by a specific service client
*
* The service layer allocates the requested number of bytes from the memory
* pool; a service client uses this function to obtain the allocated buffer.
*
* Return: address of allocated memory on success, or ERR_PTR() on error.
*/
void *stratix10_svc_allocate_memory(struct stratix10_svc_chan *chan,
size_t size)
{
struct stratix10_svc_data_mem *pmem;
unsigned long va;
phys_addr_t pa;
struct gen_pool *genpool = chan->ctrl->genpool;
size_t s = roundup(size, 1 << genpool->min_alloc_order);
pmem = devm_kzalloc(chan->ctrl->dev, sizeof(*pmem), GFP_KERNEL);
if (!pmem)
return ERR_PTR(-ENOMEM);
va = gen_pool_alloc(genpool, s);
if (!va)
return ERR_PTR(-ENOMEM);
memset((void *)va, 0, s);
pa = gen_pool_virt_to_phys(genpool, va);
pmem->vaddr = (void *)va;
pmem->paddr = pa;
pmem->size = s;
list_add_tail(&pmem->node, &svc_data_mem);
pr_debug("%s: va=%p, pa=0x%016x\n", __func__,
pmem->vaddr, (unsigned int)pmem->paddr);
return (void *)va;
}
EXPORT_SYMBOL_GPL(stratix10_svc_allocate_memory);
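/*
 * Illustrative sketch (not part of this file; the 256-byte size is arbitrary):
 * payload buffers handed to stratix10_svc_send() must come from this pool so
 * that the service layer can translate them to physical addresses, and they
 * are returned with stratix10_svc_free_memory() once the callback has
 * consumed them (see the RSU client's GET_SPT handling for a real user).
 *
 *	void *buf = stratix10_svc_allocate_memory(priv->chan, 256);
 *
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	msg.payload = buf;
 *	msg.payload_length = 256;
 *	// send the message and wait for the callback, then:
 *	stratix10_svc_free_memory(priv->chan, buf);
 */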
/**
* stratix10_svc_free_memory() - free allocated memory
* @chan: service channel assigned to the client
* @kaddr: memory to be freed
*
* This function is used by service client to free allocated buffers.
*/
void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr)
{
struct stratix10_svc_data_mem *pmem;
list_for_each_entry(pmem, &svc_data_mem, node)
if (pmem->vaddr == kaddr) {
gen_pool_free(chan->ctrl->genpool,
(unsigned long)kaddr, pmem->size);
pmem->vaddr = NULL;
list_del(&pmem->node);
return;
}
}
EXPORT_SYMBOL_GPL(stratix10_svc_free_memory);
static const struct of_device_id stratix10_svc_drv_match[] = {
{.compatible = "intel,stratix10-svc"},
{.compatible = "intel,agilex-svc"},
{},
};
static int stratix10_svc_drv_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct stratix10_svc_controller *controller;
struct stratix10_svc_chan *chans;
struct gen_pool *genpool;
struct stratix10_svc_sh_memory *sh_memory;
struct stratix10_svc *svc;
svc_invoke_fn *invoke_fn;
size_t fifo_size;
int ret;
/* get SMC or HVC function */
invoke_fn = get_invoke_func(dev);
if (IS_ERR(invoke_fn))
return -EINVAL;
sh_memory = devm_kzalloc(dev, sizeof(*sh_memory), GFP_KERNEL);
if (!sh_memory)
return -ENOMEM;
sh_memory->invoke_fn = invoke_fn;
ret = svc_get_sh_memory(pdev, sh_memory);
if (ret)
return ret;
genpool = svc_create_memory_pool(pdev, sh_memory);
if (IS_ERR(genpool))
return PTR_ERR(genpool);
/* allocate service controller and supporting channel */
controller = devm_kzalloc(dev, sizeof(*controller), GFP_KERNEL);
if (!controller) {
ret = -ENOMEM;
goto err_destroy_pool;
}
chans = devm_kmalloc_array(dev, SVC_NUM_CHANNEL,
sizeof(*chans), GFP_KERNEL | __GFP_ZERO);
if (!chans) {
ret = -ENOMEM;
goto err_destroy_pool;
}
controller->dev = dev;
controller->num_chans = SVC_NUM_CHANNEL;
controller->num_active_client = 0;
controller->chans = chans;
controller->genpool = genpool;
controller->task = NULL;
controller->invoke_fn = invoke_fn;
init_completion(&controller->complete_status);
fifo_size = sizeof(struct stratix10_svc_data) * SVC_NUM_DATA_IN_FIFO;
ret = kfifo_alloc(&controller->svc_fifo, fifo_size, GFP_KERNEL);
if (ret) {
dev_err(dev, "failed to allocate FIFO\n");
goto err_destroy_pool;
}
spin_lock_init(&controller->svc_fifo_lock);
chans[0].scl = NULL;
chans[0].ctrl = controller;
chans[0].name = SVC_CLIENT_FPGA;
spin_lock_init(&chans[0].lock);
chans[1].scl = NULL;
chans[1].ctrl = controller;
chans[1].name = SVC_CLIENT_RSU;
spin_lock_init(&chans[1].lock);
chans[2].scl = NULL;
chans[2].ctrl = controller;
chans[2].name = SVC_CLIENT_FCS;
spin_lock_init(&chans[2].lock);
list_add_tail(&controller->node, &svc_ctrl);
platform_set_drvdata(pdev, controller);
/* add svc client device(s) */
svc = devm_kzalloc(dev, sizeof(*svc), GFP_KERNEL);
if (!svc) {
ret = -ENOMEM;
goto err_free_kfifo;
}
svc->stratix10_svc_rsu = platform_device_alloc(STRATIX10_RSU, 0);
if (!svc->stratix10_svc_rsu) {
dev_err(dev, "failed to allocate %s device\n", STRATIX10_RSU);
ret = -ENOMEM;
goto err_free_kfifo;
}
ret = platform_device_add(svc->stratix10_svc_rsu);
if (ret) {
platform_device_put(svc->stratix10_svc_rsu);
goto err_free_kfifo;
}
svc->intel_svc_fcs = platform_device_alloc(INTEL_FCS, 1);
if (!svc->intel_svc_fcs) {
dev_err(dev, "failed to allocate %s device\n", INTEL_FCS);
ret = -ENOMEM;
goto err_unregister_dev;
}
ret = platform_device_add(svc->intel_svc_fcs);
if (ret) {
platform_device_put(svc->intel_svc_fcs);
goto err_unregister_dev;
}
dev_set_drvdata(dev, svc);
pr_info("Intel Service Layer Driver Initialized\n");
return 0;
err_unregister_dev:
platform_device_unregister(svc->stratix10_svc_rsu);
err_free_kfifo:
kfifo_free(&controller->svc_fifo);
err_destroy_pool:
gen_pool_destroy(genpool);
return ret;
}
static int stratix10_svc_drv_remove(struct platform_device *pdev)
{
struct stratix10_svc *svc = dev_get_drvdata(&pdev->dev);
struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev);
platform_device_unregister(svc->intel_svc_fcs);
platform_device_unregister(svc->stratix10_svc_rsu);
kfifo_free(&ctrl->svc_fifo);
if (ctrl->task) {
kthread_stop(ctrl->task);
ctrl->task = NULL;
}
if (ctrl->genpool)
gen_pool_destroy(ctrl->genpool);
list_del(&ctrl->node);
return 0;
}
static struct platform_driver stratix10_svc_driver = {
.probe = stratix10_svc_drv_probe,
.remove = stratix10_svc_drv_remove,
.driver = {
.name = "stratix10-svc",
.of_match_table = stratix10_svc_drv_match,
},
};
static int __init stratix10_svc_init(void)
{
struct device_node *fw_np;
struct device_node *np;
int ret;
fw_np = of_find_node_by_name(NULL, "firmware");
if (!fw_np)
return -ENODEV;
np = of_find_matching_node(fw_np, stratix10_svc_drv_match);
if (!np)
return -ENODEV;
of_node_put(np);
ret = of_platform_populate(fw_np, stratix10_svc_drv_match, NULL, NULL);
if (ret)
return ret;
return platform_driver_register(&stratix10_svc_driver);
}
static void __exit stratix10_svc_exit(void)
{
	platform_driver_unregister(&stratix10_svc_driver);
}
subsys_initcall(stratix10_svc_init);
module_exit(stratix10_svc_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel Stratix10 Service Layer Driver");
MODULE_AUTHOR("Richard Gong <[email protected]>");
MODULE_ALIAS("platform:stratix10-svc");
| linux-master | drivers/firmware/stratix10-svc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2019, Intel Corporation
*/
#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/firmware/intel/stratix10-svc-client.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#define RSU_STATE_MASK GENMASK_ULL(31, 0)
#define RSU_VERSION_MASK GENMASK_ULL(63, 32)
#define RSU_ERROR_LOCATION_MASK GENMASK_ULL(31, 0)
#define RSU_ERROR_DETAIL_MASK GENMASK_ULL(63, 32)
#define RSU_DCMF0_MASK GENMASK_ULL(31, 0)
#define RSU_DCMF1_MASK GENMASK_ULL(63, 32)
#define RSU_DCMF2_MASK GENMASK_ULL(31, 0)
#define RSU_DCMF3_MASK GENMASK_ULL(63, 32)
#define RSU_DCMF0_STATUS_MASK GENMASK_ULL(15, 0)
#define RSU_DCMF1_STATUS_MASK GENMASK_ULL(31, 16)
#define RSU_DCMF2_STATUS_MASK GENMASK_ULL(47, 32)
#define RSU_DCMF3_STATUS_MASK GENMASK_ULL(63, 48)
#define RSU_TIMEOUT (msecs_to_jiffies(SVC_RSU_REQUEST_TIMEOUT_MS))
#define INVALID_RETRY_COUNTER 0xFF
#define INVALID_DCMF_VERSION 0xFF
#define INVALID_DCMF_STATUS 0xFFFFFFFF
#define INVALID_SPT_ADDRESS 0x0
#define RSU_GET_SPT_CMD 0x5A
#define RSU_GET_SPT_RESP_LEN (4 * sizeof(unsigned int))
typedef void (*rsu_callback)(struct stratix10_svc_client *client,
struct stratix10_svc_cb_data *data);
/**
* struct stratix10_rsu_priv - rsu data structure
* @chan: pointer to the allocated service channel
* @client: active service client
* @completion: state for callback completion
* @lock: a mutex to protect callback completion state
* @status.current_image: address of image currently running in flash
* @status.fail_image: address of failed image in flash
* @status.version: the interface version number of RSU firmware
* @status.state: the state of RSU system
* @status.error_details: error code
* @status.error_location: the error offset inside the image that failed
* @dcmf_version.dcmf0: Quartus dcmf0 version
* @dcmf_version.dcmf1: Quartus dcmf1 version
* @dcmf_version.dcmf2: Quartus dcmf2 version
* @dcmf_version.dcmf3: Quartus dcmf3 version
* @dcmf_status.dcmf0: dcmf0 status
* @dcmf_status.dcmf1: dcmf1 status
* @dcmf_status.dcmf2: dcmf2 status
* @dcmf_status.dcmf3: dcmf3 status
* @retry_counter: the current image's retry counter
* @max_retry: the preset max retry value
* @spt0_address: address of spt0
* @spt1_address: address of spt1
* @get_spt_response_buf: response from sdm for get_spt command
*/
struct stratix10_rsu_priv {
struct stratix10_svc_chan *chan;
struct stratix10_svc_client client;
struct completion completion;
struct mutex lock;
struct {
unsigned long current_image;
unsigned long fail_image;
unsigned int version;
unsigned int state;
unsigned int error_details;
unsigned int error_location;
} status;
struct {
unsigned int dcmf0;
unsigned int dcmf1;
unsigned int dcmf2;
unsigned int dcmf3;
} dcmf_version;
struct {
unsigned int dcmf0;
unsigned int dcmf1;
unsigned int dcmf2;
unsigned int dcmf3;
} dcmf_status;
unsigned int retry_counter;
unsigned int max_retry;
unsigned long spt0_address;
unsigned long spt1_address;
unsigned int *get_spt_response_buf;
};
/**
* rsu_status_callback() - Status callback from Intel Service Layer
* @client: pointer to service client
* @data: pointer to callback data structure
*
 * Callback from the Intel service layer for the RSU status request. The
 * status is only updated after a system reboot, so a status query is issued
 * during driver probe.
*/
static void rsu_status_callback(struct stratix10_svc_client *client,
struct stratix10_svc_cb_data *data)
{
struct stratix10_rsu_priv *priv = client->priv;
struct arm_smccc_res *res = (struct arm_smccc_res *)data->kaddr1;
if (data->status == BIT(SVC_STATUS_OK)) {
priv->status.version = FIELD_GET(RSU_VERSION_MASK,
res->a2);
priv->status.state = FIELD_GET(RSU_STATE_MASK, res->a2);
priv->status.fail_image = res->a1;
priv->status.current_image = res->a0;
priv->status.error_location =
FIELD_GET(RSU_ERROR_LOCATION_MASK, res->a3);
priv->status.error_details =
FIELD_GET(RSU_ERROR_DETAIL_MASK, res->a3);
} else {
dev_err(client->dev, "COMMAND_RSU_STATUS returned 0x%lX\n",
res->a0);
priv->status.version = 0;
priv->status.state = 0;
priv->status.fail_image = 0;
priv->status.current_image = 0;
priv->status.error_location = 0;
priv->status.error_details = 0;
}
complete(&priv->completion);
}
/**
* rsu_command_callback() - Update callback from Intel Service Layer
* @client: pointer to client
* @data: pointer to callback data structure
*
* Callback from Intel service layer for RSU commands.
*/
static void rsu_command_callback(struct stratix10_svc_client *client,
struct stratix10_svc_cb_data *data)
{
struct stratix10_rsu_priv *priv = client->priv;
if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
dev_warn(client->dev, "Secure FW doesn't support notify\n");
else if (data->status == BIT(SVC_STATUS_ERROR))
dev_err(client->dev, "Failure, returned status is %lu\n",
BIT(data->status));
complete(&priv->completion);
}
/**
* rsu_retry_callback() - Callback from Intel service layer for getting
* the current image's retry counter from the firmware
* @client: pointer to client
* @data: pointer to callback data structure
*
 * Callback from the Intel service layer for the retry counter, which tells
 * the user how many more times the current image is allowed to reload itself
 * before giving up and starting the RSU fail-over flow.
*/
static void rsu_retry_callback(struct stratix10_svc_client *client,
struct stratix10_svc_cb_data *data)
{
struct stratix10_rsu_priv *priv = client->priv;
unsigned int *counter = (unsigned int *)data->kaddr1;
if (data->status == BIT(SVC_STATUS_OK))
priv->retry_counter = *counter;
else if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
dev_warn(client->dev, "Secure FW doesn't support retry\n");
else
dev_err(client->dev, "Failed to get retry counter %lu\n",
BIT(data->status));
complete(&priv->completion);
}
/**
* rsu_max_retry_callback() - Callback from Intel service layer for getting
* the max retry value from the firmware
* @client: pointer to client
* @data: pointer to callback data structure
*
* Callback from Intel service layer for max retry.
*/
static void rsu_max_retry_callback(struct stratix10_svc_client *client,
struct stratix10_svc_cb_data *data)
{
struct stratix10_rsu_priv *priv = client->priv;
unsigned int *max_retry = (unsigned int *)data->kaddr1;
if (data->status == BIT(SVC_STATUS_OK))
priv->max_retry = *max_retry;
else if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
dev_warn(client->dev, "Secure FW doesn't support max retry\n");
else
dev_err(client->dev, "Failed to get max retry %lu\n",
BIT(data->status));
complete(&priv->completion);
}
/**
* rsu_dcmf_version_callback() - Callback from Intel service layer for getting
* the DCMF version
* @client: pointer to client
* @data: pointer to callback data structure
*
* Callback from Intel service layer for DCMF version number
*/
static void rsu_dcmf_version_callback(struct stratix10_svc_client *client,
struct stratix10_svc_cb_data *data)
{
struct stratix10_rsu_priv *priv = client->priv;
unsigned long long *value1 = (unsigned long long *)data->kaddr1;
unsigned long long *value2 = (unsigned long long *)data->kaddr2;
if (data->status == BIT(SVC_STATUS_OK)) {
priv->dcmf_version.dcmf0 = FIELD_GET(RSU_DCMF0_MASK, *value1);
priv->dcmf_version.dcmf1 = FIELD_GET(RSU_DCMF1_MASK, *value1);
priv->dcmf_version.dcmf2 = FIELD_GET(RSU_DCMF2_MASK, *value2);
priv->dcmf_version.dcmf3 = FIELD_GET(RSU_DCMF3_MASK, *value2);
} else
dev_err(client->dev, "failed to get DCMF version\n");
complete(&priv->completion);
}
/**
* rsu_dcmf_status_callback() - Callback from Intel service layer for getting
* the DCMF status
* @client: pointer to client
* @data: pointer to callback data structure
*
* Callback from Intel service layer for DCMF status
*/
static void rsu_dcmf_status_callback(struct stratix10_svc_client *client,
struct stratix10_svc_cb_data *data)
{
struct stratix10_rsu_priv *priv = client->priv;
unsigned long long *value = (unsigned long long *)data->kaddr1;
if (data->status == BIT(SVC_STATUS_OK)) {
priv->dcmf_status.dcmf0 = FIELD_GET(RSU_DCMF0_STATUS_MASK,
*value);
priv->dcmf_status.dcmf1 = FIELD_GET(RSU_DCMF1_STATUS_MASK,
*value);
priv->dcmf_status.dcmf2 = FIELD_GET(RSU_DCMF2_STATUS_MASK,
*value);
priv->dcmf_status.dcmf3 = FIELD_GET(RSU_DCMF3_STATUS_MASK,
*value);
} else
dev_err(client->dev, "failed to get DCMF status\n");
complete(&priv->completion);
}
static void rsu_get_spt_callback(struct stratix10_svc_client *client,
struct stratix10_svc_cb_data *data)
{
struct stratix10_rsu_priv *priv = client->priv;
unsigned long *mbox_err = (unsigned long *)data->kaddr1;
unsigned long *resp_len = (unsigned long *)data->kaddr2;
if (data->status != BIT(SVC_STATUS_OK) || (*mbox_err) ||
(*resp_len != RSU_GET_SPT_RESP_LEN))
goto error;
priv->spt0_address = priv->get_spt_response_buf[0];
priv->spt0_address <<= 32;
priv->spt0_address |= priv->get_spt_response_buf[1];
priv->spt1_address = priv->get_spt_response_buf[2];
priv->spt1_address <<= 32;
priv->spt1_address |= priv->get_spt_response_buf[3];
goto complete;
error:
dev_err(client->dev, "failed to get SPTs\n");
complete:
stratix10_svc_free_memory(priv->chan, priv->get_spt_response_buf);
priv->get_spt_response_buf = NULL;
complete(&priv->completion);
}
/**
* rsu_send_msg() - send a message to Intel service layer
* @priv: pointer to rsu private data
* @command: RSU status or update command
* @arg: the request argument, the bitstream address or notify status
* @callback: function pointer for the callback (status or update)
*
* Start an Intel service layer transaction to perform the SMC call that
* is necessary to get RSU boot log or set the address of bitstream to
* boot after reboot.
*
* Returns 0 on success or -ETIMEDOUT on error.
*/
static int rsu_send_msg(struct stratix10_rsu_priv *priv,
enum stratix10_svc_command_code command,
unsigned long arg,
rsu_callback callback)
{
struct stratix10_svc_client_msg msg;
int ret;
mutex_lock(&priv->lock);
reinit_completion(&priv->completion);
priv->client.receive_cb = callback;
msg.command = command;
if (arg)
msg.arg[0] = arg;
if (command == COMMAND_MBOX_SEND_CMD) {
msg.arg[1] = 0;
msg.payload = NULL;
msg.payload_length = 0;
msg.payload_output = priv->get_spt_response_buf;
msg.payload_length_output = RSU_GET_SPT_RESP_LEN;
}
ret = stratix10_svc_send(priv->chan, &msg);
if (ret < 0)
goto status_done;
ret = wait_for_completion_interruptible_timeout(&priv->completion,
RSU_TIMEOUT);
if (!ret) {
dev_err(priv->client.dev,
"timeout waiting for SMC call\n");
ret = -ETIMEDOUT;
goto status_done;
} else if (ret < 0) {
dev_err(priv->client.dev,
"error %d waiting for SMC call\n", ret);
goto status_done;
} else {
ret = 0;
}
status_done:
stratix10_svc_done(priv->chan);
mutex_unlock(&priv->lock);
return ret;
}
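/*
 * Illustrative usage (taken from the probe and notify paths below):
 * rsu_send_msg() pairs one command with one callback and blocks until that
 * callback completes, so refreshing the cached status and retry counter is
 * simply two consecutive calls.
 *
 *	ret = rsu_send_msg(priv, COMMAND_RSU_STATUS, 0, rsu_status_callback);
 *	if (!ret)
 *		ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0,
 *				   rsu_retry_callback);
 */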
/*
* This driver exposes some optional features of the Intel Stratix 10 SoC FPGA.
* The sysfs interfaces exposed here are FPGA Remote System Update (RSU)
* related. They allow user space software to query the configuration system
* status and to request optional reboot behavior specific to Intel FPGAs.
*/
static ssize_t current_image_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
return sprintf(buf, "0x%08lx\n", priv->status.current_image);
}
static ssize_t fail_image_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
return sprintf(buf, "0x%08lx\n", priv->status.fail_image);
}
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
return sprintf(buf, "0x%08x\n", priv->status.version);
}
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
return sprintf(buf, "0x%08x\n", priv->status.state);
}
static ssize_t error_location_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
return sprintf(buf, "0x%08x\n", priv->status.error_location);
}
static ssize_t error_details_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
return sprintf(buf, "0x%08x\n", priv->status.error_details);
}
static ssize_t retry_counter_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
return sprintf(buf, "0x%08x\n", priv->retry_counter);
}
static ssize_t max_retry_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
	return scnprintf(buf, PAGE_SIZE, "0x%08x\n", priv->max_retry);
}
static ssize_t dcmf0_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
return sprintf(buf, "0x%08x\n", priv->dcmf_version.dcmf0);
}
static ssize_t dcmf1_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
return sprintf(buf, "0x%08x\n", priv->dcmf_version.dcmf1);
}
static ssize_t dcmf2_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
return sprintf(buf, "0x%08x\n", priv->dcmf_version.dcmf2);
}
static ssize_t dcmf3_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
return sprintf(buf, "0x%08x\n", priv->dcmf_version.dcmf3);
}
static ssize_t dcmf0_status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
if (priv->dcmf_status.dcmf0 == INVALID_DCMF_STATUS)
return -EIO;
return sprintf(buf, "0x%08x\n", priv->dcmf_status.dcmf0);
}
static ssize_t dcmf1_status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
if (priv->dcmf_status.dcmf1 == INVALID_DCMF_STATUS)
return -EIO;
return sprintf(buf, "0x%08x\n", priv->dcmf_status.dcmf1);
}
static ssize_t dcmf2_status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
if (priv->dcmf_status.dcmf2 == INVALID_DCMF_STATUS)
return -EIO;
return sprintf(buf, "0x%08x\n", priv->dcmf_status.dcmf2);
}
static ssize_t dcmf3_status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
if (priv->dcmf_status.dcmf3 == INVALID_DCMF_STATUS)
return -EIO;
return sprintf(buf, "0x%08x\n", priv->dcmf_status.dcmf3);
}
static ssize_t reboot_image_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
unsigned long address;
int ret;
if (!priv)
return -ENODEV;
ret = kstrtoul(buf, 0, &address);
if (ret)
return ret;
ret = rsu_send_msg(priv, COMMAND_RSU_UPDATE,
address, rsu_command_callback);
if (ret) {
dev_err(dev, "Error, RSU update returned %i\n", ret);
return ret;
}
return count;
}
static ssize_t notify_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
unsigned long status;
int ret;
if (!priv)
return -ENODEV;
ret = kstrtoul(buf, 0, &status);
if (ret)
return ret;
ret = rsu_send_msg(priv, COMMAND_RSU_NOTIFY,
status, rsu_command_callback);
if (ret) {
dev_err(dev, "Error, RSU notify returned %i\n", ret);
return ret;
}
/* to get the updated state */
ret = rsu_send_msg(priv, COMMAND_RSU_STATUS,
0, rsu_status_callback);
if (ret) {
dev_err(dev, "Error, getting RSU status %i\n", ret);
return ret;
}
ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0, rsu_retry_callback);
if (ret) {
dev_err(dev, "Error, getting RSU retry %i\n", ret);
return ret;
}
return count;
}
static ssize_t spt0_address_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
if (priv->spt0_address == INVALID_SPT_ADDRESS)
return -EIO;
return scnprintf(buf, PAGE_SIZE, "0x%08lx\n", priv->spt0_address);
}
static ssize_t spt1_address_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
if (priv->spt1_address == INVALID_SPT_ADDRESS)
return -EIO;
return scnprintf(buf, PAGE_SIZE, "0x%08lx\n", priv->spt1_address);
}
static DEVICE_ATTR_RO(current_image);
static DEVICE_ATTR_RO(fail_image);
static DEVICE_ATTR_RO(state);
static DEVICE_ATTR_RO(version);
static DEVICE_ATTR_RO(error_location);
static DEVICE_ATTR_RO(error_details);
static DEVICE_ATTR_RO(retry_counter);
static DEVICE_ATTR_RO(max_retry);
static DEVICE_ATTR_RO(dcmf0);
static DEVICE_ATTR_RO(dcmf1);
static DEVICE_ATTR_RO(dcmf2);
static DEVICE_ATTR_RO(dcmf3);
static DEVICE_ATTR_RO(dcmf0_status);
static DEVICE_ATTR_RO(dcmf1_status);
static DEVICE_ATTR_RO(dcmf2_status);
static DEVICE_ATTR_RO(dcmf3_status);
static DEVICE_ATTR_WO(reboot_image);
static DEVICE_ATTR_WO(notify);
static DEVICE_ATTR_RO(spt0_address);
static DEVICE_ATTR_RO(spt1_address);
static struct attribute *rsu_attrs[] = {
&dev_attr_current_image.attr,
&dev_attr_fail_image.attr,
&dev_attr_state.attr,
&dev_attr_version.attr,
&dev_attr_error_location.attr,
&dev_attr_error_details.attr,
&dev_attr_retry_counter.attr,
&dev_attr_max_retry.attr,
&dev_attr_dcmf0.attr,
&dev_attr_dcmf1.attr,
&dev_attr_dcmf2.attr,
&dev_attr_dcmf3.attr,
&dev_attr_dcmf0_status.attr,
&dev_attr_dcmf1_status.attr,
&dev_attr_dcmf2_status.attr,
&dev_attr_dcmf3_status.attr,
&dev_attr_reboot_image.attr,
&dev_attr_notify.attr,
&dev_attr_spt0_address.attr,
&dev_attr_spt1_address.attr,
NULL
};
ATTRIBUTE_GROUPS(rsu);
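/*
 * Illustrative usage (not part of the driver; the sysfs path is abbreviated
 * because it depends on how the RSU platform device is named): the attributes
 * above appear under the RSU platform device's directory, and typical user
 * space interaction looks like
 *
 *	# cat .../state
 *	# echo 0x03c00000 > .../reboot_image	(made-up flash offset)
 *	# echo 1 > .../notify
 *
 * reboot_image takes the flash address of the image to boot next (parsed by
 * kstrtoul() in reboot_image_store()), and notify forwards the written value
 * to firmware via COMMAND_RSU_NOTIFY and then refreshes the cached status.
 */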
static int stratix10_rsu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct stratix10_rsu_priv *priv;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->client.dev = dev;
priv->client.receive_cb = NULL;
priv->client.priv = priv;
priv->status.current_image = 0;
priv->status.fail_image = 0;
priv->status.error_location = 0;
priv->status.error_details = 0;
priv->status.version = 0;
priv->status.state = 0;
priv->retry_counter = INVALID_RETRY_COUNTER;
priv->dcmf_version.dcmf0 = INVALID_DCMF_VERSION;
priv->dcmf_version.dcmf1 = INVALID_DCMF_VERSION;
priv->dcmf_version.dcmf2 = INVALID_DCMF_VERSION;
priv->dcmf_version.dcmf3 = INVALID_DCMF_VERSION;
priv->dcmf_status.dcmf0 = INVALID_DCMF_STATUS;
priv->dcmf_status.dcmf1 = INVALID_DCMF_STATUS;
priv->dcmf_status.dcmf2 = INVALID_DCMF_STATUS;
priv->dcmf_status.dcmf3 = INVALID_DCMF_STATUS;
priv->max_retry = INVALID_RETRY_COUNTER;
priv->spt0_address = INVALID_SPT_ADDRESS;
priv->spt1_address = INVALID_SPT_ADDRESS;
mutex_init(&priv->lock);
priv->chan = stratix10_svc_request_channel_byname(&priv->client,
SVC_CLIENT_RSU);
if (IS_ERR(priv->chan)) {
dev_err(dev, "couldn't get service channel %s\n",
SVC_CLIENT_RSU);
return PTR_ERR(priv->chan);
}
init_completion(&priv->completion);
platform_set_drvdata(pdev, priv);
/* get the initial state from firmware */
ret = rsu_send_msg(priv, COMMAND_RSU_STATUS,
0, rsu_status_callback);
if (ret) {
dev_err(dev, "Error, getting RSU status %i\n", ret);
stratix10_svc_free_channel(priv->chan);
}
/* get DCMF version from firmware */
ret = rsu_send_msg(priv, COMMAND_RSU_DCMF_VERSION,
0, rsu_dcmf_version_callback);
if (ret) {
dev_err(dev, "Error, getting DCMF version %i\n", ret);
stratix10_svc_free_channel(priv->chan);
}
ret = rsu_send_msg(priv, COMMAND_RSU_DCMF_STATUS,
0, rsu_dcmf_status_callback);
if (ret) {
dev_err(dev, "Error, getting DCMF status %i\n", ret);
stratix10_svc_free_channel(priv->chan);
}
ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0, rsu_retry_callback);
if (ret) {
dev_err(dev, "Error, getting RSU retry %i\n", ret);
stratix10_svc_free_channel(priv->chan);
}
ret = rsu_send_msg(priv, COMMAND_RSU_MAX_RETRY, 0,
rsu_max_retry_callback);
if (ret) {
dev_err(dev, "Error, getting RSU max retry %i\n", ret);
stratix10_svc_free_channel(priv->chan);
}
priv->get_spt_response_buf =
stratix10_svc_allocate_memory(priv->chan, RSU_GET_SPT_RESP_LEN);
if (IS_ERR(priv->get_spt_response_buf)) {
dev_err(dev, "failed to allocate get spt buffer\n");
} else {
ret = rsu_send_msg(priv, COMMAND_MBOX_SEND_CMD,
RSU_GET_SPT_CMD, rsu_get_spt_callback);
if (ret) {
dev_err(dev, "Error, getting SPT table %i\n", ret);
stratix10_svc_free_channel(priv->chan);
}
}
return ret;
}
static int stratix10_rsu_remove(struct platform_device *pdev)
{
struct stratix10_rsu_priv *priv = platform_get_drvdata(pdev);
stratix10_svc_free_channel(priv->chan);
return 0;
}
static struct platform_driver stratix10_rsu_driver = {
.probe = stratix10_rsu_probe,
.remove = stratix10_rsu_remove,
.driver = {
.name = "stratix10-rsu",
.dev_groups = rsu_groups,
},
};
module_platform_driver(stratix10_rsu_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel Remote System Update Driver");
MODULE_AUTHOR("Richard Gong <[email protected]>");
| linux-master | drivers/firmware/stratix10-rsu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* dmi-sysfs.c
*
* This module exports the DMI tables read-only to userspace through the
* sysfs file system.
*
* Data is currently found below
* /sys/firmware/dmi/...
*
* DMI attributes are presented in attribute files with names
* formatted using %d-%d, so that the first integer indicates the
* structure type (0-255), and the second field is the instance of that
* entry.
*
* Copyright 2011 Google, Inc.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kobject.h>
#include <linux/dmi.h>
#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/io.h>
#include <asm/dmi.h>
#define MAX_ENTRY_TYPE 255 /* Most of these aren't used, but the entry
			      type field is only 8 bits wide */
struct dmi_sysfs_entry {
struct dmi_header dh;
struct kobject kobj;
int instance;
int position;
struct list_head list;
struct kobject *child;
};
/*
* Global list of dmi_sysfs_entry. Even though this should only be
* manipulated at setup and teardown, the lazy nature of the kobject
* system means we get lazy removes.
*/
static LIST_HEAD(entry_list);
static DEFINE_SPINLOCK(entry_list_lock);
/* dmi_sysfs_attribute - Top level attribute. Used by all entries. */
struct dmi_sysfs_attribute {
struct attribute attr;
ssize_t (*show)(struct dmi_sysfs_entry *entry, char *buf);
};
#define DMI_SYSFS_ATTR(_entry, _name) \
struct dmi_sysfs_attribute dmi_sysfs_attr_##_entry##_##_name = { \
.attr = {.name = __stringify(_name), .mode = 0400}, \
.show = dmi_sysfs_##_entry##_##_name, \
}
/*
* dmi_sysfs_mapped_attribute - Attribute where we require the entry be
* mapped in. Use in conjunction with dmi_sysfs_specialize_attr_ops.
*/
struct dmi_sysfs_mapped_attribute {
struct attribute attr;
ssize_t (*show)(struct dmi_sysfs_entry *entry,
const struct dmi_header *dh,
char *buf);
};
#define DMI_SYSFS_MAPPED_ATTR(_entry, _name) \
struct dmi_sysfs_mapped_attribute dmi_sysfs_attr_##_entry##_##_name = { \
.attr = {.name = __stringify(_name), .mode = 0400}, \
.show = dmi_sysfs_##_entry##_##_name, \
}
/*************************************************
* Generic DMI entry support.
*************************************************/
static void dmi_entry_free(struct kobject *kobj)
{
kfree(kobj);
}
static struct dmi_sysfs_entry *to_entry(struct kobject *kobj)
{
return container_of(kobj, struct dmi_sysfs_entry, kobj);
}
static struct dmi_sysfs_attribute *to_attr(struct attribute *attr)
{
return container_of(attr, struct dmi_sysfs_attribute, attr);
}
static ssize_t dmi_sysfs_attr_show(struct kobject *kobj,
struct attribute *_attr, char *buf)
{
struct dmi_sysfs_entry *entry = to_entry(kobj);
struct dmi_sysfs_attribute *attr = to_attr(_attr);
/* DMI stuff is only ever admin visible */
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
return attr->show(entry, buf);
}
static const struct sysfs_ops dmi_sysfs_attr_ops = {
.show = dmi_sysfs_attr_show,
};
typedef ssize_t (*dmi_callback)(struct dmi_sysfs_entry *,
const struct dmi_header *dh, void *);
struct find_dmi_data {
struct dmi_sysfs_entry *entry;
dmi_callback callback;
void *private;
int instance_countdown;
ssize_t ret;
};
static void find_dmi_entry_helper(const struct dmi_header *dh,
void *_data)
{
struct find_dmi_data *data = _data;
struct dmi_sysfs_entry *entry = data->entry;
/* Is this the entry we want? */
if (dh->type != entry->dh.type)
return;
if (data->instance_countdown != 0) {
/* try the next instance? */
data->instance_countdown--;
return;
}
/*
* Don't ever revisit the instance. Short circuit later
* instances by letting the instance_countdown run negative
*/
data->instance_countdown--;
/* Found the entry */
data->ret = data->callback(entry, dh, data->private);
}
/* State for passing the read parameters through dmi_find_entry() */
struct dmi_read_state {
char *buf;
loff_t pos;
size_t count;
};
static ssize_t find_dmi_entry(struct dmi_sysfs_entry *entry,
dmi_callback callback, void *private)
{
struct find_dmi_data data = {
.entry = entry,
.callback = callback,
.private = private,
.instance_countdown = entry->instance,
.ret = -EIO, /* To signal the entry disappeared */
};
int ret;
ret = dmi_walk(find_dmi_entry_helper, &data);
/* This shouldn't happen, but just in case. */
if (ret)
return -EINVAL;
return data.ret;
}
/*
* Calculate and return the byte length of the dmi entry identified by
* dh. This includes both the formatted portion as well as the
* unformatted string space, including the two trailing nul characters.
*/
static size_t dmi_entry_length(const struct dmi_header *dh)
{
const char *p = (const char *)dh;
p += dh->length;
while (p[0] || p[1])
p++;
return 2 + p - (const char *)dh;
}
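/*
 * Worked example (not part of the original): for an entry whose formatted
 * portion is dh->length = 13 bytes and whose string-set is "Vendor\0v1.0\0\0",
 * the loop stops at the first of the two trailing nuls, so the function
 * returns 13 + 7 + 5 + 1 = 26 bytes: the formatted area, the two strings with
 * their terminating nuls, plus the extra nul that ends the string-set.
 */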
/*************************************************
* Support bits for specialized DMI entry support
*************************************************/
struct dmi_entry_attr_show_data {
struct attribute *attr;
char *buf;
};
static ssize_t dmi_entry_attr_show_helper(struct dmi_sysfs_entry *entry,
const struct dmi_header *dh,
void *_data)
{
struct dmi_entry_attr_show_data *data = _data;
struct dmi_sysfs_mapped_attribute *attr;
attr = container_of(data->attr,
struct dmi_sysfs_mapped_attribute, attr);
return attr->show(entry, dh, data->buf);
}
static ssize_t dmi_entry_attr_show(struct kobject *kobj,
struct attribute *attr,
char *buf)
{
struct dmi_entry_attr_show_data data = {
.attr = attr,
.buf = buf,
};
/* Find the entry according to our parent and call the
* normalized show method hanging off of the attribute */
return find_dmi_entry(to_entry(kobj->parent),
dmi_entry_attr_show_helper, &data);
}
static const struct sysfs_ops dmi_sysfs_specialize_attr_ops = {
.show = dmi_entry_attr_show,
};
/*************************************************
* Specialized DMI entry support.
*************************************************/
/*** Type 15 - System Event Table ***/
#define DMI_SEL_ACCESS_METHOD_IO8 0x00
#define DMI_SEL_ACCESS_METHOD_IO2x8 0x01
#define DMI_SEL_ACCESS_METHOD_IO16 0x02
#define DMI_SEL_ACCESS_METHOD_PHYS32 0x03
#define DMI_SEL_ACCESS_METHOD_GPNV 0x04
struct dmi_system_event_log {
struct dmi_header header;
u16 area_length;
u16 header_start_offset;
u16 data_start_offset;
u8 access_method;
u8 status;
u32 change_token;
union {
struct {
u16 index_addr;
u16 data_addr;
} io;
u32 phys_addr32;
u16 gpnv_handle;
u32 access_method_address;
};
u8 header_format;
u8 type_descriptors_supported_count;
u8 per_log_type_descriptor_length;
	u8	supported_log_type_descriptors[];
} __packed;
#define DMI_SYSFS_SEL_FIELD(_field) \
static ssize_t dmi_sysfs_sel_##_field(struct dmi_sysfs_entry *entry, \
const struct dmi_header *dh, \
char *buf) \
{ \
struct dmi_system_event_log sel; \
if (sizeof(sel) > dmi_entry_length(dh)) \
return -EIO; \
memcpy(&sel, dh, sizeof(sel)); \
return sprintf(buf, "%u\n", sel._field); \
} \
static DMI_SYSFS_MAPPED_ATTR(sel, _field)
DMI_SYSFS_SEL_FIELD(area_length);
DMI_SYSFS_SEL_FIELD(header_start_offset);
DMI_SYSFS_SEL_FIELD(data_start_offset);
DMI_SYSFS_SEL_FIELD(access_method);
DMI_SYSFS_SEL_FIELD(status);
DMI_SYSFS_SEL_FIELD(change_token);
DMI_SYSFS_SEL_FIELD(access_method_address);
DMI_SYSFS_SEL_FIELD(header_format);
DMI_SYSFS_SEL_FIELD(type_descriptors_supported_count);
DMI_SYSFS_SEL_FIELD(per_log_type_descriptor_length);
static struct attribute *dmi_sysfs_sel_attrs[] = {
&dmi_sysfs_attr_sel_area_length.attr,
&dmi_sysfs_attr_sel_header_start_offset.attr,
&dmi_sysfs_attr_sel_data_start_offset.attr,
&dmi_sysfs_attr_sel_access_method.attr,
&dmi_sysfs_attr_sel_status.attr,
&dmi_sysfs_attr_sel_change_token.attr,
&dmi_sysfs_attr_sel_access_method_address.attr,
&dmi_sysfs_attr_sel_header_format.attr,
&dmi_sysfs_attr_sel_type_descriptors_supported_count.attr,
&dmi_sysfs_attr_sel_per_log_type_descriptor_length.attr,
NULL,
};
ATTRIBUTE_GROUPS(dmi_sysfs_sel);
static const struct kobj_type dmi_system_event_log_ktype = {
.release = dmi_entry_free,
.sysfs_ops = &dmi_sysfs_specialize_attr_ops,
.default_groups = dmi_sysfs_sel_groups,
};
#ifdef CONFIG_HAS_IOPORT
typedef u8 (*sel_io_reader)(const struct dmi_system_event_log *sel,
loff_t offset);
static DEFINE_MUTEX(io_port_lock);
static u8 read_sel_8bit_indexed_io(const struct dmi_system_event_log *sel,
loff_t offset)
{
u8 ret;
mutex_lock(&io_port_lock);
outb((u8)offset, sel->io.index_addr);
ret = inb(sel->io.data_addr);
mutex_unlock(&io_port_lock);
return ret;
}
static u8 read_sel_2x8bit_indexed_io(const struct dmi_system_event_log *sel,
loff_t offset)
{
u8 ret;
mutex_lock(&io_port_lock);
outb((u8)offset, sel->io.index_addr);
outb((u8)(offset >> 8), sel->io.index_addr + 1);
ret = inb(sel->io.data_addr);
mutex_unlock(&io_port_lock);
return ret;
}
static u8 read_sel_16bit_indexed_io(const struct dmi_system_event_log *sel,
loff_t offset)
{
u8 ret;
mutex_lock(&io_port_lock);
outw((u16)offset, sel->io.index_addr);
ret = inb(sel->io.data_addr);
mutex_unlock(&io_port_lock);
return ret;
}
static sel_io_reader sel_io_readers[] = {
[DMI_SEL_ACCESS_METHOD_IO8] = read_sel_8bit_indexed_io,
[DMI_SEL_ACCESS_METHOD_IO2x8] = read_sel_2x8bit_indexed_io,
[DMI_SEL_ACCESS_METHOD_IO16] = read_sel_16bit_indexed_io,
};
static ssize_t dmi_sel_raw_read_io(struct dmi_sysfs_entry *entry,
const struct dmi_system_event_log *sel,
char *buf, loff_t pos, size_t count)
{
ssize_t wrote = 0;
sel_io_reader io_reader = sel_io_readers[sel->access_method];
while (count && pos < sel->area_length) {
count--;
*(buf++) = io_reader(sel, pos++);
wrote++;
}
return wrote;
}
#endif
static ssize_t dmi_sel_raw_read_phys32(struct dmi_sysfs_entry *entry,
const struct dmi_system_event_log *sel,
char *buf, loff_t pos, size_t count)
{
u8 __iomem *mapped;
ssize_t wrote = 0;
mapped = dmi_remap(sel->access_method_address, sel->area_length);
if (!mapped)
return -EIO;
while (count && pos < sel->area_length) {
count--;
*(buf++) = readb(mapped + pos++);
wrote++;
}
dmi_unmap(mapped);
return wrote;
}
static ssize_t dmi_sel_raw_read_helper(struct dmi_sysfs_entry *entry,
const struct dmi_header *dh,
void *_state)
{
struct dmi_read_state *state = _state;
struct dmi_system_event_log sel;
if (sizeof(sel) > dmi_entry_length(dh))
return -EIO;
memcpy(&sel, dh, sizeof(sel));
switch (sel.access_method) {
#ifdef CONFIG_HAS_IOPORT
case DMI_SEL_ACCESS_METHOD_IO8:
case DMI_SEL_ACCESS_METHOD_IO2x8:
case DMI_SEL_ACCESS_METHOD_IO16:
return dmi_sel_raw_read_io(entry, &sel, state->buf,
state->pos, state->count);
#endif
case DMI_SEL_ACCESS_METHOD_PHYS32:
return dmi_sel_raw_read_phys32(entry, &sel, state->buf,
state->pos, state->count);
case DMI_SEL_ACCESS_METHOD_GPNV:
pr_info_ratelimited("dmi-sysfs: GPNV support missing.\n");
return -EIO;
default:
pr_info_ratelimited("dmi-sysfs: Unknown access method %02x\n",
sel.access_method);
return -EIO;
}
}
static ssize_t dmi_sel_raw_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
struct dmi_sysfs_entry *entry = to_entry(kobj->parent);
struct dmi_read_state state = {
.buf = buf,
.pos = pos,
.count = count,
};
return find_dmi_entry(entry, dmi_sel_raw_read_helper, &state);
}
static struct bin_attribute dmi_sel_raw_attr = {
.attr = {.name = "raw_event_log", .mode = 0400},
.read = dmi_sel_raw_read,
};
static int dmi_system_event_log(struct dmi_sysfs_entry *entry)
{
int ret;
entry->child = kzalloc(sizeof(*entry->child), GFP_KERNEL);
if (!entry->child)
return -ENOMEM;
ret = kobject_init_and_add(entry->child,
&dmi_system_event_log_ktype,
&entry->kobj,
"system_event_log");
if (ret)
goto out_free;
ret = sysfs_create_bin_file(entry->child, &dmi_sel_raw_attr);
if (ret)
goto out_del;
return 0;
out_del:
kobject_del(entry->child);
out_free:
kfree(entry->child);
return ret;
}
/*************************************************
* Generic DMI entry support.
*************************************************/
static ssize_t dmi_sysfs_entry_length(struct dmi_sysfs_entry *entry, char *buf)
{
return sprintf(buf, "%d\n", entry->dh.length);
}
static ssize_t dmi_sysfs_entry_handle(struct dmi_sysfs_entry *entry, char *buf)
{
return sprintf(buf, "%d\n", entry->dh.handle);
}
static ssize_t dmi_sysfs_entry_type(struct dmi_sysfs_entry *entry, char *buf)
{
return sprintf(buf, "%d\n", entry->dh.type);
}
static ssize_t dmi_sysfs_entry_instance(struct dmi_sysfs_entry *entry,
char *buf)
{
return sprintf(buf, "%d\n", entry->instance);
}
static ssize_t dmi_sysfs_entry_position(struct dmi_sysfs_entry *entry,
char *buf)
{
return sprintf(buf, "%d\n", entry->position);
}
static DMI_SYSFS_ATTR(entry, length);
static DMI_SYSFS_ATTR(entry, handle);
static DMI_SYSFS_ATTR(entry, type);
static DMI_SYSFS_ATTR(entry, instance);
static DMI_SYSFS_ATTR(entry, position);
static struct attribute *dmi_sysfs_entry_attrs[] = {
&dmi_sysfs_attr_entry_length.attr,
&dmi_sysfs_attr_entry_handle.attr,
&dmi_sysfs_attr_entry_type.attr,
&dmi_sysfs_attr_entry_instance.attr,
&dmi_sysfs_attr_entry_position.attr,
NULL,
};
ATTRIBUTE_GROUPS(dmi_sysfs_entry);
static ssize_t dmi_entry_raw_read_helper(struct dmi_sysfs_entry *entry,
const struct dmi_header *dh,
void *_state)
{
struct dmi_read_state *state = _state;
size_t entry_length;
entry_length = dmi_entry_length(dh);
return memory_read_from_buffer(state->buf, state->count,
&state->pos, dh, entry_length);
}
static ssize_t dmi_entry_raw_read(struct file *filp,
struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
struct dmi_sysfs_entry *entry = to_entry(kobj);
struct dmi_read_state state = {
.buf = buf,
.pos = pos,
.count = count,
};
return find_dmi_entry(entry, dmi_entry_raw_read_helper, &state);
}
static const struct bin_attribute dmi_entry_raw_attr = {
.attr = {.name = "raw", .mode = 0400},
.read = dmi_entry_raw_read,
};
static void dmi_sysfs_entry_release(struct kobject *kobj)
{
struct dmi_sysfs_entry *entry = to_entry(kobj);
spin_lock(&entry_list_lock);
list_del(&entry->list);
spin_unlock(&entry_list_lock);
kfree(entry);
}
static const struct kobj_type dmi_sysfs_entry_ktype = {
.release = dmi_sysfs_entry_release,
.sysfs_ops = &dmi_sysfs_attr_ops,
.default_groups = dmi_sysfs_entry_groups,
};
static struct kset *dmi_kset;
/* Global count of all instances seen. Only for setup */
static int __initdata instance_counts[MAX_ENTRY_TYPE + 1];
/* Global positional count of all entries seen. Only for setup */
static int __initdata position_count;
static void __init dmi_sysfs_register_handle(const struct dmi_header *dh,
void *_ret)
{
struct dmi_sysfs_entry *entry;
int *ret = _ret;
/* If a previous entry saw an error, short circuit */
if (*ret)
return;
/* Allocate and register a new entry into the entries set */
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
*ret = -ENOMEM;
return;
}
/* Set the key */
memcpy(&entry->dh, dh, sizeof(*dh));
entry->instance = instance_counts[dh->type]++;
entry->position = position_count++;
entry->kobj.kset = dmi_kset;
*ret = kobject_init_and_add(&entry->kobj, &dmi_sysfs_entry_ktype, NULL,
"%d-%d", dh->type, entry->instance);
/* Thread on the global list for cleanup */
spin_lock(&entry_list_lock);
list_add_tail(&entry->list, &entry_list);
spin_unlock(&entry_list_lock);
if (*ret) {
kobject_put(&entry->kobj);
return;
}
/* Handle specializations by type */
switch (dh->type) {
case DMI_ENTRY_SYSTEM_EVENT_LOG:
*ret = dmi_system_event_log(entry);
break;
default:
/* No specialization */
break;
}
if (*ret)
goto out_err;
/* Create the raw binary file to access the entry */
*ret = sysfs_create_bin_file(&entry->kobj, &dmi_entry_raw_attr);
if (*ret)
goto out_err;
return;
out_err:
kobject_put(entry->child);
kobject_put(&entry->kobj);
return;
}
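/*
 * Illustrative result (not part of the original): after registration each DMI
 * entry shows up under /sys/firmware/dmi/entries/<type>-<instance>/ with the
 * generic attributes plus the "raw" blob, e.g. for the first type 1 (System
 * Information) entry:
 *
 *	/sys/firmware/dmi/entries/1-0/length
 *	/sys/firmware/dmi/entries/1-0/handle
 *	/sys/firmware/dmi/entries/1-0/type
 *	/sys/firmware/dmi/entries/1-0/instance
 *	/sys/firmware/dmi/entries/1-0/position
 *	/sys/firmware/dmi/entries/1-0/raw
 *
 * Type 15 (System Event Log) entries additionally get a system_event_log/
 * subdirectory holding the specialized attributes defined above.
 */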
static void cleanup_entry_list(void)
{
struct dmi_sysfs_entry *entry, *next;
/* No locks, we are on our way out */
list_for_each_entry_safe(entry, next, &entry_list, list) {
kobject_put(entry->child);
kobject_put(&entry->kobj);
}
}
static int __init dmi_sysfs_init(void)
{
int error;
int val;
if (!dmi_kobj) {
pr_debug("dmi-sysfs: dmi entry is absent.\n");
error = -ENODATA;
goto err;
}
dmi_kset = kset_create_and_add("entries", NULL, dmi_kobj);
if (!dmi_kset) {
error = -ENOMEM;
goto err;
}
val = 0;
error = dmi_walk(dmi_sysfs_register_handle, &val);
if (error)
goto err;
if (val) {
error = val;
goto err;
}
pr_debug("dmi-sysfs: loaded.\n");
return 0;
err:
cleanup_entry_list();
kset_unregister(dmi_kset);
return error;
}
/* clean up everything. */
static void __exit dmi_sysfs_exit(void)
{
pr_debug("dmi-sysfs: unloading.\n");
cleanup_entry_list();
kset_unregister(dmi_kset);
}
module_init(dmi_sysfs_init);
module_exit(dmi_sysfs_exit);
MODULE_AUTHOR("Mike Waychison <[email protected]>");
MODULE_DESCRIPTION("DMI sysfs support");
MODULE_LICENSE("GPL");
| linux-master | drivers/firmware/dmi-sysfs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Export SMBIOS/DMI info via sysfs to userspace
*
* Copyright 2007, Lennart Poettering
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/device.h>
#include <linux/slab.h>
struct dmi_device_attribute{
struct device_attribute dev_attr;
int field;
};
#define to_dmi_dev_attr(_dev_attr) \
container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
static ssize_t sys_dmi_field_show(struct device *dev,
struct device_attribute *attr,
char *page)
{
int field = to_dmi_dev_attr(attr)->field;
ssize_t len;
len = scnprintf(page, PAGE_SIZE, "%s\n", dmi_get_system_info(field));
page[len-1] = '\n';
return len;
}
#define DMI_ATTR(_name, _mode, _show, _field) \
{ .dev_attr = __ATTR(_name, _mode, _show, NULL), \
.field = _field }
#define DEFINE_DMI_ATTR_WITH_SHOW(_name, _mode, _field) \
static struct dmi_device_attribute sys_dmi_##_name##_attr = \
DMI_ATTR(_name, _mode, sys_dmi_field_show, _field);
DEFINE_DMI_ATTR_WITH_SHOW(bios_vendor, 0444, DMI_BIOS_VENDOR);
DEFINE_DMI_ATTR_WITH_SHOW(bios_version, 0444, DMI_BIOS_VERSION);
DEFINE_DMI_ATTR_WITH_SHOW(bios_date, 0444, DMI_BIOS_DATE);
DEFINE_DMI_ATTR_WITH_SHOW(sys_vendor, 0444, DMI_SYS_VENDOR);
DEFINE_DMI_ATTR_WITH_SHOW(bios_release, 0444, DMI_BIOS_RELEASE);
DEFINE_DMI_ATTR_WITH_SHOW(ec_firmware_release, 0444, DMI_EC_FIRMWARE_RELEASE);
DEFINE_DMI_ATTR_WITH_SHOW(product_name, 0444, DMI_PRODUCT_NAME);
DEFINE_DMI_ATTR_WITH_SHOW(product_version, 0444, DMI_PRODUCT_VERSION);
DEFINE_DMI_ATTR_WITH_SHOW(product_serial, 0400, DMI_PRODUCT_SERIAL);
DEFINE_DMI_ATTR_WITH_SHOW(product_uuid, 0400, DMI_PRODUCT_UUID);
DEFINE_DMI_ATTR_WITH_SHOW(product_sku, 0444, DMI_PRODUCT_SKU);
DEFINE_DMI_ATTR_WITH_SHOW(product_family, 0444, DMI_PRODUCT_FAMILY);
DEFINE_DMI_ATTR_WITH_SHOW(board_vendor, 0444, DMI_BOARD_VENDOR);
DEFINE_DMI_ATTR_WITH_SHOW(board_name, 0444, DMI_BOARD_NAME);
DEFINE_DMI_ATTR_WITH_SHOW(board_version, 0444, DMI_BOARD_VERSION);
DEFINE_DMI_ATTR_WITH_SHOW(board_serial, 0400, DMI_BOARD_SERIAL);
DEFINE_DMI_ATTR_WITH_SHOW(board_asset_tag, 0444, DMI_BOARD_ASSET_TAG);
DEFINE_DMI_ATTR_WITH_SHOW(chassis_vendor, 0444, DMI_CHASSIS_VENDOR);
DEFINE_DMI_ATTR_WITH_SHOW(chassis_type, 0444, DMI_CHASSIS_TYPE);
DEFINE_DMI_ATTR_WITH_SHOW(chassis_version, 0444, DMI_CHASSIS_VERSION);
DEFINE_DMI_ATTR_WITH_SHOW(chassis_serial, 0400, DMI_CHASSIS_SERIAL);
DEFINE_DMI_ATTR_WITH_SHOW(chassis_asset_tag, 0444, DMI_CHASSIS_ASSET_TAG);
static void ascii_filter(char *d, const char *s)
{
/* Filter out characters we don't want to see in the modalias string */
for (; *s; s++)
if (*s > ' ' && *s < 127 && *s != ':')
*(d++) = *s;
*d = 0;
}
static ssize_t get_modalias(char *buffer, size_t buffer_size)
{
/*
* Note new fields need to be added at the end to keep compatibility
	 * with udev's hwdb, which matches on "`cat dmi/id/modalias`*".
*/
static const struct mafield {
const char *prefix;
int field;
} fields[] = {
{ "bvn", DMI_BIOS_VENDOR },
{ "bvr", DMI_BIOS_VERSION },
{ "bd", DMI_BIOS_DATE },
{ "br", DMI_BIOS_RELEASE },
{ "efr", DMI_EC_FIRMWARE_RELEASE },
{ "svn", DMI_SYS_VENDOR },
{ "pn", DMI_PRODUCT_NAME },
{ "pvr", DMI_PRODUCT_VERSION },
{ "rvn", DMI_BOARD_VENDOR },
{ "rn", DMI_BOARD_NAME },
{ "rvr", DMI_BOARD_VERSION },
{ "cvn", DMI_CHASSIS_VENDOR },
{ "ct", DMI_CHASSIS_TYPE },
{ "cvr", DMI_CHASSIS_VERSION },
{ "sku", DMI_PRODUCT_SKU },
{ NULL, DMI_NONE }
};
ssize_t l, left;
char *p;
const struct mafield *f;
strcpy(buffer, "dmi");
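	/* the buffer starts with the "dmi" prefix; 'left' tracks the space remaining for field entries */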
	p = buffer + 3;
	left = buffer_size - 4;
for (f = fields; f->prefix && left > 0; f++) {
const char *c;
char *t;
c = dmi_get_system_info(f->field);
if (!c)
continue;
t = kmalloc(strlen(c) + 1, GFP_KERNEL);
if (!t)
break;
ascii_filter(t, c);
l = scnprintf(p, left, ":%s%s", f->prefix, t);
kfree(t);
p += l;
left -= l;
}
p[0] = ':';
p[1] = 0;
return p - buffer + 1;
}
static ssize_t sys_dmi_modalias_show(struct device *dev,
struct device_attribute *attr, char *page)
{
ssize_t r;
r = get_modalias(page, PAGE_SIZE-1);
page[r] = '\n';
page[r+1] = 0;
return r+1;
}
static struct device_attribute sys_dmi_modalias_attr =
__ATTR(modalias, 0444, sys_dmi_modalias_show, NULL);
static struct attribute *sys_dmi_attributes[DMI_STRING_MAX+2];
static struct attribute_group sys_dmi_attribute_group = {
.attrs = sys_dmi_attributes,
};
static const struct attribute_group* sys_dmi_attribute_groups[] = {
&sys_dmi_attribute_group,
NULL
};
static int dmi_dev_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
ssize_t len;
if (add_uevent_var(env, "MODALIAS="))
return -ENOMEM;
len = get_modalias(&env->buf[env->buflen - 1],
sizeof(env->buf) - env->buflen);
if (len >= (sizeof(env->buf) - env->buflen))
return -ENOMEM;
env->buflen += len;
return 0;
}
static struct class dmi_class = {
.name = "dmi",
.dev_release = (void(*)(struct device *)) kfree,
.dev_uevent = dmi_dev_uevent,
};
static struct device *dmi_dev;
/* Initialization */
#define ADD_DMI_ATTR(_name, _field) \
if (dmi_get_system_info(_field)) \
sys_dmi_attributes[i++] = &sys_dmi_##_name##_attr.dev_attr.attr;
/* In a separate function to keep gcc 3.2 happy - do NOT merge this in
dmi_id_init! */
static void __init dmi_id_init_attr_table(void)
{
int i;
	/* Not all DMI fields are necessarily available on every
	 * system, so build an attribute table of just what is
	 * available */
i = 0;
ADD_DMI_ATTR(bios_vendor, DMI_BIOS_VENDOR);
ADD_DMI_ATTR(bios_version, DMI_BIOS_VERSION);
ADD_DMI_ATTR(bios_date, DMI_BIOS_DATE);
ADD_DMI_ATTR(bios_release, DMI_BIOS_RELEASE);
ADD_DMI_ATTR(ec_firmware_release, DMI_EC_FIRMWARE_RELEASE);
ADD_DMI_ATTR(sys_vendor, DMI_SYS_VENDOR);
ADD_DMI_ATTR(product_name, DMI_PRODUCT_NAME);
ADD_DMI_ATTR(product_version, DMI_PRODUCT_VERSION);
ADD_DMI_ATTR(product_serial, DMI_PRODUCT_SERIAL);
ADD_DMI_ATTR(product_uuid, DMI_PRODUCT_UUID);
ADD_DMI_ATTR(product_family, DMI_PRODUCT_FAMILY);
ADD_DMI_ATTR(product_sku, DMI_PRODUCT_SKU);
ADD_DMI_ATTR(board_vendor, DMI_BOARD_VENDOR);
ADD_DMI_ATTR(board_name, DMI_BOARD_NAME);
ADD_DMI_ATTR(board_version, DMI_BOARD_VERSION);
ADD_DMI_ATTR(board_serial, DMI_BOARD_SERIAL);
ADD_DMI_ATTR(board_asset_tag, DMI_BOARD_ASSET_TAG);
ADD_DMI_ATTR(chassis_vendor, DMI_CHASSIS_VENDOR);
ADD_DMI_ATTR(chassis_type, DMI_CHASSIS_TYPE);
ADD_DMI_ATTR(chassis_version, DMI_CHASSIS_VERSION);
ADD_DMI_ATTR(chassis_serial, DMI_CHASSIS_SERIAL);
ADD_DMI_ATTR(chassis_asset_tag, DMI_CHASSIS_ASSET_TAG);
sys_dmi_attributes[i++] = &sys_dmi_modalias_attr.attr;
}
static int __init dmi_id_init(void)
{
int ret;
if (!dmi_available)
return -ENODEV;
dmi_id_init_attr_table();
ret = class_register(&dmi_class);
if (ret)
return ret;
dmi_dev = kzalloc(sizeof(*dmi_dev), GFP_KERNEL);
if (!dmi_dev) {
ret = -ENOMEM;
goto fail_class_unregister;
}
dmi_dev->class = &dmi_class;
dev_set_name(dmi_dev, "id");
dmi_dev->groups = sys_dmi_attribute_groups;
ret = device_register(dmi_dev);
if (ret)
goto fail_put_dmi_dev;
return 0;
fail_put_dmi_dev:
put_device(dmi_dev);
fail_class_unregister:
class_unregister(&dmi_class);
return ret;
}
arch_initcall(dmi_id_init);
| linux-master | drivers/firmware/dmi-id.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Turris Mox rWTM firmware driver
*
* Copyright (C) 2019 Marek Behún <[email protected]>
*/
#include <linux/armada-37xx-rwtm-mailbox.h>
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/hw_random.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#define DRIVER_NAME "turris-mox-rwtm"
/*
* The macros and constants below come from Turris Mox's rWTM firmware code.
 * This firmware is open source and its sources can be found at
* https://gitlab.labs.nic.cz/turris/mox-boot-builder/tree/master/wtmi.
*/
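/*
 * The mailbox status word packs the error code in bits [31:30], a 20-bit
 * value in bits [29:10] and the command number in bits [9:0].
 */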
#define MBOX_STS_SUCCESS (0 << 30)
#define MBOX_STS_FAIL (1 << 30)
#define MBOX_STS_BADCMD (2 << 30)
#define MBOX_STS_ERROR(s) ((s) & (3 << 30))
#define MBOX_STS_VALUE(s) (((s) >> 10) & 0xfffff)
#define MBOX_STS_CMD(s) ((s) & 0x3ff)
enum mbox_cmd {
MBOX_CMD_GET_RANDOM = 1,
MBOX_CMD_BOARD_INFO = 2,
MBOX_CMD_ECDSA_PUB_KEY = 3,
MBOX_CMD_HASH = 4,
MBOX_CMD_SIGN = 5,
MBOX_CMD_VERIFY = 6,
MBOX_CMD_OTP_READ = 7,
MBOX_CMD_OTP_WRITE = 8,
};
struct mox_kobject;
struct mox_rwtm {
struct device *dev;
struct mbox_client mbox_client;
struct mbox_chan *mbox;
struct mox_kobject *kobj;
struct hwrng hwrng;
struct armada_37xx_rwtm_rx_msg reply;
void *buf;
dma_addr_t buf_phys;
struct mutex busy;
struct completion cmd_done;
/* board information */
int has_board_info;
u64 serial_number;
int board_version, ram_size;
u8 mac_address1[6], mac_address2[6];
/* public key burned in eFuse */
int has_pubkey;
u8 pubkey[135];
#ifdef CONFIG_DEBUG_FS
/*
* Signature process. This is currently done via debugfs, because it
* does not conform to the sysfs standard "one file per attribute".
* It should be rewritten via crypto API once akcipher API is available
* from userspace.
*/
struct dentry *debugfs_root;
u32 last_sig[34];
int last_sig_done;
#endif
};
struct mox_kobject {
struct kobject kobj;
struct mox_rwtm *rwtm;
};
static inline struct kobject *rwtm_to_kobj(struct mox_rwtm *rwtm)
{
return &rwtm->kobj->kobj;
}
static inline struct mox_rwtm *to_rwtm(struct kobject *kobj)
{
return container_of(kobj, struct mox_kobject, kobj)->rwtm;
}
static void mox_kobj_release(struct kobject *kobj)
{
kfree(to_rwtm(kobj)->kobj);
}
static const struct kobj_type mox_kobj_ktype = {
.release = mox_kobj_release,
.sysfs_ops = &kobj_sysfs_ops,
};
static int mox_kobj_create(struct mox_rwtm *rwtm)
{
rwtm->kobj = kzalloc(sizeof(*rwtm->kobj), GFP_KERNEL);
if (!rwtm->kobj)
return -ENOMEM;
kobject_init(rwtm_to_kobj(rwtm), &mox_kobj_ktype);
if (kobject_add(rwtm_to_kobj(rwtm), firmware_kobj, "turris-mox-rwtm")) {
kobject_put(rwtm_to_kobj(rwtm));
return -ENXIO;
}
rwtm->kobj->rwtm = rwtm;
return 0;
}
#define MOX_ATTR_RO(name, format, cat) \
static ssize_t \
name##_show(struct kobject *kobj, struct kobj_attribute *a, \
char *buf) \
{ \
struct mox_rwtm *rwtm = to_rwtm(kobj); \
if (!rwtm->has_##cat) \
return -ENODATA; \
return sprintf(buf, format, rwtm->name); \
} \
static struct kobj_attribute mox_attr_##name = __ATTR_RO(name)
MOX_ATTR_RO(serial_number, "%016llX\n", board_info);
MOX_ATTR_RO(board_version, "%i\n", board_info);
MOX_ATTR_RO(ram_size, "%i\n", board_info);
MOX_ATTR_RO(mac_address1, "%pM\n", board_info);
MOX_ATTR_RO(mac_address2, "%pM\n", board_info);
MOX_ATTR_RO(pubkey, "%s\n", pubkey);
static int mox_get_status(enum mbox_cmd cmd, u32 retval)
{
if (MBOX_STS_CMD(retval) != cmd)
return -EIO;
else if (MBOX_STS_ERROR(retval) == MBOX_STS_FAIL)
return -(int)MBOX_STS_VALUE(retval);
else if (MBOX_STS_ERROR(retval) == MBOX_STS_BADCMD)
return -ENOSYS;
else if (MBOX_STS_ERROR(retval) != MBOX_STS_SUCCESS)
return -EIO;
else
return MBOX_STS_VALUE(retval);
}
static const struct attribute *mox_rwtm_attrs[] = {
&mox_attr_serial_number.attr,
&mox_attr_board_version.attr,
&mox_attr_ram_size.attr,
&mox_attr_mac_address1.attr,
&mox_attr_mac_address2.attr,
&mox_attr_pubkey.attr,
NULL
};
static void mox_rwtm_rx_callback(struct mbox_client *cl, void *data)
{
struct mox_rwtm *rwtm = dev_get_drvdata(cl->dev);
struct armada_37xx_rwtm_rx_msg *msg = data;
rwtm->reply = *msg;
complete(&rwtm->cmd_done);
}
static void reply_to_mac_addr(u8 *mac, u32 t1, u32 t2)
{
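	/* the firmware packs the MAC address big-endian into the low 16 bits of t1 and all 32 bits of t2 */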
mac[0] = t1 >> 8;
mac[1] = t1;
mac[2] = t2 >> 24;
mac[3] = t2 >> 16;
mac[4] = t2 >> 8;
mac[5] = t2;
}
static int mox_get_board_info(struct mox_rwtm *rwtm)
{
struct armada_37xx_rwtm_tx_msg msg;
struct armada_37xx_rwtm_rx_msg *reply = &rwtm->reply;
int ret;
msg.command = MBOX_CMD_BOARD_INFO;
ret = mbox_send_message(rwtm->mbox, &msg);
if (ret < 0)
return ret;
ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2);
	if (!ret)
		return -ETIMEDOUT;
ret = mox_get_status(MBOX_CMD_BOARD_INFO, reply->retval);
if (ret == -ENODATA) {
dev_warn(rwtm->dev,
"Board does not have manufacturing information burned!\n");
} else if (ret == -ENOSYS) {
dev_notice(rwtm->dev,
"Firmware does not support the BOARD_INFO command\n");
} else if (ret < 0) {
return ret;
} else {
rwtm->serial_number = reply->status[1];
rwtm->serial_number <<= 32;
rwtm->serial_number |= reply->status[0];
rwtm->board_version = reply->status[2];
rwtm->ram_size = reply->status[3];
reply_to_mac_addr(rwtm->mac_address1, reply->status[4],
reply->status[5]);
reply_to_mac_addr(rwtm->mac_address2, reply->status[6],
reply->status[7]);
rwtm->has_board_info = 1;
pr_info("Turris Mox serial number %016llX\n",
rwtm->serial_number);
pr_info(" board version %i\n", rwtm->board_version);
pr_info(" burned RAM size %i MiB\n", rwtm->ram_size);
}
msg.command = MBOX_CMD_ECDSA_PUB_KEY;
ret = mbox_send_message(rwtm->mbox, &msg);
if (ret < 0)
return ret;
ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2);
	if (!ret)
		return -ETIMEDOUT;
ret = mox_get_status(MBOX_CMD_ECDSA_PUB_KEY, reply->retval);
if (ret == -ENODATA) {
dev_warn(rwtm->dev, "Board has no public key burned!\n");
} else if (ret == -ENOSYS) {
dev_notice(rwtm->dev,
"Firmware does not support the ECDSA_PUB_KEY command\n");
} else if (ret < 0) {
return ret;
} else {
u32 *s = reply->status;
rwtm->has_pubkey = 1;
sprintf(rwtm->pubkey,
"%06x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x",
ret, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7],
s[8], s[9], s[10], s[11], s[12], s[13], s[14], s[15]);
}
return 0;
}
static int check_get_random_support(struct mox_rwtm *rwtm)
{
struct armada_37xx_rwtm_tx_msg msg;
int ret;
msg.command = MBOX_CMD_GET_RANDOM;
msg.args[0] = 1;
msg.args[1] = rwtm->buf_phys;
msg.args[2] = 4;
ret = mbox_send_message(rwtm->mbox, &msg);
if (ret < 0)
return ret;
ret = wait_for_completion_timeout(&rwtm->cmd_done, HZ / 2);
	if (!ret)
		return -ETIMEDOUT;
return mox_get_status(MBOX_CMD_GET_RANDOM, rwtm->reply.retval);
}
static int mox_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
struct mox_rwtm *rwtm = (struct mox_rwtm *) rng->priv;
struct armada_37xx_rwtm_tx_msg msg;
int ret;
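	/* cap the request at 4 KiB so it always fits in the shared DMA buffer */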
if (max > 4096)
max = 4096;
msg.command = MBOX_CMD_GET_RANDOM;
msg.args[0] = 1;
msg.args[1] = rwtm->buf_phys;
msg.args[2] = (max + 3) & ~3;
if (!wait) {
if (!mutex_trylock(&rwtm->busy))
return -EBUSY;
} else {
mutex_lock(&rwtm->busy);
}
ret = mbox_send_message(rwtm->mbox, &msg);
if (ret < 0)
goto unlock_mutex;
ret = wait_for_completion_interruptible(&rwtm->cmd_done);
if (ret < 0)
goto unlock_mutex;
ret = mox_get_status(MBOX_CMD_GET_RANDOM, rwtm->reply.retval);
if (ret < 0)
goto unlock_mutex;
memcpy(data, rwtm->buf, max);
ret = max;
unlock_mutex:
mutex_unlock(&rwtm->busy);
return ret;
}
#ifdef CONFIG_DEBUG_FS
static int rwtm_debug_open(struct inode *inode, struct file *file)
{
file->private_data = inode->i_private;
return nonseekable_open(inode, file);
}
static ssize_t do_sign_read(struct file *file, char __user *buf, size_t len,
loff_t *ppos)
{
struct mox_rwtm *rwtm = file->private_data;
ssize_t ret;
/* only allow one read, of 136 bytes, from position 0 */
if (*ppos != 0)
return 0;
if (len < 136)
return -EINVAL;
if (!rwtm->last_sig_done)
return -ENODATA;
/* 2 arrays of 17 32-bit words are 136 bytes */
ret = simple_read_from_buffer(buf, len, ppos, rwtm->last_sig, 136);
rwtm->last_sig_done = 0;
return ret;
}
static ssize_t do_sign_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos)
{
struct mox_rwtm *rwtm = file->private_data;
struct armada_37xx_rwtm_rx_msg *reply = &rwtm->reply;
struct armada_37xx_rwtm_tx_msg msg;
loff_t dummy = 0;
ssize_t ret;
/* the input is a SHA-512 hash, so exactly 64 bytes have to be read */
if (len != 64)
return -EINVAL;
	/* refuse a new request until the previous signature has been read */
if (rwtm->last_sig_done)
return -EBUSY;
if (!mutex_trylock(&rwtm->busy))
return -EBUSY;
/*
* Here we have to send:
* 1. Address of the input to sign.
* The input is an array of 17 32-bit words, the first (most
	 * significant) is 0, the remaining 16 words are copied from the SHA-512
* hash given by the user and converted from BE to LE.
* 2. Address of the buffer where ECDSA signature value R shall be
* stored by the rWTM firmware.
* 3. Address of the buffer where ECDSA signature value S shall be
* stored by the rWTM firmware.
*/
memset(rwtm->buf, 0, 4);
ret = simple_write_to_buffer(rwtm->buf + 4, 64, &dummy, buf, len);
if (ret < 0)
goto unlock_mutex;
be32_to_cpu_array(rwtm->buf, rwtm->buf, 17);
msg.command = MBOX_CMD_SIGN;
msg.args[0] = 1;
msg.args[1] = rwtm->buf_phys;
msg.args[2] = rwtm->buf_phys + 68;
msg.args[3] = rwtm->buf_phys + 2 * 68;
ret = mbox_send_message(rwtm->mbox, &msg);
if (ret < 0)
goto unlock_mutex;
ret = wait_for_completion_interruptible(&rwtm->cmd_done);
if (ret < 0)
goto unlock_mutex;
ret = MBOX_STS_VALUE(reply->retval);
if (MBOX_STS_ERROR(reply->retval) != MBOX_STS_SUCCESS)
goto unlock_mutex;
/*
* Here we read the R and S values of the ECDSA signature
* computed by the rWTM firmware and convert their words from
* LE to BE.
*/
memcpy(rwtm->last_sig, rwtm->buf + 68, 136);
cpu_to_be32_array(rwtm->last_sig, rwtm->last_sig, 34);
rwtm->last_sig_done = 1;
mutex_unlock(&rwtm->busy);
return len;
unlock_mutex:
mutex_unlock(&rwtm->busy);
return ret;
}
static const struct file_operations do_sign_fops = {
.owner = THIS_MODULE,
.open = rwtm_debug_open,
.read = do_sign_read,
.write = do_sign_write,
.llseek = no_llseek,
};
static int rwtm_register_debugfs(struct mox_rwtm *rwtm)
{
struct dentry *root, *entry;
root = debugfs_create_dir("turris-mox-rwtm", NULL);
if (IS_ERR(root))
return PTR_ERR(root);
entry = debugfs_create_file_unsafe("do_sign", 0600, root, rwtm,
&do_sign_fops);
if (IS_ERR(entry))
goto err_remove;
rwtm->debugfs_root = root;
return 0;
err_remove:
debugfs_remove_recursive(root);
return PTR_ERR(entry);
}
static void rwtm_unregister_debugfs(struct mox_rwtm *rwtm)
{
debugfs_remove_recursive(rwtm->debugfs_root);
}
#else
static inline int rwtm_register_debugfs(struct mox_rwtm *rwtm)
{
return 0;
}
static inline void rwtm_unregister_debugfs(struct mox_rwtm *rwtm)
{
}
#endif
static int turris_mox_rwtm_probe(struct platform_device *pdev)
{
struct mox_rwtm *rwtm;
struct device *dev = &pdev->dev;
int ret;
rwtm = devm_kzalloc(dev, sizeof(*rwtm), GFP_KERNEL);
if (!rwtm)
return -ENOMEM;
rwtm->dev = dev;
rwtm->buf = dmam_alloc_coherent(dev, PAGE_SIZE, &rwtm->buf_phys,
GFP_KERNEL);
if (!rwtm->buf)
return -ENOMEM;
ret = mox_kobj_create(rwtm);
if (ret < 0) {
dev_err(dev, "Cannot create turris-mox-rwtm kobject!\n");
return ret;
}
ret = sysfs_create_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs);
if (ret < 0) {
dev_err(dev, "Cannot create sysfs files!\n");
goto put_kobj;
}
platform_set_drvdata(pdev, rwtm);
mutex_init(&rwtm->busy);
rwtm->mbox_client.dev = dev;
rwtm->mbox_client.rx_callback = mox_rwtm_rx_callback;
rwtm->mbox = mbox_request_channel(&rwtm->mbox_client, 0);
if (IS_ERR(rwtm->mbox)) {
ret = PTR_ERR(rwtm->mbox);
if (ret != -EPROBE_DEFER)
dev_err(dev, "Cannot request mailbox channel: %i\n",
ret);
goto remove_files;
}
init_completion(&rwtm->cmd_done);
ret = mox_get_board_info(rwtm);
if (ret < 0)
dev_warn(dev, "Cannot read board information: %i\n", ret);
ret = check_get_random_support(rwtm);
if (ret < 0) {
dev_notice(dev,
"Firmware does not support the GET_RANDOM command\n");
goto free_channel;
}
rwtm->hwrng.name = DRIVER_NAME "_hwrng";
rwtm->hwrng.read = mox_hwrng_read;
rwtm->hwrng.priv = (unsigned long) rwtm;
ret = devm_hwrng_register(dev, &rwtm->hwrng);
if (ret < 0) {
dev_err(dev, "Cannot register HWRNG: %i\n", ret);
goto free_channel;
}
ret = rwtm_register_debugfs(rwtm);
if (ret < 0) {
dev_err(dev, "Failed creating debugfs entries: %i\n", ret);
goto free_channel;
}
dev_info(dev, "HWRNG successfully registered\n");
return 0;
free_channel:
mbox_free_channel(rwtm->mbox);
remove_files:
sysfs_remove_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs);
put_kobj:
kobject_put(rwtm_to_kobj(rwtm));
return ret;
}
static int turris_mox_rwtm_remove(struct platform_device *pdev)
{
struct mox_rwtm *rwtm = platform_get_drvdata(pdev);
rwtm_unregister_debugfs(rwtm);
sysfs_remove_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs);
kobject_put(rwtm_to_kobj(rwtm));
mbox_free_channel(rwtm->mbox);
return 0;
}
static const struct of_device_id turris_mox_rwtm_match[] = {
{ .compatible = "cznic,turris-mox-rwtm", },
{ .compatible = "marvell,armada-3700-rwtm-firmware", },
{ },
};
MODULE_DEVICE_TABLE(of, turris_mox_rwtm_match);
static struct platform_driver turris_mox_rwtm_driver = {
.probe = turris_mox_rwtm_probe,
.remove = turris_mox_rwtm_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = turris_mox_rwtm_match,
},
};
module_platform_driver(turris_mox_rwtm_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Turris Mox rWTM firmware driver");
MODULE_AUTHOR("Marek Behun <[email protected]>");
| linux-master | drivers/firmware/turris-mox-rwtm.c |
/*
* drivers/firmware/qemu_fw_cfg.c
*
* Copyright 2015 Carnegie Mellon University
*
* Expose entries from QEMU's firmware configuration (fw_cfg) device in
* sysfs (read-only, under "/sys/firmware/qemu_fw_cfg/...").
*
* The fw_cfg device may be instantiated via either an ACPI node (on x86
* and select subsets of aarch64), a Device Tree node (on arm), or using
* a kernel module (or command line) parameter with the following syntax:
*
* [qemu_fw_cfg.]ioport=<size>@<base>[:<ctrl_off>:<data_off>[:<dma_off>]]
* or
* [qemu_fw_cfg.]mmio=<size>@<base>[:<ctrl_off>:<data_off>[:<dma_off>]]
*
* where:
* <size> := size of ioport or mmio range
* <base> := physical base address of ioport or mmio range
* <ctrl_off> := (optional) offset of control register
* <data_off> := (optional) offset of data register
* <dma_off> := (optional) offset of dma register
*
* e.g.:
* qemu_fw_cfg.ioport=12@0x510:0:1:4 (the default on x86)
* or
* qemu_fw_cfg.mmio=16@0x9020000:8:0:16 (the default on arm)
*/
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <uapi/linux/qemu_fw_cfg.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/crash_core.h>
MODULE_AUTHOR("Gabriel L. Somlo <[email protected]>");
MODULE_DESCRIPTION("QEMU fw_cfg sysfs support");
MODULE_LICENSE("GPL");
/* fw_cfg revision attribute, in /sys/firmware/qemu_fw_cfg top-level dir. */
static u32 fw_cfg_rev;
/* fw_cfg device i/o register addresses */
static bool fw_cfg_is_mmio;
static phys_addr_t fw_cfg_p_base;
static resource_size_t fw_cfg_p_size;
static void __iomem *fw_cfg_dev_base;
static void __iomem *fw_cfg_reg_ctrl;
static void __iomem *fw_cfg_reg_data;
static void __iomem *fw_cfg_reg_dma;
/* atomic access to fw_cfg device (potentially slow i/o, so using mutex) */
static DEFINE_MUTEX(fw_cfg_dev_lock);
/* pick appropriate endianness for selector key */
static void fw_cfg_sel_endianness(u16 key)
{
if (fw_cfg_is_mmio)
iowrite16be(key, fw_cfg_reg_ctrl);
else
iowrite16(key, fw_cfg_reg_ctrl);
}
#ifdef CONFIG_CRASH_CORE
static inline bool fw_cfg_dma_enabled(void)
{
return (fw_cfg_rev & FW_CFG_VERSION_DMA) && fw_cfg_reg_dma;
}
/* qemu fw_cfg device is sync today, but spec says it may become async */
static void fw_cfg_wait_for_control(struct fw_cfg_dma_access *d)
{
for (;;) {
u32 ctrl = be32_to_cpu(READ_ONCE(d->control));
/* do not reorder the read to d->control */
rmb();
if ((ctrl & ~FW_CFG_DMA_CTL_ERROR) == 0)
return;
cpu_relax();
}
}
static ssize_t fw_cfg_dma_transfer(void *address, u32 length, u32 control)
{
phys_addr_t dma;
struct fw_cfg_dma_access *d = NULL;
ssize_t ret = length;
d = kmalloc(sizeof(*d), GFP_KERNEL);
if (!d) {
ret = -ENOMEM;
goto end;
}
/* fw_cfg device does not need IOMMU protection, so use physical addresses */
*d = (struct fw_cfg_dma_access) {
.address = cpu_to_be64(address ? virt_to_phys(address) : 0),
.length = cpu_to_be32(length),
.control = cpu_to_be32(control)
};
dma = virt_to_phys(d);
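	/* write the high half of the DMA address first; the device starts the transfer once the low half is written */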
iowrite32be((u64)dma >> 32, fw_cfg_reg_dma);
/* force memory to sync before notifying device via MMIO */
wmb();
iowrite32be(dma, fw_cfg_reg_dma + 4);
fw_cfg_wait_for_control(d);
if (be32_to_cpu(READ_ONCE(d->control)) & FW_CFG_DMA_CTL_ERROR) {
ret = -EIO;
}
end:
kfree(d);
return ret;
}
#endif
/* read chunk of given fw_cfg blob (caller responsible for sanity-check) */
static ssize_t fw_cfg_read_blob(u16 key,
void *buf, loff_t pos, size_t count)
{
u32 glk = -1U;
acpi_status status;
/* If we have ACPI, ensure mutual exclusion against any potential
* device access by the firmware, e.g. via AML methods:
*/
status = acpi_acquire_global_lock(ACPI_WAIT_FOREVER, &glk);
if (ACPI_FAILURE(status) && status != AE_NOT_CONFIGURED) {
/* Should never get here */
WARN(1, "fw_cfg_read_blob: Failed to lock ACPI!\n");
memset(buf, 0, count);
return -EINVAL;
}
mutex_lock(&fw_cfg_dev_lock);
fw_cfg_sel_endianness(key);
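	/* seek to 'pos' by reading and discarding the preceding bytes */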
while (pos-- > 0)
ioread8(fw_cfg_reg_data);
ioread8_rep(fw_cfg_reg_data, buf, count);
mutex_unlock(&fw_cfg_dev_lock);
acpi_release_global_lock(glk);
return count;
}
#ifdef CONFIG_CRASH_CORE
/* write chunk of given fw_cfg blob (caller responsible for sanity-check) */
static ssize_t fw_cfg_write_blob(u16 key,
void *buf, loff_t pos, size_t count)
{
u32 glk = -1U;
acpi_status status;
ssize_t ret = count;
/* If we have ACPI, ensure mutual exclusion against any potential
* device access by the firmware, e.g. via AML methods:
*/
status = acpi_acquire_global_lock(ACPI_WAIT_FOREVER, &glk);
if (ACPI_FAILURE(status) && status != AE_NOT_CONFIGURED) {
/* Should never get here */
WARN(1, "%s: Failed to lock ACPI!\n", __func__);
return -EINVAL;
}
mutex_lock(&fw_cfg_dev_lock);
if (pos == 0) {
ret = fw_cfg_dma_transfer(buf, count, key << 16
| FW_CFG_DMA_CTL_SELECT
| FW_CFG_DMA_CTL_WRITE);
} else {
fw_cfg_sel_endianness(key);
ret = fw_cfg_dma_transfer(NULL, pos, FW_CFG_DMA_CTL_SKIP);
if (ret < 0)
goto end;
ret = fw_cfg_dma_transfer(buf, count, FW_CFG_DMA_CTL_WRITE);
}
end:
mutex_unlock(&fw_cfg_dev_lock);
acpi_release_global_lock(glk);
return ret;
}
#endif /* CONFIG_CRASH_CORE */
/* clean up fw_cfg device i/o */
static void fw_cfg_io_cleanup(void)
{
if (fw_cfg_is_mmio) {
iounmap(fw_cfg_dev_base);
release_mem_region(fw_cfg_p_base, fw_cfg_p_size);
} else {
ioport_unmap(fw_cfg_dev_base);
release_region(fw_cfg_p_base, fw_cfg_p_size);
}
}
/* arch-specific ctrl & data register offsets are not available in ACPI, DT */
#if !(defined(FW_CFG_CTRL_OFF) && defined(FW_CFG_DATA_OFF))
# if (defined(CONFIG_ARM) || defined(CONFIG_ARM64))
# define FW_CFG_CTRL_OFF 0x08
# define FW_CFG_DATA_OFF 0x00
# define FW_CFG_DMA_OFF 0x10
# elif defined(CONFIG_PARISC) /* parisc */
# define FW_CFG_CTRL_OFF 0x00
# define FW_CFG_DATA_OFF 0x04
# elif (defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC32)) /* ppc/mac,sun4m */
# define FW_CFG_CTRL_OFF 0x00
# define FW_CFG_DATA_OFF 0x02
# elif (defined(CONFIG_X86) || defined(CONFIG_SPARC64)) /* x86, sun4u */
# define FW_CFG_CTRL_OFF 0x00
# define FW_CFG_DATA_OFF 0x01
# define FW_CFG_DMA_OFF 0x04
# else
# error "QEMU FW_CFG not available on this architecture!"
# endif
#endif
/* initialize fw_cfg device i/o from platform data */
static int fw_cfg_do_platform_probe(struct platform_device *pdev)
{
char sig[FW_CFG_SIG_SIZE];
struct resource *range, *ctrl, *data, *dma;
/* acquire i/o range details */
fw_cfg_is_mmio = false;
range = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!range) {
fw_cfg_is_mmio = true;
range = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!range)
return -EINVAL;
}
fw_cfg_p_base = range->start;
fw_cfg_p_size = resource_size(range);
if (fw_cfg_is_mmio) {
if (!request_mem_region(fw_cfg_p_base,
fw_cfg_p_size, "fw_cfg_mem"))
return -EBUSY;
fw_cfg_dev_base = ioremap(fw_cfg_p_base, fw_cfg_p_size);
if (!fw_cfg_dev_base) {
release_mem_region(fw_cfg_p_base, fw_cfg_p_size);
return -EFAULT;
}
} else {
if (!request_region(fw_cfg_p_base,
fw_cfg_p_size, "fw_cfg_io"))
return -EBUSY;
fw_cfg_dev_base = ioport_map(fw_cfg_p_base, fw_cfg_p_size);
if (!fw_cfg_dev_base) {
release_region(fw_cfg_p_base, fw_cfg_p_size);
return -EFAULT;
}
}
/* were custom register offsets provided (e.g. on the command line)? */
ctrl = platform_get_resource_byname(pdev, IORESOURCE_REG, "ctrl");
data = platform_get_resource_byname(pdev, IORESOURCE_REG, "data");
dma = platform_get_resource_byname(pdev, IORESOURCE_REG, "dma");
if (ctrl && data) {
fw_cfg_reg_ctrl = fw_cfg_dev_base + ctrl->start;
fw_cfg_reg_data = fw_cfg_dev_base + data->start;
} else {
/* use architecture-specific offsets */
fw_cfg_reg_ctrl = fw_cfg_dev_base + FW_CFG_CTRL_OFF;
fw_cfg_reg_data = fw_cfg_dev_base + FW_CFG_DATA_OFF;
}
if (dma)
fw_cfg_reg_dma = fw_cfg_dev_base + dma->start;
#ifdef FW_CFG_DMA_OFF
else
fw_cfg_reg_dma = fw_cfg_dev_base + FW_CFG_DMA_OFF;
#endif
/* verify fw_cfg device signature */
if (fw_cfg_read_blob(FW_CFG_SIGNATURE, sig,
0, FW_CFG_SIG_SIZE) < 0 ||
memcmp(sig, "QEMU", FW_CFG_SIG_SIZE) != 0) {
fw_cfg_io_cleanup();
return -ENODEV;
}
return 0;
}
static ssize_t fw_cfg_showrev(struct kobject *k, struct kobj_attribute *a,
char *buf)
{
return sprintf(buf, "%u\n", fw_cfg_rev);
}
static const struct kobj_attribute fw_cfg_rev_attr = {
.attr = { .name = "rev", .mode = S_IRUSR },
.show = fw_cfg_showrev,
};
/* fw_cfg_sysfs_entry type */
struct fw_cfg_sysfs_entry {
struct kobject kobj;
u32 size;
u16 select;
char name[FW_CFG_MAX_FILE_PATH];
struct list_head list;
};
#ifdef CONFIG_CRASH_CORE
static ssize_t fw_cfg_write_vmcoreinfo(const struct fw_cfg_file *f)
{
static struct fw_cfg_vmcoreinfo *data;
ssize_t ret;
data = kmalloc(sizeof(struct fw_cfg_vmcoreinfo), GFP_KERNEL);
if (!data)
return -ENOMEM;
*data = (struct fw_cfg_vmcoreinfo) {
.guest_format = cpu_to_le16(FW_CFG_VMCOREINFO_FORMAT_ELF),
.size = cpu_to_le32(VMCOREINFO_NOTE_SIZE),
.paddr = cpu_to_le64(paddr_vmcoreinfo_note())
};
/* spare ourself reading host format support for now since we
* don't know what else to format - host may ignore ours
*/
ret = fw_cfg_write_blob(be16_to_cpu(f->select), data,
0, sizeof(struct fw_cfg_vmcoreinfo));
kfree(data);
return ret;
}
#endif /* CONFIG_CRASH_CORE */
/* get fw_cfg_sysfs_entry from kobject member */
static inline struct fw_cfg_sysfs_entry *to_entry(struct kobject *kobj)
{
return container_of(kobj, struct fw_cfg_sysfs_entry, kobj);
}
/* fw_cfg_sysfs_attribute type */
struct fw_cfg_sysfs_attribute {
struct attribute attr;
ssize_t (*show)(struct fw_cfg_sysfs_entry *entry, char *buf);
};
/* get fw_cfg_sysfs_attribute from attribute member */
static inline struct fw_cfg_sysfs_attribute *to_attr(struct attribute *attr)
{
return container_of(attr, struct fw_cfg_sysfs_attribute, attr);
}
/* global cache of fw_cfg_sysfs_entry objects */
static LIST_HEAD(fw_cfg_entry_cache);
/* kobjects removed lazily by kernel, mutual exclusion needed */
static DEFINE_SPINLOCK(fw_cfg_cache_lock);
static inline void fw_cfg_sysfs_cache_enlist(struct fw_cfg_sysfs_entry *entry)
{
spin_lock(&fw_cfg_cache_lock);
list_add_tail(&entry->list, &fw_cfg_entry_cache);
spin_unlock(&fw_cfg_cache_lock);
}
static inline void fw_cfg_sysfs_cache_delist(struct fw_cfg_sysfs_entry *entry)
{
spin_lock(&fw_cfg_cache_lock);
list_del(&entry->list);
spin_unlock(&fw_cfg_cache_lock);
}
static void fw_cfg_sysfs_cache_cleanup(void)
{
struct fw_cfg_sysfs_entry *entry, *next;
list_for_each_entry_safe(entry, next, &fw_cfg_entry_cache, list) {
fw_cfg_sysfs_cache_delist(entry);
kobject_del(&entry->kobj);
kobject_put(&entry->kobj);
}
}
/* per-entry attributes and show methods */
#define FW_CFG_SYSFS_ATTR(_attr) \
struct fw_cfg_sysfs_attribute fw_cfg_sysfs_attr_##_attr = { \
.attr = { .name = __stringify(_attr), .mode = S_IRUSR }, \
.show = fw_cfg_sysfs_show_##_attr, \
}
static ssize_t fw_cfg_sysfs_show_size(struct fw_cfg_sysfs_entry *e, char *buf)
{
return sprintf(buf, "%u\n", e->size);
}
static ssize_t fw_cfg_sysfs_show_key(struct fw_cfg_sysfs_entry *e, char *buf)
{
return sprintf(buf, "%u\n", e->select);
}
static ssize_t fw_cfg_sysfs_show_name(struct fw_cfg_sysfs_entry *e, char *buf)
{
return sprintf(buf, "%s\n", e->name);
}
static FW_CFG_SYSFS_ATTR(size);
static FW_CFG_SYSFS_ATTR(key);
static FW_CFG_SYSFS_ATTR(name);
static struct attribute *fw_cfg_sysfs_entry_attrs[] = {
&fw_cfg_sysfs_attr_size.attr,
&fw_cfg_sysfs_attr_key.attr,
&fw_cfg_sysfs_attr_name.attr,
NULL,
};
ATTRIBUTE_GROUPS(fw_cfg_sysfs_entry);
/* sysfs_ops: find fw_cfg_[entry, attribute] and call appropriate show method */
static ssize_t fw_cfg_sysfs_attr_show(struct kobject *kobj, struct attribute *a,
char *buf)
{
struct fw_cfg_sysfs_entry *entry = to_entry(kobj);
struct fw_cfg_sysfs_attribute *attr = to_attr(a);
return attr->show(entry, buf);
}
static const struct sysfs_ops fw_cfg_sysfs_attr_ops = {
.show = fw_cfg_sysfs_attr_show,
};
/* release: destructor, to be called via kobject_put() */
static void fw_cfg_sysfs_release_entry(struct kobject *kobj)
{
struct fw_cfg_sysfs_entry *entry = to_entry(kobj);
kfree(entry);
}
/* kobj_type: ties together all properties required to register an entry */
static struct kobj_type fw_cfg_sysfs_entry_ktype = {
.default_groups = fw_cfg_sysfs_entry_groups,
.sysfs_ops = &fw_cfg_sysfs_attr_ops,
.release = fw_cfg_sysfs_release_entry,
};
/* raw-read method and attribute */
static ssize_t fw_cfg_sysfs_read_raw(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
struct fw_cfg_sysfs_entry *entry = to_entry(kobj);
if (pos > entry->size)
return -EINVAL;
if (count > entry->size - pos)
count = entry->size - pos;
return fw_cfg_read_blob(entry->select, buf, pos, count);
}
static struct bin_attribute fw_cfg_sysfs_attr_raw = {
.attr = { .name = "raw", .mode = S_IRUSR },
.read = fw_cfg_sysfs_read_raw,
};
/*
* Create a kset subdirectory matching each '/' delimited dirname token
* in 'name', starting with sysfs kset/folder 'dir'; At the end, create
* a symlink directed at the given 'target'.
* NOTE: We do this on a best-effort basis, since 'name' is not guaranteed
* to be a well-behaved path name. Whenever a symlink vs. kset directory
* name collision occurs, the kernel will issue big scary warnings while
* refusing to add the offending link or directory. We follow up with our
* own, slightly less scary error messages explaining the situation :)
*/
static int fw_cfg_build_symlink(struct kset *dir,
struct kobject *target, const char *name)
{
int ret;
struct kset *subdir;
struct kobject *ko;
char *name_copy, *p, *tok;
if (!dir || !target || !name || !*name)
return -EINVAL;
/* clone a copy of name for parsing */
name_copy = p = kstrdup(name, GFP_KERNEL);
if (!name_copy)
return -ENOMEM;
/* create folders for each dirname token, then symlink for basename */
while ((tok = strsep(&p, "/")) && *tok) {
/* last (basename) token? If so, add symlink here */
if (!p || !*p) {
ret = sysfs_create_link(&dir->kobj, target, tok);
break;
}
/* does the current dir contain an item named after tok ? */
ko = kset_find_obj(dir, tok);
if (ko) {
/* drop reference added by kset_find_obj */
kobject_put(ko);
/* ko MUST be a kset - we're about to use it as one ! */
if (ko->ktype != dir->kobj.ktype) {
ret = -EINVAL;
break;
}
/* descend into already existing subdirectory */
dir = to_kset(ko);
} else {
/* create new subdirectory kset */
subdir = kzalloc(sizeof(struct kset), GFP_KERNEL);
if (!subdir) {
ret = -ENOMEM;
break;
}
subdir->kobj.kset = dir;
subdir->kobj.ktype = dir->kobj.ktype;
ret = kobject_set_name(&subdir->kobj, "%s", tok);
if (ret) {
kfree(subdir);
break;
}
ret = kset_register(subdir);
if (ret) {
kfree(subdir);
break;
}
/* descend into newly created subdirectory */
dir = subdir;
}
}
/* we're done with cloned copy of name */
kfree(name_copy);
return ret;
}
/* recursively unregister fw_cfg/by_name/ kset directory tree */
static void fw_cfg_kset_unregister_recursive(struct kset *kset)
{
struct kobject *k, *next;
list_for_each_entry_safe(k, next, &kset->list, entry)
/* all set members are ksets too, but check just in case... */
if (k->ktype == kset->kobj.ktype)
fw_cfg_kset_unregister_recursive(to_kset(k));
/* symlinks are cleanly and automatically removed with the directory */
kset_unregister(kset);
}
/* kobjects & kset representing top-level, by_key, and by_name folders */
static struct kobject *fw_cfg_top_ko;
static struct kobject *fw_cfg_sel_ko;
static struct kset *fw_cfg_fname_kset;
/* register an individual fw_cfg file */
static int fw_cfg_register_file(const struct fw_cfg_file *f)
{
int err;
struct fw_cfg_sysfs_entry *entry;
#ifdef CONFIG_CRASH_CORE
if (fw_cfg_dma_enabled() &&
strcmp(f->name, FW_CFG_VMCOREINFO_FILENAME) == 0 &&
!is_kdump_kernel()) {
if (fw_cfg_write_vmcoreinfo(f) < 0)
pr_warn("fw_cfg: failed to write vmcoreinfo");
}
#endif
/* allocate new entry */
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
/* set file entry information */
entry->size = be32_to_cpu(f->size);
entry->select = be16_to_cpu(f->select);
strscpy(entry->name, f->name, FW_CFG_MAX_FILE_PATH);
/* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */
err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype,
fw_cfg_sel_ko, "%d", entry->select);
if (err)
goto err_put_entry;
/* add raw binary content access */
err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw);
if (err)
goto err_del_entry;
/* try adding "/sys/firmware/qemu_fw_cfg/by_name/" symlink */
fw_cfg_build_symlink(fw_cfg_fname_kset, &entry->kobj, entry->name);
/* success, add entry to global cache */
fw_cfg_sysfs_cache_enlist(entry);
return 0;
err_del_entry:
kobject_del(&entry->kobj);
err_put_entry:
kobject_put(&entry->kobj);
return err;
}
/* iterate over all fw_cfg directory entries, registering each one */
static int fw_cfg_register_dir_entries(void)
{
int ret = 0;
__be32 files_count;
u32 count, i;
struct fw_cfg_file *dir;
size_t dir_size;
ret = fw_cfg_read_blob(FW_CFG_FILE_DIR, &files_count,
0, sizeof(files_count));
if (ret < 0)
return ret;
count = be32_to_cpu(files_count);
dir_size = count * sizeof(struct fw_cfg_file);
dir = kmalloc(dir_size, GFP_KERNEL);
if (!dir)
return -ENOMEM;
ret = fw_cfg_read_blob(FW_CFG_FILE_DIR, dir,
sizeof(files_count), dir_size);
if (ret < 0)
goto end;
for (i = 0; i < count; i++) {
ret = fw_cfg_register_file(&dir[i]);
if (ret)
break;
}
end:
kfree(dir);
return ret;
}
/* unregister top-level or by_key folder */
static inline void fw_cfg_kobj_cleanup(struct kobject *kobj)
{
kobject_del(kobj);
kobject_put(kobj);
}
static int fw_cfg_sysfs_probe(struct platform_device *pdev)
{
int err;
__le32 rev;
/* NOTE: If we supported multiple fw_cfg devices, we'd first create
* a subdirectory named after e.g. pdev->id, then hang per-device
* by_key (and by_name) subdirectories underneath it. However, only
	 * one fw_cfg device can exist system-wide, so if one was already found
* earlier, we might as well stop here.
*/
if (fw_cfg_sel_ko)
return -EBUSY;
/* create by_key and by_name subdirs of /sys/firmware/qemu_fw_cfg/ */
err = -ENOMEM;
fw_cfg_sel_ko = kobject_create_and_add("by_key", fw_cfg_top_ko);
if (!fw_cfg_sel_ko)
goto err_sel;
fw_cfg_fname_kset = kset_create_and_add("by_name", NULL, fw_cfg_top_ko);
if (!fw_cfg_fname_kset)
goto err_name;
/* initialize fw_cfg device i/o from platform data */
err = fw_cfg_do_platform_probe(pdev);
if (err)
goto err_probe;
/* get revision number, add matching top-level attribute */
err = fw_cfg_read_blob(FW_CFG_ID, &rev, 0, sizeof(rev));
if (err < 0)
goto err_probe;
fw_cfg_rev = le32_to_cpu(rev);
err = sysfs_create_file(fw_cfg_top_ko, &fw_cfg_rev_attr.attr);
if (err)
goto err_rev;
/* process fw_cfg file directory entry, registering each file */
err = fw_cfg_register_dir_entries();
if (err)
goto err_dir;
/* success */
pr_debug("fw_cfg: loaded.\n");
return 0;
err_dir:
fw_cfg_sysfs_cache_cleanup();
sysfs_remove_file(fw_cfg_top_ko, &fw_cfg_rev_attr.attr);
err_rev:
fw_cfg_io_cleanup();
err_probe:
fw_cfg_kset_unregister_recursive(fw_cfg_fname_kset);
err_name:
fw_cfg_kobj_cleanup(fw_cfg_sel_ko);
err_sel:
return err;
}
static int fw_cfg_sysfs_remove(struct platform_device *pdev)
{
pr_debug("fw_cfg: unloading.\n");
fw_cfg_sysfs_cache_cleanup();
sysfs_remove_file(fw_cfg_top_ko, &fw_cfg_rev_attr.attr);
fw_cfg_io_cleanup();
fw_cfg_kset_unregister_recursive(fw_cfg_fname_kset);
fw_cfg_kobj_cleanup(fw_cfg_sel_ko);
return 0;
}
static const struct of_device_id fw_cfg_sysfs_mmio_match[] = {
{ .compatible = "qemu,fw-cfg-mmio", },
{},
};
MODULE_DEVICE_TABLE(of, fw_cfg_sysfs_mmio_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id fw_cfg_sysfs_acpi_match[] = {
{ FW_CFG_ACPI_DEVICE_ID, },
{},
};
MODULE_DEVICE_TABLE(acpi, fw_cfg_sysfs_acpi_match);
#endif
static struct platform_driver fw_cfg_sysfs_driver = {
.probe = fw_cfg_sysfs_probe,
.remove = fw_cfg_sysfs_remove,
.driver = {
.name = "fw_cfg",
.of_match_table = fw_cfg_sysfs_mmio_match,
.acpi_match_table = ACPI_PTR(fw_cfg_sysfs_acpi_match),
},
};
#ifdef CONFIG_FW_CFG_SYSFS_CMDLINE
static struct platform_device *fw_cfg_cmdline_dev;
/* this probably belongs in e.g. include/linux/types.h,
* but right now we are the only ones doing it...
*/
#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define __PHYS_ADDR_PREFIX "ll"
#else
#define __PHYS_ADDR_PREFIX ""
#endif
/* use special scanf/printf modifier for phys_addr_t, resource_size_t */
#define PH_ADDR_SCAN_FMT "@%" __PHYS_ADDR_PREFIX "i%n" \
":%" __PHYS_ADDR_PREFIX "i" \
":%" __PHYS_ADDR_PREFIX "i%n" \
":%" __PHYS_ADDR_PREFIX "i%n"
#define PH_ADDR_PR_1_FMT "0x%" __PHYS_ADDR_PREFIX "x@" \
"0x%" __PHYS_ADDR_PREFIX "x"
#define PH_ADDR_PR_3_FMT PH_ADDR_PR_1_FMT \
":%" __PHYS_ADDR_PREFIX "u" \
":%" __PHYS_ADDR_PREFIX "u"
#define PH_ADDR_PR_4_FMT PH_ADDR_PR_3_FMT \
":%" __PHYS_ADDR_PREFIX "u"
static int fw_cfg_cmdline_set(const char *arg, const struct kernel_param *kp)
{
struct resource res[4] = {};
char *str;
phys_addr_t base;
resource_size_t size, ctrl_off, data_off, dma_off;
int processed, consumed = 0;
/* only one fw_cfg device can exist system-wide, so if one
* was processed on the command line already, we might as
* well stop here.
*/
if (fw_cfg_cmdline_dev) {
/* avoid leaking previously registered device */
platform_device_unregister(fw_cfg_cmdline_dev);
return -EINVAL;
}
/* consume "<size>" portion of command line argument */
size = memparse(arg, &str);
/* get "@<base>[:<ctrl_off>:<data_off>[:<dma_off>]]" chunks */
processed = sscanf(str, PH_ADDR_SCAN_FMT,
&base, &consumed,
&ctrl_off, &data_off, &consumed,
&dma_off, &consumed);
/* sscanf() must process precisely 1, 3 or 4 chunks:
* <base> is mandatory, optionally followed by <ctrl_off>
* and <data_off>, and <dma_off>;
* there must be no extra characters after the last chunk,
* so str[consumed] must be '\0'.
*/
if (str[consumed] ||
(processed != 1 && processed != 3 && processed != 4))
return -EINVAL;
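	/* the main I/O range; its resource type follows the parameter name (ioport vs. mmio) */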
res[0].start = base;
res[0].end = base + size - 1;
res[0].flags = !strcmp(kp->name, "mmio") ? IORESOURCE_MEM :
IORESOURCE_IO;
/* insert register offsets, if provided */
if (processed > 1) {
res[1].name = "ctrl";
res[1].start = ctrl_off;
res[1].flags = IORESOURCE_REG;
res[2].name = "data";
res[2].start = data_off;
res[2].flags = IORESOURCE_REG;
}
if (processed > 3) {
res[3].name = "dma";
res[3].start = dma_off;
res[3].flags = IORESOURCE_REG;
}
/* "processed" happens to nicely match the number of resources
* we need to pass in to this platform device.
*/
fw_cfg_cmdline_dev = platform_device_register_simple("fw_cfg",
PLATFORM_DEVID_NONE, res, processed);
return PTR_ERR_OR_ZERO(fw_cfg_cmdline_dev);
}
static int fw_cfg_cmdline_get(char *buf, const struct kernel_param *kp)
{
/* stay silent if device was not configured via the command
* line, or if the parameter name (ioport/mmio) doesn't match
* the device setting
*/
if (!fw_cfg_cmdline_dev ||
(!strcmp(kp->name, "mmio") ^
(fw_cfg_cmdline_dev->resource[0].flags == IORESOURCE_MEM)))
return 0;
switch (fw_cfg_cmdline_dev->num_resources) {
case 1:
return snprintf(buf, PAGE_SIZE, PH_ADDR_PR_1_FMT,
resource_size(&fw_cfg_cmdline_dev->resource[0]),
fw_cfg_cmdline_dev->resource[0].start);
case 3:
return snprintf(buf, PAGE_SIZE, PH_ADDR_PR_3_FMT,
resource_size(&fw_cfg_cmdline_dev->resource[0]),
fw_cfg_cmdline_dev->resource[0].start,
fw_cfg_cmdline_dev->resource[1].start,
fw_cfg_cmdline_dev->resource[2].start);
case 4:
return snprintf(buf, PAGE_SIZE, PH_ADDR_PR_4_FMT,
resource_size(&fw_cfg_cmdline_dev->resource[0]),
fw_cfg_cmdline_dev->resource[0].start,
fw_cfg_cmdline_dev->resource[1].start,
fw_cfg_cmdline_dev->resource[2].start,
fw_cfg_cmdline_dev->resource[3].start);
}
/* Should never get here */
WARN(1, "Unexpected number of resources: %d\n",
fw_cfg_cmdline_dev->num_resources);
return 0;
}
static const struct kernel_param_ops fw_cfg_cmdline_param_ops = {
.set = fw_cfg_cmdline_set,
.get = fw_cfg_cmdline_get,
};
device_param_cb(ioport, &fw_cfg_cmdline_param_ops, NULL, S_IRUSR);
device_param_cb(mmio, &fw_cfg_cmdline_param_ops, NULL, S_IRUSR);
#endif /* CONFIG_FW_CFG_SYSFS_CMDLINE */
static int __init fw_cfg_sysfs_init(void)
{
int ret;
/* create /sys/firmware/qemu_fw_cfg/ top level directory */
fw_cfg_top_ko = kobject_create_and_add("qemu_fw_cfg", firmware_kobj);
if (!fw_cfg_top_ko)
return -ENOMEM;
ret = platform_driver_register(&fw_cfg_sysfs_driver);
if (ret)
fw_cfg_kobj_cleanup(fw_cfg_top_ko);
return ret;
}
static void __exit fw_cfg_sysfs_exit(void)
{
platform_driver_unregister(&fw_cfg_sysfs_driver);
#ifdef CONFIG_FW_CFG_SYSFS_CMDLINE
platform_device_unregister(fw_cfg_cmdline_dev);
#endif
/* clean up /sys/firmware/qemu_fw_cfg/ */
fw_cfg_kobj_cleanup(fw_cfg_top_ko);
}
module_init(fw_cfg_sysfs_init);
module_exit(fw_cfg_sysfs_exit);
| linux-master | drivers/firmware/qemu_fw_cfg.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* SCPI Generic power domain support.
*
* Copyright (C) 2016 ARM Ltd.
*/
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/scpi_protocol.h>
struct scpi_pm_domain {
struct generic_pm_domain genpd;
struct scpi_ops *ops;
u32 domain;
};
/*
* These device power state values are not well-defined in the specification.
 * In case different implementations use different values, we can make these
* specific to compatibles rather than getting these values from device tree.
*/
enum scpi_power_domain_state {
SCPI_PD_STATE_ON = 0,
SCPI_PD_STATE_OFF = 3,
};
#define to_scpi_pd(gpd) container_of(gpd, struct scpi_pm_domain, genpd)
static int scpi_pd_power(struct scpi_pm_domain *pd, bool power_on)
{
int ret;
enum scpi_power_domain_state state;
if (power_on)
state = SCPI_PD_STATE_ON;
else
state = SCPI_PD_STATE_OFF;
ret = pd->ops->device_set_power_state(pd->domain, state);
if (ret)
return ret;
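	/* read the state back so we can report whether the change actually took effect */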
return !(state == pd->ops->device_get_power_state(pd->domain));
}
static int scpi_pd_power_on(struct generic_pm_domain *domain)
{
struct scpi_pm_domain *pd = to_scpi_pd(domain);
return scpi_pd_power(pd, true);
}
static int scpi_pd_power_off(struct generic_pm_domain *domain)
{
struct scpi_pm_domain *pd = to_scpi_pd(domain);
return scpi_pd_power(pd, false);
}
static int scpi_pm_domain_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct scpi_pm_domain *scpi_pd;
struct genpd_onecell_data *scpi_pd_data;
struct generic_pm_domain **domains;
struct scpi_ops *scpi_ops;
int ret, num_domains, i;
scpi_ops = get_scpi_ops();
if (!scpi_ops)
return -EPROBE_DEFER;
if (!np) {
dev_err(dev, "device tree node not found\n");
return -ENODEV;
}
if (!scpi_ops->device_set_power_state ||
!scpi_ops->device_get_power_state) {
dev_err(dev, "power domains not supported in the firmware\n");
return -ENODEV;
}
ret = of_property_read_u32(np, "num-domains", &num_domains);
if (ret) {
dev_err(dev, "number of domains not found\n");
return -EINVAL;
}
scpi_pd = devm_kcalloc(dev, num_domains, sizeof(*scpi_pd), GFP_KERNEL);
if (!scpi_pd)
return -ENOMEM;
scpi_pd_data = devm_kzalloc(dev, sizeof(*scpi_pd_data), GFP_KERNEL);
if (!scpi_pd_data)
return -ENOMEM;
domains = devm_kcalloc(dev, num_domains, sizeof(*domains), GFP_KERNEL);
if (!domains)
return -ENOMEM;
for (i = 0; i < num_domains; i++, scpi_pd++) {
domains[i] = &scpi_pd->genpd;
scpi_pd->domain = i;
scpi_pd->ops = scpi_ops;
scpi_pd->genpd.name = devm_kasprintf(dev, GFP_KERNEL,
"%pOFn.%d", np, i);
if (!scpi_pd->genpd.name) {
dev_err(dev, "Failed to allocate genpd name:%pOFn.%d\n",
np, i);
continue;
}
scpi_pd->genpd.power_off = scpi_pd_power_off;
scpi_pd->genpd.power_on = scpi_pd_power_on;
/*
* Treat all power domains as off at boot.
*
* The SCP firmware itself may have switched on some domains,
* but for reference counting purpose, keep it this way.
*/
pm_genpd_init(&scpi_pd->genpd, NULL, true);
}
scpi_pd_data->domains = domains;
scpi_pd_data->num_domains = num_domains;
of_genpd_add_provider_onecell(np, scpi_pd_data);
return 0;
}
static const struct of_device_id scpi_power_domain_ids[] = {
{ .compatible = "arm,scpi-power-domains", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, scpi_power_domain_ids);
static struct platform_driver scpi_power_domain_driver = {
.driver = {
.name = "scpi_power_domain",
.of_match_table = scpi_power_domain_ids,
},
.probe = scpi_pm_domain_probe,
};
module_platform_driver(scpi_power_domain_driver);
MODULE_AUTHOR("Sudeep Holla <[email protected]>");
MODULE_DESCRIPTION("ARM SCPI power domain driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/firmware/scpi_pm_domain.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2017 Arm Ltd.
#define pr_fmt(fmt) "sdei: " fmt
#include <acpi/ghes.h>
#include <linux/acpi.h>
#include <linux/arm_sdei.h>
#include <linux/arm-smccc.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/cpuhotplug.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
/*
* The call to use to reach the firmware.
*/
static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
unsigned long arg0, unsigned long arg1,
unsigned long arg2, unsigned long arg3,
unsigned long arg4, struct arm_smccc_res *res);
/* entry point from firmware to arch asm code */
static unsigned long sdei_entry_point;
static int sdei_hp_state;
struct sdei_event {
/* These three are protected by the sdei_list_lock */
struct list_head list;
bool reregister;
bool reenable;
u32 event_num;
u8 type;
u8 priority;
/* This pointer is handed to firmware as the event argument. */
union {
/* Shared events */
struct sdei_registered_event *registered;
/* CPU private events */
struct sdei_registered_event __percpu *private_registered;
};
};
/* Take this mutex for any API call or modification; take it before sdei_list_lock. */
static DEFINE_MUTEX(sdei_events_lock);
/* and then hold this when modifying the list */
static DEFINE_SPINLOCK(sdei_list_lock);
static LIST_HEAD(sdei_list);
/* Private events are registered/enabled via IPI passing one of these */
struct sdei_crosscall_args {
struct sdei_event *event;
atomic_t errors;
int first_error;
};
#define CROSSCALL_INIT(arg, event) \
do { \
arg.event = event; \
arg.first_error = 0; \
atomic_set(&arg.errors, 0); \
} while (0)
static inline int sdei_do_local_call(smp_call_func_t fn,
struct sdei_event *event)
{
struct sdei_crosscall_args arg;
CROSSCALL_INIT(arg, event);
fn(&arg);
return arg.first_error;
}
static inline int sdei_do_cross_call(smp_call_func_t fn,
struct sdei_event *event)
{
struct sdei_crosscall_args arg;
CROSSCALL_INIT(arg, event);
on_each_cpu(fn, &arg, true);
return arg.first_error;
}
static inline void
sdei_cross_call_return(struct sdei_crosscall_args *arg, int err)
{
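	/* record only the first error reported by any CPU */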
if (err && (atomic_inc_return(&arg->errors) == 1))
arg->first_error = err;
}
static int sdei_to_linux_errno(unsigned long sdei_err)
{
switch (sdei_err) {
case SDEI_NOT_SUPPORTED:
return -EOPNOTSUPP;
case SDEI_INVALID_PARAMETERS:
return -EINVAL;
case SDEI_DENIED:
return -EPERM;
case SDEI_PENDING:
return -EINPROGRESS;
case SDEI_OUT_OF_RESOURCE:
return -ENOMEM;
}
return 0;
}
static int invoke_sdei_fn(unsigned long function_id, unsigned long arg0,
unsigned long arg1, unsigned long arg2,
unsigned long arg3, unsigned long arg4,
u64 *result)
{
int err;
struct arm_smccc_res res;
if (sdei_firmware_call) {
sdei_firmware_call(function_id, arg0, arg1, arg2, arg3, arg4,
&res);
err = sdei_to_linux_errno(res.a0);
} else {
/*
* !sdei_firmware_call means we failed to probe or called
* sdei_mark_interface_broken(). -EIO is not an error returned
* by sdei_to_linux_errno() and is used to suppress messages
* from this driver.
*/
err = -EIO;
res.a0 = SDEI_NOT_SUPPORTED;
}
if (result)
*result = res.a0;
return err;
}
NOKPROBE_SYMBOL(invoke_sdei_fn);
static struct sdei_event *sdei_event_find(u32 event_num)
{
struct sdei_event *e, *found = NULL;
lockdep_assert_held(&sdei_events_lock);
spin_lock(&sdei_list_lock);
list_for_each_entry(e, &sdei_list, list) {
if (e->event_num == event_num) {
found = e;
break;
}
}
spin_unlock(&sdei_list_lock);
return found;
}
int sdei_api_event_context(u32 query, u64 *result)
{
return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_CONTEXT, query, 0, 0, 0, 0,
result);
}
NOKPROBE_SYMBOL(sdei_api_event_context);
static int sdei_api_event_get_info(u32 event, u32 info, u64 *result)
{
return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_GET_INFO, event, info, 0,
0, 0, result);
}
static struct sdei_event *sdei_event_create(u32 event_num,
sdei_event_callback *cb,
void *cb_arg)
{
int err;
u64 result;
struct sdei_event *event;
struct sdei_registered_event *reg;
lockdep_assert_held(&sdei_events_lock);
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (!event) {
err = -ENOMEM;
goto fail;
}
INIT_LIST_HEAD(&event->list);
event->event_num = event_num;
err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
&result);
if (err)
goto fail;
event->priority = result;
err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_TYPE,
&result);
if (err)
goto fail;
event->type = result;
if (event->type == SDEI_EVENT_TYPE_SHARED) {
reg = kzalloc(sizeof(*reg), GFP_KERNEL);
if (!reg) {
err = -ENOMEM;
goto fail;
}
reg->event_num = event->event_num;
reg->priority = event->priority;
reg->callback = cb;
reg->callback_arg = cb_arg;
event->registered = reg;
} else {
int cpu;
struct sdei_registered_event __percpu *regs;
regs = alloc_percpu(struct sdei_registered_event);
if (!regs) {
err = -ENOMEM;
goto fail;
}
for_each_possible_cpu(cpu) {
reg = per_cpu_ptr(regs, cpu);
reg->event_num = event->event_num;
reg->priority = event->priority;
reg->callback = cb;
reg->callback_arg = cb_arg;
}
event->private_registered = regs;
}
spin_lock(&sdei_list_lock);
list_add(&event->list, &sdei_list);
spin_unlock(&sdei_list_lock);
return event;
fail:
kfree(event);
return ERR_PTR(err);
}
static void sdei_event_destroy_llocked(struct sdei_event *event)
{
lockdep_assert_held(&sdei_events_lock);
lockdep_assert_held(&sdei_list_lock);
list_del(&event->list);
if (event->type == SDEI_EVENT_TYPE_SHARED)
kfree(event->registered);
else
free_percpu(event->private_registered);
kfree(event);
}
static void sdei_event_destroy(struct sdei_event *event)
{
spin_lock(&sdei_list_lock);
sdei_event_destroy_llocked(event);
spin_unlock(&sdei_list_lock);
}
static int sdei_api_get_version(u64 *version)
{
return invoke_sdei_fn(SDEI_1_0_FN_SDEI_VERSION, 0, 0, 0, 0, 0, version);
}
int sdei_mask_local_cpu(void)
{
int err;
err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL);
if (err && err != -EIO) {
pr_warn_once("failed to mask CPU[%u]: %d\n",
smp_processor_id(), err);
return err;
}
return 0;
}
static void _ipi_mask_cpu(void *ignored)
{
WARN_ON_ONCE(preemptible());
sdei_mask_local_cpu();
}
int sdei_unmask_local_cpu(void)
{
int err;
err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL);
if (err && err != -EIO) {
pr_warn_once("failed to unmask CPU[%u]: %d\n",
smp_processor_id(), err);
return err;
}
return 0;
}
static void _ipi_unmask_cpu(void *ignored)
{
WARN_ON_ONCE(preemptible());
sdei_unmask_local_cpu();
}
static void _ipi_private_reset(void *ignored)
{
int err;
WARN_ON_ONCE(preemptible());
err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0,
NULL);
if (err && err != -EIO)
pr_warn_once("failed to reset CPU[%u]: %d\n",
smp_processor_id(), err);
}
static int sdei_api_shared_reset(void)
{
return invoke_sdei_fn(SDEI_1_0_FN_SDEI_SHARED_RESET, 0, 0, 0, 0, 0,
NULL);
}
static void sdei_mark_interface_broken(void)
{
pr_err("disabling SDEI firmware interface\n");
on_each_cpu(&_ipi_mask_cpu, NULL, true);
sdei_firmware_call = NULL;
}
static int sdei_platform_reset(void)
{
int err;
on_each_cpu(&_ipi_private_reset, NULL, true);
err = sdei_api_shared_reset();
if (err) {
pr_err("Failed to reset platform: %d\n", err);
sdei_mark_interface_broken();
}
return err;
}
static int sdei_api_event_enable(u32 event_num)
{
return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_ENABLE, event_num, 0, 0, 0,
0, NULL);
}
/* Called directly by the hotplug callbacks */
static void _local_event_enable(void *data)
{
int err;
struct sdei_crosscall_args *arg = data;
err = sdei_api_event_enable(arg->event->event_num);
sdei_cross_call_return(arg, err);
}
int sdei_event_enable(u32 event_num)
{
int err = -EINVAL;
struct sdei_event *event;
mutex_lock(&sdei_events_lock);
event = sdei_event_find(event_num);
if (!event) {
mutex_unlock(&sdei_events_lock);
return -ENOENT;
}
cpus_read_lock();
if (event->type == SDEI_EVENT_TYPE_SHARED)
err = sdei_api_event_enable(event->event_num);
else
err = sdei_do_cross_call(_local_event_enable, event);
if (!err) {
spin_lock(&sdei_list_lock);
event->reenable = true;
spin_unlock(&sdei_list_lock);
}
cpus_read_unlock();
mutex_unlock(&sdei_events_lock);
return err;
}
static int sdei_api_event_disable(u32 event_num)
{
return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_DISABLE, event_num, 0, 0,
0, 0, NULL);
}
static void _ipi_event_disable(void *data)
{
int err;
struct sdei_crosscall_args *arg = data;
err = sdei_api_event_disable(arg->event->event_num);
sdei_cross_call_return(arg, err);
}
int sdei_event_disable(u32 event_num)
{
int err = -EINVAL;
struct sdei_event *event;
mutex_lock(&sdei_events_lock);
event = sdei_event_find(event_num);
if (!event) {
mutex_unlock(&sdei_events_lock);
return -ENOENT;
}
spin_lock(&sdei_list_lock);
event->reenable = false;
spin_unlock(&sdei_list_lock);
if (event->type == SDEI_EVENT_TYPE_SHARED)
err = sdei_api_event_disable(event->event_num);
else
err = sdei_do_cross_call(_ipi_event_disable, event);
mutex_unlock(&sdei_events_lock);
return err;
}
static int sdei_api_event_unregister(u32 event_num)
{
return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_UNREGISTER, event_num, 0,
0, 0, 0, NULL);
}
/* Called directly by the hotplug callbacks */
static void _local_event_unregister(void *data)
{
int err;
struct sdei_crosscall_args *arg = data;
err = sdei_api_event_unregister(arg->event->event_num);
sdei_cross_call_return(arg, err);
}
int sdei_event_unregister(u32 event_num)
{
int err;
struct sdei_event *event;
WARN_ON(in_nmi());
mutex_lock(&sdei_events_lock);
event = sdei_event_find(event_num);
if (!event) {
pr_warn("Event %u not registered\n", event_num);
err = -ENOENT;
goto unlock;
}
spin_lock(&sdei_list_lock);
event->reregister = false;
event->reenable = false;
spin_unlock(&sdei_list_lock);
if (event->type == SDEI_EVENT_TYPE_SHARED)
err = sdei_api_event_unregister(event->event_num);
else
err = sdei_do_cross_call(_local_event_unregister, event);
if (err)
goto unlock;
sdei_event_destroy(event);
unlock:
mutex_unlock(&sdei_events_lock);
return err;
}
/*
* unregister events, but don't destroy them as they are re-registered by
* sdei_reregister_shared().
*/
static int sdei_unregister_shared(void)
{
int err = 0;
struct sdei_event *event;
mutex_lock(&sdei_events_lock);
spin_lock(&sdei_list_lock);
list_for_each_entry(event, &sdei_list, list) {
if (event->type != SDEI_EVENT_TYPE_SHARED)
continue;
err = sdei_api_event_unregister(event->event_num);
if (err)
break;
}
spin_unlock(&sdei_list_lock);
mutex_unlock(&sdei_events_lock);
return err;
}
static int sdei_api_event_register(u32 event_num, unsigned long entry_point,
void *arg, u64 flags, u64 affinity)
{
return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_REGISTER, event_num,
(unsigned long)entry_point, (unsigned long)arg,
flags, affinity, NULL);
}
/* Called directly by the hotplug callbacks */
static void _local_event_register(void *data)
{
int err;
struct sdei_registered_event *reg;
struct sdei_crosscall_args *arg = data;
reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id());
err = sdei_api_event_register(arg->event->event_num, sdei_entry_point,
reg, 0, 0);
sdei_cross_call_return(arg, err);
}
int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
{
int err;
struct sdei_event *event;
WARN_ON(in_nmi());
mutex_lock(&sdei_events_lock);
if (sdei_event_find(event_num)) {
pr_warn("Event %u already registered\n", event_num);
err = -EBUSY;
goto unlock;
}
event = sdei_event_create(event_num, cb, arg);
if (IS_ERR(event)) {
err = PTR_ERR(event);
pr_warn("Failed to create event %u: %d\n", event_num, err);
goto unlock;
}
cpus_read_lock();
if (event->type == SDEI_EVENT_TYPE_SHARED) {
err = sdei_api_event_register(event->event_num,
sdei_entry_point,
event->registered,
SDEI_EVENT_REGISTER_RM_ANY, 0);
} else {
err = sdei_do_cross_call(_local_event_register, event);
if (err)
sdei_do_cross_call(_local_event_unregister, event);
}
if (err) {
sdei_event_destroy(event);
pr_warn("Failed to register event %u: %d\n", event_num, err);
goto cpu_unlock;
}
spin_lock(&sdei_list_lock);
event->reregister = true;
spin_unlock(&sdei_list_lock);
cpu_unlock:
cpus_read_unlock();
unlock:
mutex_unlock(&sdei_events_lock);
return err;
}
static int sdei_reregister_shared(void)
{
int err = 0;
struct sdei_event *event;
mutex_lock(&sdei_events_lock);
spin_lock(&sdei_list_lock);
list_for_each_entry(event, &sdei_list, list) {
if (event->type != SDEI_EVENT_TYPE_SHARED)
continue;
if (event->reregister) {
err = sdei_api_event_register(event->event_num,
sdei_entry_point, event->registered,
SDEI_EVENT_REGISTER_RM_ANY, 0);
if (err) {
pr_err("Failed to re-register event %u\n",
event->event_num);
sdei_event_destroy_llocked(event);
break;
}
}
if (event->reenable) {
err = sdei_api_event_enable(event->event_num);
if (err) {
pr_err("Failed to re-enable event %u\n",
event->event_num);
break;
}
}
}
spin_unlock(&sdei_list_lock);
mutex_unlock(&sdei_events_lock);
return err;
}
static int sdei_cpuhp_down(unsigned int cpu)
{
struct sdei_event *event;
int err;
/* un-register private events */
spin_lock(&sdei_list_lock);
list_for_each_entry(event, &sdei_list, list) {
if (event->type == SDEI_EVENT_TYPE_SHARED)
continue;
err = sdei_do_local_call(_local_event_unregister, event);
if (err) {
pr_err("Failed to unregister event %u: %d\n",
event->event_num, err);
}
}
spin_unlock(&sdei_list_lock);
return sdei_mask_local_cpu();
}
static int sdei_cpuhp_up(unsigned int cpu)
{
struct sdei_event *event;
int err;
/* re-register/enable private events */
spin_lock(&sdei_list_lock);
list_for_each_entry(event, &sdei_list, list) {
if (event->type == SDEI_EVENT_TYPE_SHARED)
continue;
if (event->reregister) {
err = sdei_do_local_call(_local_event_register, event);
if (err) {
pr_err("Failed to re-register event %u: %d\n",
event->event_num, err);
}
}
if (event->reenable) {
err = sdei_do_local_call(_local_event_enable, event);
if (err) {
pr_err("Failed to re-enable event %u: %d\n",
event->event_num, err);
}
}
}
spin_unlock(&sdei_list_lock);
return sdei_unmask_local_cpu();
}
/* When entering idle, mask/unmask events for this cpu */
static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action,
void *data)
{
int rv;
WARN_ON_ONCE(preemptible());
switch (action) {
case CPU_PM_ENTER:
rv = sdei_mask_local_cpu();
break;
case CPU_PM_EXIT:
case CPU_PM_ENTER_FAILED:
rv = sdei_unmask_local_cpu();
break;
default:
return NOTIFY_DONE;
}
if (rv)
return notifier_from_errno(rv);
return NOTIFY_OK;
}
static struct notifier_block sdei_pm_nb = {
.notifier_call = sdei_pm_notifier,
};
static int sdei_device_suspend(struct device *dev)
{
on_each_cpu(_ipi_mask_cpu, NULL, true);
return 0;
}
static int sdei_device_resume(struct device *dev)
{
on_each_cpu(_ipi_unmask_cpu, NULL, true);
return 0;
}
/*
* We need all events to be reregistered when we resume from hibernate.
*
* The sequence is freeze->thaw. Reboot. freeze->restore. We unregister
* events during freeze, then re-register and re-enable them during thaw
* and restore.
*/
static int sdei_device_freeze(struct device *dev)
{
int err;
/* unregister private events */
cpuhp_remove_state(sdei_entry_point);
err = sdei_unregister_shared();
if (err)
return err;
return 0;
}
static int sdei_device_thaw(struct device *dev)
{
int err;
/* re-register shared events */
err = sdei_reregister_shared();
if (err) {
pr_warn("Failed to re-register shared events...\n");
sdei_mark_interface_broken();
return err;
}
err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
&sdei_cpuhp_up, &sdei_cpuhp_down);
if (err < 0) {
pr_warn("Failed to re-register CPU hotplug notifier...\n");
return err;
}
sdei_hp_state = err;
return 0;
}
static int sdei_device_restore(struct device *dev)
{
int err;
err = sdei_platform_reset();
if (err)
return err;
return sdei_device_thaw(dev);
}
static const struct dev_pm_ops sdei_pm_ops = {
.suspend = sdei_device_suspend,
.resume = sdei_device_resume,
.freeze = sdei_device_freeze,
.thaw = sdei_device_thaw,
.restore = sdei_device_restore,
};
/*
* Mask all CPUs and unregister all events on panic, reboot or kexec.
*/
static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
void *data)
{
/*
* We are going to reset the interface; after this there is no point
* in doing work when we take CPUs offline.
*/
cpuhp_remove_state(sdei_hp_state);
sdei_platform_reset();
return NOTIFY_OK;
}
static struct notifier_block sdei_reboot_nb = {
.notifier_call = sdei_reboot_notifier,
};
static void sdei_smccc_smc(unsigned long function_id,
unsigned long arg0, unsigned long arg1,
unsigned long arg2, unsigned long arg3,
unsigned long arg4, struct arm_smccc_res *res)
{
arm_smccc_smc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
}
NOKPROBE_SYMBOL(sdei_smccc_smc);
static void sdei_smccc_hvc(unsigned long function_id,
unsigned long arg0, unsigned long arg1,
unsigned long arg2, unsigned long arg3,
unsigned long arg4, struct arm_smccc_res *res)
{
arm_smccc_hvc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
}
NOKPROBE_SYMBOL(sdei_smccc_hvc);
int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb,
sdei_event_callback *critical_cb)
{
int err;
u64 result;
u32 event_num;
sdei_event_callback *cb;
if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
return -EOPNOTSUPP;
event_num = ghes->generic->notify.vector;
if (event_num == 0) {
/*
* Event 0 is reserved by the specification for
* SDEI_EVENT_SIGNAL.
*/
return -EINVAL;
}
err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
&result);
if (err)
return err;
if (result == SDEI_EVENT_PRIORITY_CRITICAL)
cb = critical_cb;
else
cb = normal_cb;
err = sdei_event_register(event_num, cb, ghes);
if (!err)
err = sdei_event_enable(event_num);
return err;
}
int sdei_unregister_ghes(struct ghes *ghes)
{
int i;
int err;
u32 event_num = ghes->generic->notify.vector;
might_sleep();
if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
return -EOPNOTSUPP;
/*
* The event may be running on another CPU. Disable it
* to stop new events, then try to unregister a few times.
*/
err = sdei_event_disable(event_num);
if (err)
return err;
for (i = 0; i < 3; i++) {
err = sdei_event_unregister(event_num);
if (err != -EINPROGRESS)
break;
schedule();
}
return err;
}
static int sdei_get_conduit(struct platform_device *pdev)
{
const char *method;
struct device_node *np = pdev->dev.of_node;
sdei_firmware_call = NULL;
if (np) {
if (of_property_read_string(np, "method", &method)) {
pr_warn("missing \"method\" property\n");
return SMCCC_CONDUIT_NONE;
}
if (!strcmp("hvc", method)) {
sdei_firmware_call = &sdei_smccc_hvc;
return SMCCC_CONDUIT_HVC;
} else if (!strcmp("smc", method)) {
sdei_firmware_call = &sdei_smccc_smc;
return SMCCC_CONDUIT_SMC;
}
pr_warn("invalid \"method\" property: %s\n", method);
} else if (!acpi_disabled) {
if (acpi_psci_use_hvc()) {
sdei_firmware_call = &sdei_smccc_hvc;
return SMCCC_CONDUIT_HVC;
} else {
sdei_firmware_call = &sdei_smccc_smc;
return SMCCC_CONDUIT_SMC;
}
}
return SMCCC_CONDUIT_NONE;
}
static int sdei_probe(struct platform_device *pdev)
{
int err;
u64 ver = 0;
int conduit;
conduit = sdei_get_conduit(pdev);
if (!sdei_firmware_call)
return 0;
err = sdei_api_get_version(&ver);
if (err) {
pr_err("Failed to get SDEI version: %d\n", err);
sdei_mark_interface_broken();
return err;
}
pr_info("SDEIv%d.%d (0x%x) detected in firmware.\n",
(int)SDEI_VERSION_MAJOR(ver), (int)SDEI_VERSION_MINOR(ver),
(int)SDEI_VERSION_VENDOR(ver));
if (SDEI_VERSION_MAJOR(ver) != 1) {
pr_warn("Conflicting SDEI version detected.\n");
sdei_mark_interface_broken();
return -EINVAL;
}
err = sdei_platform_reset();
if (err)
return err;
sdei_entry_point = sdei_arch_get_entry_point(conduit);
if (!sdei_entry_point) {
/* Not supported due to hardware or boot configuration */
sdei_mark_interface_broken();
return 0;
}
err = cpu_pm_register_notifier(&sdei_pm_nb);
if (err) {
pr_warn("Failed to register CPU PM notifier...\n");
goto error;
}
err = register_reboot_notifier(&sdei_reboot_nb);
if (err) {
pr_warn("Failed to register reboot notifier...\n");
goto remove_cpupm;
}
err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
&sdei_cpuhp_up, &sdei_cpuhp_down);
if (err < 0) {
pr_warn("Failed to register CPU hotplug notifier...\n");
goto remove_reboot;
}
sdei_hp_state = err;
return 0;
remove_reboot:
unregister_reboot_notifier(&sdei_reboot_nb);
remove_cpupm:
cpu_pm_unregister_notifier(&sdei_pm_nb);
error:
sdei_mark_interface_broken();
return err;
}
static const struct of_device_id sdei_of_match[] = {
{ .compatible = "arm,sdei-1.0" },
{}
};
static struct platform_driver sdei_driver = {
.driver = {
.name = "sdei",
.pm = &sdei_pm_ops,
.of_match_table = sdei_of_match,
},
.probe = sdei_probe,
};
static bool __init sdei_present_acpi(void)
{
acpi_status status;
struct acpi_table_header *sdei_table_header;
if (acpi_disabled)
return false;
status = acpi_get_table(ACPI_SIG_SDEI, 0, &sdei_table_header);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
const char *msg = acpi_format_exception(status);
pr_info("Failed to get ACPI:SDEI table, %s\n", msg);
}
if (ACPI_FAILURE(status))
return false;
acpi_put_table(sdei_table_header);
return true;
}
void __init sdei_init(void)
{
struct platform_device *pdev;
int ret;
ret = platform_driver_register(&sdei_driver);
if (ret || !sdei_present_acpi())
return;
pdev = platform_device_register_simple(sdei_driver.driver.name,
0, NULL, 0);
if (IS_ERR(pdev)) {
ret = PTR_ERR(pdev);
platform_driver_unregister(&sdei_driver);
pr_info("Failed to register ACPI:SDEI platform device %d\n",
ret);
}
}
int sdei_event_handler(struct pt_regs *regs,
struct sdei_registered_event *arg)
{
int err;
u32 event_num = arg->event_num;
err = arg->callback(event_num, regs, arg->callback_arg);
if (err)
pr_err_ratelimited("event %u on CPU %u failed with error: %d\n",
event_num, smp_processor_id(), err);
return err;
}
NOKPROBE_SYMBOL(sdei_event_handler);
void sdei_handler_abort(void)
{
/*
* If the crash happened in an SDEI event handler then we need to
* finish the handler with the firmware so that we can have working
* interrupts in the crash kernel.
*/
if (__this_cpu_read(sdei_active_critical_event)) {
pr_warn("still in SDEI critical event context, attempting to finish handler.\n");
__sdei_handler_abort();
__this_cpu_write(sdei_active_critical_event, NULL);
}
if (__this_cpu_read(sdei_active_normal_event)) {
pr_warn("still in SDEI normal event context, attempting to finish handler.\n");
__sdei_handler_abort();
__this_cpu_write(sdei_active_normal_event, NULL);
}
}
| linux-master | drivers/firmware/arm_sdei.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015,2019 The Linux Foundation. All rights reserved.
*/
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/arm-smccc.h>
#include <linux/dma-mapping.h>
#include "qcom_scm.h"
/**
* struct arm_smccc_args
* @args: The array of values placed in registers for the SMC instruction
*/
struct arm_smccc_args {
unsigned long args[8];
};
static DEFINE_MUTEX(qcom_scm_lock);
#define QCOM_SCM_EBUSY_WAIT_MS 30
#define QCOM_SCM_EBUSY_MAX_RETRY 20
#define SCM_SMC_N_REG_ARGS 4
#define SCM_SMC_FIRST_EXT_IDX (SCM_SMC_N_REG_ARGS - 1)
#define SCM_SMC_N_EXT_ARGS (MAX_QCOM_SCM_ARGS - SCM_SMC_N_REG_ARGS + 1)
#define SCM_SMC_FIRST_REG_IDX 2
#define SCM_SMC_LAST_REG_IDX (SCM_SMC_FIRST_REG_IDX + SCM_SMC_N_REG_ARGS - 1)
static void __scm_smc_do_quirk(const struct arm_smccc_args *smc,
struct arm_smccc_res *res)
{
unsigned long a0 = smc->args[0];
struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 };
quirk.state.a6 = 0;
do {
arm_smccc_smc_quirk(a0, smc->args[1], smc->args[2],
smc->args[3], smc->args[4], smc->args[5],
quirk.state.a6, smc->args[7], res, &quirk);
if (res->a0 == QCOM_SCM_INTERRUPTED)
a0 = res->a0;
} while (res->a0 == QCOM_SCM_INTERRUPTED);
}
static void fill_wq_resume_args(struct arm_smccc_args *resume, u32 smc_call_ctx)
{
memset(resume->args, 0, sizeof(resume->args[0]) * ARRAY_SIZE(resume->args));
resume->args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_SIP,
SCM_SMC_FNID(QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_RESUME));
resume->args[1] = QCOM_SCM_ARGS(1);
resume->args[2] = smc_call_ctx;
}
int scm_get_wq_ctx(u32 *wq_ctx, u32 *flags, u32 *more_pending)
{
int ret;
struct arm_smccc_res get_wq_res;
struct arm_smccc_args get_wq_ctx = {0};
get_wq_ctx.args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_SIP,
SCM_SMC_FNID(QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_GET_WQ_CTX));
/* Guaranteed to return only success or error, no WAITQ_* */
__scm_smc_do_quirk(&get_wq_ctx, &get_wq_res);
ret = get_wq_res.a0;
if (ret)
return ret;
*wq_ctx = get_wq_res.a1;
*flags = get_wq_res.a2;
*more_pending = get_wq_res.a3;
return 0;
}
static int __scm_smc_do_quirk_handle_waitq(struct device *dev, struct arm_smccc_args *waitq,
struct arm_smccc_res *res)
{
int ret;
u32 wq_ctx, smc_call_ctx;
struct arm_smccc_args resume;
struct arm_smccc_args *smc = waitq;
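/*
* The firmware may ask the caller to sleep instead of spinning: when a
* call returns QCOM_SCM_WAITQ_SLEEP, wait on the queue identified by
* a1, then retry with a WAITQ_RESUME call carrying the smc_call_ctx
* from a2 until the original request completes.
*/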
do {
__scm_smc_do_quirk(smc, res);
if (res->a0 == QCOM_SCM_WAITQ_SLEEP) {
wq_ctx = res->a1;
smc_call_ctx = res->a2;
ret = qcom_scm_wait_for_wq_completion(wq_ctx);
if (ret)
return ret;
fill_wq_resume_args(&resume, smc_call_ctx);
smc = &resume;
}
} while (res->a0 == QCOM_SCM_WAITQ_SLEEP);
return 0;
}
static int __scm_smc_do(struct device *dev, struct arm_smccc_args *smc,
struct arm_smccc_res *res, bool atomic)
{
int ret, retry_count = 0;
if (atomic) {
__scm_smc_do_quirk(smc, res);
return 0;
}
do {
mutex_lock(&qcom_scm_lock);
ret = __scm_smc_do_quirk_handle_waitq(dev, smc, res);
mutex_unlock(&qcom_scm_lock);
if (ret)
return ret;
if (res->a0 == QCOM_SCM_V2_EBUSY) {
if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
break;
msleep(QCOM_SCM_EBUSY_WAIT_MS);
}
} while (res->a0 == QCOM_SCM_V2_EBUSY);
return 0;
}
int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
enum qcom_scm_convention qcom_convention,
struct qcom_scm_res *res, bool atomic)
{
int arglen = desc->arginfo & 0xf;
int i, ret;
dma_addr_t args_phys = 0;
void *args_virt = NULL;
size_t alloc_len;
gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
u32 smccc_call_type = atomic ? ARM_SMCCC_FAST_CALL : ARM_SMCCC_STD_CALL;
u32 qcom_smccc_convention = (qcom_convention == SMC_CONVENTION_ARM_32) ?
ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64;
struct arm_smccc_res smc_res;
struct arm_smccc_args smc = {0};
smc.args[0] = ARM_SMCCC_CALL_VAL(
smccc_call_type,
qcom_smccc_convention,
desc->owner,
SCM_SMC_FNID(desc->svc, desc->cmd));
smc.args[1] = desc->arginfo;
for (i = 0; i < SCM_SMC_N_REG_ARGS; i++)
smc.args[i + SCM_SMC_FIRST_REG_IDX] = desc->args[i];
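/*
* Only the first SCM_SMC_N_REG_ARGS arguments fit in registers; any
* remaining arguments are marshalled into a DMA buffer below and its
* bus address is passed in the last argument register.
*/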
if (unlikely(arglen > SCM_SMC_N_REG_ARGS)) {
alloc_len = SCM_SMC_N_EXT_ARGS * sizeof(u64);
args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag);
if (!args_virt)
return -ENOMEM;
if (qcom_smccc_convention == ARM_SMCCC_SMC_32) {
__le32 *args = args_virt;
for (i = 0; i < SCM_SMC_N_EXT_ARGS; i++)
args[i] = cpu_to_le32(desc->args[i +
SCM_SMC_FIRST_EXT_IDX]);
} else {
__le64 *args = args_virt;
for (i = 0; i < SCM_SMC_N_EXT_ARGS; i++)
args[i] = cpu_to_le64(desc->args[i +
SCM_SMC_FIRST_EXT_IDX]);
}
args_phys = dma_map_single(dev, args_virt, alloc_len,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, args_phys)) {
kfree(args_virt);
return -ENOMEM;
}
smc.args[SCM_SMC_LAST_REG_IDX] = args_phys;
}
/* ret error check follows after args_virt cleanup */
ret = __scm_smc_do(dev, &smc, &smc_res, atomic);
if (args_virt) {
dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
kfree(args_virt);
}
if (ret)
return ret;
if (res) {
res->result[0] = smc_res.a1;
res->result[1] = smc_res.a2;
res->result[2] = smc_res.a3;
}
return (long)smc_res.a0 ? qcom_scm_remap_error(smc_res.a0) : 0;
}
| linux-master | drivers/firmware/qcom_scm-smc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Defines interfaces for interacting with the Raspberry Pi firmware's
* property channel.
*
* Copyright © 2015 Broadcom
*/
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <soc/bcm2835/raspberrypi-firmware.h>
#define MBOX_MSG(chan, data28) (((data28) & ~0xf) | ((chan) & 0xf))
#define MBOX_CHAN(msg) ((msg) & 0xf)
#define MBOX_DATA28(msg) ((msg) & ~0xf)
#define MBOX_CHAN_PROPERTY 8
static struct platform_device *rpi_hwmon;
static struct platform_device *rpi_clk;
struct rpi_firmware {
struct mbox_client cl;
struct mbox_chan *chan; /* The property channel. */
struct completion c;
u32 enabled;
struct kref consumers;
};
static DEFINE_MUTEX(transaction_lock);
static void response_callback(struct mbox_client *cl, void *msg)
{
struct rpi_firmware *fw = container_of(cl, struct rpi_firmware, cl);
complete(&fw->c);
}
/*
* Sends a request to the firmware through the BCM2835 mailbox driver,
* and synchronously waits for the reply.
*/
static int
rpi_firmware_transaction(struct rpi_firmware *fw, u32 chan, u32 data)
{
u32 message = MBOX_MSG(chan, data);
int ret;
WARN_ON(data & 0xf);
mutex_lock(&transaction_lock);
reinit_completion(&fw->c);
ret = mbox_send_message(fw->chan, &message);
if (ret >= 0) {
if (wait_for_completion_timeout(&fw->c, HZ)) {
ret = 0;
} else {
ret = -ETIMEDOUT;
WARN_ONCE(1, "Firmware transaction timeout");
}
} else {
dev_err(fw->cl.dev, "mbox_send_message returned %d\n", ret);
}
mutex_unlock(&transaction_lock);
return ret;
}
/**
* rpi_firmware_property_list - Submit firmware property list
* @fw: Pointer to firmware structure from rpi_firmware_get().
* @data: Buffer holding tags.
* @tag_size: Size of tags buffer.
*
* Submits a set of concatenated tags to the VPU firmware through the
* mailbox property interface.
*
* The buffer header and the ending tag are added by this function and
* don't need to be supplied, just the actual tags for your operation.
* See struct rpi_firmware_property_tag_header for the per-tag
* structure.
*/
int rpi_firmware_property_list(struct rpi_firmware *fw,
void *data, size_t tag_size)
{
size_t size = tag_size + 12;
u32 *buf;
dma_addr_t bus_addr;
int ret;
/* Packets are processed a dword at a time. */
if (size & 3)
return -EINVAL;
buf = dma_alloc_coherent(fw->cl.dev, PAGE_ALIGN(size), &bus_addr,
GFP_ATOMIC);
if (!buf)
return -ENOMEM;
/* The firmware will error out without parsing in this case. */
WARN_ON(size >= 1024 * 1024);
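/*
* Buffer layout expected by the property channel: total size in bytes,
* request/response status code, the caller's concatenated tags, and a
* terminating RPI_FIRMWARE_PROPERTY_END tag.
*/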
buf[0] = size;
buf[1] = RPI_FIRMWARE_STATUS_REQUEST;
memcpy(&buf[2], data, tag_size);
buf[size / 4 - 1] = RPI_FIRMWARE_PROPERTY_END;
wmb();
ret = rpi_firmware_transaction(fw, MBOX_CHAN_PROPERTY, bus_addr);
rmb();
memcpy(data, &buf[2], tag_size);
if (ret == 0 && buf[1] != RPI_FIRMWARE_STATUS_SUCCESS) {
/*
* The tag name here might not be the one causing the
* error, if there were multiple tags in the request.
* But single-tag is the most common, so go with it.
*/
dev_err(fw->cl.dev, "Request 0x%08x returned status 0x%08x\n",
buf[2], buf[1]);
ret = -EINVAL;
}
dma_free_coherent(fw->cl.dev, PAGE_ALIGN(size), buf, bus_addr);
return ret;
}
EXPORT_SYMBOL_GPL(rpi_firmware_property_list);
/**
* rpi_firmware_property - Submit single firmware property
* @fw: Pointer to firmware structure from rpi_firmware_get().
* @tag: One of enum rpi_firmware_property_tag.
* @tag_data: Tag data buffer.
* @buf_size: Buffer size.
*
* Submits a single tag to the VPU firmware through the mailbox
* property interface.
*
* This is a convenience wrapper around
* rpi_firmware_property_list() to avoid some of the
* boilerplate in property calls.
*/
int rpi_firmware_property(struct rpi_firmware *fw,
u32 tag, void *tag_data, size_t buf_size)
{
struct rpi_firmware_property_tag_header *header;
int ret;
/* Some mailboxes can use over 1k bytes. Rather than checking
* size and using stack or kmalloc depending on requirements,
* just use kmalloc. Mailboxes don't get called enough to worry
* too much about the time taken in the allocation.
*/
void *data = kmalloc(sizeof(*header) + buf_size, GFP_KERNEL);
if (!data)
return -ENOMEM;
header = data;
header->tag = tag;
header->buf_size = buf_size;
header->req_resp_size = 0;
memcpy(data + sizeof(*header), tag_data, buf_size);
ret = rpi_firmware_property_list(fw, data, buf_size + sizeof(*header));
memcpy(tag_data, data + sizeof(*header), buf_size);
kfree(data);
return ret;
}
EXPORT_SYMBOL_GPL(rpi_firmware_property);
static void
rpi_firmware_print_firmware_revision(struct rpi_firmware *fw)
{
time64_t date_and_time;
u32 packet;
int ret = rpi_firmware_property(fw,
RPI_FIRMWARE_GET_FIRMWARE_REVISION,
&packet, sizeof(packet));
if (ret)
return;
/* This is not compatible with y2038 */
date_and_time = packet;
dev_info(fw->cl.dev, "Attached to firmware from %ptT\n", &date_and_time);
}
static void
rpi_register_hwmon_driver(struct device *dev, struct rpi_firmware *fw)
{
u32 packet;
int ret = rpi_firmware_property(fw, RPI_FIRMWARE_GET_THROTTLED,
&packet, sizeof(packet));
if (ret)
return;
rpi_hwmon = platform_device_register_data(dev, "raspberrypi-hwmon",
-1, NULL, 0);
}
static void rpi_register_clk_driver(struct device *dev)
{
struct device_node *firmware;
/*
* Earlier DTs don't have a node for the firmware clocks but
* rely on us creating a platform device by hand. If we do
* have a node for the firmware clocks, just bail out here.
*/
firmware = of_get_compatible_child(dev->of_node,
"raspberrypi,firmware-clocks");
if (firmware) {
of_node_put(firmware);
return;
}
rpi_clk = platform_device_register_data(dev, "raspberrypi-clk",
-1, NULL, 0);
}
unsigned int rpi_firmware_clk_get_max_rate(struct rpi_firmware *fw, unsigned int id)
{
struct rpi_firmware_clk_rate_request msg =
RPI_FIRMWARE_CLK_RATE_REQUEST(id);
int ret;
ret = rpi_firmware_property(fw, RPI_FIRMWARE_GET_MAX_CLOCK_RATE,
&msg, sizeof(msg));
if (ret)
/*
* If the firmware doesn't support that operation, or it fails, assume
* the maximum clock rate is the largest value the return type can
* hold.
*/
return UINT_MAX;
return le32_to_cpu(msg.rate);
}
EXPORT_SYMBOL_GPL(rpi_firmware_clk_get_max_rate);
static void rpi_firmware_delete(struct kref *kref)
{
struct rpi_firmware *fw = container_of(kref, struct rpi_firmware,
consumers);
mbox_free_channel(fw->chan);
kfree(fw);
}
void rpi_firmware_put(struct rpi_firmware *fw)
{
kref_put(&fw->consumers, rpi_firmware_delete);
}
EXPORT_SYMBOL_GPL(rpi_firmware_put);
static void devm_rpi_firmware_put(void *data)
{
struct rpi_firmware *fw = data;
rpi_firmware_put(fw);
}
static int rpi_firmware_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rpi_firmware *fw;
/*
* Memory will be freed by rpi_firmware_delete() once all users have
* released their firmware handles. Don't use devm_kzalloc() here.
*/
fw = kzalloc(sizeof(*fw), GFP_KERNEL);
if (!fw)
return -ENOMEM;
fw->cl.dev = dev;
fw->cl.rx_callback = response_callback;
fw->cl.tx_block = true;
fw->chan = mbox_request_channel(&fw->cl, 0);
if (IS_ERR(fw->chan)) {
int ret = PTR_ERR(fw->chan);
kfree(fw);
return dev_err_probe(dev, ret, "Failed to get mbox channel\n");
}
init_completion(&fw->c);
kref_init(&fw->consumers);
platform_set_drvdata(pdev, fw);
rpi_firmware_print_firmware_revision(fw);
rpi_register_hwmon_driver(dev, fw);
rpi_register_clk_driver(dev);
return 0;
}
static void rpi_firmware_shutdown(struct platform_device *pdev)
{
struct rpi_firmware *fw = platform_get_drvdata(pdev);
if (!fw)
return;
rpi_firmware_property(fw, RPI_FIRMWARE_NOTIFY_REBOOT, NULL, 0);
}
static int rpi_firmware_remove(struct platform_device *pdev)
{
struct rpi_firmware *fw = platform_get_drvdata(pdev);
platform_device_unregister(rpi_hwmon);
rpi_hwmon = NULL;
platform_device_unregister(rpi_clk);
rpi_clk = NULL;
rpi_firmware_put(fw);
return 0;
}
static const struct of_device_id rpi_firmware_of_match[] = {
{ .compatible = "raspberrypi,bcm2835-firmware", },
{},
};
MODULE_DEVICE_TABLE(of, rpi_firmware_of_match);
struct device_node *rpi_firmware_find_node(void)
{
return of_find_matching_node(NULL, rpi_firmware_of_match);
}
EXPORT_SYMBOL_GPL(rpi_firmware_find_node);
/**
* rpi_firmware_get - Get pointer to rpi_firmware structure.
* @firmware_node: Pointer to the firmware Device Tree node.
*
* The reference to rpi_firmware has to be released with rpi_firmware_put().
*
* Returns NULL if the firmware device is not ready.
*/
struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
{
struct platform_device *pdev = of_find_device_by_node(firmware_node);
struct rpi_firmware *fw;
if (!pdev)
return NULL;
fw = platform_get_drvdata(pdev);
if (!fw)
goto err_put_device;
if (!kref_get_unless_zero(&fw->consumers))
goto err_put_device;
put_device(&pdev->dev);
return fw;
err_put_device:
put_device(&pdev->dev);
return NULL;
}
EXPORT_SYMBOL_GPL(rpi_firmware_get);
/**
* devm_rpi_firmware_get - Get pointer to rpi_firmware structure.
* @firmware_node: Pointer to the firmware Device Tree node.
*
* Returns NULL if the firmware device is not ready.
*/
struct rpi_firmware *devm_rpi_firmware_get(struct device *dev,
struct device_node *firmware_node)
{
struct rpi_firmware *fw;
fw = rpi_firmware_get(firmware_node);
if (!fw)
return NULL;
if (devm_add_action_or_reset(dev, devm_rpi_firmware_put, fw))
return NULL;
return fw;
}
EXPORT_SYMBOL_GPL(devm_rpi_firmware_get);
static struct platform_driver rpi_firmware_driver = {
.driver = {
.name = "raspberrypi-firmware",
.of_match_table = rpi_firmware_of_match,
},
.probe = rpi_firmware_probe,
.shutdown = rpi_firmware_shutdown,
.remove = rpi_firmware_remove,
};
module_platform_driver(rpi_firmware_driver);
MODULE_AUTHOR("Eric Anholt <[email protected]>");
MODULE_DESCRIPTION("Raspberry Pi firmware driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/firmware/raspberrypi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Generic System Framebuffers
* Copyright (c) 2012-2013 David Herrmann <[email protected]>
*/
/*
* simple-framebuffer probing
* Try to convert "screen_info" into a "simple-framebuffer" compatible mode.
* If the mode is incompatible, we return "false" and let the caller create
* legacy nodes instead.
*/
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/platform_data/simplefb.h>
#include <linux/platform_device.h>
#include <linux/screen_info.h>
#include <linux/sysfb.h>
static const char simplefb_resname[] = "BOOTFB";
static const struct simplefb_format formats[] = SIMPLEFB_FORMATS;
/* try parsing screen_info into a simple-framebuffer mode struct */
__init bool sysfb_parse_mode(const struct screen_info *si,
struct simplefb_platform_data *mode)
{
__u8 type;
u32 bits_per_pixel;
unsigned int i;
type = si->orig_video_isVGA;
if (type != VIDEO_TYPE_VLFB && type != VIDEO_TYPE_EFI)
return false;
/*
* The meaning of depth and bpp for direct-color formats is
* inconsistent:
*
* - DRM format info specifies depth as the number of color
* bits; including alpha, but not including filler bits.
* - Linux' EFI platform code computes lfb_depth from the
* individual color channels, including the reserved bits.
* - VBE 1.1 defines lfb_depth for XRGB1555 as 16, but later
* versions use 15.
* - On the kernel command line, 'bpp' of 32 is usually
* XRGB8888 including the filler bits, but 15 is XRGB1555
* not including the filler bit.
*
* It's not easily possible to fix this in struct screen_info,
* as this could break UAPI. The best solution is to compute
* bits_per_pixel from the color bits, reserved bits and
* reported lfb_depth, whichever is highest. In the loop below,
* ignore simplefb formats with alpha bits, as EFI and VESA
* don't specify alpha channels.
*/
if (si->lfb_depth > 8) {
bits_per_pixel = max(max3(si->red_size + si->red_pos,
si->green_size + si->green_pos,
si->blue_size + si->blue_pos),
si->rsvd_size + si->rsvd_pos);
bits_per_pixel = max_t(u32, bits_per_pixel, si->lfb_depth);
} else {
bits_per_pixel = si->lfb_depth;
}
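/*
* Worked example: for an XRGB8888 layout reported with lfb_depth 24,
* 8-bit red/green/blue at offsets 16/8/0 and 8 reserved bits at offset
* 24, the color channels give max3(24, 16, 8) = 24, the reserved bits
* give 32, and max(32, 24) keeps 32 bits per pixel.
*/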
for (i = 0; i < ARRAY_SIZE(formats); ++i) {
const struct simplefb_format *f = &formats[i];
if (f->transp.length)
continue; /* transparent formats are unsupported by VESA/EFI */
if (bits_per_pixel == f->bits_per_pixel &&
si->red_size == f->red.length &&
si->red_pos == f->red.offset &&
si->green_size == f->green.length &&
si->green_pos == f->green.offset &&
si->blue_size == f->blue.length &&
si->blue_pos == f->blue.offset) {
mode->format = f->name;
mode->width = si->lfb_width;
mode->height = si->lfb_height;
mode->stride = si->lfb_linelength;
return true;
}
}
return false;
}
__init struct platform_device *sysfb_create_simplefb(const struct screen_info *si,
const struct simplefb_platform_data *mode)
{
struct platform_device *pd;
struct resource res;
u64 base, size;
u32 length;
int ret;
/*
* If the 64BIT_BASE capability is set, ext_lfb_base will contain the
* upper half of the base address. Assemble the address, then make sure
* it is valid and we can actually access it.
*/
base = si->lfb_base;
if (si->capabilities & VIDEO_CAPABILITY_64BIT_BASE)
base |= (u64)si->ext_lfb_base << 32;
if (!base || (u64)(resource_size_t)base != base) {
printk(KERN_DEBUG "sysfb: inaccessible VRAM base\n");
return ERR_PTR(-EINVAL);
}
/*
* Don't use lfb_size as IORESOURCE size, since it may contain the
* entire VMEM, and thus require huge mappings. Use just the part we
* need, that is, the part where the framebuffer is located. But verify
* that it does not exceed the advertised VMEM.
* Note that in case of VBE, the lfb_size is shifted by 16 bits for
* historical reasons.
*/
size = si->lfb_size;
if (si->orig_video_isVGA == VIDEO_TYPE_VLFB)
size <<= 16;
length = mode->height * mode->stride;
if (length > size) {
printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n");
return ERR_PTR(-EINVAL);
}
length = PAGE_ALIGN(length);
/* setup IORESOURCE_MEM as framebuffer memory */
memset(&res, 0, sizeof(res));
res.flags = IORESOURCE_MEM;
res.name = simplefb_resname;
res.start = base;
res.end = res.start + length - 1;
if (res.end <= res.start)
return ERR_PTR(-EINVAL);
pd = platform_device_alloc("simple-framebuffer", 0);
if (!pd)
return ERR_PTR(-ENOMEM);
sysfb_set_efifb_fwnode(pd);
ret = platform_device_add_resources(pd, &res, 1);
if (ret)
goto err_put_device;
ret = platform_device_add_data(pd, mode, sizeof(*mode));
if (ret)
goto err_put_device;
ret = platform_device_add(pd);
if (ret)
goto err_put_device;
return pd;
err_put_device:
platform_device_put(pd);
return ERR_PTR(ret);
}
| linux-master | drivers/firmware/sysfb_simplefb.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/drivers/firmware/edd.c
* Copyright (C) 2002, 2003, 2004 Dell Inc.
* by Matt Domsch <[email protected]>
* disk signature by Matt Domsch, Andrew Wilks, and Sandeep K. Shandilya
* legacy CHS by Patrick J. LoPresti <[email protected]>
*
* BIOS Enhanced Disk Drive Services (EDD)
* conformant to T13 Committee www.t13.org
* projects 1572D, 1484D, 1386D, 1226DT
*
* This code takes information provided by BIOS EDD calls
* fn41 - Check Extensions Present and
* fn48 - Get Device Parameters with EDD extensions
* made in setup.S, copied to safe structures in setup.c,
* and presents it in sysfs.
*
* Please see http://linux.dell.com/edd/results.html for
* the list of BIOSs which have been reported to implement EDD.
*/
#include <linux/module.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/stat.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/limits.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/edd.h>
#define EDD_VERSION "0.16"
#define EDD_DATE "2004-Jun-25"
MODULE_AUTHOR("Matt Domsch <[email protected]>");
MODULE_DESCRIPTION("sysfs interface to BIOS EDD information");
MODULE_LICENSE("GPL");
MODULE_VERSION(EDD_VERSION);
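/* Space remaining in the PAGE_SIZE-sized sysfs buffer at position p */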
#define left (PAGE_SIZE - (p - buf) - 1)
struct edd_device {
unsigned int index;
unsigned int mbr_signature;
struct edd_info *info;
struct kobject kobj;
};
struct edd_attribute {
struct attribute attr;
ssize_t(*show) (struct edd_device * edev, char *buf);
int (*test) (struct edd_device * edev);
};
/* forward declarations */
static int edd_dev_is_type(struct edd_device *edev, const char *type);
static struct pci_dev *edd_get_pci_dev(struct edd_device *edev);
static struct edd_device *edd_devices[EDD_MBR_SIG_MAX];
#define EDD_DEVICE_ATTR(_name,_mode,_show,_test) \
struct edd_attribute edd_attr_##_name = { \
.attr = {.name = __stringify(_name), .mode = _mode }, \
.show = _show, \
.test = _test, \
};
static int
edd_has_mbr_signature(struct edd_device *edev)
{
return edev->index < min_t(unsigned char, edd.mbr_signature_nr, EDD_MBR_SIG_MAX);
}
static int
edd_has_edd_info(struct edd_device *edev)
{
return edev->index < min_t(unsigned char, edd.edd_info_nr, EDDMAXNR);
}
static inline struct edd_info *
edd_dev_get_info(struct edd_device *edev)
{
return edev->info;
}
static inline void
edd_dev_set_info(struct edd_device *edev, int i)
{
edev->index = i;
if (edd_has_mbr_signature(edev))
edev->mbr_signature = edd.mbr_signature[i];
if (edd_has_edd_info(edev))
edev->info = &edd.edd_info[i];
}
#define to_edd_attr(_attr) container_of(_attr,struct edd_attribute,attr)
#define to_edd_device(obj) container_of(obj,struct edd_device,kobj)
static ssize_t
edd_attr_show(struct kobject * kobj, struct attribute *attr, char *buf)
{
struct edd_device *dev = to_edd_device(kobj);
struct edd_attribute *edd_attr = to_edd_attr(attr);
ssize_t ret = -EIO;
if (edd_attr->show)
ret = edd_attr->show(dev, buf);
return ret;
}
static const struct sysfs_ops edd_attr_ops = {
.show = edd_attr_show,
};
static ssize_t
edd_show_host_bus(struct edd_device *edev, char *buf)
{
struct edd_info *info;
char *p = buf;
int i;
if (!edev)
return -EINVAL;
info = edd_dev_get_info(edev);
if (!info || !buf)
return -EINVAL;
for (i = 0; i < 4; i++) {
if (isprint(info->params.host_bus_type[i])) {
p += scnprintf(p, left, "%c", info->params.host_bus_type[i]);
} else {
p += scnprintf(p, left, " ");
}
}
if (!strncmp(info->params.host_bus_type, "ISA", 3)) {
p += scnprintf(p, left, "\tbase_address: %x\n",
info->params.interface_path.isa.base_address);
} else if (!strncmp(info->params.host_bus_type, "PCIX", 4) ||
!strncmp(info->params.host_bus_type, "PCI", 3) ||
!strncmp(info->params.host_bus_type, "XPRS", 4)) {
p += scnprintf(p, left,
"\t%02x:%02x.%d channel: %u\n",
info->params.interface_path.pci.bus,
info->params.interface_path.pci.slot,
info->params.interface_path.pci.function,
info->params.interface_path.pci.channel);
} else if (!strncmp(info->params.host_bus_type, "IBND", 4) ||
!strncmp(info->params.host_bus_type, "HTPT", 4)) {
p += scnprintf(p, left,
"\tTBD: %llx\n",
info->params.interface_path.ibnd.reserved);
} else {
p += scnprintf(p, left, "\tunknown: %llx\n",
info->params.interface_path.unknown.reserved);
}
return (p - buf);
}
static ssize_t
edd_show_interface(struct edd_device *edev, char *buf)
{
struct edd_info *info;
char *p = buf;
int i;
if (!edev)
return -EINVAL;
info = edd_dev_get_info(edev);
if (!info || !buf)
return -EINVAL;
for (i = 0; i < 8; i++) {
if (isprint(info->params.interface_type[i])) {
p += scnprintf(p, left, "%c", info->params.interface_type[i]);
} else {
p += scnprintf(p, left, " ");
}
}
if (!strncmp(info->params.interface_type, "ATAPI", 5)) {
p += scnprintf(p, left, "\tdevice: %u lun: %u\n",
info->params.device_path.atapi.device,
info->params.device_path.atapi.lun);
} else if (!strncmp(info->params.interface_type, "ATA", 3)) {
p += scnprintf(p, left, "\tdevice: %u\n",
info->params.device_path.ata.device);
} else if (!strncmp(info->params.interface_type, "SCSI", 4)) {
p += scnprintf(p, left, "\tid: %u lun: %llu\n",
info->params.device_path.scsi.id,
info->params.device_path.scsi.lun);
} else if (!strncmp(info->params.interface_type, "USB", 3)) {
p += scnprintf(p, left, "\tserial_number: %llx\n",
info->params.device_path.usb.serial_number);
} else if (!strncmp(info->params.interface_type, "1394", 4)) {
p += scnprintf(p, left, "\teui: %llx\n",
info->params.device_path.i1394.eui);
} else if (!strncmp(info->params.interface_type, "FIBRE", 5)) {
p += scnprintf(p, left, "\twwid: %llx lun: %llx\n",
info->params.device_path.fibre.wwid,
info->params.device_path.fibre.lun);
} else if (!strncmp(info->params.interface_type, "I2O", 3)) {
p += scnprintf(p, left, "\tidentity_tag: %llx\n",
info->params.device_path.i2o.identity_tag);
} else if (!strncmp(info->params.interface_type, "RAID", 4)) {
p += scnprintf(p, left, "\tidentity_tag: %x\n",
info->params.device_path.raid.array_number);
} else if (!strncmp(info->params.interface_type, "SATA", 4)) {
p += scnprintf(p, left, "\tdevice: %u\n",
info->params.device_path.sata.device);
} else {
p += scnprintf(p, left, "\tunknown: %llx %llx\n",
info->params.device_path.unknown.reserved1,
info->params.device_path.unknown.reserved2);
}
return (p - buf);
}
/**
* edd_show_raw_data() - copies raw data to buffer for userspace to parse
* @edev: target edd_device
* @buf: output buffer
*
* Returns: number of bytes written, or -EINVAL on failure
*/
static ssize_t
edd_show_raw_data(struct edd_device *edev, char *buf)
{
struct edd_info *info;
ssize_t len = sizeof (info->params);
if (!edev)
return -EINVAL;
info = edd_dev_get_info(edev);
if (!info || !buf)
return -EINVAL;
if (!(info->params.key == 0xBEDD || info->params.key == 0xDDBE))
len = info->params.length;
/* In case of buggy BIOSs */
if (len > (sizeof(info->params)))
len = sizeof(info->params);
memcpy(buf, &info->params, len);
return len;
}
static ssize_t
edd_show_version(struct edd_device *edev, char *buf)
{
struct edd_info *info;
char *p = buf;
if (!edev)
return -EINVAL;
info = edd_dev_get_info(edev);
if (!info || !buf)
return -EINVAL;
p += scnprintf(p, left, "0x%02x\n", info->version);
return (p - buf);
}
static ssize_t
edd_show_mbr_signature(struct edd_device *edev, char *buf)
{
char *p = buf;
p += scnprintf(p, left, "0x%08x\n", edev->mbr_signature);
return (p - buf);
}
static ssize_t
edd_show_extensions(struct edd_device *edev, char *buf)
{
struct edd_info *info;
char *p = buf;
if (!edev)
return -EINVAL;
info = edd_dev_get_info(edev);
if (!info || !buf)
return -EINVAL;
if (info->interface_support & EDD_EXT_FIXED_DISK_ACCESS) {
p += scnprintf(p, left, "Fixed disk access\n");
}
if (info->interface_support & EDD_EXT_DEVICE_LOCKING_AND_EJECTING) {
p += scnprintf(p, left, "Device locking and ejecting\n");
}
if (info->interface_support & EDD_EXT_ENHANCED_DISK_DRIVE_SUPPORT) {
p += scnprintf(p, left, "Enhanced Disk Drive support\n");
}
if (info->interface_support & EDD_EXT_64BIT_EXTENSIONS) {
p += scnprintf(p, left, "64-bit extensions\n");
}
return (p - buf);
}
static ssize_t
edd_show_info_flags(struct edd_device *edev, char *buf)
{
struct edd_info *info;
char *p = buf;
if (!edev)
return -EINVAL;
info = edd_dev_get_info(edev);
if (!info || !buf)
return -EINVAL;
if (info->params.info_flags & EDD_INFO_DMA_BOUNDARY_ERROR_TRANSPARENT)
p += scnprintf(p, left, "DMA boundary error transparent\n");
if (info->params.info_flags & EDD_INFO_GEOMETRY_VALID)
p += scnprintf(p, left, "geometry valid\n");
if (info->params.info_flags & EDD_INFO_REMOVABLE)
p += scnprintf(p, left, "removable\n");
if (info->params.info_flags & EDD_INFO_WRITE_VERIFY)
p += scnprintf(p, left, "write verify\n");
if (info->params.info_flags & EDD_INFO_MEDIA_CHANGE_NOTIFICATION)
p += scnprintf(p, left, "media change notification\n");
if (info->params.info_flags & EDD_INFO_LOCKABLE)
p += scnprintf(p, left, "lockable\n");
if (info->params.info_flags & EDD_INFO_NO_MEDIA_PRESENT)
p += scnprintf(p, left, "no media present\n");
if (info->params.info_flags & EDD_INFO_USE_INT13_FN50)
p += scnprintf(p, left, "use int13 fn50\n");
return (p - buf);
}
static ssize_t
edd_show_legacy_max_cylinder(struct edd_device *edev, char *buf)
{
struct edd_info *info;
char *p = buf;
if (!edev)
return -EINVAL;
info = edd_dev_get_info(edev);
if (!info || !buf)
return -EINVAL;
p += scnprintf(p, left, "%u\n", info->legacy_max_cylinder);
return (p - buf);
}
static ssize_t
edd_show_legacy_max_head(struct edd_device *edev, char *buf)
{
struct edd_info *info;
char *p = buf;
if (!edev)
return -EINVAL;
info = edd_dev_get_info(edev);
if (!info || !buf)
return -EINVAL;
p += scnprintf(p, left, "%u\n", info->legacy_max_head);
return (p - buf);
}
static ssize_t
edd_show_legacy_sectors_per_track(struct edd_device *edev, char *buf)
{
struct edd_info *info;
char *p = buf;
if (!edev)
return -EINVAL;
info = edd_dev_get_info(edev);
if (!info || !buf)
return -EINVAL;
p += scnprintf(p, left, "%u\n", info->legacy_sectors_per_track);
return (p - buf);
}
static ssize_t
edd_show_default_cylinders(struct edd_device *edev, char *buf)
{
struct edd_info *info;
char *p = buf;
if (!edev)
return -EINVAL;
info = edd_dev_get_info(edev);
if (!info || !buf)
return -EINVAL;
p += scnprintf(p, left, "%u\n", info->params.num_default_cylinders);
return (p - buf);
}
static ssize_t
edd_show_default_heads(struct edd_device *edev, char *buf)
{
struct edd_info *info;
char *p = buf;
if (!edev)
return -EINVAL;
info = edd_dev_get_info(edev);
if (!info || !buf)
return -EINVAL;
p += scnprintf(p, left, "%u\n", info->params.num_default_heads);
return (p - buf);
}
static ssize_t
edd_show_default_sectors_per_track(struct edd_device *edev, char *buf)
{
struct edd_info *info;
char *p = buf;
if (!edev)
return -EINVAL;
info = edd_dev_get_info(edev);
if (!info || !buf)
return -EINVAL;
p += scnprintf(p, left, "%u\n", info->params.sectors_per_track);
return (p - buf);
}
static ssize_t
edd_show_sectors(struct edd_device *edev, char *buf)
{
struct edd_info *info;
char *p = buf;
if (!edev)
return -EINVAL;
info = edd_dev_get_info(edev);
if (!info || !buf)
return -EINVAL;
p += scnprintf(p, left, "%llu\n", info->params.number_of_sectors);
return (p - buf);
}
/*
* Some device instances may not have all the above attributes,
* or the attribute values may be meaningless (i.e. if
* the device is < EDD 3.0, it won't have host_bus and interface
* information), so don't bother making files for them. Likewise
* if the default_{cylinders,heads,sectors_per_track} values
* are zero, the BIOS doesn't provide sane values, so don't bother
* creating files for them either.
*/
static int
edd_has_legacy_max_cylinder(struct edd_device *edev)
{
struct edd_info *info;
if (!edev)
return 0;
info = edd_dev_get_info(edev);
if (!info)
return 0;
return info->legacy_max_cylinder > 0;
}
static int
edd_has_legacy_max_head(struct edd_device *edev)
{
struct edd_info *info;
if (!edev)
return 0;
info = edd_dev_get_info(edev);
if (!info)
return 0;
return info->legacy_max_head > 0;
}
static int
edd_has_legacy_sectors_per_track(struct edd_device *edev)
{
struct edd_info *info;
if (!edev)
return 0;
info = edd_dev_get_info(edev);
if (!info)
return 0;
return info->legacy_sectors_per_track > 0;
}
static int
edd_has_default_cylinders(struct edd_device *edev)
{
struct edd_info *info;
if (!edev)
return 0;
info = edd_dev_get_info(edev);
if (!info)
return 0;
return info->params.num_default_cylinders > 0;
}
static int
edd_has_default_heads(struct edd_device *edev)
{
struct edd_info *info;
if (!edev)
return 0;
info = edd_dev_get_info(edev);
if (!info)
return 0;
return info->params.num_default_heads > 0;
}
static int
edd_has_default_sectors_per_track(struct edd_device *edev)
{
struct edd_info *info;
if (!edev)
return 0;
info = edd_dev_get_info(edev);
if (!info)
return 0;
return info->params.sectors_per_track > 0;
}
static int
edd_has_edd30(struct edd_device *edev)
{
struct edd_info *info;
int i;
u8 csum = 0;
if (!edev)
return 0;
info = edd_dev_get_info(edev);
if (!info)
return 0;
if (!(info->params.key == 0xBEDD || info->params.key == 0xDDBE)) {
return 0;
}
/* We support only T13 spec */
if (info->params.device_path_info_length != 44)
return 0;
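/*
* The device path information occupies bytes 30..73 of the EDD
* parameters; a byte-wise checksum of that range must be zero.
*/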
for (i = 30; i < info->params.device_path_info_length + 30; i++)
csum += *(((u8 *)&info->params) + i);
if (csum)
return 0;
return 1;
}
static EDD_DEVICE_ATTR(raw_data, 0444, edd_show_raw_data, edd_has_edd_info);
static EDD_DEVICE_ATTR(version, 0444, edd_show_version, edd_has_edd_info);
static EDD_DEVICE_ATTR(extensions, 0444, edd_show_extensions, edd_has_edd_info);
static EDD_DEVICE_ATTR(info_flags, 0444, edd_show_info_flags, edd_has_edd_info);
static EDD_DEVICE_ATTR(sectors, 0444, edd_show_sectors, edd_has_edd_info);
static EDD_DEVICE_ATTR(legacy_max_cylinder, 0444,
edd_show_legacy_max_cylinder,
edd_has_legacy_max_cylinder);
static EDD_DEVICE_ATTR(legacy_max_head, 0444, edd_show_legacy_max_head,
edd_has_legacy_max_head);
static EDD_DEVICE_ATTR(legacy_sectors_per_track, 0444,
edd_show_legacy_sectors_per_track,
edd_has_legacy_sectors_per_track);
static EDD_DEVICE_ATTR(default_cylinders, 0444, edd_show_default_cylinders,
edd_has_default_cylinders);
static EDD_DEVICE_ATTR(default_heads, 0444, edd_show_default_heads,
edd_has_default_heads);
static EDD_DEVICE_ATTR(default_sectors_per_track, 0444,
edd_show_default_sectors_per_track,
edd_has_default_sectors_per_track);
static EDD_DEVICE_ATTR(interface, 0444, edd_show_interface, edd_has_edd30);
static EDD_DEVICE_ATTR(host_bus, 0444, edd_show_host_bus, edd_has_edd30);
static EDD_DEVICE_ATTR(mbr_signature, 0444, edd_show_mbr_signature, edd_has_mbr_signature);
/* These attributes are conditional and only added for some devices. */
static struct edd_attribute * edd_attrs[] = {
&edd_attr_raw_data,
&edd_attr_version,
&edd_attr_extensions,
&edd_attr_info_flags,
&edd_attr_sectors,
&edd_attr_legacy_max_cylinder,
&edd_attr_legacy_max_head,
&edd_attr_legacy_sectors_per_track,
&edd_attr_default_cylinders,
&edd_attr_default_heads,
&edd_attr_default_sectors_per_track,
&edd_attr_interface,
&edd_attr_host_bus,
&edd_attr_mbr_signature,
NULL,
};
/**
* edd_release - free edd structure
* @kobj: kobject of edd structure
*
* This is called when the refcount of the edd structure
* reaches 0. This should happen right after we unregister,
* but just in case, we use the release callback anyway.
*/
static void edd_release(struct kobject * kobj)
{
struct edd_device * dev = to_edd_device(kobj);
kfree(dev);
}
static const struct kobj_type edd_ktype = {
.release = edd_release,
.sysfs_ops = &edd_attr_ops,
};
static struct kset *edd_kset;
/**
* edd_dev_is_type() - is this EDD device a 'type' device?
* @edev: target edd_device
* @type: a host bus or interface identifier string per the EDD spec
*
* Returns 1 (TRUE) if it is a 'type' device, 0 otherwise.
*/
static int
edd_dev_is_type(struct edd_device *edev, const char *type)
{
struct edd_info *info;
if (!edev)
return 0;
info = edd_dev_get_info(edev);
if (type && info) {
if (!strncmp(info->params.host_bus_type, type, strlen(type)) ||
!strncmp(info->params.interface_type, type, strlen(type)))
return 1;
}
return 0;
}
/**
* edd_get_pci_dev() - finds pci_dev that matches edev
* @edev: edd_device
*
* Returns pci_dev if found, or NULL
*/
static struct pci_dev *
edd_get_pci_dev(struct edd_device *edev)
{
struct edd_info *info = edd_dev_get_info(edev);
if (edd_dev_is_type(edev, "PCI") || edd_dev_is_type(edev, "XPRS")) {
return pci_get_domain_bus_and_slot(0,
info->params.interface_path.pci.bus,
PCI_DEVFN(info->params.interface_path.pci.slot,
info->params.interface_path.pci.function));
}
return NULL;
}
static int
edd_create_symlink_to_pcidev(struct edd_device *edev)
{
struct pci_dev *pci_dev = edd_get_pci_dev(edev);
int ret;
if (!pci_dev)
return 1;
ret = sysfs_create_link(&edev->kobj,&pci_dev->dev.kobj,"pci_dev");
pci_dev_put(pci_dev);
return ret;
}
static inline void
edd_device_unregister(struct edd_device *edev)
{
kobject_put(&edev->kobj);
}
static void edd_populate_dir(struct edd_device * edev)
{
struct edd_attribute * attr;
int error = 0;
int i;
for (i = 0; (attr = edd_attrs[i]) && !error; i++) {
if (!attr->test || attr->test(edev))
error = sysfs_create_file(&edev->kobj,&attr->attr);
}
if (!error) {
edd_create_symlink_to_pcidev(edev);
}
}
static int
edd_device_register(struct edd_device *edev, int i)
{
int error;
if (!edev)
return 1;
edd_dev_set_info(edev, i);
edev->kobj.kset = edd_kset;
error = kobject_init_and_add(&edev->kobj, &edd_ktype, NULL,
"int13_dev%02x", 0x80 + i);
if (!error) {
edd_populate_dir(edev);
kobject_uevent(&edev->kobj, KOBJ_ADD);
}
return error;
}
static inline int edd_num_devices(void)
{
return max_t(unsigned char,
min_t(unsigned char, EDD_MBR_SIG_MAX, edd.mbr_signature_nr),
min_t(unsigned char, EDDMAXNR, edd.edd_info_nr));
}
/**
* edd_init() - creates sysfs tree of EDD data
*/
static int __init
edd_init(void)
{
int i;
int rc=0;
struct edd_device *edev;
if (!edd_num_devices())
return -ENODEV;
printk(KERN_INFO "BIOS EDD facility v%s %s, %d devices found\n",
EDD_VERSION, EDD_DATE, edd_num_devices());
edd_kset = kset_create_and_add("edd", NULL, firmware_kobj);
if (!edd_kset)
return -ENOMEM;
for (i = 0; i < edd_num_devices(); i++) {
edev = kzalloc(sizeof (*edev), GFP_KERNEL);
if (!edev) {
rc = -ENOMEM;
goto out;
}
rc = edd_device_register(edev, i);
if (rc) {
kfree(edev);
goto out;
}
edd_devices[i] = edev;
}
return 0;
out:
while (--i >= 0)
edd_device_unregister(edd_devices[i]);
kset_unregister(edd_kset);
return rc;
}
static void __exit
edd_exit(void)
{
int i;
struct edd_device *edev;
for (i = 0; i < edd_num_devices(); i++) {
if ((edev = edd_devices[i]))
edd_device_unregister(edev);
}
kset_unregister(edd_kset);
}
late_initcall(edd_init);
module_exit(edd_exit);
| linux-master | drivers/firmware/edd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <dt-bindings/firmware/imx/rsrc.h>
#include <linux/firmware/imx/sci.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
#include <linux/platform_device.h>
#include <linux/of.h>
static struct imx_sc_ipc *imx_sc_soc_ipc_handle;
struct imx_sc_msg_misc_get_soc_id {
struct imx_sc_rpc_msg hdr;
union {
struct {
u32 control;
u16 resource;
} __packed req;
struct {
u32 id;
} resp;
} data;
} __packed __aligned(4);
struct imx_sc_msg_misc_get_soc_uid {
struct imx_sc_rpc_msg hdr;
u32 uid_low;
u32 uid_high;
} __packed;
static int imx_scu_soc_uid(u64 *soc_uid)
{
struct imx_sc_msg_misc_get_soc_uid msg;
struct imx_sc_rpc_msg *hdr = &msg.hdr;
int ret;
hdr->ver = IMX_SC_RPC_VERSION;
hdr->svc = IMX_SC_RPC_SVC_MISC;
hdr->func = IMX_SC_MISC_FUNC_UNIQUE_ID;
hdr->size = 1;
ret = imx_scu_call_rpc(imx_sc_soc_ipc_handle, &msg, true);
if (ret) {
pr_err("%s: get soc uid failed, ret %d\n", __func__, ret);
return ret;
}
*soc_uid = msg.uid_high;
*soc_uid <<= 32;
*soc_uid |= msg.uid_low;
return 0;
}
static int imx_scu_soc_id(void)
{
struct imx_sc_msg_misc_get_soc_id msg;
struct imx_sc_rpc_msg *hdr = &msg.hdr;
int ret;
hdr->ver = IMX_SC_RPC_VERSION;
hdr->svc = IMX_SC_RPC_SVC_MISC;
hdr->func = IMX_SC_MISC_FUNC_GET_CONTROL;
hdr->size = 3;
msg.data.req.control = IMX_SC_C_ID;
msg.data.req.resource = IMX_SC_R_SYSTEM;
ret = imx_scu_call_rpc(imx_sc_soc_ipc_handle, &msg, true);
if (ret) {
pr_err("%s: get soc info failed, ret %d\n", __func__, ret);
return ret;
}
return msg.data.resp.id;
}
static const char *imx_scu_soc_name(u32 id)
{
switch (id) {
case 0x1:
return "i.MX8QM";
case 0x2:
return "i.MX8QXP";
case 0xe:
return "i.MX8DXL";
default:
break;
}
return "NULL";
}
int imx_scu_soc_init(struct device *dev)
{
struct soc_device_attribute *soc_dev_attr;
struct soc_device *soc_dev;
int id, ret;
u64 uid = 0;
u32 val;
ret = imx_scu_get_handle(&imx_sc_soc_ipc_handle);
if (ret)
return ret;
soc_dev_attr = devm_kzalloc(dev, sizeof(*soc_dev_attr),
GFP_KERNEL);
if (!soc_dev_attr)
return -ENOMEM;
soc_dev_attr->family = "Freescale i.MX";
ret = of_property_read_string(of_root,
"model",
&soc_dev_attr->machine);
if (ret)
return ret;
id = imx_scu_soc_id();
if (id < 0)
return -EINVAL;
ret = imx_scu_soc_uid(&uid);
if (ret < 0)
return -EINVAL;
/* format soc_id value passed from SCU firmware */
val = id & 0x1f;
soc_dev_attr->soc_id = imx_scu_soc_name(val);
/* format revision value passed from SCU firmware */
val = (id >> 5) & 0xf;
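/*
* Bits [3:2] of the raw revision field encode (major - 1) and bits
* [1:0] the minor revision; repack them below as a major.minor pair.
*/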
val = (((val >> 2) + 1) << 4) | (val & 0x3);
soc_dev_attr->revision = devm_kasprintf(dev, GFP_KERNEL, "%d.%d",
(val >> 4) & 0xf, val & 0xf);
if (!soc_dev_attr->revision)
return -ENOMEM;
soc_dev_attr->serial_number = devm_kasprintf(dev, GFP_KERNEL,
"%016llX", uid);
if (!soc_dev_attr->serial_number)
return -ENOMEM;
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev))
return PTR_ERR(soc_dev);
return 0;
}
| linux-master | drivers/firmware/imx/imx-scu-soc.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2020 NXP
*
* File containing client-side RPC functions for the RM service. These
* function are ported to clients that communicate to the SC.
*/
#include <linux/firmware/imx/svc/rm.h>
struct imx_sc_msg_rm_rsrc_owned {
struct imx_sc_rpc_msg hdr;
u16 resource;
} __packed __aligned(4);
/*
* This function checks whether @resource is owned by the current partition
*
* @param[in] ipc IPC handle
* @param[in] resource resource the control is associated with
*
* @return Returns 0 for not owned and 1 for owned.
*/
bool imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource)
{
struct imx_sc_msg_rm_rsrc_owned msg;
struct imx_sc_rpc_msg *hdr = &msg.hdr;
hdr->ver = IMX_SC_RPC_VERSION;
hdr->svc = IMX_SC_RPC_SVC_RM;
hdr->func = IMX_SC_RM_FUNC_IS_RESOURCE_OWNED;
hdr->size = 2;
msg.resource = resource;
/*
* The SCU firmware only returns 0 or 1 for the resource-owned check,
* meaning not owned or owned respectively, so the call itself always
* succeeds.
*/
imx_scu_call_rpc(ipc, &msg, true);
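/*
* The RPC response is written back over the request, so the 0/1
* ownership answer ends up in the header's func field.
*/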
return hdr->func;
}
EXPORT_SYMBOL(imx_sc_rm_is_resource_owned);
struct imx_sc_msg_rm_get_resource_owner {
struct imx_sc_rpc_msg hdr;
union {
struct {
u16 resource;
} req;
struct {
u8 val;
} resp;
} data;
} __packed __aligned(4);
/*
* This function gets the partition number that owns @resource
*
* @param[in] ipc IPC handle
* @param[in] resource resource the control is associated with
* @param[out] pt pointer to return the partition number
*
* @return Returns 0 for success and < 0 for errors.
*/
int imx_sc_rm_get_resource_owner(struct imx_sc_ipc *ipc, u16 resource, u8 *pt)
{
struct imx_sc_msg_rm_get_resource_owner msg;
struct imx_sc_rpc_msg *hdr = &msg.hdr;
int ret;
hdr->ver = IMX_SC_RPC_VERSION;
hdr->svc = IMX_SC_RPC_SVC_RM;
hdr->func = IMX_SC_RM_FUNC_GET_RESOURCE_OWNER;
hdr->size = 2;
msg.data.req.resource = resource;
ret = imx_scu_call_rpc(ipc, &msg, true);
if (ret)
return ret;
if (pt)
*pt = msg.data.resp.val;
return 0;
}
EXPORT_SYMBOL(imx_sc_rm_get_resource_owner);
| linux-master | drivers/firmware/imx/rm.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2019 NXP
* Author: Daniel Baluta <[email protected]>
*
* Implementation of the DSP IPC interface (host side)
*/
#include <linux/firmware/imx/dsp.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
/*
* imx_dsp_ring_doorbell - triggers an interrupt on the other side (DSP)
*
* @ipc: DSP IPC handle
* @idx: index of the channel on which to trigger the interrupt
*
* Returns non-negative value for success, negative value for error
*/
int imx_dsp_ring_doorbell(struct imx_dsp_ipc *ipc, unsigned int idx)
{
int ret;
struct imx_dsp_chan *dsp_chan;
if (idx >= DSP_MU_CHAN_NUM)
return -EINVAL;
dsp_chan = &ipc->chans[idx];
ret = mbox_send_message(dsp_chan->ch, NULL);
if (ret < 0)
return ret;
return 0;
}
EXPORT_SYMBOL(imx_dsp_ring_doorbell);
/*
* imx_dsp_handle_rx - rx callback used by imx mailbox
*
* @c: mbox client
* @msg: message received
*
* Users of DSP IPC will need to provide handle_reply and handle_request
* callbacks.
*/
static void imx_dsp_handle_rx(struct mbox_client *c, void *msg)
{
struct imx_dsp_chan *chan = container_of(c, struct imx_dsp_chan, cl);
if (chan->idx == 0) {
chan->ipc->ops->handle_reply(chan->ipc);
} else {
chan->ipc->ops->handle_request(chan->ipc);
imx_dsp_ring_doorbell(chan->ipc, 1);
}
}
struct mbox_chan *imx_dsp_request_channel(struct imx_dsp_ipc *dsp_ipc, int idx)
{
struct imx_dsp_chan *dsp_chan;
if (idx >= DSP_MU_CHAN_NUM)
return ERR_PTR(-EINVAL);
dsp_chan = &dsp_ipc->chans[idx];
dsp_chan->ch = mbox_request_channel_byname(&dsp_chan->cl, dsp_chan->name);
return dsp_chan->ch;
}
EXPORT_SYMBOL(imx_dsp_request_channel);
void imx_dsp_free_channel(struct imx_dsp_ipc *dsp_ipc, int idx)
{
struct imx_dsp_chan *dsp_chan;
if (idx >= DSP_MU_CHAN_NUM)
return;
dsp_chan = &dsp_ipc->chans[idx];
mbox_free_channel(dsp_chan->ch);
}
EXPORT_SYMBOL(imx_dsp_free_channel);
static int imx_dsp_setup_channels(struct imx_dsp_ipc *dsp_ipc)
{
struct device *dev = dsp_ipc->dev;
struct imx_dsp_chan *dsp_chan;
struct mbox_client *cl;
char *chan_name;
int ret;
int i, j;
for (i = 0; i < DSP_MU_CHAN_NUM; i++) {
if (i < 2)
chan_name = kasprintf(GFP_KERNEL, "txdb%d", i);
else
chan_name = kasprintf(GFP_KERNEL, "rxdb%d", i - 2);
if (!chan_name)
return -ENOMEM;
dsp_chan = &dsp_ipc->chans[i];
dsp_chan->name = chan_name;
cl = &dsp_chan->cl;
cl->dev = dev;
cl->tx_block = false;
cl->knows_txdone = true;
cl->rx_callback = imx_dsp_handle_rx;
dsp_chan->ipc = dsp_ipc;
dsp_chan->idx = i % 2;
dsp_chan->ch = mbox_request_channel_byname(cl, chan_name);
if (IS_ERR(dsp_chan->ch)) {
ret = PTR_ERR(dsp_chan->ch);
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to request mbox chan %s ret %d\n",
chan_name, ret);
goto out;
}
dev_dbg(dev, "request mbox chan %s\n", chan_name);
}
return 0;
out:
for (j = 0; j < i; j++) {
dsp_chan = &dsp_ipc->chans[j];
mbox_free_channel(dsp_chan->ch);
kfree(dsp_chan->name);
}
return ret;
}
static int imx_dsp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct imx_dsp_ipc *dsp_ipc;
int ret;
device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
dsp_ipc = devm_kzalloc(dev, sizeof(*dsp_ipc), GFP_KERNEL);
if (!dsp_ipc)
return -ENOMEM;
dsp_ipc->dev = dev;
dev_set_drvdata(dev, dsp_ipc);
ret = imx_dsp_setup_channels(dsp_ipc);
if (ret < 0)
return ret;
dev_info(dev, "NXP i.MX DSP IPC initialized\n");
return 0;
}
static int imx_dsp_remove(struct platform_device *pdev)
{
struct imx_dsp_chan *dsp_chan;
struct imx_dsp_ipc *dsp_ipc;
int i;
dsp_ipc = dev_get_drvdata(&pdev->dev);
for (i = 0; i < DSP_MU_CHAN_NUM; i++) {
dsp_chan = &dsp_ipc->chans[i];
mbox_free_channel(dsp_chan->ch);
kfree(dsp_chan->name);
}
return 0;
}
static struct platform_driver imx_dsp_driver = {
.driver = {
.name = "imx-dsp",
},
.probe = imx_dsp_probe,
.remove = imx_dsp_remove,
};
builtin_platform_driver(imx_dsp_driver);
MODULE_AUTHOR("Daniel Baluta <[email protected]>");
MODULE_DESCRIPTION("IMX DSP IPC protocol driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/firmware/imx/imx-dsp.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2019,2023 NXP
*
* Implementation of the SCU IRQ functions using MU.
*
*/
#include <dt-bindings/firmware/imx/rsrc.h>
#include <linux/firmware/imx/ipc.h>
#include <linux/firmware/imx/sci.h>
#include <linux/kobject.h>
#include <linux/mailbox_client.h>
#include <linux/of.h>
#include <linux/suspend.h>
#include <linux/sysfs.h>
#define IMX_SC_IRQ_FUNC_ENABLE 1
#define IMX_SC_IRQ_FUNC_STATUS 2
#define IMX_SC_IRQ_NUM_GROUP 9
static u32 mu_resource_id;
struct imx_sc_msg_irq_get_status {
struct imx_sc_rpc_msg hdr;
union {
struct {
u16 resource;
u8 group;
u8 reserved;
} __packed req;
struct {
u32 status;
} resp;
} data;
};
struct imx_sc_msg_irq_enable {
struct imx_sc_rpc_msg hdr;
u32 mask;
u16 resource;
u8 group;
u8 enable;
} __packed;
struct scu_wakeup {
u32 mask;
u32 wakeup_src;
bool valid;
};
/* Sysfs functions */
static struct kobject *wakeup_obj;
static ssize_t wakeup_source_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf);
static struct kobj_attribute wakeup_source_attr =
__ATTR(wakeup_src, 0660, wakeup_source_show, NULL);
static struct scu_wakeup scu_irq_wakeup[IMX_SC_IRQ_NUM_GROUP];
static struct imx_sc_ipc *imx_sc_irq_ipc_handle;
static struct work_struct imx_sc_irq_work;
static BLOCKING_NOTIFIER_HEAD(imx_scu_irq_notifier_chain);
int imx_scu_irq_register_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(
&imx_scu_irq_notifier_chain, nb);
}
EXPORT_SYMBOL(imx_scu_irq_register_notifier);
int imx_scu_irq_unregister_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(
&imx_scu_irq_notifier_chain, nb);
}
EXPORT_SYMBOL(imx_scu_irq_unregister_notifier);
static int imx_scu_irq_notifier_call_chain(unsigned long status, u8 *group)
{
return blocking_notifier_call_chain(&imx_scu_irq_notifier_chain,
status, (void *)group);
}
static void imx_scu_irq_work_handler(struct work_struct *work)
{
u32 irq_status;
int ret;
u8 i;
for (i = 0; i < IMX_SC_IRQ_NUM_GROUP; i++) {
if (scu_irq_wakeup[i].mask) {
scu_irq_wakeup[i].valid = false;
scu_irq_wakeup[i].wakeup_src = 0;
}
ret = imx_scu_irq_get_status(i, &irq_status);
if (ret) {
pr_err("get irq group %d status failed, ret %d\n",
i, ret);
return;
}
if (!irq_status)
continue;
if (scu_irq_wakeup[i].mask & irq_status) {
scu_irq_wakeup[i].valid = true;
scu_irq_wakeup[i].wakeup_src = irq_status & scu_irq_wakeup[i].mask;
} else {
scu_irq_wakeup[i].wakeup_src = irq_status;
}
pm_system_wakeup();
imx_scu_irq_notifier_call_chain(irq_status, &i);
}
}
int imx_scu_irq_get_status(u8 group, u32 *irq_status)
{
struct imx_sc_msg_irq_get_status msg;
struct imx_sc_rpc_msg *hdr = &msg.hdr;
int ret;
hdr->ver = IMX_SC_RPC_VERSION;
hdr->svc = IMX_SC_RPC_SVC_IRQ;
hdr->func = IMX_SC_IRQ_FUNC_STATUS;
hdr->size = 2;
msg.data.req.resource = mu_resource_id;
msg.data.req.group = group;
ret = imx_scu_call_rpc(imx_sc_irq_ipc_handle, &msg, true);
if (ret)
return ret;
if (irq_status)
*irq_status = msg.data.resp.status;
return 0;
}
EXPORT_SYMBOL(imx_scu_irq_get_status);
int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable)
{
struct imx_sc_msg_irq_enable msg;
struct imx_sc_rpc_msg *hdr = &msg.hdr;
int ret;
if (!imx_sc_irq_ipc_handle)
return -EPROBE_DEFER;
hdr->ver = IMX_SC_RPC_VERSION;
hdr->svc = IMX_SC_RPC_SVC_IRQ;
hdr->func = IMX_SC_IRQ_FUNC_ENABLE;
hdr->size = 3;
msg.resource = mu_resource_id;
msg.group = group;
msg.mask = mask;
msg.enable = enable;
ret = imx_scu_call_rpc(imx_sc_irq_ipc_handle, &msg, true);
if (ret)
pr_err("enable irq failed, group %d, mask %d, ret %d\n",
group, mask, ret);
if (enable)
scu_irq_wakeup[group].mask |= mask;
else
scu_irq_wakeup[group].mask &= ~mask;
return ret;
}
EXPORT_SYMBOL(imx_scu_irq_group_enable);
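/*
 * Illustrative sketch (not part of the original file): a consumer typically
 * registers a notifier and then unmasks the IRQ group/bit it cares about so
 * the work handler above can deliver the event. MY_WAKE_GROUP and MY_IRQ_MASK
 * are placeholders; real values come from the SCU firmware documentation.
 *
 *	static int my_scu_irq_notify(struct notifier_block *nb,
 *				     unsigned long status, void *group)
 *	{
 *		// status is the IRQ status word, *(u8 *)group the group number
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_nb = { .notifier_call = my_scu_irq_notify };
 *
 *	imx_scu_irq_register_notifier(&my_nb);
 *	imx_scu_irq_group_enable(MY_WAKE_GROUP, MY_IRQ_MASK, true);
 */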
static void imx_scu_irq_callback(struct mbox_client *c, void *msg)
{
schedule_work(&imx_sc_irq_work);
}
static ssize_t wakeup_source_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
int i;
for (i = 0; i < IMX_SC_IRQ_NUM_GROUP; i++) {
if (!scu_irq_wakeup[i].wakeup_src)
continue;
if (scu_irq_wakeup[i].valid)
sprintf(buf, "Wakeup source group = %d, irq = 0x%x\n",
i, scu_irq_wakeup[i].wakeup_src);
else
sprintf(buf, "Spurious SCU wakeup, group = %d, irq = 0x%x\n",
i, scu_irq_wakeup[i].wakeup_src);
}
return strlen(buf);
}
int imx_scu_enable_general_irq_channel(struct device *dev)
{
struct of_phandle_args spec;
struct mbox_client *cl;
struct mbox_chan *ch;
int ret = 0, i = 0;
ret = imx_scu_get_handle(&imx_sc_irq_ipc_handle);
if (ret)
return ret;
cl = devm_kzalloc(dev, sizeof(*cl), GFP_KERNEL);
if (!cl)
return -ENOMEM;
cl->dev = dev;
cl->rx_callback = imx_scu_irq_callback;
/* SCU general IRQ uses general interrupt channel 3 */
ch = mbox_request_channel_byname(cl, "gip3");
if (IS_ERR(ch)) {
ret = PTR_ERR(ch);
dev_err(dev, "failed to request mbox chan gip3, ret %d\n", ret);
devm_kfree(dev, cl);
return ret;
}
INIT_WORK(&imx_sc_irq_work, imx_scu_irq_work_handler);
if (!of_parse_phandle_with_args(dev->of_node, "mboxes",
"#mbox-cells", 0, &spec))
i = of_alias_get_id(spec.np, "mu");
/* use mu1 as general mu irq channel if failed */
if (i < 0)
i = 1;
mu_resource_id = IMX_SC_R_MU_0A + i;
/* Create directory under /sysfs/firmware */
wakeup_obj = kobject_create_and_add("scu_wakeup_source", firmware_kobj);
if (!wakeup_obj) {
ret = -ENOMEM;
goto free_ch;
}
ret = sysfs_create_file(wakeup_obj, &wakeup_source_attr.attr);
if (ret) {
dev_err(dev, "Cannot create wakeup source src file......\n");
kobject_put(wakeup_obj);
goto free_ch;
}
return 0;
free_ch:
mbox_free_channel(ch);
return ret;
}
EXPORT_SYMBOL(imx_scu_enable_general_irq_channel);
| linux-master | drivers/firmware/imx/imx-scu-irq.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
* Copyright 2017~2018 NXP
* Author: Dong Aisheng <[email protected]>
*
* File containing client-side RPC functions for the MISC service. These
* functions are ported to clients that communicate with the SC.
*
*/
#include <linux/firmware/imx/svc/misc.h>
struct imx_sc_msg_req_misc_set_ctrl {
struct imx_sc_rpc_msg hdr;
u32 ctrl;
u32 val;
u16 resource;
} __packed __aligned(4);
struct imx_sc_msg_req_cpu_start {
struct imx_sc_rpc_msg hdr;
u32 address_hi;
u32 address_lo;
u16 resource;
u8 enable;
} __packed __aligned(4);
struct imx_sc_msg_req_misc_get_ctrl {
struct imx_sc_rpc_msg hdr;
u32 ctrl;
u16 resource;
} __packed __aligned(4);
struct imx_sc_msg_resp_misc_get_ctrl {
struct imx_sc_rpc_msg hdr;
u32 val;
} __packed __aligned(4);
/*
* This function sets a miscellaneous control value.
*
* @param[in] ipc IPC handle
* @param[in] resource resource the control is associated with
* @param[in] ctrl control to change
* @param[in] val value to apply to the control
*
* @return Returns 0 for success and < 0 for errors.
*/
int imx_sc_misc_set_control(struct imx_sc_ipc *ipc, u32 resource,
u8 ctrl, u32 val)
{
struct imx_sc_msg_req_misc_set_ctrl msg;
struct imx_sc_rpc_msg *hdr = &msg.hdr;
hdr->ver = IMX_SC_RPC_VERSION;
hdr->svc = (uint8_t)IMX_SC_RPC_SVC_MISC;
hdr->func = (uint8_t)IMX_SC_MISC_FUNC_SET_CONTROL;
hdr->size = 4;
msg.ctrl = ctrl;
msg.val = val;
msg.resource = resource;
return imx_scu_call_rpc(ipc, &msg, true);
}
EXPORT_SYMBOL(imx_sc_misc_set_control);
/*
* This function gets a miscellaneous control value.
*
* @param[in] ipc IPC handle
* @param[in] resource resource the control is associated with
* @param[in] ctrl control to get
* @param[out] val pointer to return the control value
*
* @return Returns 0 for success and < 0 for errors.
*/
int imx_sc_misc_get_control(struct imx_sc_ipc *ipc, u32 resource,
u8 ctrl, u32 *val)
{
struct imx_sc_msg_req_misc_get_ctrl msg;
struct imx_sc_msg_resp_misc_get_ctrl *resp;
struct imx_sc_rpc_msg *hdr = &msg.hdr;
int ret;
hdr->ver = IMX_SC_RPC_VERSION;
hdr->svc = (uint8_t)IMX_SC_RPC_SVC_MISC;
hdr->func = (uint8_t)IMX_SC_MISC_FUNC_GET_CONTROL;
hdr->size = 3;
msg.ctrl = ctrl;
msg.resource = resource;
ret = imx_scu_call_rpc(ipc, &msg, true);
if (ret)
return ret;
resp = (struct imx_sc_msg_resp_misc_get_ctrl *)&msg;
if (val != NULL)
*val = resp->val;
return 0;
}
EXPORT_SYMBOL(imx_sc_misc_get_control);
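/*
 * Illustrative sketch (not part of the original file): reading a control back
 * after setting it. MY_RESOURCE and MY_CTRL are placeholders for the
 * IMX_SC_R_ and IMX_SC_C_ prefixed IDs a real caller would use.
 *
 *	u32 val;
 *	int ret;
 *
 *	ret = imx_sc_misc_set_control(ipc, MY_RESOURCE, MY_CTRL, 1);
 *	if (!ret)
 *		ret = imx_sc_misc_get_control(ipc, MY_RESOURCE, MY_CTRL, &val);
 */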
/*
* This function starts/stops a CPU identified by @resource
*
* @param[in] ipc IPC handle
* @param[in] resource resource the control is associated with
* @param[in] enable true for start, false for stop
* @param[in] phys_addr initial instruction address to be executed
*
* @return Returns 0 for success and < 0 for errors.
*/
int imx_sc_pm_cpu_start(struct imx_sc_ipc *ipc, u32 resource,
bool enable, u64 phys_addr)
{
struct imx_sc_msg_req_cpu_start msg;
struct imx_sc_rpc_msg *hdr = &msg.hdr;
hdr->ver = IMX_SC_RPC_VERSION;
hdr->svc = IMX_SC_RPC_SVC_PM;
hdr->func = IMX_SC_PM_FUNC_CPU_START;
hdr->size = 4;
msg.address_hi = phys_addr >> 32;
msg.address_lo = phys_addr;
msg.resource = resource;
msg.enable = enable;
return imx_scu_call_rpc(ipc, &msg, true);
}
EXPORT_SYMBOL(imx_sc_pm_cpu_start);
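/*
 * Illustrative sketch (not part of the original file): kicking an auxiliary
 * core and stopping it again. The resource ID and boot address are
 * placeholders only.
 *
 *	ret = imx_sc_pm_cpu_start(ipc, MY_CPU_RESOURCE, true, MY_BOOT_ADDR);
 *	...
 *	ret = imx_sc_pm_cpu_start(ipc, MY_CPU_RESOURCE, false, 0);
 */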
| linux-master | drivers/firmware/imx/misc.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2018 NXP
* Author: Dong Aisheng <[email protected]>
*
* Implementation of the SCU IPC functions using MUs (client side).
*
*/
#include <linux/err.h>
#include <linux/firmware/imx/ipc.h>
#include <linux/firmware/imx/sci.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#define SCU_MU_CHAN_NUM 8
#define MAX_RX_TIMEOUT (msecs_to_jiffies(3000))
struct imx_sc_chan {
struct imx_sc_ipc *sc_ipc;
struct mbox_client cl;
struct mbox_chan *ch;
int idx;
struct completion tx_done;
};
struct imx_sc_ipc {
/* SCU uses 4 Tx and 4 Rx channels */
struct imx_sc_chan chans[SCU_MU_CHAN_NUM];
struct device *dev;
struct mutex lock;
struct completion done;
bool fast_ipc;
/* temporarily store the SCU msg */
u32 *msg;
u8 rx_size;
u8 count;
};
/*
* This type is used to indicate error response for most functions.
*/
enum imx_sc_error_codes {
IMX_SC_ERR_NONE = 0, /* Success */
IMX_SC_ERR_VERSION = 1, /* Incompatible API version */
IMX_SC_ERR_CONFIG = 2, /* Configuration error */
IMX_SC_ERR_PARM = 3, /* Bad parameter */
IMX_SC_ERR_NOACCESS = 4, /* Permission error (no access) */
IMX_SC_ERR_LOCKED = 5, /* Permission error (locked) */
IMX_SC_ERR_UNAVAILABLE = 6, /* Unavailable (out of resources) */
IMX_SC_ERR_NOTFOUND = 7, /* Not found */
IMX_SC_ERR_NOPOWER = 8, /* No power */
IMX_SC_ERR_IPC = 9, /* Generic IPC error */
IMX_SC_ERR_BUSY = 10, /* Resource is currently busy/active */
IMX_SC_ERR_FAIL = 11, /* General I/O failure */
IMX_SC_ERR_LAST
};
static int imx_sc_linux_errmap[IMX_SC_ERR_LAST] = {
0, /* IMX_SC_ERR_NONE */
-EINVAL, /* IMX_SC_ERR_VERSION */
-EINVAL, /* IMX_SC_ERR_CONFIG */
-EINVAL, /* IMX_SC_ERR_PARM */
-EACCES, /* IMX_SC_ERR_NOACCESS */
-EACCES, /* IMX_SC_ERR_LOCKED */
-ERANGE, /* IMX_SC_ERR_UNAVAILABLE */
-EEXIST, /* IMX_SC_ERR_NOTFOUND */
-EPERM, /* IMX_SC_ERR_NOPOWER */
-EPIPE, /* IMX_SC_ERR_IPC */
-EBUSY, /* IMX_SC_ERR_BUSY */
-EIO, /* IMX_SC_ERR_FAIL */
};
static struct imx_sc_ipc *imx_sc_ipc_handle;
static inline int imx_sc_to_linux_errno(int errno)
{
if (errno >= IMX_SC_ERR_NONE && errno < IMX_SC_ERR_LAST)
return imx_sc_linux_errmap[errno];
return -EIO;
}
/*
* Get the default handle used by SCU
*/
int imx_scu_get_handle(struct imx_sc_ipc **ipc)
{
if (!imx_sc_ipc_handle)
return -EPROBE_DEFER;
*ipc = imx_sc_ipc_handle;
return 0;
}
EXPORT_SYMBOL(imx_scu_get_handle);
/* Callback called when a word of a message is acked, i.e. read by the SCU */
static void imx_scu_tx_done(struct mbox_client *cl, void *mssg, int r)
{
struct imx_sc_chan *sc_chan = container_of(cl, struct imx_sc_chan, cl);
complete(&sc_chan->tx_done);
}
static void imx_scu_rx_callback(struct mbox_client *c, void *msg)
{
struct imx_sc_chan *sc_chan = container_of(c, struct imx_sc_chan, cl);
struct imx_sc_ipc *sc_ipc = sc_chan->sc_ipc;
struct imx_sc_rpc_msg *hdr;
u32 *data = msg;
int i;
if (!sc_ipc->msg) {
dev_warn(sc_ipc->dev, "unexpected rx idx %d 0x%08x, ignore!\n",
sc_chan->idx, *data);
return;
}
if (sc_ipc->fast_ipc) {
hdr = msg;
sc_ipc->rx_size = hdr->size;
sc_ipc->msg[0] = *data++;
for (i = 1; i < sc_ipc->rx_size; i++)
sc_ipc->msg[i] = *data++;
complete(&sc_ipc->done);
return;
}
if (sc_chan->idx == 0) {
hdr = msg;
sc_ipc->rx_size = hdr->size;
dev_dbg(sc_ipc->dev, "msg rx size %u\n", sc_ipc->rx_size);
if (sc_ipc->rx_size > 4)
dev_warn(sc_ipc->dev, "RPC does not support receiving over 4 words: %u\n",
sc_ipc->rx_size);
}
sc_ipc->msg[sc_chan->idx] = *data;
sc_ipc->count++;
dev_dbg(sc_ipc->dev, "mu %u msg %u 0x%x\n", sc_chan->idx,
sc_ipc->count, *data);
if ((sc_ipc->rx_size != 0) && (sc_ipc->count == sc_ipc->rx_size))
complete(&sc_ipc->done);
}
static int imx_scu_ipc_write(struct imx_sc_ipc *sc_ipc, void *msg)
{
struct imx_sc_rpc_msg hdr = *(struct imx_sc_rpc_msg *)msg;
struct imx_sc_chan *sc_chan;
u32 *data = msg;
int ret;
int size;
int i;
/* Check size */
if (hdr.size > IMX_SC_RPC_MAX_MSG)
return -EINVAL;
dev_dbg(sc_ipc->dev, "RPC SVC %u FUNC %u SIZE %u\n", hdr.svc,
hdr.func, hdr.size);
size = sc_ipc->fast_ipc ? 1 : hdr.size;
for (i = 0; i < size; i++) {
sc_chan = &sc_ipc->chans[i % 4];
/*
* The SCU requires that all message words are written
* sequentially, but the Linux MU driver implements an
* independent channel for each register, so ordering between
* the different channels must be ensured by the SCU API interface.
*
* Wait for tx_done before every send to ensure that no
* queueing happens at the mailbox channel level.
*/
if (!sc_ipc->fast_ipc) {
wait_for_completion(&sc_chan->tx_done);
reinit_completion(&sc_chan->tx_done);
}
ret = mbox_send_message(sc_chan->ch, &data[i]);
if (ret < 0)
return ret;
}
return 0;
}
/*
* RPC command/response
*/
int imx_scu_call_rpc(struct imx_sc_ipc *sc_ipc, void *msg, bool have_resp)
{
uint8_t saved_svc, saved_func;
struct imx_sc_rpc_msg *hdr;
int ret;
if (WARN_ON(!sc_ipc || !msg))
return -EINVAL;
mutex_lock(&sc_ipc->lock);
reinit_completion(&sc_ipc->done);
if (have_resp) {
sc_ipc->msg = msg;
saved_svc = ((struct imx_sc_rpc_msg *)msg)->svc;
saved_func = ((struct imx_sc_rpc_msg *)msg)->func;
}
sc_ipc->count = 0;
ret = imx_scu_ipc_write(sc_ipc, msg);
if (ret < 0) {
dev_err(sc_ipc->dev, "RPC send msg failed: %d\n", ret);
goto out;
}
if (have_resp) {
if (!wait_for_completion_timeout(&sc_ipc->done,
MAX_RX_TIMEOUT)) {
dev_err(sc_ipc->dev, "RPC send msg timeout\n");
mutex_unlock(&sc_ipc->lock);
return -ETIMEDOUT;
}
/* response status is stored in hdr->func field */
hdr = msg;
ret = hdr->func;
/*
* Some special SCU firmware APIs do NOT have return value
* in hdr->func, but they do have response data, those special
* APIs are defined as void function in SCU firmware, so they
* should be treated as return success always.
*/
if ((saved_svc == IMX_SC_RPC_SVC_MISC) &&
(saved_func == IMX_SC_MISC_FUNC_UNIQUE_ID ||
saved_func == IMX_SC_MISC_FUNC_GET_BUTTON_STATUS))
ret = 0;
}
out:
sc_ipc->msg = NULL;
mutex_unlock(&sc_ipc->lock);
dev_dbg(sc_ipc->dev, "RPC SVC done\n");
return imx_sc_to_linux_errno(ret);
}
EXPORT_SYMBOL(imx_scu_call_rpc);
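/*
 * Illustrative sketch (not part of the original file): every request passed
 * to imx_scu_call_rpc() starts with struct imx_sc_rpc_msg, and hdr.size is
 * the total number of 32-bit words including the header itself (see the
 * service wrappers elsewhere in this directory). MY_FUNC_ID is a placeholder.
 *
 *	struct my_sc_msg {
 *		struct imx_sc_rpc_msg hdr;
 *		u32 arg;
 *	} __packed __aligned(4) msg;
 *	struct imx_sc_ipc *ipc;
 *	int ret;
 *
 *	ret = imx_scu_get_handle(&ipc);
 *	if (ret)
 *		return ret;
 *	msg.hdr.ver = IMX_SC_RPC_VERSION;
 *	msg.hdr.svc = IMX_SC_RPC_SVC_MISC;	// example service
 *	msg.hdr.func = MY_FUNC_ID;		// placeholder function ID
 *	msg.hdr.size = 2;			// header word + one argument word
 *	msg.arg = 0;
 *	ret = imx_scu_call_rpc(ipc, &msg, true);
 */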
static int imx_scu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct imx_sc_ipc *sc_ipc;
struct imx_sc_chan *sc_chan;
struct mbox_client *cl;
char *chan_name;
struct of_phandle_args args;
int num_channel;
int ret;
int i;
sc_ipc = devm_kzalloc(dev, sizeof(*sc_ipc), GFP_KERNEL);
if (!sc_ipc)
return -ENOMEM;
ret = of_parse_phandle_with_args(pdev->dev.of_node, "mboxes",
"#mbox-cells", 0, &args);
if (ret)
return ret;
sc_ipc->fast_ipc = of_device_is_compatible(args.np, "fsl,imx8-mu-scu");
num_channel = sc_ipc->fast_ipc ? 2 : SCU_MU_CHAN_NUM;
for (i = 0; i < num_channel; i++) {
if (i < num_channel / 2)
chan_name = kasprintf(GFP_KERNEL, "tx%d", i);
else
chan_name = kasprintf(GFP_KERNEL, "rx%d",
i - num_channel / 2);
if (!chan_name)
return -ENOMEM;
sc_chan = &sc_ipc->chans[i];
cl = &sc_chan->cl;
cl->dev = dev;
cl->tx_block = false;
cl->knows_txdone = true;
cl->rx_callback = imx_scu_rx_callback;
if (!sc_ipc->fast_ipc) {
/* Initialize the tx_done completion as "done" */
cl->tx_done = imx_scu_tx_done;
init_completion(&sc_chan->tx_done);
complete(&sc_chan->tx_done);
}
sc_chan->sc_ipc = sc_ipc;
sc_chan->idx = i % (num_channel / 2);
sc_chan->ch = mbox_request_channel_byname(cl, chan_name);
if (IS_ERR(sc_chan->ch)) {
ret = PTR_ERR(sc_chan->ch);
dev_err_probe(dev, ret, "Failed to request mbox chan %s\n",
chan_name);
kfree(chan_name);
return ret;
}
dev_dbg(dev, "request mbox chan %s\n", chan_name);
/* chan_name is not used anymore by framework */
kfree(chan_name);
}
sc_ipc->dev = dev;
mutex_init(&sc_ipc->lock);
init_completion(&sc_ipc->done);
imx_sc_ipc_handle = sc_ipc;
ret = imx_scu_soc_init(dev);
if (ret)
dev_warn(dev, "failed to initialize SoC info: %d\n", ret);
ret = imx_scu_enable_general_irq_channel(dev);
if (ret)
dev_warn(dev,
"failed to enable general irq channel: %d\n", ret);
dev_info(dev, "NXP i.MX SCU Initialized\n");
return devm_of_platform_populate(dev);
}
static const struct of_device_id imx_scu_match[] = {
{ .compatible = "fsl,imx-scu", },
{ /* Sentinel */ }
};
static struct platform_driver imx_scu_driver = {
.driver = {
.name = "imx-scu",
.of_match_table = imx_scu_match,
},
.probe = imx_scu_probe,
};
static int __init imx_scu_driver_init(void)
{
return platform_driver_register(&imx_scu_driver);
}
subsys_initcall_sync(imx_scu_driver_init);
MODULE_AUTHOR("Dong Aisheng <[email protected]>");
MODULE_DESCRIPTION("IMX SCU firmware protocol driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/firmware/imx/imx-scu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021 ARM Ltd.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/arm_ffa.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "common.h"
static DEFINE_IDA(ffa_bus_id);
static int ffa_device_match(struct device *dev, struct device_driver *drv)
{
const struct ffa_device_id *id_table;
struct ffa_device *ffa_dev;
id_table = to_ffa_driver(drv)->id_table;
ffa_dev = to_ffa_dev(dev);
while (!uuid_is_null(&id_table->uuid)) {
/*
* FF-A v1.0 doesn't provide discovery of UUIDs, just the
* partition IDs, so fetch the partitions IDs for this
* id_table UUID and assign the UUID to the device if the
* partition ID matches
*/
if (uuid_is_null(&ffa_dev->uuid))
ffa_device_match_uuid(ffa_dev, &id_table->uuid);
if (uuid_equal(&ffa_dev->uuid, &id_table->uuid))
return 1;
id_table++;
}
return 0;
}
static int ffa_device_probe(struct device *dev)
{
struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver);
struct ffa_device *ffa_dev = to_ffa_dev(dev);
return ffa_drv->probe(ffa_dev);
}
static void ffa_device_remove(struct device *dev)
{
struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver);
if (ffa_drv->remove)
ffa_drv->remove(to_ffa_dev(dev));
}
static int ffa_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct ffa_device *ffa_dev = to_ffa_dev(dev);
return add_uevent_var(env, "MODALIAS=arm_ffa:%04x:%pUb",
ffa_dev->vm_id, &ffa_dev->uuid);
}
static ssize_t partition_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ffa_device *ffa_dev = to_ffa_dev(dev);
return sprintf(buf, "0x%04x\n", ffa_dev->vm_id);
}
static DEVICE_ATTR_RO(partition_id);
static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct ffa_device *ffa_dev = to_ffa_dev(dev);
return sprintf(buf, "%pUb\n", &ffa_dev->uuid);
}
static DEVICE_ATTR_RO(uuid);
static struct attribute *ffa_device_attributes_attrs[] = {
&dev_attr_partition_id.attr,
&dev_attr_uuid.attr,
NULL,
};
ATTRIBUTE_GROUPS(ffa_device_attributes);
struct bus_type ffa_bus_type = {
.name = "arm_ffa",
.match = ffa_device_match,
.probe = ffa_device_probe,
.remove = ffa_device_remove,
.uevent = ffa_device_uevent,
.dev_groups = ffa_device_attributes_groups,
};
EXPORT_SYMBOL_GPL(ffa_bus_type);
int ffa_driver_register(struct ffa_driver *driver, struct module *owner,
const char *mod_name)
{
int ret;
if (!driver->probe)
return -EINVAL;
driver->driver.bus = &ffa_bus_type;
driver->driver.name = driver->name;
driver->driver.owner = owner;
driver->driver.mod_name = mod_name;
ret = driver_register(&driver->driver);
if (!ret)
pr_debug("registered new ffa driver %s\n", driver->name);
return ret;
}
EXPORT_SYMBOL_GPL(ffa_driver_register);
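/*
 * Illustrative sketch (not part of the original file): an FF-A client driver
 * matches on partition UUIDs and registers against this bus. The UUID is a
 * placeholder, and the module_ffa_driver() helper is assumed to be provided
 * by <linux/arm_ffa.h>.
 *
 *	static const struct ffa_device_id my_ffa_ids[] = {
 *		{ UUID_INIT(0x12345678, 0x1234, 0x1234,
 *			    0x12, 0x34, 0x12, 0x34, 0x12, 0x34, 0x12, 0x34) },
 *		{}
 *	};
 *
 *	static struct ffa_driver my_ffa_driver = {
 *		.name = "my-ffa-client",
 *		.probe = my_probe,
 *		.remove = my_remove,
 *		.id_table = my_ffa_ids,
 *	};
 *	module_ffa_driver(my_ffa_driver);
 */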
void ffa_driver_unregister(struct ffa_driver *driver)
{
driver_unregister(&driver->driver);
}
EXPORT_SYMBOL_GPL(ffa_driver_unregister);
static void ffa_release_device(struct device *dev)
{
struct ffa_device *ffa_dev = to_ffa_dev(dev);
ida_free(&ffa_bus_id, ffa_dev->id);
kfree(ffa_dev);
}
static int __ffa_devices_unregister(struct device *dev, void *data)
{
device_unregister(dev);
return 0;
}
static void ffa_devices_unregister(void)
{
bus_for_each_dev(&ffa_bus_type, NULL, NULL,
__ffa_devices_unregister);
}
bool ffa_device_is_valid(struct ffa_device *ffa_dev)
{
bool valid = false;
struct device *dev = NULL;
struct ffa_device *tmp_dev;
do {
dev = bus_find_next_device(&ffa_bus_type, dev);
tmp_dev = to_ffa_dev(dev);
if (tmp_dev == ffa_dev) {
valid = true;
break;
}
put_device(dev);
} while (dev);
put_device(dev);
return valid;
}
struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
const struct ffa_ops *ops)
{
int id, ret;
struct device *dev;
struct ffa_device *ffa_dev;
id = ida_alloc_min(&ffa_bus_id, 1, GFP_KERNEL);
if (id < 0)
return NULL;
ffa_dev = kzalloc(sizeof(*ffa_dev), GFP_KERNEL);
if (!ffa_dev) {
ida_free(&ffa_bus_id, id);
return NULL;
}
dev = &ffa_dev->dev;
dev->bus = &ffa_bus_type;
dev->release = ffa_release_device;
dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id);
ffa_dev->vm_id = vm_id;
ffa_dev->ops = ops;
uuid_copy(&ffa_dev->uuid, uuid);
ret = device_register(&ffa_dev->dev);
if (ret) {
dev_err(dev, "unable to register device %s err=%d\n",
dev_name(dev), ret);
put_device(dev);
return NULL;
}
return ffa_dev;
}
EXPORT_SYMBOL_GPL(ffa_device_register);
void ffa_device_unregister(struct ffa_device *ffa_dev)
{
if (!ffa_dev)
return;
device_unregister(&ffa_dev->dev);
}
EXPORT_SYMBOL_GPL(ffa_device_unregister);
int arm_ffa_bus_init(void)
{
return bus_register(&ffa_bus_type);
}
void arm_ffa_bus_exit(void)
{
ffa_devices_unregister();
bus_unregister(&ffa_bus_type);
ida_destroy(&ffa_bus_id);
}
| linux-master | drivers/firmware/arm_ffa/bus.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021 ARM Ltd.
*/
#include <linux/printk.h>
#include "common.h"
static void __arm_ffa_fn_smc(ffa_value_t args, ffa_value_t *res)
{
arm_smccc_1_2_smc(&args, res);
}
static void __arm_ffa_fn_hvc(ffa_value_t args, ffa_value_t *res)
{
arm_smccc_1_2_hvc(&args, res);
}
int __init ffa_transport_init(ffa_fn **invoke_ffa_fn)
{
enum arm_smccc_conduit conduit;
if (arm_smccc_get_version() < ARM_SMCCC_VERSION_1_2)
return -EOPNOTSUPP;
conduit = arm_smccc_1_1_get_conduit();
if (conduit == SMCCC_CONDUIT_NONE) {
pr_err("%s: invalid SMCCC conduit\n", __func__);
return -EOPNOTSUPP;
}
if (conduit == SMCCC_CONDUIT_SMC)
*invoke_ffa_fn = __arm_ffa_fn_smc;
else
*invoke_ffa_fn = __arm_ffa_fn_hvc;
return 0;
}
| linux-master | drivers/firmware/arm_ffa/smccc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Arm Firmware Framework for ARMv8-A(FFA) interface driver
*
* The Arm FFA specification[1] describes a software architecture that
* leverages the virtualization extension to isolate software images
* provided by an ecosystem of vendors from each other and describes
* interfaces that standardize communication between the various software
* images including communication between images in the Secure world and
* Normal world. Any Hypervisor could use the FFA interfaces to enable
* communication between VMs it manages.
*
* The Hypervisor, a.k.a. the Partition Manager in FF-A terminology, can
* assign system resources (memory regions, devices, CPU cycles) to the
* partitions and manage isolation amongst them.
*
* [1] https://developer.arm.com/docs/den0077/latest
*
* Copyright (C) 2021 ARM Ltd.
*/
#define DRIVER_NAME "ARM FF-A"
#define pr_fmt(fmt) DRIVER_NAME ": " fmt
#include <linux/arm_ffa.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include "common.h"
#define FFA_DRIVER_VERSION FFA_VERSION_1_0
#define FFA_MIN_VERSION FFA_VERSION_1_0
#define SENDER_ID_MASK GENMASK(31, 16)
#define RECEIVER_ID_MASK GENMASK(15, 0)
#define SENDER_ID(x) ((u16)(FIELD_GET(SENDER_ID_MASK, (x))))
#define RECEIVER_ID(x) ((u16)(FIELD_GET(RECEIVER_ID_MASK, (x))))
#define PACK_TARGET_INFO(s, r) \
(FIELD_PREP(SENDER_ID_MASK, (s)) | FIELD_PREP(RECEIVER_ID_MASK, (r)))
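/*
 * For example, PACK_TARGET_INFO(0x8001, 0x8002) evaluates to 0x80018002:
 * the sender partition ID occupies bits [31:16] and the receiver partition
 * ID bits [15:0], which SENDER_ID() and RECEIVER_ID() extract again.
 */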
/*
* Keeping the RX/TX buffer size as 4K for now.
* 64K may be preferred to keep it at least one page in a 64K PAGE_SIZE config.
*/
#define RXTX_BUFFER_SIZE SZ_4K
static ffa_fn *invoke_ffa_fn;
static const int ffa_linux_errmap[] = {
/* better than a switch case as long as the return values are contiguous */
0, /* FFA_RET_SUCCESS */
-EOPNOTSUPP, /* FFA_RET_NOT_SUPPORTED */
-EINVAL, /* FFA_RET_INVALID_PARAMETERS */
-ENOMEM, /* FFA_RET_NO_MEMORY */
-EBUSY, /* FFA_RET_BUSY */
-EINTR, /* FFA_RET_INTERRUPTED */
-EACCES, /* FFA_RET_DENIED */
-EAGAIN, /* FFA_RET_RETRY */
-ECANCELED, /* FFA_RET_ABORTED */
};
static inline int ffa_to_linux_errno(int errno)
{
int err_idx = -errno;
if (err_idx >= 0 && err_idx < ARRAY_SIZE(ffa_linux_errmap))
return ffa_linux_errmap[err_idx];
return -EINVAL;
}
struct ffa_drv_info {
u32 version;
u16 vm_id;
struct mutex rx_lock; /* lock to protect Rx buffer */
struct mutex tx_lock; /* lock to protect Tx buffer */
void *rx_buffer;
void *tx_buffer;
bool mem_ops_native;
};
static struct ffa_drv_info *drv_info;
/*
* The driver must be able to support all the versions from the earliest
* supported FFA_MIN_VERSION to the latest supported FFA_DRIVER_VERSION.
* The specification states that if firmware supports a FFA implementation
* that is incompatible with and at a greater version number than specified
* by the caller(FFA_DRIVER_VERSION passed as parameter to FFA_VERSION),
* it must return the NOT_SUPPORTED error code.
*/
static u32 ffa_compatible_version_find(u32 version)
{
u16 major = FFA_MAJOR_VERSION(version), minor = FFA_MINOR_VERSION(version);
u16 drv_major = FFA_MAJOR_VERSION(FFA_DRIVER_VERSION);
u16 drv_minor = FFA_MINOR_VERSION(FFA_DRIVER_VERSION);
if ((major < drv_major) || (major == drv_major && minor <= drv_minor))
return version;
pr_info("Firmware version higher than driver version, downgrading\n");
return FFA_DRIVER_VERSION;
}
static int ffa_version_check(u32 *version)
{
ffa_value_t ver;
invoke_ffa_fn((ffa_value_t){
.a0 = FFA_VERSION, .a1 = FFA_DRIVER_VERSION,
}, &ver);
if (ver.a0 == FFA_RET_NOT_SUPPORTED) {
pr_info("FFA_VERSION returned not supported\n");
return -EOPNOTSUPP;
}
if (ver.a0 < FFA_MIN_VERSION) {
pr_err("Incompatible v%d.%d! Earliest supported v%d.%d\n",
FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
FFA_MAJOR_VERSION(FFA_MIN_VERSION),
FFA_MINOR_VERSION(FFA_MIN_VERSION));
return -EINVAL;
}
pr_info("Driver version %d.%d\n", FFA_MAJOR_VERSION(FFA_DRIVER_VERSION),
FFA_MINOR_VERSION(FFA_DRIVER_VERSION));
pr_info("Firmware version %d.%d found\n", FFA_MAJOR_VERSION(ver.a0),
FFA_MINOR_VERSION(ver.a0));
*version = ffa_compatible_version_find(ver.a0);
return 0;
}
static int ffa_rx_release(void)
{
ffa_value_t ret;
invoke_ffa_fn((ffa_value_t){
.a0 = FFA_RX_RELEASE,
}, &ret);
if (ret.a0 == FFA_ERROR)
return ffa_to_linux_errno((int)ret.a2);
/* check for ret.a0 == FFA_RX_RELEASE ? */
return 0;
}
static int ffa_rxtx_map(phys_addr_t tx_buf, phys_addr_t rx_buf, u32 pg_cnt)
{
ffa_value_t ret;
invoke_ffa_fn((ffa_value_t){
.a0 = FFA_FN_NATIVE(RXTX_MAP),
.a1 = tx_buf, .a2 = rx_buf, .a3 = pg_cnt,
}, &ret);
if (ret.a0 == FFA_ERROR)
return ffa_to_linux_errno((int)ret.a2);
return 0;
}
static int ffa_rxtx_unmap(u16 vm_id)
{
ffa_value_t ret;
invoke_ffa_fn((ffa_value_t){
.a0 = FFA_RXTX_UNMAP, .a1 = PACK_TARGET_INFO(vm_id, 0),
}, &ret);
if (ret.a0 == FFA_ERROR)
return ffa_to_linux_errno((int)ret.a2);
return 0;
}
#define PARTITION_INFO_GET_RETURN_COUNT_ONLY BIT(0)
/* buffer must be sizeof(struct ffa_partition_info) * num_partitions */
static int
__ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
struct ffa_partition_info *buffer, int num_partitions)
{
int idx, count, flags = 0, sz, buf_sz;
ffa_value_t partition_info;
if (drv_info->version > FFA_VERSION_1_0 &&
(!buffer || !num_partitions)) /* Just get the count for now */
flags = PARTITION_INFO_GET_RETURN_COUNT_ONLY;
mutex_lock(&drv_info->rx_lock);
invoke_ffa_fn((ffa_value_t){
.a0 = FFA_PARTITION_INFO_GET,
.a1 = uuid0, .a2 = uuid1, .a3 = uuid2, .a4 = uuid3,
.a5 = flags,
}, &partition_info);
if (partition_info.a0 == FFA_ERROR) {
mutex_unlock(&drv_info->rx_lock);
return ffa_to_linux_errno((int)partition_info.a2);
}
count = partition_info.a2;
if (drv_info->version > FFA_VERSION_1_0) {
buf_sz = sz = partition_info.a3;
if (sz > sizeof(*buffer))
buf_sz = sizeof(*buffer);
} else {
/* FFA_VERSION_1_0 lacks size in the response */
buf_sz = sz = 8;
}
if (buffer && count <= num_partitions)
for (idx = 0; idx < count; idx++)
memcpy(buffer + idx, drv_info->rx_buffer + idx * sz,
buf_sz);
ffa_rx_release();
mutex_unlock(&drv_info->rx_lock);
return count;
}
/* buffer is allocated and caller must free the same if returned count > 0 */
static int
ffa_partition_probe(const uuid_t *uuid, struct ffa_partition_info **buffer)
{
int count;
u32 uuid0_4[4];
struct ffa_partition_info *pbuf;
export_uuid((u8 *)uuid0_4, uuid);
count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], uuid0_4[2],
uuid0_4[3], NULL, 0);
if (count <= 0)
return count;
pbuf = kcalloc(count, sizeof(*pbuf), GFP_KERNEL);
if (!pbuf)
return -ENOMEM;
count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], uuid0_4[2],
uuid0_4[3], pbuf, count);
if (count <= 0)
kfree(pbuf);
else
*buffer = pbuf;
return count;
}
#define VM_ID_MASK GENMASK(15, 0)
static int ffa_id_get(u16 *vm_id)
{
ffa_value_t id;
invoke_ffa_fn((ffa_value_t){
.a0 = FFA_ID_GET,
}, &id);
if (id.a0 == FFA_ERROR)
return ffa_to_linux_errno((int)id.a2);
*vm_id = FIELD_GET(VM_ID_MASK, (id.a2));
return 0;
}
static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit,
struct ffa_send_direct_data *data)
{
u32 req_id, resp_id, src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
ffa_value_t ret;
if (mode_32bit) {
req_id = FFA_MSG_SEND_DIRECT_REQ;
resp_id = FFA_MSG_SEND_DIRECT_RESP;
} else {
req_id = FFA_FN_NATIVE(MSG_SEND_DIRECT_REQ);
resp_id = FFA_FN_NATIVE(MSG_SEND_DIRECT_RESP);
}
invoke_ffa_fn((ffa_value_t){
.a0 = req_id, .a1 = src_dst_ids, .a2 = 0,
.a3 = data->data0, .a4 = data->data1, .a5 = data->data2,
.a6 = data->data3, .a7 = data->data4,
}, &ret);
while (ret.a0 == FFA_INTERRUPT)
invoke_ffa_fn((ffa_value_t){
.a0 = FFA_RUN, .a1 = ret.a1,
}, &ret);
if (ret.a0 == FFA_ERROR)
return ffa_to_linux_errno((int)ret.a2);
if (ret.a0 == resp_id) {
data->data0 = ret.a3;
data->data1 = ret.a4;
data->data2 = ret.a5;
data->data3 = ret.a6;
data->data4 = ret.a7;
return 0;
}
return -EINVAL;
}
static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz,
u32 frag_len, u32 len, u64 *handle)
{
ffa_value_t ret;
invoke_ffa_fn((ffa_value_t){
.a0 = func_id, .a1 = len, .a2 = frag_len,
.a3 = buf, .a4 = buf_sz,
}, &ret);
while (ret.a0 == FFA_MEM_OP_PAUSE)
invoke_ffa_fn((ffa_value_t){
.a0 = FFA_MEM_OP_RESUME,
.a1 = ret.a1, .a2 = ret.a2,
}, &ret);
if (ret.a0 == FFA_ERROR)
return ffa_to_linux_errno((int)ret.a2);
if (ret.a0 == FFA_SUCCESS) {
if (handle)
*handle = PACK_HANDLE(ret.a2, ret.a3);
} else if (ret.a0 == FFA_MEM_FRAG_RX) {
if (handle)
*handle = PACK_HANDLE(ret.a1, ret.a2);
} else {
return -EOPNOTSUPP;
}
return frag_len;
}
static int ffa_mem_next_frag(u64 handle, u32 frag_len)
{
ffa_value_t ret;
invoke_ffa_fn((ffa_value_t){
.a0 = FFA_MEM_FRAG_TX,
.a1 = HANDLE_LOW(handle), .a2 = HANDLE_HIGH(handle),
.a3 = frag_len,
}, &ret);
while (ret.a0 == FFA_MEM_OP_PAUSE)
invoke_ffa_fn((ffa_value_t){
.a0 = FFA_MEM_OP_RESUME,
.a1 = ret.a1, .a2 = ret.a2,
}, &ret);
if (ret.a0 == FFA_ERROR)
return ffa_to_linux_errno((int)ret.a2);
if (ret.a0 == FFA_MEM_FRAG_RX)
return ret.a3;
else if (ret.a0 == FFA_SUCCESS)
return 0;
return -EOPNOTSUPP;
}
static int
ffa_transmit_fragment(u32 func_id, phys_addr_t buf, u32 buf_sz, u32 frag_len,
u32 len, u64 *handle, bool first)
{
if (!first)
return ffa_mem_next_frag(*handle, frag_len);
return ffa_mem_first_frag(func_id, buf, buf_sz, frag_len, len, handle);
}
static u32 ffa_get_num_pages_sg(struct scatterlist *sg)
{
u32 num_pages = 0;
do {
num_pages += sg->length / FFA_PAGE_SIZE;
} while ((sg = sg_next(sg)));
return num_pages;
}
static int
ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
struct ffa_mem_ops_args *args)
{
int rc = 0;
bool first = true;
phys_addr_t addr = 0;
struct ffa_composite_mem_region *composite;
struct ffa_mem_region_addr_range *constituents;
struct ffa_mem_region_attributes *ep_mem_access;
struct ffa_mem_region *mem_region = buffer;
u32 idx, frag_len, length, buf_sz = 0, num_entries = sg_nents(args->sg);
mem_region->tag = args->tag;
mem_region->flags = args->flags;
mem_region->sender_id = drv_info->vm_id;
mem_region->attributes = FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK |
FFA_MEM_INNER_SHAREABLE;
ep_mem_access = &mem_region->ep_mem_access[0];
for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) {
ep_mem_access->receiver = args->attrs[idx].receiver;
ep_mem_access->attrs = args->attrs[idx].attrs;
ep_mem_access->composite_off = COMPOSITE_OFFSET(args->nattrs);
ep_mem_access->flag = 0;
ep_mem_access->reserved = 0;
}
mem_region->handle = 0;
mem_region->reserved_0 = 0;
mem_region->reserved_1 = 0;
mem_region->ep_count = args->nattrs;
composite = buffer + COMPOSITE_OFFSET(args->nattrs);
composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg);
composite->addr_range_cnt = num_entries;
composite->reserved = 0;
length = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, num_entries);
frag_len = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, 0);
if (frag_len > max_fragsize)
return -ENXIO;
if (!args->use_txbuf) {
addr = virt_to_phys(buffer);
buf_sz = max_fragsize / FFA_PAGE_SIZE;
}
constituents = buffer + frag_len;
idx = 0;
do {
if (frag_len == max_fragsize) {
rc = ffa_transmit_fragment(func_id, addr, buf_sz,
frag_len, length,
&args->g_handle, first);
if (rc < 0)
return -ENXIO;
first = false;
idx = 0;
frag_len = 0;
constituents = buffer;
}
if ((void *)constituents - buffer > max_fragsize) {
pr_err("Memory Region Fragment > Tx Buffer size\n");
return -EFAULT;
}
constituents->address = sg_phys(args->sg);
constituents->pg_cnt = args->sg->length / FFA_PAGE_SIZE;
constituents->reserved = 0;
constituents++;
frag_len += sizeof(struct ffa_mem_region_addr_range);
} while ((args->sg = sg_next(args->sg)));
return ffa_transmit_fragment(func_id, addr, buf_sz, frag_len,
length, &args->g_handle, first);
}
static int ffa_memory_ops(u32 func_id, struct ffa_mem_ops_args *args)
{
int ret;
void *buffer;
if (!args->use_txbuf) {
buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
} else {
buffer = drv_info->tx_buffer;
mutex_lock(&drv_info->tx_lock);
}
ret = ffa_setup_and_transmit(func_id, buffer, RXTX_BUFFER_SIZE, args);
if (args->use_txbuf)
mutex_unlock(&drv_info->tx_lock);
else
free_pages_exact(buffer, RXTX_BUFFER_SIZE);
return ret < 0 ? ret : 0;
}
static int ffa_memory_reclaim(u64 g_handle, u32 flags)
{
ffa_value_t ret;
invoke_ffa_fn((ffa_value_t){
.a0 = FFA_MEM_RECLAIM,
.a1 = HANDLE_LOW(g_handle), .a2 = HANDLE_HIGH(g_handle),
.a3 = flags,
}, &ret);
if (ret.a0 == FFA_ERROR)
return ffa_to_linux_errno((int)ret.a2);
return 0;
}
static int ffa_features(u32 func_feat_id, u32 input_props,
u32 *if_props_1, u32 *if_props_2)
{
ffa_value_t id;
if (!ARM_SMCCC_IS_FAST_CALL(func_feat_id) && input_props) {
pr_err("%s: Invalid Parameters: %x, %x", __func__,
func_feat_id, input_props);
return ffa_to_linux_errno(FFA_RET_INVALID_PARAMETERS);
}
invoke_ffa_fn((ffa_value_t){
.a0 = FFA_FEATURES, .a1 = func_feat_id, .a2 = input_props,
}, &id);
if (id.a0 == FFA_ERROR)
return ffa_to_linux_errno((int)id.a2);
if (if_props_1)
*if_props_1 = id.a2;
if (if_props_2)
*if_props_2 = id.a3;
return 0;
}
static void ffa_set_up_mem_ops_native_flag(void)
{
if (!ffa_features(FFA_FN_NATIVE(MEM_LEND), 0, NULL, NULL) ||
!ffa_features(FFA_FN_NATIVE(MEM_SHARE), 0, NULL, NULL))
drv_info->mem_ops_native = true;
}
static u32 ffa_api_version_get(void)
{
return drv_info->version;
}
static int ffa_partition_info_get(const char *uuid_str,
struct ffa_partition_info *buffer)
{
int count;
uuid_t uuid;
struct ffa_partition_info *pbuf;
if (uuid_parse(uuid_str, &uuid)) {
pr_err("invalid uuid (%s)\n", uuid_str);
return -ENODEV;
}
count = ffa_partition_probe(&uuid, &pbuf);
if (count <= 0)
return -ENOENT;
memcpy(buffer, pbuf, sizeof(*pbuf) * count);
kfree(pbuf);
return 0;
}
static void _ffa_mode_32bit_set(struct ffa_device *dev)
{
dev->mode_32bit = true;
}
static void ffa_mode_32bit_set(struct ffa_device *dev)
{
if (drv_info->version > FFA_VERSION_1_0)
return;
_ffa_mode_32bit_set(dev);
}
static int ffa_sync_send_receive(struct ffa_device *dev,
struct ffa_send_direct_data *data)
{
return ffa_msg_send_direct_req(drv_info->vm_id, dev->vm_id,
dev->mode_32bit, data);
}
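/*
 * Illustrative sketch (not part of the original file): an FF-A client driver
 * reaches this path through the ops table of its ffa_device, e.g. from its
 * probe() callback. The payload value is a placeholder.
 *
 *	static int my_probe(struct ffa_device *ffa_dev)
 *	{
 *		struct ffa_send_direct_data data = { .data0 = 0x1 };
 *
 *		return ffa_dev->ops->msg_ops->sync_send_receive(ffa_dev, &data);
 *	}
 */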
static int ffa_memory_share(struct ffa_mem_ops_args *args)
{
if (drv_info->mem_ops_native)
return ffa_memory_ops(FFA_FN_NATIVE(MEM_SHARE), args);
return ffa_memory_ops(FFA_MEM_SHARE, args);
}
static int ffa_memory_lend(struct ffa_mem_ops_args *args)
{
/* Note that upon a successful MEM_LEND request the caller
* must ensure that the memory region specified is not accessed
* until a successful MEM_RECLAIM call has been made.
* On systems with a hypervisor present this will be enforced,
* however on systems without a hypervisor the responsibility
* falls to the calling kernel driver to prevent access.
*/
if (drv_info->mem_ops_native)
return ffa_memory_ops(FFA_FN_NATIVE(MEM_LEND), args);
return ffa_memory_ops(FFA_MEM_LEND, args);
}
static const struct ffa_info_ops ffa_drv_info_ops = {
.api_version_get = ffa_api_version_get,
.partition_info_get = ffa_partition_info_get,
};
static const struct ffa_msg_ops ffa_drv_msg_ops = {
.mode_32bit_set = ffa_mode_32bit_set,
.sync_send_receive = ffa_sync_send_receive,
};
static const struct ffa_mem_ops ffa_drv_mem_ops = {
.memory_reclaim = ffa_memory_reclaim,
.memory_share = ffa_memory_share,
.memory_lend = ffa_memory_lend,
};
static const struct ffa_ops ffa_drv_ops = {
.info_ops = &ffa_drv_info_ops,
.msg_ops = &ffa_drv_msg_ops,
.mem_ops = &ffa_drv_mem_ops,
};
void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid)
{
int count, idx;
struct ffa_partition_info *pbuf, *tpbuf;
/*
* FF-A v1.1 provides UUID for each partition as part of the discovery
* API, the discovered UUID must be populated in the device's UUID and
* there is no need to copy the same from the driver table.
*/
if (drv_info->version > FFA_VERSION_1_0)
return;
count = ffa_partition_probe(uuid, &pbuf);
if (count <= 0)
return;
for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++)
if (tpbuf->id == ffa_dev->vm_id)
uuid_copy(&ffa_dev->uuid, uuid);
kfree(pbuf);
}
static void ffa_setup_partitions(void)
{
int count, idx;
uuid_t uuid;
struct ffa_device *ffa_dev;
struct ffa_partition_info *pbuf, *tpbuf;
count = ffa_partition_probe(&uuid_null, &pbuf);
if (count <= 0) {
pr_info("%s: No partitions found, error %d\n", __func__, count);
return;
}
for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++) {
import_uuid(&uuid, (u8 *)tpbuf->uuid);
/* Note that if the UUID is uuid_null, ffa_device_match() will
* need to find the UUID of this partition ID with the help of
* ffa_device_match_uuid(). FF-A v1.1 and above provides the
* UUID here for each partition as part of the discovery API,
* and the same is passed on.
*/
ffa_dev = ffa_device_register(&uuid, tpbuf->id, &ffa_drv_ops);
if (!ffa_dev) {
pr_err("%s: failed to register partition ID 0x%x\n",
__func__, tpbuf->id);
continue;
}
if (drv_info->version > FFA_VERSION_1_0 &&
!(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
_ffa_mode_32bit_set(ffa_dev);
}
kfree(pbuf);
}
static int __init ffa_init(void)
{
int ret;
ret = ffa_transport_init(&invoke_ffa_fn);
if (ret)
return ret;
ret = arm_ffa_bus_init();
if (ret)
return ret;
drv_info = kzalloc(sizeof(*drv_info), GFP_KERNEL);
if (!drv_info) {
ret = -ENOMEM;
goto ffa_bus_exit;
}
ret = ffa_version_check(&drv_info->version);
if (ret)
goto free_drv_info;
if (ffa_id_get(&drv_info->vm_id)) {
pr_err("failed to obtain VM id for self\n");
ret = -ENODEV;
goto free_drv_info;
}
drv_info->rx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
if (!drv_info->rx_buffer) {
ret = -ENOMEM;
goto free_drv_info;
}
drv_info->tx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
if (!drv_info->tx_buffer) {
ret = -ENOMEM;
goto free_pages;
}
ret = ffa_rxtx_map(virt_to_phys(drv_info->tx_buffer),
virt_to_phys(drv_info->rx_buffer),
RXTX_BUFFER_SIZE / FFA_PAGE_SIZE);
if (ret) {
pr_err("failed to register FFA RxTx buffers\n");
goto free_pages;
}
mutex_init(&drv_info->rx_lock);
mutex_init(&drv_info->tx_lock);
ffa_setup_partitions();
ffa_set_up_mem_ops_native_flag();
return 0;
free_pages:
if (drv_info->tx_buffer)
free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
free_drv_info:
kfree(drv_info);
ffa_bus_exit:
arm_ffa_bus_exit();
return ret;
}
subsys_initcall(ffa_init);
static void __exit ffa_exit(void)
{
ffa_rxtx_unmap(drv_info->vm_id);
free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
kfree(drv_info);
arm_ffa_bus_exit();
}
module_exit(ffa_exit);
MODULE_ALIAS("arm-ffa");
MODULE_AUTHOR("Sudeep Holla <[email protected]>");
MODULE_DESCRIPTION("Arm FF-A interface driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/firmware/arm_ffa/driver.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Xilinx Zynq MPSoC Firmware layer for debugfs APIs
*
* Copyright (C) 2014-2018 Xilinx, Inc.
*
* Michal Simek <[email protected]>
* Davorin Mista <[email protected]>
* Jolly Shah <[email protected]>
* Rajan Vaja <[email protected]>
*/
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include "zynqmp-debug.h"
#define PM_API_NAME_LEN 50
struct pm_api_info {
u32 api_id;
char api_name[PM_API_NAME_LEN];
char api_name_len;
};
static char debugfs_buf[PAGE_SIZE];
#define PM_API(id) {id, #id, strlen(#id)}
static struct pm_api_info pm_api_list[] = {
PM_API(PM_GET_API_VERSION),
PM_API(PM_QUERY_DATA),
};
static struct dentry *firmware_debugfs_root;
/**
* zynqmp_pm_argument_value() - Extract argument value from a PM-API request
* @arg: Entered PM-API argument in string format
*
* Return: Argument value in unsigned integer format on success
* 0 otherwise
*/
static u64 zynqmp_pm_argument_value(char *arg)
{
u64 value;
if (!arg)
return 0;
if (!kstrtou64(arg, 0, &value))
return value;
return 0;
}
/**
* get_pm_api_id() - Extract API-ID from a PM-API request
* @pm_api_req: Entered PM-API argument in string format
* @pm_id: API-ID
*
* Return: 0 on success else error code
*/
static int get_pm_api_id(char *pm_api_req, u32 *pm_id)
{
int i;
for (i = 0; i < ARRAY_SIZE(pm_api_list) ; i++) {
if (!strncasecmp(pm_api_req, pm_api_list[i].api_name,
pm_api_list[i].api_name_len)) {
*pm_id = pm_api_list[i].api_id;
break;
}
}
/* If no name was entered look for PM-API ID instead */
if (i == ARRAY_SIZE(pm_api_list) && kstrtouint(pm_api_req, 10, pm_id))
return -EINVAL;
return 0;
}
static int process_api_request(u32 pm_id, u64 *pm_api_arg, u32 *pm_api_ret)
{
u32 pm_api_version;
int ret;
struct zynqmp_pm_query_data qdata = {0};
switch (pm_id) {
case PM_GET_API_VERSION:
ret = zynqmp_pm_get_api_version(&pm_api_version);
sprintf(debugfs_buf, "PM-API Version = %d.%d\n",
pm_api_version >> 16, pm_api_version & 0xffff);
break;
case PM_QUERY_DATA:
qdata.qid = pm_api_arg[0];
qdata.arg1 = pm_api_arg[1];
qdata.arg2 = pm_api_arg[2];
qdata.arg3 = pm_api_arg[3];
ret = zynqmp_pm_query_data(qdata, pm_api_ret);
if (ret)
break;
switch (qdata.qid) {
case PM_QID_CLOCK_GET_NAME:
sprintf(debugfs_buf, "Clock name = %s\n",
(char *)pm_api_ret);
break;
case PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS:
sprintf(debugfs_buf, "Multiplier = %d, Divider = %d\n",
pm_api_ret[1], pm_api_ret[2]);
break;
default:
sprintf(debugfs_buf,
"data[0] = 0x%08x\ndata[1] = 0x%08x\n data[2] = 0x%08x\ndata[3] = 0x%08x\n",
pm_api_ret[0], pm_api_ret[1],
pm_api_ret[2], pm_api_ret[3]);
}
break;
default:
sprintf(debugfs_buf, "Unsupported PM-API request\n");
ret = -EINVAL;
}
return ret;
}
/**
* zynqmp_pm_debugfs_api_write() - debugfs write function
* @file: User file
* @ptr: User entered PM-API string
* @len: Length of the userspace buffer
* @off: Offset within the file
*
* Used for triggering pm api functions by writing
* echo <pm_api_id> > /sys/kernel/debug/zynqmp-firmware/pm or
* echo <pm_api_name> > /sys/kernel/debug/zynqmp-firmware/pm
*
* Return: Number of bytes copied if PM-API request succeeds,
* the corresponding error code otherwise
*/
static ssize_t zynqmp_pm_debugfs_api_write(struct file *file,
const char __user *ptr, size_t len,
loff_t *off)
{
char *kern_buff, *tmp_buff;
char *pm_api_req;
u32 pm_id = 0;
u64 pm_api_arg[4] = {0, 0, 0, 0};
/* Return values from PM API calls */
u32 pm_api_ret[4] = {0, 0, 0, 0};
int ret;
int i = 0;
strcpy(debugfs_buf, "");
if (*off != 0 || len <= 1 || len > PAGE_SIZE - 1)
return -EINVAL;
kern_buff = memdup_user_nul(ptr, len);
if (IS_ERR(kern_buff))
return PTR_ERR(kern_buff);
tmp_buff = kern_buff;
/* Read the API name from a user request */
pm_api_req = strsep(&kern_buff, " ");
ret = get_pm_api_id(pm_api_req, &pm_id);
if (ret < 0)
goto err;
/* Read node_id and arguments from the PM-API request */
pm_api_req = strsep(&kern_buff, " ");
while ((i < ARRAY_SIZE(pm_api_arg)) && pm_api_req) {
pm_api_arg[i++] = zynqmp_pm_argument_value(pm_api_req);
pm_api_req = strsep(&kern_buff, " ");
}
ret = process_api_request(pm_id, pm_api_arg, pm_api_ret);
err:
kfree(tmp_buff);
if (ret)
return ret;
return len;
}
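/*
 * Illustrative user-space usage (the path follows the "zynqmp-firmware"
 * directory and "pm" file created in zynqmp_pm_api_debugfs_init() below;
 * the PM_QUERY_DATA argument values are placeholders):
 *
 *	echo "PM_GET_API_VERSION" > /sys/kernel/debug/zynqmp-firmware/pm
 *	cat /sys/kernel/debug/zynqmp-firmware/pm
 *	echo "PM_QUERY_DATA 1 2 0 0" > /sys/kernel/debug/zynqmp-firmware/pm
 */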
/**
* zynqmp_pm_debugfs_api_read() - debugfs read function
* @file: User file
* @ptr: Requested pm_api_version string
* @len: Length of the userspace buffer
* @off: Offset within the file
*
* Return: Length of the version string on success
* else error code
*/
static ssize_t zynqmp_pm_debugfs_api_read(struct file *file, char __user *ptr,
size_t len, loff_t *off)
{
return simple_read_from_buffer(ptr, len, off, debugfs_buf,
strlen(debugfs_buf));
}
/* Setup debugfs fops */
static const struct file_operations fops_zynqmp_pm_dbgfs = {
.owner = THIS_MODULE,
.write = zynqmp_pm_debugfs_api_write,
.read = zynqmp_pm_debugfs_api_read,
};
/**
* zynqmp_pm_api_debugfs_init - Initialize debugfs interface
*
* Return: None
*/
void zynqmp_pm_api_debugfs_init(void)
{
/* Initialize debugfs interface */
firmware_debugfs_root = debugfs_create_dir("zynqmp-firmware", NULL);
debugfs_create_file("pm", 0660, firmware_debugfs_root, NULL,
&fops_zynqmp_pm_dbgfs);
}
/**
* zynqmp_pm_api_debugfs_exit - Remove debugfs interface
*
* Return: None
*/
void zynqmp_pm_api_debugfs_exit(void)
{
debugfs_remove_recursive(firmware_debugfs_root);
}
| linux-master | drivers/firmware/xilinx/zynqmp-debug.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Xilinx Zynq MPSoC Firmware layer
*
* Copyright (C) 2014-2022 Xilinx, Inc.
*
* Michal Simek <[email protected]>
* Davorin Mista <[email protected]>
* Jolly Shah <[email protected]>
* Rajan Vaja <[email protected]>
*/
#include <linux/arm-smccc.h>
#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/hashtable.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/firmware/xlnx-event-manager.h>
#include "zynqmp-debug.h"
/* Max HashMap Order for PM API feature check (1<<7 = 128) */
#define PM_API_FEATURE_CHECK_MAX_ORDER 7
/* CRL registers and bitfields */
#define CRL_APB_BASE 0xFF5E0000U
/* BOOT_PIN_CTRL- Used to control the mode pins after boot */
#define CRL_APB_BOOT_PIN_CTRL (CRL_APB_BASE + (0x250U))
/* BOOT_PIN_CTRL_MASK- out_val[11:8], out_en[3:0] */
#define CRL_APB_BOOTPIN_CTRL_MASK 0xF0FU
/* IOCTL/QUERY feature payload size */
#define FEATURE_PAYLOAD_SIZE 2
/* Firmware feature check version mask */
#define FIRMWARE_VERSION_MASK GENMASK(15, 0)
static bool feature_check_enabled;
static DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER);
static u32 ioctl_features[FEATURE_PAYLOAD_SIZE];
static u32 query_features[FEATURE_PAYLOAD_SIZE];
static struct platform_device *em_dev;
/**
* struct zynqmp_devinfo - Structure for Zynqmp device instance
* @dev: Device Pointer
* @feature_conf_id: Feature conf id
*/
struct zynqmp_devinfo {
struct device *dev;
u32 feature_conf_id;
};
/**
* struct pm_api_feature_data - PM API Feature data
* @pm_api_id: PM API Id, used as key to index into hashmap
* @feature_status: status of PM API feature: valid, invalid
* @hentry: hlist_node that hooks this entry into hashtable
*/
struct pm_api_feature_data {
u32 pm_api_id;
int feature_status;
struct hlist_node hentry;
};
static const struct mfd_cell firmware_devs[] = {
{
.name = "zynqmp_power_controller",
},
};
/**
* zynqmp_pm_ret_code() - Convert PMU-FW error codes to Linux error codes
* @ret_status: PMUFW return code
*
* Return: corresponding Linux error code
*/
static int zynqmp_pm_ret_code(u32 ret_status)
{
switch (ret_status) {
case XST_PM_SUCCESS:
case XST_PM_DOUBLE_REQ:
return 0;
case XST_PM_NO_FEATURE:
return -ENOTSUPP;
case XST_PM_NO_ACCESS:
return -EACCES;
case XST_PM_ABORT_SUSPEND:
return -ECANCELED;
case XST_PM_MULT_USER:
return -EUSERS;
case XST_PM_INTERNAL:
case XST_PM_CONFLICT:
case XST_PM_INVALID_NODE:
default:
return -EINVAL;
}
}
static noinline int do_fw_call_fail(u64 arg0, u64 arg1, u64 arg2,
u32 *ret_payload)
{
return -ENODEV;
}
/*
* PM function call wrapper
* Invoke do_fw_call_smc or do_fw_call_hvc, depending on the configuration
*/
static int (*do_fw_call)(u64, u64, u64, u32 *ret_payload) = do_fw_call_fail;
/**
* do_fw_call_smc() - Call system-level platform management layer (SMC)
* @arg0: Argument 0 to SMC call
* @arg1: Argument 1 to SMC call
* @arg2: Argument 2 to SMC call
* @ret_payload: Returned value array
*
* Invoke platform management function via SMC call (no hypervisor present).
*
* Return: Returns status, either success or error+reason
*/
static noinline int do_fw_call_smc(u64 arg0, u64 arg1, u64 arg2,
u32 *ret_payload)
{
struct arm_smccc_res res;
arm_smccc_smc(arg0, arg1, arg2, 0, 0, 0, 0, 0, &res);
if (ret_payload) {
ret_payload[0] = lower_32_bits(res.a0);
ret_payload[1] = upper_32_bits(res.a0);
ret_payload[2] = lower_32_bits(res.a1);
ret_payload[3] = upper_32_bits(res.a1);
}
return zynqmp_pm_ret_code((enum pm_ret_status)res.a0);
}
/**
* do_fw_call_hvc() - Call system-level platform management layer (HVC)
* @arg0: Argument 0 to HVC call
* @arg1: Argument 1 to HVC call
* @arg2: Argument 2 to HVC call
* @ret_payload: Returned value array
*
* Invoke platform management function via HVC
* HVC-based for communication through hypervisor
* (no direct communication with ATF).
*
* Return: Returns status, either success or error+reason
*/
static noinline int do_fw_call_hvc(u64 arg0, u64 arg1, u64 arg2,
u32 *ret_payload)
{
struct arm_smccc_res res;
arm_smccc_hvc(arg0, arg1, arg2, 0, 0, 0, 0, 0, &res);
if (ret_payload) {
ret_payload[0] = lower_32_bits(res.a0);
ret_payload[1] = upper_32_bits(res.a0);
ret_payload[2] = lower_32_bits(res.a1);
ret_payload[3] = upper_32_bits(res.a1);
}
return zynqmp_pm_ret_code((enum pm_ret_status)res.a0);
}
static int __do_feature_check_call(const u32 api_id, u32 *ret_payload)
{
int ret;
u64 smc_arg[2];
smc_arg[0] = PM_SIP_SVC | PM_FEATURE_CHECK;
smc_arg[1] = api_id;
ret = do_fw_call(smc_arg[0], smc_arg[1], 0, ret_payload);
if (ret)
ret = -EOPNOTSUPP;
else
ret = ret_payload[1];
return ret;
}
static int do_feature_check_call(const u32 api_id)
{
int ret;
u32 ret_payload[PAYLOAD_ARG_CNT];
struct pm_api_feature_data *feature_data;
/* Check for existing entry in hash table for given api */
hash_for_each_possible(pm_api_features_map, feature_data, hentry,
api_id) {
if (feature_data->pm_api_id == api_id)
return feature_data->feature_status;
}
/* Add new entry if not present */
feature_data = kmalloc(sizeof(*feature_data), GFP_ATOMIC);
if (!feature_data)
return -ENOMEM;
feature_data->pm_api_id = api_id;
ret = __do_feature_check_call(api_id, ret_payload);
feature_data->feature_status = ret;
hash_add(pm_api_features_map, &feature_data->hentry, api_id);
if (api_id == PM_IOCTL)
/* Store supported IOCTL IDs mask */
memcpy(ioctl_features, &ret_payload[2], FEATURE_PAYLOAD_SIZE * 4);
else if (api_id == PM_QUERY_DATA)
/* Store supported QUERY IDs mask */
memcpy(query_features, &ret_payload[2], FEATURE_PAYLOAD_SIZE * 4);
return ret;
}
/**
* zynqmp_pm_feature() - Check whether given feature is supported or not and
* store supported IOCTL/QUERY ID mask
* @api_id: API ID to check
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_feature(const u32 api_id)
{
int ret;
if (!feature_check_enabled)
return 0;
ret = do_feature_check_call(api_id);
return ret;
}
EXPORT_SYMBOL_GPL(zynqmp_pm_feature);
/**
* zynqmp_pm_is_function_supported() - Check whether given IOCTL/QUERY function
* is supported or not
* @api_id: PM_IOCTL or PM_QUERY_DATA
* @id: IOCTL or QUERY function IDs
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id)
{
int ret;
u32 *bit_mask;
/* Input arguments validation */
if (id >= 64 || (api_id != PM_IOCTL && api_id != PM_QUERY_DATA))
return -EINVAL;
/* Check feature check API version */
ret = do_feature_check_call(PM_FEATURE_CHECK);
if (ret < 0)
return ret;
/* Check if feature check version 2 is supported or not */
if ((ret & FIRMWARE_VERSION_MASK) == PM_API_VERSION_2) {
/*
* Call feature check for IOCTL/QUERY API to get IOCTL ID or
* QUERY ID feature status.
*/
ret = do_feature_check_call(api_id);
if (ret < 0)
return ret;
bit_mask = (api_id == PM_IOCTL) ? ioctl_features : query_features;
if ((bit_mask[(id / 32)] & BIT((id % 32))) == 0U)
return -EOPNOTSUPP;
} else {
return -ENODATA;
}
return 0;
}
EXPORT_SYMBOL_GPL(zynqmp_pm_is_function_supported);
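/*
 * Illustrative usage sketch (not part of the driver): a client can probe for
 * an individual IOCTL before relying on it, e.g.
 *
 *	if (zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_SD_TAPDELAY))
 *		use_legacy_path();	// hypothetical fallback helper
 *
 * A zero return means the firmware advertises the ID in its feature bit mask;
 * -EOPNOTSUPP or -ENODATA mean it does not (or cannot report it).
 */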
/**
* zynqmp_pm_invoke_fn() - Invoke the system-level platform management layer
* caller function depending on the configuration
* @pm_api_id: Requested PM-API call
* @arg0: Argument 0 to requested PM-API call
* @arg1: Argument 1 to requested PM-API call
* @arg2: Argument 2 to requested PM-API call
* @arg3: Argument 3 to requested PM-API call
* @ret_payload: Returned value array
*
* Invoke platform management function for SMC or HVC call, depending on
* configuration.
 * Following the SMC Calling Convention (SMCCC) for SMC64, the PM Function
 * Identifier is composed as:
 * PM_SIP_SVC | PM_API_ID =
 *	((SMC_TYPE_FAST << FUNCID_TYPE_SHIFT) |
 *	 (SMC_64 << FUNCID_CC_SHIFT) |
 *	 (SIP_START << FUNCID_OEN_SHIFT) |
 *	 (PM_API_ID & FUNCID_NUM_MASK))
*
* PM_SIP_SVC - Registered ZynqMP SIP Service Call.
* PM_API_ID - Platform Management API ID.
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
u32 arg2, u32 arg3, u32 *ret_payload)
{
/*
	 * Add the SIP service call Function Identifier; it must be passed
	 * in the x0 register.
*/
u64 smc_arg[4];
int ret;
/* Check if feature is supported or not */
ret = zynqmp_pm_feature(pm_api_id);
if (ret < 0)
return ret;
smc_arg[0] = PM_SIP_SVC | pm_api_id;
smc_arg[1] = ((u64)arg1 << 32) | arg0;
smc_arg[2] = ((u64)arg3 << 32) | arg2;
return do_fw_call(smc_arg[0], smc_arg[1], smc_arg[2], ret_payload);
}
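/*
 * Illustrative sketch of the argument packing above (not part of the driver):
 * with pm_api_id = PM_GET_API_VERSION and all arguments zero, smc_arg[0]
 * becomes PM_SIP_SVC | PM_GET_API_VERSION, while smc_arg[1] and smc_arg[2]
 * each carry two 32-bit arguments packed as ((u64)high_arg << 32) | low_arg,
 * matching how do_fw_call_smc()/do_fw_call_hvc() later split res.a0/res.a1
 * back into four 32-bit payload words.
 */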
static u32 pm_api_version;
static u32 pm_tz_version;
static u32 pm_family_code;
static u32 pm_sub_family_code;
int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset)
{
int ret;
ret = zynqmp_pm_invoke_fn(TF_A_PM_REGISTER_SGI, sgi_num, reset, 0, 0,
NULL);
if (!ret)
return ret;
/* try old implementation as fallback strategy if above fails */
return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_REGISTER_SGI, sgi_num,
reset, NULL);
}
/**
* zynqmp_pm_get_api_version() - Get version number of PMU PM firmware
* @version: Returned version value
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_get_api_version(u32 *version)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
if (!version)
return -EINVAL;
	/* Check if the PM API version has already been retrieved */
if (pm_api_version > 0) {
*version = pm_api_version;
return 0;
}
ret = zynqmp_pm_invoke_fn(PM_GET_API_VERSION, 0, 0, 0, 0, ret_payload);
*version = ret_payload[1];
return ret;
}
EXPORT_SYMBOL_GPL(zynqmp_pm_get_api_version);
/**
* zynqmp_pm_get_chipid - Get silicon ID registers
* @idcode: IDCODE register
* @version: version register
*
* Return: Returns the status of the operation and the idcode and version
* registers in @idcode and @version.
*/
int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
if (!idcode || !version)
return -EINVAL;
ret = zynqmp_pm_invoke_fn(PM_GET_CHIPID, 0, 0, 0, 0, ret_payload);
*idcode = ret_payload[1];
*version = ret_payload[2];
return ret;
}
EXPORT_SYMBOL_GPL(zynqmp_pm_get_chipid);
/**
* zynqmp_pm_get_family_info() - Get family info of platform
* @family: Returned family code value
* @subfamily: Returned sub-family code value
*
* Return: Returns status, either success or error+reason
*/
static int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
u32 idcode;
int ret;
	/* Check if the family or sub-family code has already been received */
if (pm_family_code && pm_sub_family_code) {
*family = pm_family_code;
*subfamily = pm_sub_family_code;
return 0;
}
ret = zynqmp_pm_invoke_fn(PM_GET_CHIPID, 0, 0, 0, 0, ret_payload);
if (ret < 0)
return ret;
idcode = ret_payload[1];
pm_family_code = FIELD_GET(FAMILY_CODE_MASK, idcode);
pm_sub_family_code = FIELD_GET(SUB_FAMILY_CODE_MASK, idcode);
*family = pm_family_code;
*subfamily = pm_sub_family_code;
return 0;
}
/**
* zynqmp_pm_get_trustzone_version() - Get secure trustzone firmware version
* @version: Returned version value
*
* Return: Returns status, either success or error+reason
*/
static int zynqmp_pm_get_trustzone_version(u32 *version)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
if (!version)
return -EINVAL;
	/* Check if the PM trustzone version has already been retrieved */
if (pm_tz_version > 0) {
*version = pm_tz_version;
return 0;
}
ret = zynqmp_pm_invoke_fn(PM_GET_TRUSTZONE_VERSION, 0, 0,
0, 0, ret_payload);
*version = ret_payload[1];
return ret;
}
/**
* get_set_conduit_method() - Choose SMC or HVC based communication
* @np: Pointer to the device_node structure
*
* Use SMC or HVC-based functions to communicate with EL2/EL3.
*
* Return: Returns 0 on success or error code
*/
static int get_set_conduit_method(struct device_node *np)
{
const char *method;
if (of_property_read_string(np, "method", &method)) {
pr_warn("%s missing \"method\" property\n", __func__);
return -ENXIO;
}
if (!strcmp("hvc", method)) {
do_fw_call = do_fw_call_hvc;
} else if (!strcmp("smc", method)) {
do_fw_call = do_fw_call_smc;
} else {
pr_warn("%s Invalid \"method\" property: %s\n",
__func__, method);
return -EINVAL;
}
return 0;
}
/**
* zynqmp_pm_query_data() - Get query data from firmware
* @qdata: Variable to the zynqmp_pm_query_data structure
* @out: Returned output value
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out)
{
int ret;
ret = zynqmp_pm_invoke_fn(PM_QUERY_DATA, qdata.qid, qdata.arg1,
qdata.arg2, qdata.arg3, out);
/*
* For clock name query, all bytes in SMC response are clock name
* characters and return code is always success. For invalid clocks,
* clock name bytes would be zeros.
*/
return qdata.qid == PM_QID_CLOCK_GET_NAME ? 0 : ret;
}
EXPORT_SYMBOL_GPL(zynqmp_pm_query_data);
/**
* zynqmp_pm_clock_enable() - Enable the clock for given id
* @clock_id: ID of the clock to be enabled
*
* This function is used by master to enable the clock
* including peripherals and PLL clocks.
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_clock_enable(u32 clock_id)
{
return zynqmp_pm_invoke_fn(PM_CLOCK_ENABLE, clock_id, 0, 0, 0, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_clock_enable);
/**
* zynqmp_pm_clock_disable() - Disable the clock for given id
 * @clock_id:	ID of the clock to be disabled
*
* This function is used by master to disable the clock
* including peripherals and PLL clocks.
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_clock_disable(u32 clock_id)
{
return zynqmp_pm_invoke_fn(PM_CLOCK_DISABLE, clock_id, 0, 0, 0, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_clock_disable);
/**
* zynqmp_pm_clock_getstate() - Get the clock state for given id
* @clock_id: ID of the clock to be queried
* @state: 1/0 (Enabled/Disabled)
*
* This function is used by master to get the state of clock
* including peripherals and PLL clocks.
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETSTATE, clock_id, 0,
0, 0, ret_payload);
*state = ret_payload[1];
return ret;
}
EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getstate);
/**
* zynqmp_pm_clock_setdivider() - Set the clock divider for given id
* @clock_id: ID of the clock
* @divider: divider value
*
* This function is used by master to set divider for any clock
* to achieve desired rate.
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider)
{
return zynqmp_pm_invoke_fn(PM_CLOCK_SETDIVIDER, clock_id, divider,
0, 0, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_clock_setdivider);
/**
* zynqmp_pm_clock_getdivider() - Get the clock divider for given id
* @clock_id: ID of the clock
* @divider: divider value
*
* This function is used by master to get divider values
* for any clock.
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETDIVIDER, clock_id, 0,
0, 0, ret_payload);
*divider = ret_payload[1];
return ret;
}
EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getdivider);
/**
* zynqmp_pm_clock_setrate() - Set the clock rate for given id
* @clock_id: ID of the clock
* @rate: rate value in hz
*
* This function is used by master to set rate for any clock.
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate)
{
return zynqmp_pm_invoke_fn(PM_CLOCK_SETRATE, clock_id,
lower_32_bits(rate),
upper_32_bits(rate),
0, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_clock_setrate);
/**
* zynqmp_pm_clock_getrate() - Get the clock rate for given id
* @clock_id: ID of the clock
* @rate: rate value in hz
*
* This function is used by master to get rate
* for any clock.
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETRATE, clock_id, 0,
0, 0, ret_payload);
*rate = ((u64)ret_payload[2] << 32) | ret_payload[1];
return ret;
}
EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getrate);
/**
* zynqmp_pm_clock_setparent() - Set the clock parent for given id
* @clock_id: ID of the clock
* @parent_id: parent id
*
* This function is used by master to set parent for any clock.
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id)
{
return zynqmp_pm_invoke_fn(PM_CLOCK_SETPARENT, clock_id,
parent_id, 0, 0, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_clock_setparent);
/**
* zynqmp_pm_clock_getparent() - Get the clock parent for given id
* @clock_id: ID of the clock
* @parent_id: parent id
*
* This function is used by master to get parent index
* for any clock.
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETPARENT, clock_id, 0,
0, 0, ret_payload);
*parent_id = ret_payload[1];
return ret;
}
EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getparent);
/**
 * zynqmp_pm_set_pll_frac_mode() - PM API for setting the PLL mode
*
* @clk_id: PLL clock ID
* @mode: PLL mode (PLL_MODE_FRAC/PLL_MODE_INT)
*
 * This function sets the PLL mode.
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode)
{
return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_PLL_FRAC_MODE,
clk_id, mode, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_set_pll_frac_mode);
/**
 * zynqmp_pm_get_pll_frac_mode() - PM API for getting the PLL mode
*
* @clk_id: PLL clock ID
* @mode: PLL mode
*
 * This function returns the current PLL mode.
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_get_pll_frac_mode(u32 clk_id, u32 *mode)
{
return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_GET_PLL_FRAC_MODE,
clk_id, 0, mode);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_get_pll_frac_mode);
/**
* zynqmp_pm_set_pll_frac_data() - PM API for setting pll fraction data
*
* @clk_id: PLL clock ID
* @data: fraction data
*
* This function sets fraction data.
* It is valid for fraction mode only.
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_set_pll_frac_data(u32 clk_id, u32 data)
{
return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_PLL_FRAC_DATA,
clk_id, data, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_set_pll_frac_data);
/**
* zynqmp_pm_get_pll_frac_data() - PM API for getting pll fraction data
*
* @clk_id: PLL clock ID
* @data: fraction data
*
* This function returns fraction data value.
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data)
{
return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_GET_PLL_FRAC_DATA,
clk_id, 0, data);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_get_pll_frac_data);
/**
* zynqmp_pm_set_sd_tapdelay() - Set tap delay for the SD device
*
* @node_id: Node ID of the device
* @type: Type of tap delay to set (input/output)
 * @value: Value to set for the tap delay
*
* This function sets input/output tap delay for the SD device.
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value)
{
u32 reg = (type == PM_TAPDELAY_INPUT) ? SD_ITAPDLY : SD_OTAPDLYSEL;
u32 mask = (node_id == NODE_SD_0) ? GENMASK(15, 0) : GENMASK(31, 16);
if (value) {
return zynqmp_pm_invoke_fn(PM_IOCTL, node_id,
IOCTL_SET_SD_TAPDELAY,
type, value, NULL);
}
/*
* Work around completely misdesigned firmware API on Xilinx ZynqMP.
* The IOCTL_SET_SD_TAPDELAY firmware call allows the caller to only
* ever set IOU_SLCR SD_ITAPDLY Register SD0_ITAPDLYENA/SD1_ITAPDLYENA
* bits, but there is no matching call to clear those bits. If those
* bits are not cleared, SDMMC tuning may fail.
*
* Luckily, there are PM_MMIO_READ/PM_MMIO_WRITE calls which seem to
* allow complete unrestricted access to all address space, including
* IOU_SLCR SD_ITAPDLY Register and all the other registers, access
* to which was supposed to be protected by the current firmware API.
*
* Use PM_MMIO_READ/PM_MMIO_WRITE to re-implement the missing counter
* part of IOCTL_SET_SD_TAPDELAY which clears SDx_ITAPDLYENA bits.
*/
return zynqmp_pm_invoke_fn(PM_MMIO_WRITE, reg, mask, 0, 0, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_tapdelay);
/**
* zynqmp_pm_sd_dll_reset() - Reset DLL logic
*
* @node_id: Node ID of the device
* @type: Reset type
*
* This function resets DLL logic for the SD device.
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type)
{
return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, IOCTL_SD_DLL_RESET,
type, 0, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_sd_dll_reset);
/**
* zynqmp_pm_ospi_mux_select() - OSPI Mux selection
*
* @dev_id: Device Id of the OSPI device.
* @select: OSPI Mux select value.
*
 * This function selects the OSPI Mux.
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select)
{
return zynqmp_pm_invoke_fn(PM_IOCTL, dev_id, IOCTL_OSPI_MUX_SELECT,
select, 0, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_ospi_mux_select);
/**
* zynqmp_pm_write_ggs() - PM API for writing global general storage (ggs)
* @index: GGS register index
* @value: Register value to be written
*
* This function writes value to GGS register.
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_write_ggs(u32 index, u32 value)
{
return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_WRITE_GGS,
index, value, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_write_ggs);
/**
* zynqmp_pm_read_ggs() - PM API for reading global general storage (ggs)
* @index: GGS register index
 * @value: Buffer to store the GGS register value read
*
* This function returns GGS register value.
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_read_ggs(u32 index, u32 *value)
{
return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_READ_GGS,
index, 0, value);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_read_ggs);
/**
* zynqmp_pm_write_pggs() - PM API for writing persistent global general
* storage (pggs)
* @index: PGGS register index
* @value: Register value to be written
*
* This function writes value to PGGS register.
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_write_pggs(u32 index, u32 value)
{
return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_WRITE_PGGS, index, value,
NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_write_pggs);
/**
* zynqmp_pm_read_pggs() - PM API for reading persistent global general
* storage (pggs)
* @index: PGGS register index
 * @value: Buffer to store the PGGS register value read
*
* This function returns PGGS register value.
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_read_pggs(u32 index, u32 *value)
{
return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_READ_PGGS, index, 0,
value);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_read_pggs);
int zynqmp_pm_set_tapdelay_bypass(u32 index, u32 value)
{
return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_TAPDELAY_BYPASS,
index, value, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_set_tapdelay_bypass);
/**
* zynqmp_pm_set_boot_health_status() - PM API for setting healthy boot status
* @value: Status value to be written
*
* This function sets healthy bit value to indicate boot health status
* to firmware.
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_set_boot_health_status(u32 value)
{
return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_BOOT_HEALTH_STATUS,
value, 0, NULL);
}
/**
* zynqmp_pm_reset_assert - Request setting of reset (1 - assert, 0 - release)
* @reset: Reset to be configured
* @assert_flag: Flag stating should reset be asserted (1) or
* released (0)
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
const enum zynqmp_pm_reset_action assert_flag)
{
return zynqmp_pm_invoke_fn(PM_RESET_ASSERT, reset, assert_flag,
0, 0, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_reset_assert);
/**
* zynqmp_pm_reset_get_status - Get status of the reset
* @reset: Reset whose status should be returned
* @status: Returned status
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
if (!status)
return -EINVAL;
ret = zynqmp_pm_invoke_fn(PM_RESET_GET_STATUS, reset, 0,
0, 0, ret_payload);
*status = ret_payload[1];
return ret;
}
EXPORT_SYMBOL_GPL(zynqmp_pm_reset_get_status);
/**
* zynqmp_pm_fpga_load - Perform the fpga load
* @address: Address to write to
* @size: pl bitstream size
* @flags: Bitstream type
* -XILINX_ZYNQMP_PM_FPGA_FULL: FPGA full reconfiguration
* -XILINX_ZYNQMP_PM_FPGA_PARTIAL: FPGA partial reconfiguration
*
 * This function provides access to pmufw to transfer
 * the required bitstream into the PL.
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_fpga_load(const u64 address, const u32 size, const u32 flags)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
ret = zynqmp_pm_invoke_fn(PM_FPGA_LOAD, lower_32_bits(address),
upper_32_bits(address), size, flags,
ret_payload);
if (ret_payload[0])
return -ret_payload[0];
return ret;
}
EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_load);
/**
* zynqmp_pm_fpga_get_status - Read value from PCAP status register
 * @value: Buffer to store the PCAP status register value
*
* This function provides access to the pmufw to get the PCAP
* status
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_fpga_get_status(u32 *value)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
if (!value)
return -EINVAL;
ret = zynqmp_pm_invoke_fn(PM_FPGA_GET_STATUS, 0, 0, 0, 0, ret_payload);
*value = ret_payload[1];
return ret;
}
EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_get_status);
/**
* zynqmp_pm_fpga_get_config_status - Get the FPGA configuration status.
* @value: Buffer to store FPGA configuration status.
*
* This function provides access to the pmufw to get the FPGA configuration
* status
*
* Return: 0 on success, a negative value on error
*/
int zynqmp_pm_fpga_get_config_status(u32 *value)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
u32 buf, lower_addr, upper_addr;
int ret;
if (!value)
return -EINVAL;
lower_addr = lower_32_bits((u64)&buf);
upper_addr = upper_32_bits((u64)&buf);
ret = zynqmp_pm_invoke_fn(PM_FPGA_READ,
XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET,
lower_addr, upper_addr,
XILINX_ZYNQMP_PM_FPGA_READ_CONFIG_REG,
ret_payload);
*value = ret_payload[1];
return ret;
}
EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_get_config_status);
/**
* zynqmp_pm_pinctrl_request - Request Pin from firmware
* @pin: Pin number to request
*
* This function requests pin from firmware.
*
* Return: Returns status, either success or error+reason.
*/
int zynqmp_pm_pinctrl_request(const u32 pin)
{
return zynqmp_pm_invoke_fn(PM_PINCTRL_REQUEST, pin, 0, 0, 0, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_request);
/**
* zynqmp_pm_pinctrl_release - Inform firmware that Pin control is released
* @pin: Pin number to release
*
 * This function releases the pin back to firmware.
*
* Return: Returns status, either success or error+reason.
*/
int zynqmp_pm_pinctrl_release(const u32 pin)
{
return zynqmp_pm_invoke_fn(PM_PINCTRL_RELEASE, pin, 0, 0, 0, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_release);
/**
* zynqmp_pm_pinctrl_get_function - Read function id set for the given pin
* @pin: Pin number
* @id: Buffer to store function ID
*
* This function provides the function currently set for the given pin.
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
if (!id)
return -EINVAL;
ret = zynqmp_pm_invoke_fn(PM_PINCTRL_GET_FUNCTION, pin, 0,
0, 0, ret_payload);
*id = ret_payload[1];
return ret;
}
EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_get_function);
/**
* zynqmp_pm_pinctrl_set_function - Set requested function for the pin
* @pin: Pin number
* @id: Function ID to set
*
* This function sets requested function for the given pin.
*
* Return: Returns status, either success or error+reason.
*/
int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id)
{
return zynqmp_pm_invoke_fn(PM_PINCTRL_SET_FUNCTION, pin, id,
0, 0, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_set_function);
/**
* zynqmp_pm_pinctrl_get_config - Get configuration parameter for the pin
* @pin: Pin number
* @param: Parameter to get
* @value: Buffer to store parameter value
*
* This function gets requested configuration parameter for the given pin.
*
* Return: Returns status, either success or error+reason.
*/
int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param,
u32 *value)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
if (!value)
return -EINVAL;
ret = zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_GET, pin, param,
0, 0, ret_payload);
*value = ret_payload[1];
return ret;
}
EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_get_config);
/**
* zynqmp_pm_pinctrl_set_config - Set configuration parameter for the pin
* @pin: Pin number
* @param: Parameter to set
* @value: Parameter value to set
*
* This function sets requested configuration parameter for the given pin.
*
* Return: Returns status, either success or error+reason.
*/
int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param,
u32 value)
{
int ret;
if (pm_family_code == ZYNQMP_FAMILY_CODE &&
param == PM_PINCTRL_CONFIG_TRI_STATE) {
ret = zynqmp_pm_feature(PM_PINCTRL_CONFIG_PARAM_SET);
if (ret < PM_PINCTRL_PARAM_SET_VERSION)
return -EOPNOTSUPP;
}
return zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_SET, pin,
param, value, 0, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_set_config);
/**
 * zynqmp_pm_bootmode_read() - PM Config API for reading bootpin status
* @ps_mode: Returned output value of ps_mode
*
 * This API function is used to request that the power management controller
 * read the bootpin status.
*
* Return: status, either success or error+reason
*/
unsigned int zynqmp_pm_bootmode_read(u32 *ps_mode)
{
unsigned int ret;
u32 ret_payload[PAYLOAD_ARG_CNT];
ret = zynqmp_pm_invoke_fn(PM_MMIO_READ, CRL_APB_BOOT_PIN_CTRL, 0,
0, 0, ret_payload);
*ps_mode = ret_payload[1];
return ret;
}
EXPORT_SYMBOL_GPL(zynqmp_pm_bootmode_read);
/**
 * zynqmp_pm_bootmode_write() - PM Config API for configuring the bootpin
* @ps_mode: Value to be written to the bootpin ctrl register
*
 * This API function is used to request that the power management controller
 * configure the bootpin.
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_bootmode_write(u32 ps_mode)
{
return zynqmp_pm_invoke_fn(PM_MMIO_WRITE, CRL_APB_BOOT_PIN_CTRL,
CRL_APB_BOOTPIN_CTRL_MASK, ps_mode, 0, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_bootmode_write);
/**
* zynqmp_pm_init_finalize() - PM call to inform firmware that the caller
* master has initialized its own power management
*
* Return: Returns status, either success or error+reason
*
 * This API function is used to notify the power management controller that
 * the power management initialization has completed.
*/
int zynqmp_pm_init_finalize(void)
{
return zynqmp_pm_invoke_fn(PM_PM_INIT_FINALIZE, 0, 0, 0, 0, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_init_finalize);
/**
* zynqmp_pm_set_suspend_mode() - Set system suspend mode
* @mode: Mode to set for system suspend
*
* This API function is used to set mode of system suspend.
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_set_suspend_mode(u32 mode)
{
return zynqmp_pm_invoke_fn(PM_SET_SUSPEND_MODE, mode, 0, 0, 0, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_set_suspend_mode);
/**
* zynqmp_pm_request_node() - Request a node with specific capabilities
* @node: Node ID of the slave
* @capabilities: Requested capabilities of the slave
* @qos: Quality of service (not supported)
* @ack: Flag to specify whether acknowledge is requested
*
* This function is used by master to request particular node from firmware.
* Every master must request node before using it.
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
const u32 qos, const enum zynqmp_pm_request_ack ack)
{
return zynqmp_pm_invoke_fn(PM_REQUEST_NODE, node, capabilities,
qos, ack, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_request_node);
/**
* zynqmp_pm_release_node() - Release a node
* @node: Node ID of the slave
*
* This function is used by master to inform firmware that master
* has released node. Once released, master must not use that node
* without re-request.
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_release_node(const u32 node)
{
return zynqmp_pm_invoke_fn(PM_RELEASE_NODE, node, 0, 0, 0, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_release_node);
/**
* zynqmp_pm_get_rpu_mode() - Get RPU mode
* @node_id: Node ID of the device
 * @rpu_mode:	Returned RPU operating mode, either split or lockstep
 *
 * Return: Returns 0 on success or error+reason. On success, rpu_mode is
 *	    set to the current RPU mode.
*/
int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
ret = zynqmp_pm_invoke_fn(PM_IOCTL, node_id,
IOCTL_GET_RPU_OPER_MODE, 0, 0, ret_payload);
/* only set rpu_mode if no error */
if (ret == XST_PM_SUCCESS)
*rpu_mode = ret_payload[0];
return ret;
}
EXPORT_SYMBOL_GPL(zynqmp_pm_get_rpu_mode);
/**
* zynqmp_pm_set_rpu_mode() - Set RPU mode
* @node_id: Node ID of the device
 * @rpu_mode: Requested RPU operating mode, either split or lockstep
*
* This function is used to set RPU mode to split or
* lockstep
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_set_rpu_mode(u32 node_id, enum rpu_oper_mode rpu_mode)
{
return zynqmp_pm_invoke_fn(PM_IOCTL, node_id,
IOCTL_SET_RPU_OPER_MODE, (u32)rpu_mode,
0, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_set_rpu_mode);
/**
* zynqmp_pm_set_tcm_config - configure TCM
* @node_id: Firmware specific TCM subsystem ID
 * @tcm_mode: Requested TCM configuration,
 *	      either PM_RPU_TCM_COMB or PM_RPU_TCM_SPLIT
 *
 * This function is used to set the TCM configuration to combined or split
*
* Return: status: 0 for success, else failure
*/
int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode)
{
return zynqmp_pm_invoke_fn(PM_IOCTL, node_id,
IOCTL_TCM_COMB_CONFIG, (u32)tcm_mode, 0,
NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_set_tcm_config);
/**
* zynqmp_pm_force_pwrdwn - PM call to request for another PU or subsystem to
* be powered down forcefully
* @node: Node ID of the targeted PU or subsystem
* @ack: Flag to specify whether acknowledge is requested
*
* Return: status, either success or error+reason
*/
int zynqmp_pm_force_pwrdwn(const u32 node,
const enum zynqmp_pm_request_ack ack)
{
return zynqmp_pm_invoke_fn(PM_FORCE_POWERDOWN, node, ack, 0, 0, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_force_pwrdwn);
/**
* zynqmp_pm_request_wake - PM call to wake up selected master or subsystem
* @node: Node ID of the master or subsystem
* @set_addr: Specifies whether the address argument is relevant
* @address: Address from which to resume when woken up
* @ack: Flag to specify whether acknowledge requested
*
* Return: status, either success or error+reason
*/
int zynqmp_pm_request_wake(const u32 node,
const bool set_addr,
const u64 address,
const enum zynqmp_pm_request_ack ack)
{
/* set_addr flag is encoded into 1st bit of address */
return zynqmp_pm_invoke_fn(PM_REQUEST_WAKEUP, node, address | set_addr,
address >> 32, ack, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_request_wake);
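/*
 * Illustrative argument encoding (not part of the driver): waking a node with
 * set_addr = true and a resume address of 0x10000 results in the low address
 * word being passed as 0x10000 | 0x1 and the upper 32 bits of the address as
 * the next argument, i.e. the firmware receives the set_addr flag in bit 0 of
 * the low address word.
 */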
/**
* zynqmp_pm_set_requirement() - PM call to set requirement for PM slaves
* @node: Node ID of the slave
* @capabilities: Requested capabilities of the slave
* @qos: Quality of service (not supported)
* @ack: Flag to specify whether acknowledge is requested
*
 * This API function is to be used for slaves a PU has already requested,
 * in order to change their capabilities.
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
const u32 qos,
const enum zynqmp_pm_request_ack ack)
{
return zynqmp_pm_invoke_fn(PM_SET_REQUIREMENT, node, capabilities,
qos, ack, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_set_requirement);
/**
* zynqmp_pm_load_pdi - Load and process PDI
* @src: Source device where PDI is located
* @address: PDI src address
*
 * This function provides support to load a PDI from Linux.
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_load_pdi(const u32 src, const u64 address)
{
return zynqmp_pm_invoke_fn(PM_LOAD_PDI, src,
lower_32_bits(address),
upper_32_bits(address), 0, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_load_pdi);
/**
* zynqmp_pm_aes_engine - Access AES hardware to encrypt/decrypt the data using
* AES-GCM core.
* @address: Address of the AesParams structure.
* @out: Returned output value
*
* Return: Returns status, either success or error code.
*/
int zynqmp_pm_aes_engine(const u64 address, u32 *out)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
if (!out)
return -EINVAL;
ret = zynqmp_pm_invoke_fn(PM_SECURE_AES, upper_32_bits(address),
lower_32_bits(address),
0, 0, ret_payload);
*out = ret_payload[1];
return ret;
}
EXPORT_SYMBOL_GPL(zynqmp_pm_aes_engine);
/**
* zynqmp_pm_sha_hash - Access the SHA engine to calculate the hash
 * @address:	Address of the data / address of the output buffer where
 *		the hash should be stored.
* @size: Size of the data.
* @flags:
* BIT(0) - for initializing csudma driver and SHA3(Here address
* and size inputs can be NULL).
* BIT(1) - to call Sha3_Update API which can be called multiple
* times when data is not contiguous.
* BIT(2) - to get final hash of the whole updated data.
* Hash will be overwritten at provided address with
* 48 bytes.
*
* Return: Returns status, either success or error code.
*/
int zynqmp_pm_sha_hash(const u64 address, const u32 size, const u32 flags)
{
u32 lower_addr = lower_32_bits(address);
u32 upper_addr = upper_32_bits(address);
return zynqmp_pm_invoke_fn(PM_SECURE_SHA, upper_addr, lower_addr,
size, flags, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_sha_hash);
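/*
 * Illustrative call sequence (not part of the driver): a caller computing a
 * SHA3-384 digest over one DMA-able buffer would typically issue
 *
 *	zynqmp_pm_sha_hash(0, 0, BIT(0));		// init CSU DMA + SHA3
 *	zynqmp_pm_sha_hash(data_addr, len, BIT(1));	// update with data
 *	zynqmp_pm_sha_hash(hash_addr, 48, BIT(2));	// final 48-byte hash
 *
 * data_addr/hash_addr/len are hypothetical placeholders and the size passed
 * on the final call is an assumption; the flag meanings come from the
 * kernel-doc above.
 */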
/**
 * zynqmp_pm_register_notifier() - PM API for registering a subsystem to be
 *				   notified about a specific event/error.
* @node: Node ID to which the event is related.
 * @event: Event mask of error events for which the subsystem wants to be notified.
* @wake: Wake subsystem upon capturing the event if value 1
* @enable: Enable the registration for value 1, disable for value 0
*
* This function is used to register/un-register for particular node-event
* combination in firmware.
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_register_notifier(const u32 node, const u32 event,
const u32 wake, const u32 enable)
{
return zynqmp_pm_invoke_fn(PM_REGISTER_NOTIFIER, node, event,
wake, enable, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_register_notifier);
/**
* zynqmp_pm_system_shutdown - PM call to request a system shutdown or restart
* @type: Shutdown or restart? 0 for shutdown, 1 for restart
* @subtype: Specifies which system should be restarted or shut down
*
* Return: Returns status, either success or error+reason
*/
int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype)
{
return zynqmp_pm_invoke_fn(PM_SYSTEM_SHUTDOWN, type, subtype,
0, 0, NULL);
}
/**
* zynqmp_pm_set_feature_config - PM call to request IOCTL for feature config
* @id: The config ID of the feature to be configured
* @value: The config value of the feature to be configured
*
* Return: Returns 0 on success or error value on failure.
*/
int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value)
{
return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_FEATURE_CONFIG,
id, value, NULL);
}
/**
* zynqmp_pm_get_feature_config - PM call to get value of configured feature
* @id: The config id of the feature to be queried
* @payload: Returned value array
*
* Return: Returns 0 on success or error value on failure.
*/
int zynqmp_pm_get_feature_config(enum pm_feature_config_id id,
u32 *payload)
{
return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_GET_FEATURE_CONFIG,
id, 0, payload);
}
/**
* zynqmp_pm_set_sd_config - PM call to set value of SD config registers
* @node: SD node ID
* @config: The config type of SD registers
* @value: Value to be set
*
* Return: Returns 0 on success or error value on failure.
*/
int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value)
{
return zynqmp_pm_invoke_fn(PM_IOCTL, node, IOCTL_SET_SD_CONFIG,
config, value, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_config);
/**
* zynqmp_pm_set_gem_config - PM call to set value of GEM config registers
* @node: GEM node ID
* @config: The config type of GEM registers
* @value: Value to be set
*
* Return: Returns 0 on success or error value on failure.
*/
int zynqmp_pm_set_gem_config(u32 node, enum pm_gem_config_type config,
u32 value)
{
return zynqmp_pm_invoke_fn(PM_IOCTL, node, IOCTL_SET_GEM_CONFIG,
config, value, NULL);
}
EXPORT_SYMBOL_GPL(zynqmp_pm_set_gem_config);
/**
* struct zynqmp_pm_shutdown_scope - Struct for shutdown scope
* @subtype: Shutdown subtype
* @name: Matching string for scope argument
*
* This struct encapsulates mapping between shutdown scope ID and string.
*/
struct zynqmp_pm_shutdown_scope {
const enum zynqmp_pm_shutdown_subtype subtype;
const char *name;
};
static struct zynqmp_pm_shutdown_scope shutdown_scopes[] = {
[ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM] = {
.subtype = ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM,
.name = "subsystem",
},
[ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY] = {
.subtype = ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY,
.name = "ps_only",
},
[ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM] = {
.subtype = ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM,
.name = "system",
},
};
static struct zynqmp_pm_shutdown_scope *selected_scope =
&shutdown_scopes[ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM];
/**
* zynqmp_pm_is_shutdown_scope_valid - Check if shutdown scope string is valid
* @scope_string: Shutdown scope string
*
* Return: Return pointer to matching shutdown scope struct from
* array of available options in system if string is valid,
* otherwise returns NULL.
*/
static struct zynqmp_pm_shutdown_scope*
zynqmp_pm_is_shutdown_scope_valid(const char *scope_string)
{
int count;
for (count = 0; count < ARRAY_SIZE(shutdown_scopes); count++)
if (sysfs_streq(scope_string, shutdown_scopes[count].name))
return &shutdown_scopes[count];
return NULL;
}
static ssize_t shutdown_scope_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
int i;
for (i = 0; i < ARRAY_SIZE(shutdown_scopes); i++) {
if (&shutdown_scopes[i] == selected_scope) {
strcat(buf, "[");
strcat(buf, shutdown_scopes[i].name);
strcat(buf, "]");
} else {
strcat(buf, shutdown_scopes[i].name);
}
strcat(buf, " ");
}
strcat(buf, "\n");
return strlen(buf);
}
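/*
 * Illustrative sysfs output (assumed, not captured from hardware): with the
 * default "system" scope selected, reading the shutdown_scope attribute would
 * print
 *
 *	subsystem ps_only [system]
 *
 * where the brackets mark the currently selected scope.
 */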
static ssize_t shutdown_scope_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
struct zynqmp_pm_shutdown_scope *scope;
scope = zynqmp_pm_is_shutdown_scope_valid(buf);
if (!scope)
return -EINVAL;
ret = zynqmp_pm_system_shutdown(ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY,
scope->subtype);
if (ret) {
pr_err("unable to set shutdown scope %s\n", buf);
return ret;
}
selected_scope = scope;
return count;
}
static DEVICE_ATTR_RW(shutdown_scope);
static ssize_t health_status_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
unsigned int value;
ret = kstrtouint(buf, 10, &value);
if (ret)
return ret;
ret = zynqmp_pm_set_boot_health_status(value);
if (ret) {
dev_err(device, "unable to set healthy bit value to %u\n",
value);
return ret;
}
return count;
}
static DEVICE_ATTR_WO(health_status);
static ssize_t ggs_show(struct device *device,
struct device_attribute *attr,
char *buf,
u32 reg)
{
int ret;
u32 ret_payload[PAYLOAD_ARG_CNT];
ret = zynqmp_pm_read_ggs(reg, ret_payload);
if (ret)
return ret;
return sprintf(buf, "0x%x\n", ret_payload[1]);
}
static ssize_t ggs_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count,
u32 reg)
{
long value;
int ret;
if (reg >= GSS_NUM_REGS)
return -EINVAL;
ret = kstrtol(buf, 16, &value);
if (ret) {
count = -EFAULT;
goto err;
}
ret = zynqmp_pm_write_ggs(reg, value);
if (ret)
count = -EFAULT;
err:
return count;
}
/* GGS register show functions */
#define GGS0_SHOW(N) \
ssize_t ggs##N##_show(struct device *device, \
struct device_attribute *attr, \
char *buf) \
{ \
return ggs_show(device, attr, buf, N); \
}
static GGS0_SHOW(0);
static GGS0_SHOW(1);
static GGS0_SHOW(2);
static GGS0_SHOW(3);
/* GGS register store function */
#define GGS0_STORE(N) \
ssize_t ggs##N##_store(struct device *device, \
struct device_attribute *attr, \
const char *buf, \
size_t count) \
{ \
return ggs_store(device, attr, buf, count, N); \
}
static GGS0_STORE(0);
static GGS0_STORE(1);
static GGS0_STORE(2);
static GGS0_STORE(3);
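/*
 * Illustrative macro expansion (not part of the driver): GGS0_SHOW(0) above
 * expands to
 *
 *	ssize_t ggs0_show(struct device *device,
 *			  struct device_attribute *attr, char *buf)
 *	{
 *		return ggs_show(device, attr, buf, 0);
 *	}
 *
 * so each DEVICE_ATTR_RW(ggsN) below picks up a matching ggsN_show and
 * ggsN_store pair generated the same way.
 */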
static ssize_t pggs_show(struct device *device,
struct device_attribute *attr,
char *buf,
u32 reg)
{
int ret;
u32 ret_payload[PAYLOAD_ARG_CNT];
ret = zynqmp_pm_read_pggs(reg, ret_payload);
if (ret)
return ret;
return sprintf(buf, "0x%x\n", ret_payload[1]);
}
static ssize_t pggs_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count,
u32 reg)
{
long value;
int ret;
if (reg >= GSS_NUM_REGS)
return -EINVAL;
ret = kstrtol(buf, 16, &value);
if (ret) {
count = -EFAULT;
goto err;
}
ret = zynqmp_pm_write_pggs(reg, value);
if (ret)
count = -EFAULT;
err:
return count;
}
#define PGGS0_SHOW(N) \
ssize_t pggs##N##_show(struct device *device, \
struct device_attribute *attr, \
char *buf) \
{ \
return pggs_show(device, attr, buf, N); \
}
#define PGGS0_STORE(N) \
ssize_t pggs##N##_store(struct device *device, \
struct device_attribute *attr, \
const char *buf, \
size_t count) \
{ \
return pggs_store(device, attr, buf, count, N); \
}
/* PGGS register show functions */
static PGGS0_SHOW(0);
static PGGS0_SHOW(1);
static PGGS0_SHOW(2);
static PGGS0_SHOW(3);
/* PGGS register store functions */
static PGGS0_STORE(0);
static PGGS0_STORE(1);
static PGGS0_STORE(2);
static PGGS0_STORE(3);
/* GGS register attributes */
static DEVICE_ATTR_RW(ggs0);
static DEVICE_ATTR_RW(ggs1);
static DEVICE_ATTR_RW(ggs2);
static DEVICE_ATTR_RW(ggs3);
/* PGGS register attributes */
static DEVICE_ATTR_RW(pggs0);
static DEVICE_ATTR_RW(pggs1);
static DEVICE_ATTR_RW(pggs2);
static DEVICE_ATTR_RW(pggs3);
static ssize_t feature_config_id_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct zynqmp_devinfo *devinfo = dev_get_drvdata(device);
return sysfs_emit(buf, "%d\n", devinfo->feature_conf_id);
}
static ssize_t feature_config_id_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
{
u32 config_id;
int ret;
struct zynqmp_devinfo *devinfo = dev_get_drvdata(device);
if (!buf)
return -EINVAL;
ret = kstrtou32(buf, 10, &config_id);
if (ret)
return ret;
devinfo->feature_conf_id = config_id;
return count;
}
static DEVICE_ATTR_RW(feature_config_id);
static ssize_t feature_config_value_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
int ret;
u32 ret_payload[PAYLOAD_ARG_CNT];
struct zynqmp_devinfo *devinfo = dev_get_drvdata(device);
ret = zynqmp_pm_get_feature_config(devinfo->feature_conf_id,
ret_payload);
if (ret)
return ret;
return sysfs_emit(buf, "%d\n", ret_payload[1]);
}
static ssize_t feature_config_value_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
{
u32 value;
int ret;
struct zynqmp_devinfo *devinfo = dev_get_drvdata(device);
if (!buf)
return -EINVAL;
ret = kstrtou32(buf, 10, &value);
if (ret)
return ret;
ret = zynqmp_pm_set_feature_config(devinfo->feature_conf_id,
value);
if (ret)
return ret;
return count;
}
static DEVICE_ATTR_RW(feature_config_value);
static struct attribute *zynqmp_firmware_attrs[] = {
&dev_attr_ggs0.attr,
&dev_attr_ggs1.attr,
&dev_attr_ggs2.attr,
&dev_attr_ggs3.attr,
&dev_attr_pggs0.attr,
&dev_attr_pggs1.attr,
&dev_attr_pggs2.attr,
&dev_attr_pggs3.attr,
&dev_attr_shutdown_scope.attr,
&dev_attr_health_status.attr,
&dev_attr_feature_config_id.attr,
&dev_attr_feature_config_value.attr,
NULL,
};
ATTRIBUTE_GROUPS(zynqmp_firmware);
static int zynqmp_firmware_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np;
struct zynqmp_devinfo *devinfo;
int ret;
ret = get_set_conduit_method(dev->of_node);
if (ret)
return ret;
np = of_find_compatible_node(NULL, NULL, "xlnx,zynqmp");
if (!np) {
np = of_find_compatible_node(NULL, NULL, "xlnx,versal");
if (!np)
return 0;
feature_check_enabled = true;
}
if (!feature_check_enabled) {
ret = do_feature_check_call(PM_FEATURE_CHECK);
if (ret >= 0)
feature_check_enabled = true;
}
of_node_put(np);
devinfo = devm_kzalloc(dev, sizeof(*devinfo), GFP_KERNEL);
if (!devinfo)
return -ENOMEM;
devinfo->dev = dev;
platform_set_drvdata(pdev, devinfo);
/* Check PM API version number */
ret = zynqmp_pm_get_api_version(&pm_api_version);
if (ret)
return ret;
if (pm_api_version < ZYNQMP_PM_VERSION) {
panic("%s Platform Management API version error. Expected: v%d.%d - Found: v%d.%d\n",
__func__,
ZYNQMP_PM_VERSION_MAJOR, ZYNQMP_PM_VERSION_MINOR,
pm_api_version >> 16, pm_api_version & 0xFFFF);
}
pr_info("%s Platform Management API v%d.%d\n", __func__,
pm_api_version >> 16, pm_api_version & 0xFFFF);
/* Get the Family code and sub family code of platform */
ret = zynqmp_pm_get_family_info(&pm_family_code, &pm_sub_family_code);
if (ret < 0)
return ret;
/* Check trustzone version number */
ret = zynqmp_pm_get_trustzone_version(&pm_tz_version);
if (ret)
panic("Legacy trustzone found without version support\n");
if (pm_tz_version < ZYNQMP_TZ_VERSION)
panic("%s Trustzone version error. Expected: v%d.%d - Found: v%d.%d\n",
__func__,
ZYNQMP_TZ_VERSION_MAJOR, ZYNQMP_TZ_VERSION_MINOR,
pm_tz_version >> 16, pm_tz_version & 0xFFFF);
pr_info("%s Trustzone version v%d.%d\n", __func__,
pm_tz_version >> 16, pm_tz_version & 0xFFFF);
ret = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, firmware_devs,
ARRAY_SIZE(firmware_devs), NULL, 0, NULL);
if (ret) {
dev_err(&pdev->dev, "failed to add MFD devices %d\n", ret);
return ret;
}
zynqmp_pm_api_debugfs_init();
np = of_find_compatible_node(NULL, NULL, "xlnx,versal");
if (np) {
em_dev = platform_device_register_data(&pdev->dev, "xlnx_event_manager",
-1, NULL, 0);
if (IS_ERR(em_dev))
dev_err_probe(&pdev->dev, PTR_ERR(em_dev), "EM register fail with error\n");
}
of_node_put(np);
return of_platform_populate(dev->of_node, NULL, NULL, dev);
}
static int zynqmp_firmware_remove(struct platform_device *pdev)
{
struct pm_api_feature_data *feature_data;
struct hlist_node *tmp;
int i;
mfd_remove_devices(&pdev->dev);
zynqmp_pm_api_debugfs_exit();
hash_for_each_safe(pm_api_features_map, i, tmp, feature_data, hentry) {
hash_del(&feature_data->hentry);
kfree(feature_data);
}
platform_device_unregister(em_dev);
return 0;
}
static const struct of_device_id zynqmp_firmware_of_match[] = {
{.compatible = "xlnx,zynqmp-firmware"},
{.compatible = "xlnx,versal-firmware"},
{},
};
MODULE_DEVICE_TABLE(of, zynqmp_firmware_of_match);
static struct platform_driver zynqmp_firmware_driver = {
.driver = {
.name = "zynqmp_firmware",
.of_match_table = zynqmp_firmware_of_match,
.dev_groups = zynqmp_firmware_groups,
},
.probe = zynqmp_firmware_probe,
.remove = zynqmp_firmware_remove,
};
module_platform_driver(zynqmp_firmware_driver);
| linux-master | drivers/firmware/xilinx/zynqmp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* vpd.c
*
* Driver for exporting VPD content to sysfs.
*
* Copyright 2017 Google Inc.
*/
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include "coreboot_table.h"
#include "vpd_decode.h"
#define CB_TAG_VPD 0x2c
#define VPD_CBMEM_MAGIC 0x43524f53
static struct kobject *vpd_kobj;
struct vpd_cbmem {
u32 magic;
u32 version;
u32 ro_size;
u32 rw_size;
u8 blob[];
};
struct vpd_section {
bool enabled;
const char *name;
char *raw_name; /* the string name_raw */
struct kobject *kobj; /* vpd/name directory */
char *baseaddr;
struct bin_attribute bin_attr; /* vpd/name_raw bin_attribute */
struct list_head attribs; /* key/value in vpd_attrib_info list */
};
struct vpd_attrib_info {
char *key;
const char *value;
struct bin_attribute bin_attr;
struct list_head list;
};
static struct vpd_section ro_vpd;
static struct vpd_section rw_vpd;
static ssize_t vpd_attrib_read(struct file *filp, struct kobject *kobp,
struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count)
{
struct vpd_attrib_info *info = bin_attr->private;
return memory_read_from_buffer(buf, count, &pos, info->value,
info->bin_attr.size);
}
/*
* vpd_section_check_key_name()
*
 * The VPD specification supports only [a-zA-Z0-9_]+ characters in key names,
 * but old firmware versions may have entries like "S/N" that are problematic
 * when exported as sysfs attributes. Such keys found in old firmware are
 * ignored.
*
* Returns VPD_OK for a valid key name, VPD_FAIL otherwise.
*
* @key: The key name to check
* @key_len: key name length
*/
static int vpd_section_check_key_name(const u8 *key, s32 key_len)
{
int c;
while (key_len-- > 0) {
c = *key++;
if (!isalnum(c) && c != '_')
return VPD_FAIL;
}
return VPD_OK;
}
static int vpd_section_attrib_add(const u8 *key, u32 key_len,
const u8 *value, u32 value_len,
void *arg)
{
int ret;
struct vpd_section *sec = arg;
struct vpd_attrib_info *info;
/*
* Return VPD_OK immediately to decode next entry if the current key
* name contains invalid characters.
*/
if (vpd_section_check_key_name(key, key_len) != VPD_OK)
return VPD_OK;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->key = kstrndup(key, key_len, GFP_KERNEL);
if (!info->key) {
ret = -ENOMEM;
goto free_info;
}
sysfs_bin_attr_init(&info->bin_attr);
info->bin_attr.attr.name = info->key;
info->bin_attr.attr.mode = 0444;
info->bin_attr.size = value_len;
info->bin_attr.read = vpd_attrib_read;
info->bin_attr.private = info;
info->value = value;
INIT_LIST_HEAD(&info->list);
ret = sysfs_create_bin_file(sec->kobj, &info->bin_attr);
if (ret)
goto free_info_key;
list_add_tail(&info->list, &sec->attribs);
return 0;
free_info_key:
kfree(info->key);
free_info:
kfree(info);
return ret;
}
static void vpd_section_attrib_destroy(struct vpd_section *sec)
{
struct vpd_attrib_info *info;
struct vpd_attrib_info *temp;
list_for_each_entry_safe(info, temp, &sec->attribs, list) {
sysfs_remove_bin_file(sec->kobj, &info->bin_attr);
kfree(info->key);
kfree(info);
}
}
static ssize_t vpd_section_read(struct file *filp, struct kobject *kobp,
struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count)
{
struct vpd_section *sec = bin_attr->private;
return memory_read_from_buffer(buf, count, &pos, sec->baseaddr,
sec->bin_attr.size);
}
static int vpd_section_create_attribs(struct vpd_section *sec)
{
s32 consumed;
int ret;
consumed = 0;
do {
ret = vpd_decode_string(sec->bin_attr.size, sec->baseaddr,
&consumed, vpd_section_attrib_add, sec);
} while (ret == VPD_OK);
return 0;
}
static int vpd_section_init(const char *name, struct vpd_section *sec,
phys_addr_t physaddr, size_t size)
{
int err;
sec->baseaddr = memremap(physaddr, size, MEMREMAP_WB);
if (!sec->baseaddr)
return -ENOMEM;
sec->name = name;
/* We want to export the raw partition with name ${name}_raw */
sec->raw_name = kasprintf(GFP_KERNEL, "%s_raw", name);
if (!sec->raw_name) {
err = -ENOMEM;
goto err_memunmap;
}
sysfs_bin_attr_init(&sec->bin_attr);
sec->bin_attr.attr.name = sec->raw_name;
sec->bin_attr.attr.mode = 0444;
sec->bin_attr.size = size;
sec->bin_attr.read = vpd_section_read;
sec->bin_attr.private = sec;
err = sysfs_create_bin_file(vpd_kobj, &sec->bin_attr);
if (err)
goto err_free_raw_name;
sec->kobj = kobject_create_and_add(name, vpd_kobj);
if (!sec->kobj) {
err = -EINVAL;
goto err_sysfs_remove;
}
INIT_LIST_HEAD(&sec->attribs);
vpd_section_create_attribs(sec);
sec->enabled = true;
return 0;
err_sysfs_remove:
sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
err_free_raw_name:
kfree(sec->raw_name);
err_memunmap:
memunmap(sec->baseaddr);
return err;
}
static int vpd_section_destroy(struct vpd_section *sec)
{
if (sec->enabled) {
vpd_section_attrib_destroy(sec);
kobject_put(sec->kobj);
sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
kfree(sec->raw_name);
memunmap(sec->baseaddr);
sec->enabled = false;
}
return 0;
}
static int vpd_sections_init(phys_addr_t physaddr)
{
struct vpd_cbmem *temp;
struct vpd_cbmem header;
int ret = 0;
temp = memremap(physaddr, sizeof(struct vpd_cbmem), MEMREMAP_WB);
if (!temp)
return -ENOMEM;
memcpy(&header, temp, sizeof(struct vpd_cbmem));
memunmap(temp);
if (header.magic != VPD_CBMEM_MAGIC)
return -ENODEV;
if (header.ro_size) {
ret = vpd_section_init("ro", &ro_vpd,
physaddr + sizeof(struct vpd_cbmem),
header.ro_size);
if (ret)
return ret;
}
if (header.rw_size) {
ret = vpd_section_init("rw", &rw_vpd,
physaddr + sizeof(struct vpd_cbmem) +
header.ro_size, header.rw_size);
if (ret) {
vpd_section_destroy(&ro_vpd);
return ret;
}
}
return 0;
}
static int vpd_probe(struct coreboot_device *dev)
{
int ret;
vpd_kobj = kobject_create_and_add("vpd", firmware_kobj);
if (!vpd_kobj)
return -ENOMEM;
ret = vpd_sections_init(dev->cbmem_ref.cbmem_addr);
if (ret) {
kobject_put(vpd_kobj);
return ret;
}
return 0;
}
static void vpd_remove(struct coreboot_device *dev)
{
vpd_section_destroy(&ro_vpd);
vpd_section_destroy(&rw_vpd);
kobject_put(vpd_kobj);
}
static struct coreboot_driver vpd_driver = {
.probe = vpd_probe,
.remove = vpd_remove,
.drv = {
.name = "vpd",
},
.tag = CB_TAG_VPD,
};
module_coreboot_driver(vpd_driver);
MODULE_AUTHOR("Google, Inc.");
MODULE_LICENSE("GPL");
| linux-master | drivers/firmware/google/vpd.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* memconsole-coreboot.c
*
* Memory based BIOS console accessed through coreboot table.
*
* Copyright 2017 Google Inc.
*/
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include "memconsole.h"
#include "coreboot_table.h"
#define CB_TAG_CBMEM_CONSOLE 0x17
/* CBMEM firmware console log descriptor. */
struct cbmem_cons {
u32 size_dont_access_after_boot;
u32 cursor;
u8 body[];
} __packed;
#define CURSOR_MASK ((1 << 28) - 1)
#define OVERFLOW (1 << 31)
static struct cbmem_cons *cbmem_console;
static u32 cbmem_console_size;
/*
* The cbmem_console structure is read again on every access because it may
* change at any time if runtime firmware logs new messages. This may rarely
* lead to race conditions where the firmware overwrites the beginning of the
* ring buffer with more lines after we have already read |cursor|. It should be
* rare and harmless enough that we don't spend extra effort working around it.
*/
static ssize_t memconsole_coreboot_read(char *buf, loff_t pos, size_t count)
{
u32 cursor = cbmem_console->cursor & CURSOR_MASK;
u32 flags = cbmem_console->cursor & ~CURSOR_MASK;
u32 size = cbmem_console_size;
struct seg { /* describes ring buffer segments in logical order */
u32 phys; /* physical offset from start of mem buffer */
u32 len; /* length of segment */
} seg[2] = { {0}, {0} };
size_t done = 0;
int i;
if (flags & OVERFLOW) {
if (cursor > size) /* Shouldn't really happen, but... */
cursor = 0;
seg[0] = (struct seg){.phys = cursor, .len = size - cursor};
seg[1] = (struct seg){.phys = 0, .len = cursor};
} else {
seg[0] = (struct seg){.phys = 0, .len = min(cursor, size)};
}
for (i = 0; i < ARRAY_SIZE(seg) && count > done; i++) {
done += memory_read_from_buffer(buf + done, count - done, &pos,
cbmem_console->body + seg[i].phys, seg[i].len);
pos -= seg[i].len;
}
return done;
}
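/*
 * Illustrative sketch of the segment handling above (not part of the driver):
 * if the OVERFLOW flag is set with size = 64 KiB and cursor = 0x100, the log
 * is read as two segments, [0x100, 64 KiB) followed by [0, 0x100), so the
 * oldest surviving bytes come out first.
 */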
static int memconsole_probe(struct coreboot_device *dev)
{
struct cbmem_cons *tmp_cbmc;
tmp_cbmc = memremap(dev->cbmem_ref.cbmem_addr,
sizeof(*tmp_cbmc), MEMREMAP_WB);
if (!tmp_cbmc)
return -ENOMEM;
/* Read size only once to prevent overrun attack through /dev/mem. */
cbmem_console_size = tmp_cbmc->size_dont_access_after_boot;
cbmem_console = devm_memremap(&dev->dev, dev->cbmem_ref.cbmem_addr,
cbmem_console_size + sizeof(*cbmem_console),
MEMREMAP_WB);
memunmap(tmp_cbmc);
if (IS_ERR(cbmem_console))
return PTR_ERR(cbmem_console);
memconsole_setup(memconsole_coreboot_read);
return memconsole_sysfs_init();
}
static void memconsole_remove(struct coreboot_device *dev)
{
memconsole_exit();
}
static struct coreboot_driver memconsole_driver = {
.probe = memconsole_probe,
.remove = memconsole_remove,
.drv = {
.name = "memconsole",
},
.tag = CB_TAG_CBMEM_CONSOLE,
};
module_coreboot_driver(memconsole_driver);
MODULE_AUTHOR("Google, Inc.");
MODULE_LICENSE("GPL");
| linux-master | drivers/firmware/google/memconsole-coreboot.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* memconsole-x86-legacy.c
*
* EBDA specific parts of the memory based BIOS console.
*
* Copyright 2017 Google Inc.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dmi.h>
#include <linux/mm.h>
#include <asm/bios_ebda.h>
#include <linux/acpi.h>
#include "memconsole.h"
#define BIOS_MEMCONSOLE_V1_MAGIC 0xDEADBABE
#define BIOS_MEMCONSOLE_V2_MAGIC (('M')|('C'<<8)|('O'<<16)|('N'<<24))
struct biosmemcon_ebda {
u32 signature;
union {
struct {
u8 enabled;
u32 buffer_addr;
u16 start;
u16 end;
u16 num_chars;
u8 wrapped;
} __packed v1;
struct {
u32 buffer_addr;
/* Misdocumented as number of pages! */
u16 num_bytes;
u16 start;
u16 end;
} __packed v2;
};
} __packed;
static char *memconsole_baseaddr;
static size_t memconsole_length;
static ssize_t memconsole_read(char *buf, loff_t pos, size_t count)
{
return memory_read_from_buffer(buf, count, &pos, memconsole_baseaddr,
memconsole_length);
}
static void found_v1_header(struct biosmemcon_ebda *hdr)
{
pr_info("memconsole: BIOS console v1 EBDA structure found at %p\n",
hdr);
pr_info("memconsole: BIOS console buffer at 0x%.8x, start = %d, end = %d, num = %d\n",
hdr->v1.buffer_addr, hdr->v1.start,
hdr->v1.end, hdr->v1.num_chars);
memconsole_baseaddr = phys_to_virt(hdr->v1.buffer_addr);
memconsole_length = hdr->v1.num_chars;
memconsole_setup(memconsole_read);
}
static void found_v2_header(struct biosmemcon_ebda *hdr)
{
pr_info("memconsole: BIOS console v2 EBDA structure found at %p\n",
hdr);
pr_info("memconsole: BIOS console buffer at 0x%.8x, start = %d, end = %d, num_bytes = %d\n",
hdr->v2.buffer_addr, hdr->v2.start,
hdr->v2.end, hdr->v2.num_bytes);
memconsole_baseaddr = phys_to_virt(hdr->v2.buffer_addr + hdr->v2.start);
memconsole_length = hdr->v2.end - hdr->v2.start;
memconsole_setup(memconsole_read);
}
/*
* Search through the EBDA for the BIOS Memory Console, and
* set the global variables to point to it. Return true if found.
*/
static bool memconsole_ebda_init(void)
{
unsigned int address;
size_t length, cur;
address = get_bios_ebda();
if (!address) {
pr_info("memconsole: BIOS EBDA non-existent.\n");
return false;
}
/* EBDA length is byte 0 of EBDA (in KB) */
length = *(u8 *)phys_to_virt(address);
length <<= 10; /* convert to bytes */
/*
* Search through EBDA for BIOS memory console structure
* note: signature is not necessarily dword-aligned
*/
for (cur = 0; cur < length; cur++) {
struct biosmemcon_ebda *hdr = phys_to_virt(address + cur);
/* memconsole v1 */
if (hdr->signature == BIOS_MEMCONSOLE_V1_MAGIC) {
found_v1_header(hdr);
return true;
}
/* memconsole v2 */
if (hdr->signature == BIOS_MEMCONSOLE_V2_MAGIC) {
found_v2_header(hdr);
return true;
}
}
pr_info("memconsole: BIOS console EBDA structure not found!\n");
return false;
}
static const struct dmi_system_id memconsole_dmi_table[] __initconst = {
{
.ident = "Google Board",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Google, Inc."),
},
},
{}
};
MODULE_DEVICE_TABLE(dmi, memconsole_dmi_table);
static bool __init memconsole_find(void)
{
if (!dmi_check_system(memconsole_dmi_table))
return false;
return memconsole_ebda_init();
}
static int __init memconsole_x86_init(void)
{
if (!memconsole_find())
return -ENODEV;
return memconsole_sysfs_init();
}
static void __exit memconsole_x86_exit(void)
{
memconsole_exit();
}
module_init(memconsole_x86_init);
module_exit(memconsole_x86_exit);
MODULE_AUTHOR("Google, Inc.");
MODULE_LICENSE("GPL");
| linux-master | drivers/firmware/google/memconsole-x86-legacy.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* framebuffer-coreboot.c
*
* Memory based framebuffer accessed through coreboot table.
*
* Copyright 2012-2013 David Herrmann <[email protected]>
* Copyright 2017 Google Inc.
* Copyright 2017 Samuel Holland <[email protected]>
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_data/simplefb.h>
#include <linux/platform_device.h>
#include "coreboot_table.h"
#define CB_TAG_FRAMEBUFFER 0x12
static const struct simplefb_format formats[] = SIMPLEFB_FORMATS;
static int framebuffer_probe(struct coreboot_device *dev)
{
int i;
u32 length;
struct lb_framebuffer *fb = &dev->framebuffer;
struct platform_device *pdev;
struct resource res;
struct simplefb_platform_data pdata = {
.width = fb->x_resolution,
.height = fb->y_resolution,
.stride = fb->bytes_per_line,
.format = NULL,
};
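/*
* Match coreboot's reported pixel layout (depth and RGB channel
* offsets/lengths) against the simplefb format table.
*/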
for (i = 0; i < ARRAY_SIZE(formats); ++i) {
if (fb->bits_per_pixel == formats[i].bits_per_pixel &&
fb->red_mask_pos == formats[i].red.offset &&
fb->red_mask_size == formats[i].red.length &&
fb->green_mask_pos == formats[i].green.offset &&
fb->green_mask_size == formats[i].green.length &&
fb->blue_mask_pos == formats[i].blue.offset &&
fb->blue_mask_size == formats[i].blue.length)
pdata.format = formats[i].name;
}
if (!pdata.format)
return -ENODEV;
memset(&res, 0, sizeof(res));
res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
res.name = "Coreboot Framebuffer";
res.start = fb->physical_address;
length = PAGE_ALIGN(fb->y_resolution * fb->bytes_per_line);
res.end = res.start + length - 1;
if (res.end <= res.start)
return -EINVAL;
pdev = platform_device_register_resndata(&dev->dev,
"simple-framebuffer", 0,
&res, 1, &pdata,
sizeof(pdata));
if (IS_ERR(pdev))
pr_warn("coreboot: could not register framebuffer\n");
else
dev_set_drvdata(&dev->dev, pdev);
return PTR_ERR_OR_ZERO(pdev);
}
static void framebuffer_remove(struct coreboot_device *dev)
{
struct platform_device *pdev = dev_get_drvdata(&dev->dev);
platform_device_unregister(pdev);
}
static struct coreboot_driver framebuffer_driver = {
.probe = framebuffer_probe,
.remove = framebuffer_remove,
.drv = {
.name = "framebuffer",
},
.tag = CB_TAG_FRAMEBUFFER,
};
module_coreboot_driver(framebuffer_driver);
MODULE_AUTHOR("Samuel Holland <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/firmware/google/framebuffer-coreboot.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2010 Google Inc. All Rights Reserved.
* Author: [email protected] (Duncan Laurie)
*
* Re-worked to expose sysfs APIs by [email protected] (Mike Waychison)
*
* EFI SMI interface for Google platforms
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/panic_notifier.h>
#include <linux/ioctl.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/dmi.h>
#include <linux/kdebug.h>
#include <linux/reboot.h>
#include <linux/efi.h>
#include <linux/module.h>
#include <linux/ucs2_string.h>
#include <linux/suspend.h>
#define GSMI_SHUTDOWN_CLEAN 0 /* Clean Shutdown */
/* TODO([email protected]): Tie in HARDLOCKUP_DETECTOR with NMIWDT */
#define GSMI_SHUTDOWN_NMIWDT 1 /* NMI Watchdog */
#define GSMI_SHUTDOWN_PANIC 2 /* Panic */
#define GSMI_SHUTDOWN_OOPS 3 /* Oops */
#define GSMI_SHUTDOWN_DIE 4 /* Die -- No longer meaningful */
#define GSMI_SHUTDOWN_MCE 5 /* Machine Check */
#define GSMI_SHUTDOWN_SOFTWDT 6 /* Software Watchdog */
#define GSMI_SHUTDOWN_MBE 7 /* Uncorrected ECC */
#define GSMI_SHUTDOWN_TRIPLE 8 /* Triple Fault */
#define DRIVER_VERSION "1.0"
#define GSMI_GUID_SIZE 16
#define GSMI_BUF_SIZE 1024
#define GSMI_BUF_ALIGN sizeof(u64)
#define GSMI_CALLBACK 0xef
/* SMI return codes */
#define GSMI_SUCCESS 0x00
#define GSMI_UNSUPPORTED2 0x03
#define GSMI_LOG_FULL 0x0b
#define GSMI_VAR_NOT_FOUND 0x0e
#define GSMI_HANDSHAKE_SPIN 0x7d
#define GSMI_HANDSHAKE_CF 0x7e
#define GSMI_HANDSHAKE_NONE 0x7f
#define GSMI_INVALID_PARAMETER 0x82
#define GSMI_UNSUPPORTED 0x83
#define GSMI_BUFFER_TOO_SMALL 0x85
#define GSMI_NOT_READY 0x86
#define GSMI_DEVICE_ERROR 0x87
#define GSMI_NOT_FOUND 0x8e
#define QUIRKY_BOARD_HASH 0x78a30a50
/* Internally used commands passed to the firmware */
#define GSMI_CMD_GET_NVRAM_VAR 0x01
#define GSMI_CMD_GET_NEXT_VAR 0x02
#define GSMI_CMD_SET_NVRAM_VAR 0x03
#define GSMI_CMD_SET_EVENT_LOG 0x08
#define GSMI_CMD_CLEAR_EVENT_LOG 0x09
#define GSMI_CMD_LOG_S0IX_SUSPEND 0x0a
#define GSMI_CMD_LOG_S0IX_RESUME 0x0b
#define GSMI_CMD_CLEAR_CONFIG 0x20
#define GSMI_CMD_HANDSHAKE_TYPE 0xC1
#define GSMI_CMD_RESERVED 0xff
/* Magic entry type for kernel events */
#define GSMI_LOG_ENTRY_TYPE_KERNEL 0xDEAD
/* SMI buffers must be in 32bit physical address space */
struct gsmi_buf {
u8 *start; /* start of buffer */
size_t length; /* length of buffer */
u32 address; /* physical address of buffer */
};
static struct gsmi_device {
struct platform_device *pdev; /* platform device */
struct gsmi_buf *name_buf; /* variable name buffer */
struct gsmi_buf *data_buf; /* generic data buffer */
struct gsmi_buf *param_buf; /* parameter buffer */
spinlock_t lock; /* serialize access to SMIs */
u16 smi_cmd; /* SMI command port */
int handshake_type; /* firmware handler interlock type */
struct kmem_cache *mem_pool; /* kmem cache for gsmi_buf allocations */
} gsmi_dev;
/* Packed structures for communicating with the firmware */
struct gsmi_nvram_var_param {
efi_guid_t guid;
u32 name_ptr;
u32 attributes;
u32 data_len;
u32 data_ptr;
} __packed;
struct gsmi_get_next_var_param {
u8 guid[GSMI_GUID_SIZE];
u32 name_ptr;
u32 name_len;
} __packed;
struct gsmi_set_eventlog_param {
u32 data_ptr;
u32 data_len;
u32 type;
} __packed;
/* Event log formats */
struct gsmi_log_entry_type_1 {
u16 type;
u32 instance;
} __packed;
/*
* Some platforms don't have explicit SMI handshake
* and need to wait for SMI to complete.
*/
#define GSMI_DEFAULT_SPINCOUNT 0x10000
static unsigned int spincount = GSMI_DEFAULT_SPINCOUNT;
module_param(spincount, uint, 0600);
MODULE_PARM_DESC(spincount,
"The number of loop iterations to use when using the spin handshake.");
/*
* Some older platforms with Apollo Lake chipsets do not support S0ix logging
* in their GSMI handlers, and behaved poorly when resuming via power button
* press if the logging was attempted. Updated firmware with proper behavior
* has long since shipped, removing the need for this opt-in parameter. It
* now exists as an opt-out parameter for folks defiantly running old
* firmware, or unforeseen circumstances. After the change from opt-in to
* opt-out has baked sufficiently, this parameter should probably be removed
* entirely.
*/
static bool s0ix_logging_enable = true;
module_param(s0ix_logging_enable, bool, 0600);
static struct gsmi_buf *gsmi_buf_alloc(void)
{
struct gsmi_buf *smibuf;
smibuf = kzalloc(sizeof(*smibuf), GFP_KERNEL);
if (!smibuf) {
printk(KERN_ERR "gsmi: out of memory\n");
return NULL;
}
/* allocate buffer in 32bit address space */
smibuf->start = kmem_cache_alloc(gsmi_dev.mem_pool, GFP_KERNEL);
if (!smibuf->start) {
printk(KERN_ERR "gsmi: failed to allocate name buffer\n");
kfree(smibuf);
return NULL;
}
/* fill in the buffer handle */
smibuf->length = GSMI_BUF_SIZE;
smibuf->address = (u32)virt_to_phys(smibuf->start);
return smibuf;
}
static void gsmi_buf_free(struct gsmi_buf *smibuf)
{
if (smibuf) {
if (smibuf->start)
kmem_cache_free(gsmi_dev.mem_pool, smibuf->start);
kfree(smibuf);
}
}
/*
* Make a call to gsmi func(sub). GSMI error codes are translated to
* in-kernel errnos (0 on success, -ERRNO on error).
*/
static int gsmi_exec(u8 func, u8 sub)
{
u16 cmd = (sub << 8) | func;
u16 result = 0;
int rc = 0;
/*
* AH : Subfunction number
* AL : Function number
* EBX : Parameter block address
* DX : SMI command port
*
* Three protocols here. See also the comment in gsmi_init().
*/
if (gsmi_dev.handshake_type == GSMI_HANDSHAKE_CF) {
/*
* If handshake_type == HANDSHAKE_CF then set CF on the
* way in and wait for the handler to clear it; this avoids
* corrupting register state on those chipsets which have
* a delay between writing the SMI trigger register and
* entering SMM.
*/
asm volatile (
"stc\n"
"outb %%al, %%dx\n"
"1: jc 1b\n"
: "=a" (result)
: "0" (cmd),
"d" (gsmi_dev.smi_cmd),
"b" (gsmi_dev.param_buf->address)
: "memory", "cc"
);
} else if (gsmi_dev.handshake_type == GSMI_HANDSHAKE_SPIN) {
/*
* If handshake_type == HANDSHAKE_SPIN we spin a
* hundred-ish usecs to ensure the SMI has triggered.
*/
asm volatile (
"outb %%al, %%dx\n"
"1: loop 1b\n"
: "=a" (result)
: "0" (cmd),
"d" (gsmi_dev.smi_cmd),
"b" (gsmi_dev.param_buf->address),
"c" (spincount)
: "memory", "cc"
);
} else {
/*
* If handshake_type == HANDSHAKE_NONE we do nothing;
* either we don't need to or it's legacy firmware that
* doesn't understand the CF protocol.
*/
asm volatile (
"outb %%al, %%dx\n\t"
: "=a" (result)
: "0" (cmd),
"d" (gsmi_dev.smi_cmd),
"b" (gsmi_dev.param_buf->address)
: "memory", "cc"
);
}
/* check return code from SMI handler */
switch (result) {
case GSMI_SUCCESS:
break;
case GSMI_VAR_NOT_FOUND:
/* not really an error, but let the caller know */
rc = 1;
break;
case GSMI_INVALID_PARAMETER:
printk(KERN_ERR "gsmi: exec 0x%04x: Invalid parameter\n", cmd);
rc = -EINVAL;
break;
case GSMI_BUFFER_TOO_SMALL:
printk(KERN_ERR "gsmi: exec 0x%04x: Buffer too small\n", cmd);
rc = -ENOMEM;
break;
case GSMI_UNSUPPORTED:
case GSMI_UNSUPPORTED2:
if (sub != GSMI_CMD_HANDSHAKE_TYPE)
printk(KERN_ERR "gsmi: exec 0x%04x: Not supported\n",
cmd);
rc = -ENOSYS;
break;
case GSMI_NOT_READY:
printk(KERN_ERR "gsmi: exec 0x%04x: Not ready\n", cmd);
rc = -EBUSY;
break;
case GSMI_DEVICE_ERROR:
printk(KERN_ERR "gsmi: exec 0x%04x: Device error\n", cmd);
rc = -EFAULT;
break;
case GSMI_NOT_FOUND:
printk(KERN_ERR "gsmi: exec 0x%04x: Data not found\n", cmd);
rc = -ENOENT;
break;
case GSMI_LOG_FULL:
printk(KERN_ERR "gsmi: exec 0x%04x: Log full\n", cmd);
rc = -ENOSPC;
break;
case GSMI_HANDSHAKE_CF:
case GSMI_HANDSHAKE_SPIN:
case GSMI_HANDSHAKE_NONE:
rc = result;
break;
default:
printk(KERN_ERR "gsmi: exec 0x%04x: Unknown error 0x%04x\n",
cmd, result);
rc = -ENXIO;
}
return rc;
}
#ifdef CONFIG_EFI
static struct efivars efivars;
static efi_status_t gsmi_get_variable(efi_char16_t *name,
efi_guid_t *vendor, u32 *attr,
unsigned long *data_size,
void *data)
{
struct gsmi_nvram_var_param param = {
.name_ptr = gsmi_dev.name_buf->address,
.data_ptr = gsmi_dev.data_buf->address,
.data_len = (u32)*data_size,
};
efi_status_t ret = EFI_SUCCESS;
unsigned long flags;
size_t name_len = ucs2_strnlen(name, GSMI_BUF_SIZE / 2);
int rc;
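/* The name is UTF-16, so the name buffer holds at most GSMI_BUF_SIZE / 2 characters. */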
if (name_len >= GSMI_BUF_SIZE / 2)
return EFI_BAD_BUFFER_SIZE;
spin_lock_irqsave(&gsmi_dev.lock, flags);
/* Vendor guid */
memcpy(&param.guid, vendor, sizeof(param.guid));
/* variable name, already in UTF-16 */
memset(gsmi_dev.name_buf->start, 0, gsmi_dev.name_buf->length);
memcpy(gsmi_dev.name_buf->start, name, name_len * 2);
/* data pointer */
memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length);
/* parameter buffer */
memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
memcpy(gsmi_dev.param_buf->start, &param, sizeof(param));
rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_GET_NVRAM_VAR);
if (rc < 0) {
printk(KERN_ERR "gsmi: Get Variable failed\n");
ret = EFI_LOAD_ERROR;
} else if (rc == 1) {
/* variable was not found */
ret = EFI_NOT_FOUND;
} else {
/* Get the arguments back */
memcpy(&param, gsmi_dev.param_buf->start, sizeof(param));
/* The size reported is the min of all of our buffers */
*data_size = min_t(unsigned long, *data_size,
gsmi_dev.data_buf->length);
*data_size = min_t(unsigned long, *data_size, param.data_len);
/* Copy data back to return buffer. */
memcpy(data, gsmi_dev.data_buf->start, *data_size);
/* All variables have the following attributes */
if (attr)
*attr = EFI_VARIABLE_NON_VOLATILE |
EFI_VARIABLE_BOOTSERVICE_ACCESS |
EFI_VARIABLE_RUNTIME_ACCESS;
}
spin_unlock_irqrestore(&gsmi_dev.lock, flags);
return ret;
}
static efi_status_t gsmi_get_next_variable(unsigned long *name_size,
efi_char16_t *name,
efi_guid_t *vendor)
{
struct gsmi_get_next_var_param param = {
.name_ptr = gsmi_dev.name_buf->address,
.name_len = gsmi_dev.name_buf->length,
};
efi_status_t ret = EFI_SUCCESS;
int rc;
unsigned long flags;
/* For the moment, only support buffers that exactly match in size */
if (*name_size != GSMI_BUF_SIZE)
return EFI_BAD_BUFFER_SIZE;
/* Let's make sure the thing is at least null-terminated */
if (ucs2_strnlen(name, GSMI_BUF_SIZE / 2) == GSMI_BUF_SIZE / 2)
return EFI_INVALID_PARAMETER;
spin_lock_irqsave(&gsmi_dev.lock, flags);
/* guid */
memcpy(&param.guid, vendor, sizeof(param.guid));
/* variable name, already in UTF-16 */
memcpy(gsmi_dev.name_buf->start, name, *name_size);
/* parameter buffer */
memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
memcpy(gsmi_dev.param_buf->start, &param, sizeof(param));
rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_GET_NEXT_VAR);
if (rc < 0) {
printk(KERN_ERR "gsmi: Get Next Variable Name failed\n");
ret = EFI_LOAD_ERROR;
} else if (rc == 1) {
/* variable not found -- end of list */
ret = EFI_NOT_FOUND;
} else {
/* copy variable data back to return buffer */
memcpy(&param, gsmi_dev.param_buf->start, sizeof(param));
/* Copy the name back */
memcpy(name, gsmi_dev.name_buf->start, GSMI_BUF_SIZE);
*name_size = ucs2_strnlen(name, GSMI_BUF_SIZE / 2) * 2;
/* copy guid to return buffer */
memcpy(vendor, &param.guid, sizeof(param.guid));
ret = EFI_SUCCESS;
}
spin_unlock_irqrestore(&gsmi_dev.lock, flags);
return ret;
}
static efi_status_t gsmi_set_variable(efi_char16_t *name,
efi_guid_t *vendor,
u32 attr,
unsigned long data_size,
void *data)
{
struct gsmi_nvram_var_param param = {
.name_ptr = gsmi_dev.name_buf->address,
.data_ptr = gsmi_dev.data_buf->address,
.data_len = (u32)data_size,
.attributes = EFI_VARIABLE_NON_VOLATILE |
EFI_VARIABLE_BOOTSERVICE_ACCESS |
EFI_VARIABLE_RUNTIME_ACCESS,
};
size_t name_len = ucs2_strnlen(name, GSMI_BUF_SIZE / 2);
efi_status_t ret = EFI_SUCCESS;
int rc;
unsigned long flags;
if (name_len >= GSMI_BUF_SIZE / 2)
return EFI_BAD_BUFFER_SIZE;
spin_lock_irqsave(&gsmi_dev.lock, flags);
/* guid */
memcpy(&param.guid, vendor, sizeof(param.guid));
/* variable name, already in UTF-16 */
memset(gsmi_dev.name_buf->start, 0, gsmi_dev.name_buf->length);
memcpy(gsmi_dev.name_buf->start, name, name_len * 2);
/* data pointer */
memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length);
memcpy(gsmi_dev.data_buf->start, data, data_size);
/* parameter buffer */
memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
memcpy(gsmi_dev.param_buf->start, &param, sizeof(param));
rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_SET_NVRAM_VAR);
if (rc < 0) {
printk(KERN_ERR "gsmi: Set Variable failed\n");
ret = EFI_INVALID_PARAMETER;
}
spin_unlock_irqrestore(&gsmi_dev.lock, flags);
return ret;
}
static const struct efivar_operations efivar_ops = {
.get_variable = gsmi_get_variable,
.set_variable = gsmi_set_variable,
.get_next_variable = gsmi_get_next_variable,
};
#endif /* CONFIG_EFI */
static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
{
struct gsmi_set_eventlog_param param = {
.data_ptr = gsmi_dev.data_buf->address,
};
int rc = 0;
unsigned long flags;
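/*
* The write format is a 32-bit event type followed by the raw event
* payload that is passed on to the firmware event log.
*/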
/* Pull the type out */
if (count < sizeof(u32))
return -EINVAL;
param.type = *(u32 *)buf;
buf += sizeof(u32);
/* The remaining buffer is the data payload */
if ((count - sizeof(u32)) > gsmi_dev.data_buf->length)
return -EINVAL;
param.data_len = count - sizeof(u32);
spin_lock_irqsave(&gsmi_dev.lock, flags);
/* data pointer */
memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length);
memcpy(gsmi_dev.data_buf->start, buf, param.data_len);
/* parameter buffer */
memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
memcpy(gsmi_dev.param_buf->start, &param, sizeof(param));
rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_SET_EVENT_LOG);
if (rc < 0)
printk(KERN_ERR "gsmi: Set Event Log failed\n");
spin_unlock_irqrestore(&gsmi_dev.lock, flags);
return (rc == 0) ? count : rc;
}
static struct bin_attribute eventlog_bin_attr = {
.attr = {.name = "append_to_eventlog", .mode = 0200},
.write = eventlog_write,
};
static ssize_t gsmi_clear_eventlog_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
int rc;
unsigned long flags;
unsigned long val;
struct {
u32 percentage;
u32 data_type;
} param;
rc = kstrtoul(buf, 0, &val);
if (rc)
return rc;
/*
* Value entered is a percentage, 0 through 100, anything else
* is invalid.
*/
if (val > 100)
return -EINVAL;
/* data_type here selects the smbios event log. */
param.percentage = val;
param.data_type = 0;
spin_lock_irqsave(&gsmi_dev.lock, flags);
/* parameter buffer */
memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
memcpy(gsmi_dev.param_buf->start, &param, sizeof(param));
rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_CLEAR_EVENT_LOG);
spin_unlock_irqrestore(&gsmi_dev.lock, flags);
if (rc)
return rc;
return count;
}
static struct kobj_attribute gsmi_clear_eventlog_attr = {
.attr = {.name = "clear_eventlog", .mode = 0200},
.store = gsmi_clear_eventlog_store,
};
static ssize_t gsmi_clear_config_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
int rc;
unsigned long flags;
spin_lock_irqsave(&gsmi_dev.lock, flags);
/* clear parameter buffer */
memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_CLEAR_CONFIG);
spin_unlock_irqrestore(&gsmi_dev.lock, flags);
if (rc)
return rc;
return count;
}
static struct kobj_attribute gsmi_clear_config_attr = {
.attr = {.name = "clear_config", .mode = 0200},
.store = gsmi_clear_config_store,
};
static const struct attribute *gsmi_attrs[] = {
&gsmi_clear_config_attr.attr,
&gsmi_clear_eventlog_attr.attr,
NULL,
};
static int gsmi_shutdown_reason(int reason)
{
struct gsmi_log_entry_type_1 entry = {
.type = GSMI_LOG_ENTRY_TYPE_KERNEL,
.instance = reason,
};
struct gsmi_set_eventlog_param param = {
.data_len = sizeof(entry),
.type = 1,
};
static int saved_reason;
int rc = 0;
unsigned long flags;
/* avoid duplicate entries in the log */
if (saved_reason & (1 << reason))
return 0;
spin_lock_irqsave(&gsmi_dev.lock, flags);
saved_reason |= (1 << reason);
/* data pointer */
memset(gsmi_dev.data_buf->start, 0, gsmi_dev.data_buf->length);
memcpy(gsmi_dev.data_buf->start, &entry, sizeof(entry));
/* parameter buffer */
param.data_ptr = gsmi_dev.data_buf->address;
memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
memcpy(gsmi_dev.param_buf->start, &param, sizeof(param));
rc = gsmi_exec(GSMI_CALLBACK, GSMI_CMD_SET_EVENT_LOG);
spin_unlock_irqrestore(&gsmi_dev.lock, flags);
if (rc < 0)
printk(KERN_ERR "gsmi: Log Shutdown Reason failed\n");
else
printk(KERN_EMERG "gsmi: Log Shutdown Reason 0x%02x\n",
reason);
return rc;
}
static int gsmi_reboot_callback(struct notifier_block *nb,
unsigned long reason, void *arg)
{
gsmi_shutdown_reason(GSMI_SHUTDOWN_CLEAN);
return NOTIFY_DONE;
}
static struct notifier_block gsmi_reboot_notifier = {
.notifier_call = gsmi_reboot_callback
};
static int gsmi_die_callback(struct notifier_block *nb,
unsigned long reason, void *arg)
{
if (reason == DIE_OOPS)
gsmi_shutdown_reason(GSMI_SHUTDOWN_OOPS);
return NOTIFY_DONE;
}
static struct notifier_block gsmi_die_notifier = {
.notifier_call = gsmi_die_callback
};
static int gsmi_panic_callback(struct notifier_block *nb,
unsigned long reason, void *arg)
{
/*
* Panic callbacks are executed with all other CPUs stopped,
* so we must not attempt to spin waiting for gsmi_dev.lock
* to be released.
*/
if (spin_is_locked(&gsmi_dev.lock))
return NOTIFY_DONE;
gsmi_shutdown_reason(GSMI_SHUTDOWN_PANIC);
return NOTIFY_DONE;
}
static struct notifier_block gsmi_panic_notifier = {
.notifier_call = gsmi_panic_callback,
};
/*
* This hash function was blatantly copied from include/linux/hash.h.
* It is used by this driver to obfuscate a board name that requires a
* quirk within this driver.
*
* Please do not remove this copy of the function as any changes to the
* global utility hash_64() function would break this driver's ability
* to identify a board and provide the appropriate quirk -- [email protected]
*/
static u64 __init local_hash_64(u64 val, unsigned bits)
{
u64 hash = val;
/* Sigh, gcc can't optimise this alone like it does for 32 bits. */
u64 n = hash;
n <<= 18;
hash -= n;
n <<= 33;
hash -= n;
n <<= 3;
hash += n;
n <<= 3;
hash -= n;
n <<= 4;
hash += n;
n <<= 2;
hash += n;
/* High bits are more random, so use them. */
return hash >> (64 - bits);
}
static u32 __init hash_oem_table_id(char s[8])
{
u64 input;
memcpy(&input, s, 8);
return local_hash_64(input, 32);
}
static const struct dmi_system_id gsmi_dmi_table[] __initconst = {
{
.ident = "Google Board",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Google, Inc."),
},
},
{
.ident = "Coreboot Firmware",
.matches = {
DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
},
},
{}
};
MODULE_DEVICE_TABLE(dmi, gsmi_dmi_table);
static __init int gsmi_system_valid(void)
{
u32 hash;
u16 cmd, result;
if (!dmi_check_system(gsmi_dmi_table))
return -ENODEV;
/*
* Only newer firmware supports the gsmi interface. All older
* firmware that didn't support this interface used to plug the
* table name in the first four bytes of the oem_table_id field.
* Newer firmware doesn't do that though, so use that as the
* discriminant factor. We have to do this in order to
* whitewash our board names out of the public driver.
*/
if (!strncmp(acpi_gbl_FADT.header.oem_table_id, "FACP", 4)) {
printk(KERN_INFO "gsmi: Board is too old\n");
return -ENODEV;
}
/* Disable on board with 1.0 BIOS due to Google bug 2602657 */
hash = hash_oem_table_id(acpi_gbl_FADT.header.oem_table_id);
if (hash == QUIRKY_BOARD_HASH) {
const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
if (strncmp(bios_ver, "1.0", 3) == 0) {
pr_info("gsmi: disabled on this board's BIOS %s\n",
bios_ver);
return -ENODEV;
}
}
/* check for valid SMI command port in ACPI FADT */
if (acpi_gbl_FADT.smi_command == 0) {
pr_info("gsmi: missing smi_command\n");
return -ENODEV;
}
/* Test the smihandler with a bogus command. If it leaves the
* calling argument in %ax untouched, there is no handler for
* GSMI commands.
*/
cmd = GSMI_CALLBACK | GSMI_CMD_RESERVED << 8;
asm volatile (
"outb %%al, %%dx\n\t"
: "=a" (result)
: "0" (cmd),
"d" (acpi_gbl_FADT.smi_command)
: "memory", "cc"
);
if (cmd == result) {
pr_info("gsmi: no gsmi handler in firmware\n");
return -ENODEV;
}
/* Found */
return 0;
}
static struct kobject *gsmi_kobj;
static const struct platform_device_info gsmi_dev_info = {
.name = "gsmi",
.id = -1,
/* SMI callbacks require 32bit addresses */
.dma_mask = DMA_BIT_MASK(32),
};
#ifdef CONFIG_PM
static void gsmi_log_s0ix_info(u8 cmd)
{
unsigned long flags;
/*
* If platform has not enabled S0ix logging, then no action is
* necessary.
*/
if (!s0ix_logging_enable)
return;
spin_lock_irqsave(&gsmi_dev.lock, flags);
memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
gsmi_exec(GSMI_CALLBACK, cmd);
spin_unlock_irqrestore(&gsmi_dev.lock, flags);
}
static int gsmi_log_s0ix_suspend(struct device *dev)
{
/*
* If system is not suspending via firmware using the standard ACPI Sx
* types, then make a GSMI call to log the suspend info.
*/
if (!pm_suspend_via_firmware())
gsmi_log_s0ix_info(GSMI_CMD_LOG_S0IX_SUSPEND);
/*
* Always return success, since we do not want suspend
* to fail just because of logging failure.
*/
return 0;
}
static int gsmi_log_s0ix_resume(struct device *dev)
{
/*
* If system did not resume via firmware, then make a GSMI call to log
* the resume info and wake source.
*/
if (!pm_resume_via_firmware())
gsmi_log_s0ix_info(GSMI_CMD_LOG_S0IX_RESUME);
/*
* Always return success, since we do not want resume
* to fail just because of logging failure.
*/
return 0;
}
static const struct dev_pm_ops gsmi_pm_ops = {
.suspend_noirq = gsmi_log_s0ix_suspend,
.resume_noirq = gsmi_log_s0ix_resume,
};
static int gsmi_platform_driver_probe(struct platform_device *dev)
{
return 0;
}
static struct platform_driver gsmi_driver_info = {
.driver = {
.name = "gsmi",
.pm = &gsmi_pm_ops,
},
.probe = gsmi_platform_driver_probe,
};
#endif
static __init int gsmi_init(void)
{
unsigned long flags;
int ret;
ret = gsmi_system_valid();
if (ret)
return ret;
gsmi_dev.smi_cmd = acpi_gbl_FADT.smi_command;
#ifdef CONFIG_PM
ret = platform_driver_register(&gsmi_driver_info);
if (unlikely(ret)) {
printk(KERN_ERR "gsmi: unable to register platform driver\n");
return ret;
}
#endif
/* register device */
gsmi_dev.pdev = platform_device_register_full(&gsmi_dev_info);
if (IS_ERR(gsmi_dev.pdev)) {
printk(KERN_ERR "gsmi: unable to register platform device\n");
return PTR_ERR(gsmi_dev.pdev);
}
/* SMI access needs to be serialized */
spin_lock_init(&gsmi_dev.lock);
ret = -ENOMEM;
/*
* SLAB cache is created using SLAB_CACHE_DMA32 to ensure that the
* allocations for gsmi_buf come from the DMA32 memory zone. These
* buffers have nothing to do with DMA. They are required for
* communication with firmware executing in SMI mode which can only
* access the bottom 4GiB of physical memory. Since DMA32 memory zone
* guarantees allocation under the 4GiB boundary, this driver creates
* a SLAB cache with SLAB_CACHE_DMA32 flag.
*/
gsmi_dev.mem_pool = kmem_cache_create("gsmi", GSMI_BUF_SIZE,
GSMI_BUF_ALIGN,
SLAB_CACHE_DMA32, NULL);
if (!gsmi_dev.mem_pool)
goto out_err;
/*
* pre-allocate buffers because sometimes we are called when
* this is not feasible: oops, panic, die, mce, etc
*/
gsmi_dev.name_buf = gsmi_buf_alloc();
if (!gsmi_dev.name_buf) {
printk(KERN_ERR "gsmi: failed to allocate name buffer\n");
goto out_err;
}
gsmi_dev.data_buf = gsmi_buf_alloc();
if (!gsmi_dev.data_buf) {
printk(KERN_ERR "gsmi: failed to allocate data buffer\n");
goto out_err;
}
gsmi_dev.param_buf = gsmi_buf_alloc();
if (!gsmi_dev.param_buf) {
printk(KERN_ERR "gsmi: failed to allocate param buffer\n");
goto out_err;
}
/*
* Determine type of handshake used to serialize the SMI
* entry. See also gsmi_exec().
*
* There's a "behavior" present on some chipsets where writing the
* SMI trigger register in the southbridge doesn't result in an
* immediate SMI. Rather, the processor can execute "a few" more
* instructions before the SMI takes effect. To ensure synchronous
* behavior, implement a handshake between the kernel driver and the
* firmware handler to spin until released. This ioctl determines
* the type of handshake.
*
* NONE: The firmware handler does not implement any
* handshake. Either it doesn't need to, or it's legacy firmware
* that doesn't know it needs to and never will.
*
* CF: The firmware handler will clear the CF in the saved
* state before returning. The driver may set the CF and test for
* it to clear before proceeding.
*
* SPIN: The firmware handler does not implement any handshake
* but the driver should spin for a hundred or so microseconds
* to ensure the SMI has triggered.
*
* Finally, the handler will return -ENOSYS if
* GSMI_CMD_HANDSHAKE_TYPE is unimplemented, which implies
* HANDSHAKE_NONE.
*/
spin_lock_irqsave(&gsmi_dev.lock, flags);
gsmi_dev.handshake_type = GSMI_HANDSHAKE_SPIN;
gsmi_dev.handshake_type =
gsmi_exec(GSMI_CALLBACK, GSMI_CMD_HANDSHAKE_TYPE);
if (gsmi_dev.handshake_type == -ENOSYS)
gsmi_dev.handshake_type = GSMI_HANDSHAKE_NONE;
spin_unlock_irqrestore(&gsmi_dev.lock, flags);
/* Remove and clean up gsmi if the handshake could not complete. */
if (gsmi_dev.handshake_type == -ENXIO) {
printk(KERN_INFO "gsmi version " DRIVER_VERSION
" failed to load\n");
ret = -ENODEV;
goto out_err;
}
/* Register in the firmware directory */
ret = -ENOMEM;
gsmi_kobj = kobject_create_and_add("gsmi", firmware_kobj);
if (!gsmi_kobj) {
printk(KERN_INFO "gsmi: Failed to create firmware kobj\n");
goto out_err;
}
/* Setup eventlog access */
ret = sysfs_create_bin_file(gsmi_kobj, &eventlog_bin_attr);
if (ret) {
printk(KERN_INFO "gsmi: Failed to setup eventlog");
goto out_err;
}
/* Other attributes */
ret = sysfs_create_files(gsmi_kobj, gsmi_attrs);
if (ret) {
printk(KERN_INFO "gsmi: Failed to add attrs");
goto out_remove_bin_file;
}
#ifdef CONFIG_EFI
ret = efivars_register(&efivars, &efivar_ops);
if (ret) {
printk(KERN_INFO "gsmi: Failed to register efivars\n");
sysfs_remove_files(gsmi_kobj, gsmi_attrs);
goto out_remove_bin_file;
}
#endif
register_reboot_notifier(&gsmi_reboot_notifier);
register_die_notifier(&gsmi_die_notifier);
atomic_notifier_chain_register(&panic_notifier_list,
&gsmi_panic_notifier);
printk(KERN_INFO "gsmi version " DRIVER_VERSION " loaded\n");
return 0;
out_remove_bin_file:
sysfs_remove_bin_file(gsmi_kobj, &eventlog_bin_attr);
out_err:
kobject_put(gsmi_kobj);
gsmi_buf_free(gsmi_dev.param_buf);
gsmi_buf_free(gsmi_dev.data_buf);
gsmi_buf_free(gsmi_dev.name_buf);
kmem_cache_destroy(gsmi_dev.mem_pool);
platform_device_unregister(gsmi_dev.pdev);
pr_info("gsmi: failed to load: %d\n", ret);
#ifdef CONFIG_PM
platform_driver_unregister(&gsmi_driver_info);
#endif
return ret;
}
static void __exit gsmi_exit(void)
{
unregister_reboot_notifier(&gsmi_reboot_notifier);
unregister_die_notifier(&gsmi_die_notifier);
atomic_notifier_chain_unregister(&panic_notifier_list,
&gsmi_panic_notifier);
#ifdef CONFIG_EFI
efivars_unregister(&efivars);
#endif
sysfs_remove_files(gsmi_kobj, gsmi_attrs);
sysfs_remove_bin_file(gsmi_kobj, &eventlog_bin_attr);
kobject_put(gsmi_kobj);
gsmi_buf_free(gsmi_dev.param_buf);
gsmi_buf_free(gsmi_dev.data_buf);
gsmi_buf_free(gsmi_dev.name_buf);
kmem_cache_destroy(gsmi_dev.mem_pool);
platform_device_unregister(gsmi_dev.pdev);
#ifdef CONFIG_PM
platform_driver_unregister(&gsmi_driver_info);
#endif
}
module_init(gsmi_init);
module_exit(gsmi_exit);
MODULE_AUTHOR("Google, Inc.");
MODULE_LICENSE("GPL");
| linux-master | drivers/firmware/google/gsmi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* coreboot_table.c
*
* Module providing coreboot table access.
*
* Copyright 2017 Google Inc.
* Copyright 2017 Samuel Holland <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "coreboot_table.h"
#define CB_DEV(d) container_of(d, struct coreboot_device, dev)
#define CB_DRV(d) container_of(d, struct coreboot_driver, drv)
static int coreboot_bus_match(struct device *dev, struct device_driver *drv)
{
struct coreboot_device *device = CB_DEV(dev);
struct coreboot_driver *driver = CB_DRV(drv);
return device->entry.tag == driver->tag;
}
static int coreboot_bus_probe(struct device *dev)
{
int ret = -ENODEV;
struct coreboot_device *device = CB_DEV(dev);
struct coreboot_driver *driver = CB_DRV(dev->driver);
if (driver->probe)
ret = driver->probe(device);
return ret;
}
static void coreboot_bus_remove(struct device *dev)
{
struct coreboot_device *device = CB_DEV(dev);
struct coreboot_driver *driver = CB_DRV(dev->driver);
if (driver->remove)
driver->remove(device);
}
static struct bus_type coreboot_bus_type = {
.name = "coreboot",
.match = coreboot_bus_match,
.probe = coreboot_bus_probe,
.remove = coreboot_bus_remove,
};
static void coreboot_device_release(struct device *dev)
{
struct coreboot_device *device = CB_DEV(dev);
kfree(device);
}
int coreboot_driver_register(struct coreboot_driver *driver)
{
driver->drv.bus = &coreboot_bus_type;
return driver_register(&driver->drv);
}
EXPORT_SYMBOL(coreboot_driver_register);
void coreboot_driver_unregister(struct coreboot_driver *driver)
{
driver_unregister(&driver->drv);
}
EXPORT_SYMBOL(coreboot_driver_unregister);
static int coreboot_table_populate(struct device *dev, void *ptr)
{
int i, ret;
void *ptr_entry;
struct coreboot_device *device;
struct coreboot_table_entry *entry;
struct coreboot_table_header *header = ptr;
ptr_entry = ptr + header->header_bytes;
for (i = 0; i < header->table_entries; i++) {
entry = ptr_entry;
if (entry->size < sizeof(*entry)) {
dev_warn(dev, "coreboot table entry too small!\n");
return -EINVAL;
}
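/*
* Allocate the device together with trailing space for a copy of the
* raw table entry, which bus drivers access via the typed members.
*/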
device = kzalloc(sizeof(device->dev) + entry->size, GFP_KERNEL);
if (!device)
return -ENOMEM;
device->dev.parent = dev;
device->dev.bus = &coreboot_bus_type;
device->dev.release = coreboot_device_release;
memcpy(device->raw, ptr_entry, entry->size);
switch (device->entry.tag) {
case LB_TAG_CBMEM_ENTRY:
dev_set_name(&device->dev, "cbmem-%08x",
device->cbmem_entry.id);
break;
default:
dev_set_name(&device->dev, "coreboot%d", i);
break;
}
ret = device_register(&device->dev);
if (ret) {
put_device(&device->dev);
return ret;
}
ptr_entry += entry->size;
}
return 0;
}
static int coreboot_table_probe(struct platform_device *pdev)
{
resource_size_t len;
struct coreboot_table_header *header;
struct resource *res;
struct device *dev = &pdev->dev;
void *ptr;
int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -EINVAL;
len = resource_size(res);
if (!res->start || !len)
return -EINVAL;
/* Check just the header first to make sure things are sane */
header = memremap(res->start, sizeof(*header), MEMREMAP_WB);
if (!header)
return -ENOMEM;
len = header->header_bytes + header->table_bytes;
ret = strncmp(header->signature, "LBIO", sizeof(header->signature));
memunmap(header);
if (ret) {
dev_warn(dev, "coreboot table missing or corrupt!\n");
return -ENODEV;
}
ptr = memremap(res->start, len, MEMREMAP_WB);
if (!ptr)
return -ENOMEM;
ret = coreboot_table_populate(dev, ptr);
memunmap(ptr);
return ret;
}
static int __cb_dev_unregister(struct device *dev, void *dummy)
{
device_unregister(dev);
return 0;
}
static int coreboot_table_remove(struct platform_device *pdev)
{
bus_for_each_dev(&coreboot_bus_type, NULL, NULL, __cb_dev_unregister);
return 0;
}
#ifdef CONFIG_ACPI
static const struct acpi_device_id cros_coreboot_acpi_match[] = {
{ "GOOGCB00", 0 },
{ "BOOT0000", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, cros_coreboot_acpi_match);
#endif
#ifdef CONFIG_OF
static const struct of_device_id coreboot_of_match[] = {
{ .compatible = "coreboot" },
{}
};
MODULE_DEVICE_TABLE(of, coreboot_of_match);
#endif
static struct platform_driver coreboot_table_driver = {
.probe = coreboot_table_probe,
.remove = coreboot_table_remove,
.driver = {
.name = "coreboot_table",
.acpi_match_table = ACPI_PTR(cros_coreboot_acpi_match),
.of_match_table = of_match_ptr(coreboot_of_match),
},
};
static int __init coreboot_table_driver_init(void)
{
int ret;
ret = bus_register(&coreboot_bus_type);
if (ret)
return ret;
ret = platform_driver_register(&coreboot_table_driver);
if (ret) {
bus_unregister(&coreboot_bus_type);
return ret;
}
return 0;
}
static void __exit coreboot_table_driver_exit(void)
{
platform_driver_unregister(&coreboot_table_driver);
bus_unregister(&coreboot_bus_type);
}
module_init(coreboot_table_driver_init);
module_exit(coreboot_table_driver_exit);
MODULE_AUTHOR("Google, Inc.");
MODULE_LICENSE("GPL");
| linux-master | drivers/firmware/google/coreboot_table.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* cbmem.c
*
* Driver for exporting cbmem entries in sysfs.
*
* Copyright 2022 Google LLC
*/
#include <linux/device.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include "coreboot_table.h"
struct cbmem_entry {
char *mem_file_buf;
u32 size;
};
static struct cbmem_entry *to_cbmem_entry(struct kobject *kobj)
{
return dev_get_drvdata(kobj_to_dev(kobj));
}
static ssize_t mem_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf, loff_t pos,
size_t count)
{
struct cbmem_entry *entry = to_cbmem_entry(kobj);
return memory_read_from_buffer(buf, count, &pos, entry->mem_file_buf,
entry->size);
}
static ssize_t mem_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf, loff_t pos,
size_t count)
{
struct cbmem_entry *entry = to_cbmem_entry(kobj);
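/* Reject writes outside the entry and clamp the count to what fits. */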
if (pos < 0 || pos >= entry->size)
return -EINVAL;
if (count > entry->size - pos)
count = entry->size - pos;
memcpy(entry->mem_file_buf + pos, buf, count);
return count;
}
static BIN_ATTR_ADMIN_RW(mem, 0);
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct coreboot_device *cbdev = dev_to_coreboot_device(dev);
return sysfs_emit(buf, "0x%llx\n", cbdev->cbmem_entry.address);
}
static DEVICE_ATTR_RO(address);
static ssize_t size_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct coreboot_device *cbdev = dev_to_coreboot_device(dev);
return sysfs_emit(buf, "0x%x\n", cbdev->cbmem_entry.entry_size);
}
static DEVICE_ATTR_RO(size);
static struct attribute *attrs[] = {
&dev_attr_address.attr,
&dev_attr_size.attr,
NULL,
};
static struct bin_attribute *bin_attrs[] = {
&bin_attr_mem,
NULL,
};
static const struct attribute_group cbmem_entry_group = {
.attrs = attrs,
.bin_attrs = bin_attrs,
};
static const struct attribute_group *dev_groups[] = {
&cbmem_entry_group,
NULL,
};
static int cbmem_entry_probe(struct coreboot_device *dev)
{
struct cbmem_entry *entry;
entry = devm_kzalloc(&dev->dev, sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
dev_set_drvdata(&dev->dev, entry);
entry->mem_file_buf = devm_memremap(&dev->dev, dev->cbmem_entry.address,
dev->cbmem_entry.entry_size,
MEMREMAP_WB);
if (IS_ERR(entry->mem_file_buf))
return PTR_ERR(entry->mem_file_buf);
entry->size = dev->cbmem_entry.entry_size;
return 0;
}
static struct coreboot_driver cbmem_entry_driver = {
.probe = cbmem_entry_probe,
.drv = {
.name = "cbmem",
.owner = THIS_MODULE,
.dev_groups = dev_groups,
},
.tag = LB_TAG_CBMEM_ENTRY,
};
module_coreboot_driver(cbmem_entry_driver);
MODULE_AUTHOR("Jack Rosenthal <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/firmware/google/cbmem.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* vpd_decode.c
*
* Google VPD decoding routines.
*
* Copyright 2017 Google Inc.
*/
#include "vpd_decode.h"
static int vpd_decode_len(const u32 max_len, const u8 *in,
u32 *length, u32 *decoded_len)
{
u8 more;
int i = 0;
if (!length || !decoded_len)
return VPD_FAIL;
*length = 0;
do {
if (i >= max_len)
return VPD_FAIL;
more = in[i] & 0x80;
*length <<= 7;
*length |= in[i] & 0x7f;
++i;
} while (more);
*decoded_len = i;
return VPD_OK;
}
static int vpd_decode_entry(const u32 max_len, const u8 *input_buf,
u32 *_consumed, const u8 **entry, u32 *entry_len)
{
u32 decoded_len;
u32 consumed = *_consumed;
if (vpd_decode_len(max_len - consumed, &input_buf[consumed],
entry_len, &decoded_len) != VPD_OK)
return VPD_FAIL;
if (max_len - consumed < decoded_len)
return VPD_FAIL;
consumed += decoded_len;
*entry = input_buf + consumed;
/* entry_len is untrusted data and must be checked again. */
if (max_len - consumed < *entry_len)
return VPD_FAIL;
consumed += *entry_len;
*_consumed = consumed;
return VPD_OK;
}
int vpd_decode_string(const u32 max_len, const u8 *input_buf, u32 *consumed,
vpd_decode_callback callback, void *callback_arg)
{
int type;
u32 key_len;
u32 value_len;
const u8 *key;
const u8 *value;
/* type */
if (*consumed >= max_len)
return VPD_FAIL;
type = input_buf[*consumed];
switch (type) {
case VPD_TYPE_INFO:
case VPD_TYPE_STRING:
(*consumed)++;
if (vpd_decode_entry(max_len, input_buf, consumed, &key,
&key_len) != VPD_OK)
return VPD_FAIL;
if (vpd_decode_entry(max_len, input_buf, consumed, &value,
&value_len) != VPD_OK)
return VPD_FAIL;
if (type == VPD_TYPE_STRING)
return callback(key, key_len, value, value_len,
callback_arg);
break;
default:
return VPD_FAIL;
}
return VPD_OK;
}
| linux-master | drivers/firmware/google/vpd_decode.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* memconsole.c
*
* Architecture-independent parts of the memory based BIOS console.
*
* Copyright 2017 Google Inc.
*/
#include <linux/sysfs.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include "memconsole.h"
static ssize_t memconsole_read(struct file *filp, struct kobject *kobp,
struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count)
{
ssize_t (*memconsole_read_func)(char *, loff_t, size_t);
memconsole_read_func = bin_attr->private;
if (WARN_ON_ONCE(!memconsole_read_func))
return -EIO;
return memconsole_read_func(buf, pos, count);
}
static struct bin_attribute memconsole_bin_attr = {
.attr = {.name = "log", .mode = 0444},
.read = memconsole_read,
};
void memconsole_setup(ssize_t (*read_func)(char *, loff_t, size_t))
{
memconsole_bin_attr.private = read_func;
}
EXPORT_SYMBOL(memconsole_setup);
int memconsole_sysfs_init(void)
{
return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
}
EXPORT_SYMBOL(memconsole_sysfs_init);
void memconsole_exit(void)
{
sysfs_remove_bin_file(firmware_kobj, &memconsole_bin_attr);
}
EXPORT_SYMBOL(memconsole_exit);
MODULE_AUTHOR("Google, Inc.");
MODULE_LICENSE("GPL");
| linux-master | drivers/firmware/google/memconsole.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 Broadcom.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/uuid.h>
#include <linux/firmware/broadcom/tee_bnxt_fw.h>
#define MAX_SHM_MEM_SZ SZ_4M
#define MAX_TEE_PARAM_ARRY_MEMB 4
enum ta_cmd {
/*
* TA_CMD_BNXT_FASTBOOT - boot bnxt device by copying f/w into sram
*
* param[0] unused
* param[1] unused
* param[2] unused
* param[3] unused
*
* Result:
* TEE_SUCCESS - Invoke command success
* TEE_ERROR_ITEM_NOT_FOUND - Corrupt f/w image found on memory
*/
TA_CMD_BNXT_FASTBOOT = 0,
/*
* TA_CMD_BNXT_COPY_COREDUMP - copy the core dump into shm
*
* param[0] (inout memref) - Coredump buffer memory reference
* param[1] (in value) - value.a: offset, data to be copied from
* value.b: size of data to be copied
* param[2] unused
* param[3] unused
*
* Result:
* TEE_SUCCESS - Invoke command success
* TEE_ERROR_BAD_PARAMETERS - Incorrect input param
* TEE_ERROR_ITEM_NOT_FOUND - Corrupt core dump
*/
TA_CMD_BNXT_COPY_COREDUMP = 3,
};
/**
* struct tee_bnxt_fw_private - OP-TEE bnxt private data
* @dev: OP-TEE based bnxt device.
* @ctx: OP-TEE context handler.
* @session_id: TA session identifier.
* @fw_shm_pool: Shared memory pool used to exchange data (e.g. the coredump) with the TA.
*/
struct tee_bnxt_fw_private {
struct device *dev;
struct tee_context *ctx;
u32 session_id;
struct tee_shm *fw_shm_pool;
};
static struct tee_bnxt_fw_private pvt_data;
static void prepare_args(int cmd,
struct tee_ioctl_invoke_arg *arg,
struct tee_param *param)
{
memset(arg, 0, sizeof(*arg));
memset(param, 0, MAX_TEE_PARAM_ARRY_MEMB * sizeof(*param));
arg->func = cmd;
arg->session = pvt_data.session_id;
arg->num_params = MAX_TEE_PARAM_ARRY_MEMB;
/* Fill invoke cmd params */
switch (cmd) {
case TA_CMD_BNXT_COPY_COREDUMP:
param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
param[0].u.memref.shm = pvt_data.fw_shm_pool;
param[0].u.memref.size = MAX_SHM_MEM_SZ;
param[0].u.memref.shm_offs = 0;
param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
break;
case TA_CMD_BNXT_FASTBOOT:
default:
/* Nothing to do */
break;
}
}
/**
* tee_bnxt_fw_load() - Load the bnxt firmware
* Uses an OP-TEE call to start a secure
* boot process.
* Returns 0 on success, negative errno otherwise.
*/
int tee_bnxt_fw_load(void)
{
int ret = 0;
struct tee_ioctl_invoke_arg arg;
struct tee_param param[MAX_TEE_PARAM_ARRY_MEMB];
if (!pvt_data.ctx)
return -ENODEV;
prepare_args(TA_CMD_BNXT_FASTBOOT, &arg, param);
ret = tee_client_invoke_func(pvt_data.ctx, &arg, param);
if (ret < 0 || arg.ret != 0) {
dev_err(pvt_data.dev,
"TA_CMD_BNXT_FASTBOOT invoke failed TEE err: %x, ret:%x\n",
arg.ret, ret);
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL(tee_bnxt_fw_load);
/**
* tee_bnxt_copy_coredump() - Copy coredump from the allocated memory
* Uses an OP-TEE call to copy coredump
* @buf: destination buffer where core dump is copied into
* @offset: offset from the base address of core dump area
* @size: size of the dump
*
* Returns 0 on success, negative errno otherwise.
*/
int tee_bnxt_copy_coredump(void *buf, u32 offset, u32 size)
{
struct tee_ioctl_invoke_arg arg;
struct tee_param param[MAX_TEE_PARAM_ARRY_MEMB];
void *core_data;
u32 rbytes = size;
u32 nbytes = 0;
int ret = 0;
if (!pvt_data.ctx)
return -ENODEV;
prepare_args(TA_CMD_BNXT_COPY_COREDUMP, &arg, param);
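/*
* The shared memory window is at most MAX_SHM_MEM_SZ bytes, so copy
* the core dump in chunks and advance the offset after each call.
*/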
while (rbytes) {
nbytes = rbytes;
nbytes = min_t(u32, rbytes, param[0].u.memref.size);
/* Fill additional invoke cmd params */
param[1].u.value.a = offset;
param[1].u.value.b = nbytes;
ret = tee_client_invoke_func(pvt_data.ctx, &arg, param);
if (ret < 0 || arg.ret != 0) {
dev_err(pvt_data.dev,
"TA_CMD_BNXT_COPY_COREDUMP invoke failed TEE err: %x, ret:%x\n",
arg.ret, ret);
return -EINVAL;
}
core_data = tee_shm_get_va(pvt_data.fw_shm_pool, 0);
if (IS_ERR(core_data)) {
dev_err(pvt_data.dev, "tee_shm_get_va failed\n");
return PTR_ERR(core_data);
}
memcpy(buf, core_data, nbytes);
rbytes -= nbytes;
buf += nbytes;
offset += nbytes;
}
return 0;
}
EXPORT_SYMBOL(tee_bnxt_copy_coredump);
static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
{
return (ver->impl_id == TEE_IMPL_ID_OPTEE);
}
static int tee_bnxt_fw_probe(struct device *dev)
{
struct tee_client_device *bnxt_device = to_tee_client_device(dev);
int ret, err = -ENODEV;
struct tee_ioctl_open_session_arg sess_arg;
struct tee_shm *fw_shm_pool;
memset(&sess_arg, 0, sizeof(sess_arg));
/* Open context with TEE driver */
pvt_data.ctx = tee_client_open_context(NULL, optee_ctx_match, NULL,
NULL);
if (IS_ERR(pvt_data.ctx))
return -ENODEV;
/* Open session with Bnxt load Trusted App */
export_uuid(sess_arg.uuid, &bnxt_device->id.uuid);
sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
sess_arg.num_params = 0;
ret = tee_client_open_session(pvt_data.ctx, &sess_arg, NULL);
if (ret < 0 || sess_arg.ret != 0) {
dev_err(dev, "tee_client_open_session failed, err: %x\n",
sess_arg.ret);
err = -EINVAL;
goto out_ctx;
}
pvt_data.session_id = sess_arg.session;
pvt_data.dev = dev;
fw_shm_pool = tee_shm_alloc_kernel_buf(pvt_data.ctx, MAX_SHM_MEM_SZ);
if (IS_ERR(fw_shm_pool)) {
dev_err(pvt_data.dev, "tee_shm_alloc_kernel_buf failed\n");
err = PTR_ERR(fw_shm_pool);
goto out_sess;
}
pvt_data.fw_shm_pool = fw_shm_pool;
return 0;
out_sess:
tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
out_ctx:
tee_client_close_context(pvt_data.ctx);
return err;
}
static int tee_bnxt_fw_remove(struct device *dev)
{
tee_shm_free(pvt_data.fw_shm_pool);
tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
tee_client_close_context(pvt_data.ctx);
pvt_data.ctx = NULL;
return 0;
}
static void tee_bnxt_fw_shutdown(struct device *dev)
{
tee_shm_free(pvt_data.fw_shm_pool);
tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
tee_client_close_context(pvt_data.ctx);
pvt_data.ctx = NULL;
}
static const struct tee_client_device_id tee_bnxt_fw_id_table[] = {
{UUID_INIT(0x6272636D, 0x2019, 0x0716,
0x42, 0x43, 0x4D, 0x5F, 0x53, 0x43, 0x48, 0x49)},
{}
};
MODULE_DEVICE_TABLE(tee, tee_bnxt_fw_id_table);
static struct tee_client_driver tee_bnxt_fw_driver = {
.id_table = tee_bnxt_fw_id_table,
.driver = {
.name = KBUILD_MODNAME,
.bus = &tee_bus_type,
.probe = tee_bnxt_fw_probe,
.remove = tee_bnxt_fw_remove,
.shutdown = tee_bnxt_fw_shutdown,
},
};
static int __init tee_bnxt_fw_mod_init(void)
{
return driver_register(&tee_bnxt_fw_driver.driver);
}
static void __exit tee_bnxt_fw_mod_exit(void)
{
driver_unregister(&tee_bnxt_fw_driver.driver);
}
module_init(tee_bnxt_fw_mod_init);
module_exit(tee_bnxt_fw_mod_exit);
MODULE_AUTHOR("Vikas Gupta <[email protected]>");
MODULE_DESCRIPTION("Broadcom bnxt firmware manager");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/firmware/broadcom/tee_bnxt_fw.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* BCM947xx nvram variable access
*
* Copyright (C) 2005 Broadcom Corporation
* Copyright (C) 2006 Felix Fietkau <[email protected]>
* Copyright (C) 2010-2012 Hauke Mehrtens <[email protected]>
*/
#include <linux/io.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mtd/mtd.h>
#include <linux/bcm47xx_nvram.h>
#define NVRAM_MAGIC 0x48534C46 /* 'FLSH' */
#define NVRAM_SPACE 0x10000
#define NVRAM_MAX_GPIO_ENTRIES 32
#define NVRAM_MAX_GPIO_VALUE_LEN 30
#define FLASH_MIN 0x00020000 /* Minimum flash size */
struct nvram_header {
u32 magic;
u32 len;
u32 crc_ver_init; /* 0:7 crc, 8:15 ver, 16:31 sdram_init */
u32 config_refresh; /* 0:15 sdram_config, 16:31 sdram_refresh */
u32 config_ncdl; /* ncdl values for memc */
};
static char nvram_buf[NVRAM_SPACE];
static size_t nvram_len;
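/* Candidate NVRAM partition sizes, probed back from the end of flash. */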
static const u32 nvram_sizes[] = {0x6000, 0x8000, 0xF000, 0x10000};
/**
* bcm47xx_nvram_is_valid - check for a valid NVRAM at specified memory
* @nvram: pointer to the mapped memory to check
*/
static bool bcm47xx_nvram_is_valid(void __iomem *nvram)
{
return ((struct nvram_header *)nvram)->magic == NVRAM_MAGIC;
}
/**
* bcm47xx_nvram_copy - copy NVRAM to internal buffer
* @nvram_start: start of the mapped NVRAM
* @res_size: size of the mapped area
*/
static void bcm47xx_nvram_copy(void __iomem *nvram_start, size_t res_size)
{
struct nvram_header __iomem *header = nvram_start;
size_t copy_size;
copy_size = header->len;
if (copy_size > res_size) {
pr_err("The nvram size according to the header seems to be bigger than the partition on flash\n");
copy_size = res_size;
}
if (copy_size >= NVRAM_SPACE) {
pr_err("nvram on flash (%zu bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n",
copy_size, NVRAM_SPACE - 1);
copy_size = NVRAM_SPACE - 1;
}
__ioread32_copy(nvram_buf, nvram_start, DIV_ROUND_UP(copy_size, 4));
nvram_buf[NVRAM_SPACE - 1] = '\0';
nvram_len = copy_size;
}
/**
* bcm47xx_nvram_find_and_copy - find NVRAM on flash mapping & copy it
* @flash_start: start of the mapped flash
* @res_size: size of the mapped flash area
*/
static int bcm47xx_nvram_find_and_copy(void __iomem *flash_start, size_t res_size)
{
size_t flash_size;
size_t offset;
int i;
if (nvram_len) {
pr_warn("nvram already initialized\n");
return -EEXIST;
}
/* TODO: when nvram is on nand flash check for bad blocks first. */
/* Try every possible flash size and check for NVRAM at its end */
for (flash_size = FLASH_MIN; flash_size <= res_size; flash_size <<= 1) {
for (i = 0; i < ARRAY_SIZE(nvram_sizes); i++) {
offset = flash_size - nvram_sizes[i];
if (bcm47xx_nvram_is_valid(flash_start + offset))
goto found;
}
}
/* Try embedded NVRAM at 4 KB and 1 KB as last resorts */
offset = 4096;
if (bcm47xx_nvram_is_valid(flash_start + offset))
goto found;
offset = 1024;
if (bcm47xx_nvram_is_valid(flash_start + offset))
goto found;
pr_err("no nvram found\n");
return -ENXIO;
found:
bcm47xx_nvram_copy(flash_start + offset, res_size - offset);
return 0;
}
int bcm47xx_nvram_init_from_iomem(void __iomem *nvram_start, size_t res_size)
{
if (nvram_len) {
pr_warn("nvram already initialized\n");
return -EEXIST;
}
if (!bcm47xx_nvram_is_valid(nvram_start)) {
pr_err("No valid NVRAM found\n");
return -ENOENT;
}
bcm47xx_nvram_copy(nvram_start, res_size);
return 0;
}
EXPORT_SYMBOL_GPL(bcm47xx_nvram_init_from_iomem);
/*
* On bcm47xx we need access to the NVRAM very early, so we can't use the mtd
* subsystem to access flash. We can't even use a platform device / driver to
* store the memory offset.
* To handle this we provide the following symbol. It's supposed to be called as
* soon as we get info about the flash device, before any NVRAM entry is needed.
*/
int bcm47xx_nvram_init_from_mem(u32 base, u32 lim)
{
void __iomem *iobase;
int err;
iobase = ioremap(base, lim);
if (!iobase)
return -ENOMEM;
err = bcm47xx_nvram_find_and_copy(iobase, lim);
iounmap(iobase);
return err;
}
static int nvram_init(void)
{
#ifdef CONFIG_MTD
struct mtd_info *mtd;
struct nvram_header header;
size_t bytes_read;
int err;
mtd = get_mtd_device_nm("nvram");
if (IS_ERR(mtd))
return -ENODEV;
err = mtd_read(mtd, 0, sizeof(header), &bytes_read, (uint8_t *)&header);
if (!err && header.magic == NVRAM_MAGIC &&
header.len > sizeof(header)) {
nvram_len = header.len;
if (nvram_len >= NVRAM_SPACE) {
pr_err("nvram on flash (%zu bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n",
nvram_len, NVRAM_SPACE);
nvram_len = NVRAM_SPACE - 1;
}
err = mtd_read(mtd, 0, nvram_len, &nvram_len,
(u8 *)nvram_buf);
return err;
}
#endif
return -ENXIO;
}
int bcm47xx_nvram_getenv(const char *name, char *val, size_t val_len)
{
char *var, *value, *end, *eq;
int err;
if (!name)
return -EINVAL;
if (!nvram_len) {
err = nvram_init();
if (err)
return err;
}
/* Look for name=value and return value */
var = &nvram_buf[sizeof(struct nvram_header)];
end = nvram_buf + sizeof(nvram_buf);
while (var < end && *var) {
eq = strchr(var, '=');
if (!eq)
break;
value = eq + 1;
if (eq - var == strlen(name) &&
strncmp(var, name, eq - var) == 0)
return snprintf(val, val_len, "%s", value);
var = value + strlen(value) + 1;
}
return -ENOENT;
}
EXPORT_SYMBOL(bcm47xx_nvram_getenv);
int bcm47xx_nvram_gpio_pin(const char *name)
{
int i, err;
char nvram_var[] = "gpioXX";
char buf[NVRAM_MAX_GPIO_VALUE_LEN];
/* TODO: Optimize this to avoid calling getenv so many times */
for (i = 0; i < NVRAM_MAX_GPIO_ENTRIES; i++) {
err = snprintf(nvram_var, sizeof(nvram_var), "gpio%i", i);
if (err <= 0)
continue;
err = bcm47xx_nvram_getenv(nvram_var, buf, sizeof(buf));
if (err <= 0)
continue;
if (!strcmp(name, buf))
return i;
}
return -ENOENT;
}
EXPORT_SYMBOL(bcm47xx_nvram_gpio_pin);
char *bcm47xx_nvram_get_contents(size_t *nvram_size)
{
int err;
char *nvram;
if (!nvram_len) {
err = nvram_init();
if (err)
return NULL;
}
*nvram_size = nvram_len - sizeof(struct nvram_header);
nvram = vmalloc(*nvram_size);
if (!nvram)
return NULL;
memcpy(nvram, &nvram_buf[sizeof(struct nvram_header)], *nvram_size);
return nvram;
}
EXPORT_SYMBOL(bcm47xx_nvram_get_contents);
| linux-master | drivers/firmware/broadcom/bcm47xx_nvram.c |
/*
* Copyright (C) 2004 Florian Schirmer <[email protected]>
* Copyright (C) 2006 Felix Fietkau <[email protected]>
* Copyright (C) 2006 Michael Buesch <[email protected]>
* Copyright (C) 2010 Waldemar Brodkorb <[email protected]>
* Copyright (C) 2010-2012 Hauke Mehrtens <[email protected]>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/bcm47xx_nvram.h>
#include <linux/bcm47xx_sprom.h>
#include <linux/bcma/bcma.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/ssb/ssb.h>
static void create_key(const char *prefix, const char *postfix,
const char *name, char *buf, int len)
{
if (prefix && postfix)
snprintf(buf, len, "%s%s%s", prefix, name, postfix);
else if (prefix)
snprintf(buf, len, "%s%s", prefix, name);
else if (postfix)
snprintf(buf, len, "%s%s", name, postfix);
else
snprintf(buf, len, "%s", name);
}
static int get_nvram_var(const char *prefix, const char *postfix,
const char *name, char *buf, int len, bool fallback)
{
char key[40];
int err;
create_key(prefix, postfix, name, key, sizeof(key));
err = bcm47xx_nvram_getenv(key, buf, len);
if (fallback && err == -ENOENT && prefix) {
create_key(NULL, postfix, name, key, sizeof(key));
err = bcm47xx_nvram_getenv(key, buf, len);
}
return err;
}
#define NVRAM_READ_VAL(type) \
static void nvram_read_ ## type(const char *prefix, \
const char *postfix, const char *name, \
type *val, type allset, bool fallback) \
{ \
char buf[100]; \
int err; \
type var; \
\
err = get_nvram_var(prefix, postfix, name, buf, sizeof(buf), \
fallback); \
if (err < 0) \
return; \
err = kstrto ## type(strim(buf), 0, &var); \
if (err) { \
pr_warn("can not parse nvram name %s%s%s with value %s got %i\n", \
prefix, name, postfix, buf, err); \
return; \
} \
if (allset && var == allset) \
return; \
*val = var; \
}
NVRAM_READ_VAL(u8)
NVRAM_READ_VAL(s8)
NVRAM_READ_VAL(u16)
NVRAM_READ_VAL(u32)
#undef NVRAM_READ_VAL
static void nvram_read_u32_2(const char *prefix, const char *name,
u16 *val_lo, u16 *val_hi, bool fallback)
{
char buf[100];
int err;
u32 val;
err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
if (err < 0)
return;
err = kstrtou32(strim(buf), 0, &val);
if (err) {
pr_warn("can not parse nvram name %s%s with value %s got %i\n",
prefix, name, buf, err);
return;
}
*val_lo = (val & 0x0000FFFFU);
*val_hi = (val & 0xFFFF0000U) >> 16;
}
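/*
 * Illustrative sketch, not part of the driver: how nvram_read_u32_2() above
 * splits a 32-bit NVRAM value into the low/high 16-bit halves stored in the
 * SPROM structure. The sample value is made up.
 */
static void __maybe_unused example_split_u32(void)
{
	u32 val = 0x12345678;			/* hypothetical "boardflags" value */
	u16 lo = val & 0x0000FFFFU;		/* 0x5678 -> boardflags_lo */
	u16 hi = (val & 0xFFFF0000U) >> 16;	/* 0x1234 -> boardflags_hi */

	pr_debug("lo=0x%04x hi=0x%04x\n", lo, hi);
}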
static void nvram_read_leddc(const char *prefix, const char *name,
u8 *leddc_on_time, u8 *leddc_off_time,
bool fallback)
{
char buf[100];
int err;
u32 val;
err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
if (err < 0)
return;
err = kstrtou32(strim(buf), 0, &val);
if (err) {
pr_warn("can not parse nvram name %s%s with value %s got %i\n",
prefix, name, buf, err);
return;
}
if (val == 0xffff || val == 0xffffffff)
return;
*leddc_on_time = val & 0xff;
*leddc_off_time = (val >> 16) & 0xff;
}
static void nvram_read_macaddr(const char *prefix, const char *name,
u8 val[6], bool fallback)
{
char buf[100];
int err;
err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
if (err < 0)
return;
strreplace(buf, '-', ':');
if (!mac_pton(buf, val))
pr_warn("Can not parse mac address: %s\n", buf);
}
static void nvram_read_alpha2(const char *prefix, const char *name,
char val[2], bool fallback)
{
char buf[10];
int err;
err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
if (err < 0)
return;
if (buf[0] == '0')
return;
if (strlen(buf) > 2) {
pr_warn("alpha2 is too long %s\n", buf);
return;
}
memcpy(val, buf, 2);
}
/* This is one-function-only macro, it uses local "sprom" variable! */
#define ENTRY(_revmask, _type, _prefix, _name, _val, _allset, _fallback) \
if (_revmask & BIT(sprom->revision)) \
nvram_read_ ## _type(_prefix, NULL, _name, &sprom->_val, \
_allset, _fallback)
/*
 * Special version of the filling function that can be safely called for any
 * SPROM revision. For every NVRAM to SPROM mapping it contains a bitmask of
 * revisions for which the mapping is valid.
 * It obviously requires some hexadecimal/bitmask knowledge, but allows
 * writing cleaner code (easy revision handling).
* Note that while SPROM revision 0 was never used, we still keep BIT(0)
* reserved for it, just to keep numbering sane.
*/
static void bcm47xx_sprom_fill_auto(struct ssb_sprom *sprom,
const char *prefix, bool fallback)
{
const char *pre = prefix;
bool fb = fallback;
/* Broadcom extracts it for rev 8+ but it was found on 2 and 4 too */
ENTRY(0xfffffffe, u16, pre, "devid", dev_id, 0, fallback);
ENTRY(0xfffffffe, u16, pre, "boardrev", board_rev, 0, true);
ENTRY(0xfffffffe, u32, pre, "boardflags", boardflags, 0, fb);
ENTRY(0xfffffff0, u32, pre, "boardflags2", boardflags2, 0, fb);
ENTRY(0xfffff800, u32, pre, "boardflags3", boardflags3, 0, fb);
ENTRY(0x00000002, u16, pre, "boardflags", boardflags_lo, 0, fb);
ENTRY(0xfffffffc, u16, pre, "boardtype", board_type, 0, true);
ENTRY(0xfffffffe, u16, pre, "boardnum", board_num, 0, fb);
ENTRY(0x00000002, u8, pre, "cc", country_code, 0, fb);
ENTRY(0xfffffff8, u8, pre, "regrev", regrev, 0, fb);
ENTRY(0xfffffffe, u8, pre, "ledbh0", gpio0, 0xff, fb);
ENTRY(0xfffffffe, u8, pre, "ledbh1", gpio1, 0xff, fb);
ENTRY(0xfffffffe, u8, pre, "ledbh2", gpio2, 0xff, fb);
ENTRY(0xfffffffe, u8, pre, "ledbh3", gpio3, 0xff, fb);
ENTRY(0x0000070e, u16, pre, "pa0b0", pa0b0, 0, fb);
ENTRY(0x0000070e, u16, pre, "pa0b1", pa0b1, 0, fb);
ENTRY(0x0000070e, u16, pre, "pa0b2", pa0b2, 0, fb);
ENTRY(0x0000070e, u8, pre, "pa0itssit", itssi_bg, 0, fb);
ENTRY(0x0000070e, u8, pre, "pa0maxpwr", maxpwr_bg, 0, fb);
ENTRY(0x0000070c, u8, pre, "opo", opo, 0, fb);
ENTRY(0xfffffffe, u8, pre, "aa2g", ant_available_bg, 0, fb);
ENTRY(0xfffffffe, u8, pre, "aa5g", ant_available_a, 0, fb);
ENTRY(0x000007fe, s8, pre, "ag0", antenna_gain.a0, 0, fb);
ENTRY(0x000007fe, s8, pre, "ag1", antenna_gain.a1, 0, fb);
ENTRY(0x000007f0, s8, pre, "ag2", antenna_gain.a2, 0, fb);
ENTRY(0x000007f0, s8, pre, "ag3", antenna_gain.a3, 0, fb);
ENTRY(0x0000070e, u16, pre, "pa1b0", pa1b0, 0, fb);
ENTRY(0x0000070e, u16, pre, "pa1b1", pa1b1, 0, fb);
ENTRY(0x0000070e, u16, pre, "pa1b2", pa1b2, 0, fb);
ENTRY(0x0000070c, u16, pre, "pa1lob0", pa1lob0, 0, fb);
ENTRY(0x0000070c, u16, pre, "pa1lob1", pa1lob1, 0, fb);
ENTRY(0x0000070c, u16, pre, "pa1lob2", pa1lob2, 0, fb);
ENTRY(0x0000070c, u16, pre, "pa1hib0", pa1hib0, 0, fb);
ENTRY(0x0000070c, u16, pre, "pa1hib1", pa1hib1, 0, fb);
ENTRY(0x0000070c, u16, pre, "pa1hib2", pa1hib2, 0, fb);
ENTRY(0x0000070e, u8, pre, "pa1itssit", itssi_a, 0, fb);
ENTRY(0x0000070e, u8, pre, "pa1maxpwr", maxpwr_a, 0, fb);
ENTRY(0x0000070c, u8, pre, "pa1lomaxpwr", maxpwr_al, 0, fb);
ENTRY(0x0000070c, u8, pre, "pa1himaxpwr", maxpwr_ah, 0, fb);
ENTRY(0x00000708, u8, pre, "bxa2g", bxa2g, 0, fb);
ENTRY(0x00000708, u8, pre, "rssisav2g", rssisav2g, 0, fb);
ENTRY(0x00000708, u8, pre, "rssismc2g", rssismc2g, 0, fb);
ENTRY(0x00000708, u8, pre, "rssismf2g", rssismf2g, 0, fb);
ENTRY(0x00000708, u8, pre, "bxa5g", bxa5g, 0, fb);
ENTRY(0x00000708, u8, pre, "rssisav5g", rssisav5g, 0, fb);
ENTRY(0x00000708, u8, pre, "rssismc5g", rssismc5g, 0, fb);
ENTRY(0x00000708, u8, pre, "rssismf5g", rssismf5g, 0, fb);
ENTRY(0x00000708, u8, pre, "tri2g", tri2g, 0, fb);
ENTRY(0x00000708, u8, pre, "tri5g", tri5g, 0, fb);
ENTRY(0x00000708, u8, pre, "tri5gl", tri5gl, 0, fb);
ENTRY(0x00000708, u8, pre, "tri5gh", tri5gh, 0, fb);
ENTRY(0x00000708, s8, pre, "rxpo2g", rxpo2g, 0, fb);
ENTRY(0x00000708, s8, pre, "rxpo5g", rxpo5g, 0, fb);
ENTRY(0xfffffff0, u8, pre, "txchain", txchain, 0xf, fb);
ENTRY(0xfffffff0, u8, pre, "rxchain", rxchain, 0xf, fb);
ENTRY(0xfffffff0, u8, pre, "antswitch", antswitch, 0xff, fb);
ENTRY(0x00000700, u8, pre, "tssipos2g", fem.ghz2.tssipos, 0, fb);
ENTRY(0x00000700, u8, pre, "extpagain2g", fem.ghz2.extpa_gain, 0, fb);
ENTRY(0x00000700, u8, pre, "pdetrange2g", fem.ghz2.pdet_range, 0, fb);
ENTRY(0x00000700, u8, pre, "triso2g", fem.ghz2.tr_iso, 0, fb);
ENTRY(0x00000700, u8, pre, "antswctl2g", fem.ghz2.antswlut, 0, fb);
ENTRY(0x00000700, u8, pre, "tssipos5g", fem.ghz5.tssipos, 0, fb);
ENTRY(0x00000700, u8, pre, "extpagain5g", fem.ghz5.extpa_gain, 0, fb);
ENTRY(0x00000700, u8, pre, "pdetrange5g", fem.ghz5.pdet_range, 0, fb);
ENTRY(0x00000700, u8, pre, "triso5g", fem.ghz5.tr_iso, 0, fb);
ENTRY(0x00000700, u8, pre, "antswctl5g", fem.ghz5.antswlut, 0, fb);
ENTRY(0x000000f0, u8, pre, "txpid2ga0", txpid2g[0], 0, fb);
ENTRY(0x000000f0, u8, pre, "txpid2ga1", txpid2g[1], 0, fb);
ENTRY(0x000000f0, u8, pre, "txpid2ga2", txpid2g[2], 0, fb);
ENTRY(0x000000f0, u8, pre, "txpid2ga3", txpid2g[3], 0, fb);
ENTRY(0x000000f0, u8, pre, "txpid5ga0", txpid5g[0], 0, fb);
ENTRY(0x000000f0, u8, pre, "txpid5ga1", txpid5g[1], 0, fb);
ENTRY(0x000000f0, u8, pre, "txpid5ga2", txpid5g[2], 0, fb);
ENTRY(0x000000f0, u8, pre, "txpid5ga3", txpid5g[3], 0, fb);
ENTRY(0x000000f0, u8, pre, "txpid5gla0", txpid5gl[0], 0, fb);
ENTRY(0x000000f0, u8, pre, "txpid5gla1", txpid5gl[1], 0, fb);
ENTRY(0x000000f0, u8, pre, "txpid5gla2", txpid5gl[2], 0, fb);
ENTRY(0x000000f0, u8, pre, "txpid5gla3", txpid5gl[3], 0, fb);
ENTRY(0x000000f0, u8, pre, "txpid5gha0", txpid5gh[0], 0, fb);
ENTRY(0x000000f0, u8, pre, "txpid5gha1", txpid5gh[1], 0, fb);
ENTRY(0x000000f0, u8, pre, "txpid5gha2", txpid5gh[2], 0, fb);
ENTRY(0x000000f0, u8, pre, "txpid5gha3", txpid5gh[3], 0, fb);
ENTRY(0xffffff00, u8, pre, "tempthresh", tempthresh, 0, fb);
ENTRY(0xffffff00, u8, pre, "tempoffset", tempoffset, 0, fb);
ENTRY(0xffffff00, u16, pre, "rawtempsense", rawtempsense, 0, fb);
ENTRY(0xffffff00, u8, pre, "measpower", measpower, 0, fb);
ENTRY(0xffffff00, u8, pre, "tempsense_slope", tempsense_slope, 0, fb);
ENTRY(0xffffff00, u8, pre, "tempcorrx", tempcorrx, 0, fb);
ENTRY(0xffffff00, u8, pre, "tempsense_option", tempsense_option, 0, fb);
ENTRY(0x00000700, u8, pre, "freqoffset_corr", freqoffset_corr, 0, fb);
ENTRY(0x00000700, u8, pre, "iqcal_swp_dis", iqcal_swp_dis, 0, fb);
ENTRY(0x00000700, u8, pre, "hw_iqcal_en", hw_iqcal_en, 0, fb);
ENTRY(0x00000700, u8, pre, "elna2g", elna2g, 0, fb);
ENTRY(0x00000700, u8, pre, "elna5g", elna5g, 0, fb);
ENTRY(0xffffff00, u8, pre, "phycal_tempdelta", phycal_tempdelta, 0, fb);
ENTRY(0xffffff00, u8, pre, "temps_period", temps_period, 0, fb);
ENTRY(0xffffff00, u8, pre, "temps_hysteresis", temps_hysteresis, 0, fb);
ENTRY(0xffffff00, u8, pre, "measpower1", measpower1, 0, fb);
ENTRY(0xffffff00, u8, pre, "measpower2", measpower2, 0, fb);
ENTRY(0x000001f0, u16, pre, "cck2gpo", cck2gpo, 0, fb);
ENTRY(0x000001f0, u32, pre, "ofdm2gpo", ofdm2gpo, 0, fb);
ENTRY(0x000001f0, u32, pre, "ofdm5gpo", ofdm5gpo, 0, fb);
ENTRY(0x000001f0, u32, pre, "ofdm5glpo", ofdm5glpo, 0, fb);
ENTRY(0x000001f0, u32, pre, "ofdm5ghpo", ofdm5ghpo, 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs2gpo0", mcs2gpo[0], 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs2gpo1", mcs2gpo[1], 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs2gpo2", mcs2gpo[2], 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs2gpo3", mcs2gpo[3], 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs2gpo4", mcs2gpo[4], 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs2gpo5", mcs2gpo[5], 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs2gpo6", mcs2gpo[6], 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs2gpo7", mcs2gpo[7], 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs5gpo0", mcs5gpo[0], 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs5gpo1", mcs5gpo[1], 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs5gpo2", mcs5gpo[2], 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs5gpo3", mcs5gpo[3], 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs5gpo4", mcs5gpo[4], 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs5gpo5", mcs5gpo[5], 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs5gpo6", mcs5gpo[6], 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs5gpo7", mcs5gpo[7], 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs5glpo0", mcs5glpo[0], 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs5glpo1", mcs5glpo[1], 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs5glpo2", mcs5glpo[2], 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs5glpo3", mcs5glpo[3], 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs5glpo4", mcs5glpo[4], 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs5glpo5", mcs5glpo[5], 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs5glpo6", mcs5glpo[6], 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs5glpo7", mcs5glpo[7], 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs5ghpo0", mcs5ghpo[0], 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs5ghpo1", mcs5ghpo[1], 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs5ghpo2", mcs5ghpo[2], 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs5ghpo3", mcs5ghpo[3], 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs5ghpo4", mcs5ghpo[4], 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs5ghpo5", mcs5ghpo[5], 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs5ghpo6", mcs5ghpo[6], 0, fb);
ENTRY(0x000001f0, u16, pre, "mcs5ghpo7", mcs5ghpo[7], 0, fb);
ENTRY(0x000001f0, u16, pre, "cddpo", cddpo, 0, fb);
ENTRY(0x000001f0, u16, pre, "stbcpo", stbcpo, 0, fb);
ENTRY(0x000001f0, u16, pre, "bw40po", bw40po, 0, fb);
ENTRY(0x000001f0, u16, pre, "bwduppo", bwduppo, 0, fb);
ENTRY(0xfffffe00, u16, pre, "cckbw202gpo", cckbw202gpo, 0, fb);
ENTRY(0xfffffe00, u16, pre, "cckbw20ul2gpo", cckbw20ul2gpo, 0, fb);
ENTRY(0x00000600, u32, pre, "legofdmbw202gpo", legofdmbw202gpo, 0, fb);
ENTRY(0x00000600, u32, pre, "legofdmbw20ul2gpo", legofdmbw20ul2gpo, 0, fb);
ENTRY(0x00000600, u32, pre, "legofdmbw205glpo", legofdmbw205glpo, 0, fb);
ENTRY(0x00000600, u32, pre, "legofdmbw20ul5glpo", legofdmbw20ul5glpo, 0, fb);
ENTRY(0x00000600, u32, pre, "legofdmbw205gmpo", legofdmbw205gmpo, 0, fb);
ENTRY(0x00000600, u32, pre, "legofdmbw20ul5gmpo", legofdmbw20ul5gmpo, 0, fb);
ENTRY(0x00000600, u32, pre, "legofdmbw205ghpo", legofdmbw205ghpo, 0, fb);
ENTRY(0x00000600, u32, pre, "legofdmbw20ul5ghpo", legofdmbw20ul5ghpo, 0, fb);
ENTRY(0xfffffe00, u32, pre, "mcsbw202gpo", mcsbw202gpo, 0, fb);
ENTRY(0x00000600, u32, pre, "mcsbw20ul2gpo", mcsbw20ul2gpo, 0, fb);
ENTRY(0xfffffe00, u32, pre, "mcsbw402gpo", mcsbw402gpo, 0, fb);
ENTRY(0xfffffe00, u32, pre, "mcsbw205glpo", mcsbw205glpo, 0, fb);
ENTRY(0x00000600, u32, pre, "mcsbw20ul5glpo", mcsbw20ul5glpo, 0, fb);
ENTRY(0xfffffe00, u32, pre, "mcsbw405glpo", mcsbw405glpo, 0, fb);
ENTRY(0xfffffe00, u32, pre, "mcsbw205gmpo", mcsbw205gmpo, 0, fb);
ENTRY(0x00000600, u32, pre, "mcsbw20ul5gmpo", mcsbw20ul5gmpo, 0, fb);
ENTRY(0xfffffe00, u32, pre, "mcsbw405gmpo", mcsbw405gmpo, 0, fb);
ENTRY(0xfffffe00, u32, pre, "mcsbw205ghpo", mcsbw205ghpo, 0, fb);
ENTRY(0x00000600, u32, pre, "mcsbw20ul5ghpo", mcsbw20ul5ghpo, 0, fb);
ENTRY(0xfffffe00, u32, pre, "mcsbw405ghpo", mcsbw405ghpo, 0, fb);
ENTRY(0x00000600, u16, pre, "mcs32po", mcs32po, 0, fb);
ENTRY(0x00000600, u16, pre, "legofdm40duppo", legofdm40duppo, 0, fb);
ENTRY(0x00000700, u8, pre, "pcieingress_war", pcieingress_war, 0, fb);
/* TODO: rev 11 support */
ENTRY(0x00000700, u8, pre, "rxgainerr2ga0", rxgainerr2ga[0], 0, fb);
ENTRY(0x00000700, u8, pre, "rxgainerr2ga1", rxgainerr2ga[1], 0, fb);
ENTRY(0x00000700, u8, pre, "rxgainerr2ga2", rxgainerr2ga[2], 0, fb);
ENTRY(0x00000700, u8, pre, "rxgainerr5gla0", rxgainerr5gla[0], 0, fb);
ENTRY(0x00000700, u8, pre, "rxgainerr5gla1", rxgainerr5gla[1], 0, fb);
ENTRY(0x00000700, u8, pre, "rxgainerr5gla2", rxgainerr5gla[2], 0, fb);
ENTRY(0x00000700, u8, pre, "rxgainerr5gma0", rxgainerr5gma[0], 0, fb);
ENTRY(0x00000700, u8, pre, "rxgainerr5gma1", rxgainerr5gma[1], 0, fb);
ENTRY(0x00000700, u8, pre, "rxgainerr5gma2", rxgainerr5gma[2], 0, fb);
ENTRY(0x00000700, u8, pre, "rxgainerr5gha0", rxgainerr5gha[0], 0, fb);
ENTRY(0x00000700, u8, pre, "rxgainerr5gha1", rxgainerr5gha[1], 0, fb);
ENTRY(0x00000700, u8, pre, "rxgainerr5gha2", rxgainerr5gha[2], 0, fb);
ENTRY(0x00000700, u8, pre, "rxgainerr5gua0", rxgainerr5gua[0], 0, fb);
ENTRY(0x00000700, u8, pre, "rxgainerr5gua1", rxgainerr5gua[1], 0, fb);
ENTRY(0x00000700, u8, pre, "rxgainerr5gua2", rxgainerr5gua[2], 0, fb);
ENTRY(0xfffffe00, u8, pre, "sar2g", sar2g, 0, fb);
ENTRY(0xfffffe00, u8, pre, "sar5g", sar5g, 0, fb);
/* TODO: rev 11 support */
ENTRY(0x00000700, u8, pre, "noiselvl2ga0", noiselvl2ga[0], 0, fb);
ENTRY(0x00000700, u8, pre, "noiselvl2ga1", noiselvl2ga[1], 0, fb);
ENTRY(0x00000700, u8, pre, "noiselvl2ga2", noiselvl2ga[2], 0, fb);
ENTRY(0x00000700, u8, pre, "noiselvl5gla0", noiselvl5gla[0], 0, fb);
ENTRY(0x00000700, u8, pre, "noiselvl5gla1", noiselvl5gla[1], 0, fb);
ENTRY(0x00000700, u8, pre, "noiselvl5gla2", noiselvl5gla[2], 0, fb);
ENTRY(0x00000700, u8, pre, "noiselvl5gma0", noiselvl5gma[0], 0, fb);
ENTRY(0x00000700, u8, pre, "noiselvl5gma1", noiselvl5gma[1], 0, fb);
ENTRY(0x00000700, u8, pre, "noiselvl5gma2", noiselvl5gma[2], 0, fb);
ENTRY(0x00000700, u8, pre, "noiselvl5gha0", noiselvl5gha[0], 0, fb);
ENTRY(0x00000700, u8, pre, "noiselvl5gha1", noiselvl5gha[1], 0, fb);
ENTRY(0x00000700, u8, pre, "noiselvl5gha2", noiselvl5gha[2], 0, fb);
ENTRY(0x00000700, u8, pre, "noiselvl5gua0", noiselvl5gua[0], 0, fb);
ENTRY(0x00000700, u8, pre, "noiselvl5gua1", noiselvl5gua[1], 0, fb);
ENTRY(0x00000700, u8, pre, "noiselvl5gua2", noiselvl5gua[2], 0, fb);
}
#undef ENTRY /* It's specific, uses a local variable, don't use it (again). */
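/*
 * Illustrative sketch, not part of the driver: how the revision bitmask used
 * by the ENTRY() macro above selects the SPROM revisions a mapping applies
 * to. The mask value is one taken from the table; the revision is arbitrary.
 */
static bool __maybe_unused example_entry_applies(u8 revision)
{
	u32 revmask = 0x0000070e;	/* bits 1-3 and 8-10: revs 1-3 and 8-10 */

	return revmask & BIT(revision);
}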
static void bcm47xx_fill_sprom_path_r4589(struct ssb_sprom *sprom,
const char *prefix, bool fallback)
{
char postfix[2];
int i;
for (i = 0; i < ARRAY_SIZE(sprom->core_pwr_info); i++) {
struct ssb_sprom_core_pwr_info *pwr_info;
pwr_info = &sprom->core_pwr_info[i];
snprintf(postfix, sizeof(postfix), "%i", i);
nvram_read_u8(prefix, postfix, "maxp2ga",
&pwr_info->maxpwr_2g, 0, fallback);
nvram_read_u8(prefix, postfix, "itt2ga",
&pwr_info->itssi_2g, 0, fallback);
nvram_read_u8(prefix, postfix, "itt5ga",
&pwr_info->itssi_5g, 0, fallback);
nvram_read_u16(prefix, postfix, "pa2gw0a",
&pwr_info->pa_2g[0], 0, fallback);
nvram_read_u16(prefix, postfix, "pa2gw1a",
&pwr_info->pa_2g[1], 0, fallback);
nvram_read_u16(prefix, postfix, "pa2gw2a",
&pwr_info->pa_2g[2], 0, fallback);
nvram_read_u8(prefix, postfix, "maxp5ga",
&pwr_info->maxpwr_5g, 0, fallback);
nvram_read_u8(prefix, postfix, "maxp5gha",
&pwr_info->maxpwr_5gh, 0, fallback);
nvram_read_u8(prefix, postfix, "maxp5gla",
&pwr_info->maxpwr_5gl, 0, fallback);
nvram_read_u16(prefix, postfix, "pa5gw0a",
&pwr_info->pa_5g[0], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5gw1a",
&pwr_info->pa_5g[1], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5gw2a",
&pwr_info->pa_5g[2], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5glw0a",
&pwr_info->pa_5gl[0], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5glw1a",
&pwr_info->pa_5gl[1], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5glw2a",
&pwr_info->pa_5gl[2], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5ghw0a",
&pwr_info->pa_5gh[0], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5ghw1a",
&pwr_info->pa_5gh[1], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5ghw2a",
&pwr_info->pa_5gh[2], 0, fallback);
}
}
static void bcm47xx_fill_sprom_path_r45(struct ssb_sprom *sprom,
const char *prefix, bool fallback)
{
char postfix[2];
int i;
for (i = 0; i < ARRAY_SIZE(sprom->core_pwr_info); i++) {
struct ssb_sprom_core_pwr_info *pwr_info;
pwr_info = &sprom->core_pwr_info[i];
snprintf(postfix, sizeof(postfix), "%i", i);
nvram_read_u16(prefix, postfix, "pa2gw3a",
&pwr_info->pa_2g[3], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5gw3a",
&pwr_info->pa_5g[3], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5glw3a",
&pwr_info->pa_5gl[3], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5ghw3a",
&pwr_info->pa_5gh[3], 0, fallback);
}
}
static bool bcm47xx_is_valid_mac(u8 *mac)
{
return mac && !(mac[0] == 0x00 && mac[1] == 0x90 && mac[2] == 0x4c);
}
static int bcm47xx_increase_mac_addr(u8 *mac, u8 num)
{
u8 *oui = mac + ETH_ALEN/2 - 1;
u8 *p = mac + ETH_ALEN - 1;
do {
(*p) += num;
if (*p > num)
break;
p--;
num = 1;
} while (p != oui);
if (p == oui) {
pr_err("unable to fetch mac address\n");
return -ENOENT;
}
return 0;
}
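/*
 * Illustrative sketch, not part of the driver: carry propagation in
 * bcm47xx_increase_mac_addr() above. The MAC address below is made up for
 * the example.
 */
static void __maybe_unused example_increase_mac(void)
{
	u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0xfe };

	/* adding 2 carries into the next octet: ...:44:fe -> ...:45:00 */
	if (!bcm47xx_increase_mac_addr(mac, 2))
		pr_debug("derived MAC %pM\n", mac);
}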
static int mac_addr_used = 2;
static void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom,
const char *prefix, bool fallback)
{
bool fb = fallback;
nvram_read_macaddr(prefix, "et0macaddr", sprom->et0mac, fallback);
nvram_read_u8(prefix, NULL, "et0mdcport", &sprom->et0mdcport, 0,
fallback);
nvram_read_u8(prefix, NULL, "et0phyaddr", &sprom->et0phyaddr, 0,
fallback);
nvram_read_macaddr(prefix, "et1macaddr", sprom->et1mac, fallback);
nvram_read_u8(prefix, NULL, "et1mdcport", &sprom->et1mdcport, 0,
fallback);
nvram_read_u8(prefix, NULL, "et1phyaddr", &sprom->et1phyaddr, 0,
fallback);
nvram_read_macaddr(prefix, "et2macaddr", sprom->et2mac, fb);
nvram_read_u8(prefix, NULL, "et2mdcport", &sprom->et2mdcport, 0, fb);
nvram_read_u8(prefix, NULL, "et2phyaddr", &sprom->et2phyaddr, 0, fb);
nvram_read_macaddr(prefix, "macaddr", sprom->il0mac, fallback);
nvram_read_macaddr(prefix, "il0macaddr", sprom->il0mac, fallback);
/* The address prefix 00:90:4C is used by Broadcom in their initial
	 * configuration. When a mac address with the prefix 00:90:4C is used,
	 * all devices from the same series share the same mac address. To
	 * prevent mac address collisions we replace it with a mac address
	 * derived from the base address.
*/
if (!bcm47xx_is_valid_mac(sprom->il0mac)) {
u8 mac[6];
nvram_read_macaddr(NULL, "et0macaddr", mac, false);
if (bcm47xx_is_valid_mac(mac)) {
int err = bcm47xx_increase_mac_addr(mac, mac_addr_used);
if (!err) {
ether_addr_copy(sprom->il0mac, mac);
mac_addr_used++;
}
}
}
}
static void bcm47xx_fill_board_data(struct ssb_sprom *sprom, const char *prefix,
bool fallback)
{
nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo,
&sprom->boardflags_hi, fallback);
nvram_read_u32_2(prefix, "boardflags2", &sprom->boardflags2_lo,
&sprom->boardflags2_hi, fallback);
}
void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix,
bool fallback)
{
bcm47xx_fill_sprom_ethernet(sprom, prefix, fallback);
bcm47xx_fill_board_data(sprom, prefix, fallback);
nvram_read_u8(prefix, NULL, "sromrev", &sprom->revision, 0, fallback);
/* Entries requiring custom functions */
nvram_read_alpha2(prefix, "ccode", sprom->alpha2, fallback);
if (sprom->revision >= 3)
nvram_read_leddc(prefix, "leddc", &sprom->leddc_on_time,
&sprom->leddc_off_time, fallback);
switch (sprom->revision) {
case 4:
case 5:
bcm47xx_fill_sprom_path_r4589(sprom, prefix, fallback);
bcm47xx_fill_sprom_path_r45(sprom, prefix, fallback);
break;
case 8:
case 9:
bcm47xx_fill_sprom_path_r4589(sprom, prefix, fallback);
break;
}
bcm47xx_sprom_fill_auto(sprom, prefix, fallback);
}
#if IS_BUILTIN(CONFIG_SSB) && IS_ENABLED(CONFIG_SSB_SPROM)
static int bcm47xx_get_sprom_ssb(struct ssb_bus *bus, struct ssb_sprom *out)
{
char prefix[10];
switch (bus->bustype) {
case SSB_BUSTYPE_SSB:
bcm47xx_fill_sprom(out, NULL, false);
return 0;
case SSB_BUSTYPE_PCI:
memset(out, 0, sizeof(struct ssb_sprom));
snprintf(prefix, sizeof(prefix), "pci/%u/%u/",
bus->host_pci->bus->number + 1,
PCI_SLOT(bus->host_pci->devfn));
bcm47xx_fill_sprom(out, prefix, false);
return 0;
default:
pr_warn("Unable to fill SPROM for given bustype.\n");
return -EINVAL;
}
}
#endif
#if IS_BUILTIN(CONFIG_BCMA)
/*
* Having many NVRAM entries for PCI devices led to repeating prefixes like
* pci/1/1/ all the time and wasting flash space. So at some point Broadcom
* decided to introduce prefixes like 0: 1: 2: etc.
* If we find e.g. devpath0=pci/2/1 or devpath0=pci/2/1/ we should use 0:
* instead of pci/2/1/.
*/
static void bcm47xx_sprom_apply_prefix_alias(char *prefix, size_t prefix_size)
{
size_t prefix_len = strlen(prefix);
size_t short_len = prefix_len - 1;
char nvram_var[10];
char buf[20];
int i;
/* Passed prefix has to end with a slash */
if (prefix_len <= 0 || prefix[prefix_len - 1] != '/')
return;
for (i = 0; i < 3; i++) {
if (snprintf(nvram_var, sizeof(nvram_var), "devpath%d", i) <= 0)
continue;
if (bcm47xx_nvram_getenv(nvram_var, buf, sizeof(buf)) < 0)
continue;
if (!strcmp(buf, prefix) ||
(short_len && strlen(buf) == short_len && !strncmp(buf, prefix, short_len))) {
snprintf(prefix, prefix_size, "%d:", i);
return;
}
}
}
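/*
 * Illustrative sketch, not part of the driver: the aliasing above with
 * hypothetical NVRAM content. If NVRAM contains "devpath0=pci/2/1" (or
 * "devpath0=pci/2/1/"), a prefix buffer holding "pci/2/1/" is rewritten to
 * the short alias "0:".
 */
static void __maybe_unused example_prefix_alias(void)
{
	char prefix[10] = "pci/2/1/";

	bcm47xx_sprom_apply_prefix_alias(prefix, sizeof(prefix));
	/* prefix now reads "0:" if a matching devpathN entry exists */
}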
static int bcm47xx_get_sprom_bcma(struct bcma_bus *bus, struct ssb_sprom *out)
{
struct bcma_boardinfo *binfo = &bus->boardinfo;
struct bcma_device *core;
char buf[10];
char *prefix;
bool fallback = false;
switch (bus->hosttype) {
case BCMA_HOSTTYPE_PCI:
memset(out, 0, sizeof(struct ssb_sprom));
/* On BCM47XX all PCI buses share the same domain */
if (IS_ENABLED(CONFIG_BCM47XX))
snprintf(buf, sizeof(buf), "pci/%u/%u/",
bus->host_pci->bus->number + 1,
PCI_SLOT(bus->host_pci->devfn));
else
snprintf(buf, sizeof(buf), "pci/%u/%u/",
pci_domain_nr(bus->host_pci->bus) + 1,
bus->host_pci->bus->number);
bcm47xx_sprom_apply_prefix_alias(buf, sizeof(buf));
prefix = buf;
break;
case BCMA_HOSTTYPE_SOC:
memset(out, 0, sizeof(struct ssb_sprom));
core = bcma_find_core(bus, BCMA_CORE_80211);
if (core) {
snprintf(buf, sizeof(buf), "sb/%u/",
core->core_index);
prefix = buf;
fallback = true;
} else {
prefix = NULL;
}
break;
default:
pr_warn("Unable to fill SPROM for given bustype.\n");
return -EINVAL;
}
nvram_read_u16(prefix, NULL, "boardvendor", &binfo->vendor, 0, true);
if (!binfo->vendor)
binfo->vendor = SSB_BOARDVENDOR_BCM;
nvram_read_u16(prefix, NULL, "boardtype", &binfo->type, 0, true);
bcm47xx_fill_sprom(out, prefix, fallback);
return 0;
}
#endif
static unsigned int bcm47xx_sprom_registered;
/*
 * On bcm47xx we need to register the SPROM fallback handlers very early, so
 * we can't use anything like a platform device / driver for this.
*/
int bcm47xx_sprom_register_fallbacks(void)
{
if (bcm47xx_sprom_registered)
return 0;
#if IS_BUILTIN(CONFIG_SSB) && IS_ENABLED(CONFIG_SSB_SPROM)
if (ssb_arch_register_fallback_sprom(&bcm47xx_get_sprom_ssb))
pr_warn("Failed to register ssb SPROM handler\n");
#endif
#if IS_BUILTIN(CONFIG_BCMA)
if (bcma_arch_register_fallback_sprom(&bcm47xx_get_sprom_bcma))
pr_warn("Failed to register bcma SPROM handler\n");
#endif
bcm47xx_sprom_registered = 1;
return 0;
}
fs_initcall(bcm47xx_sprom_register_fallbacks);
| linux-master | drivers/firmware/broadcom/bcm47xx_sprom.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2020 Arm Limited
*/
#define pr_fmt(fmt) "SMCCC: SOC_ID: " fmt
#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
#define SMCCC_SOC_ID_JEP106_BANK_IDX_MASK GENMASK(30, 24)
/*
* As per the SMC Calling Convention specification v1.2 (ARM DEN 0028C)
* Section 7.4 SMCCC_ARCH_SOC_ID bits[23:16] are JEP-106 identification
* code with parity bit for the SiP. We can drop the parity bit.
*/
#define SMCCC_SOC_ID_JEP106_ID_CODE_MASK GENMASK(22, 16)
#define SMCCC_SOC_ID_IMP_DEF_SOC_ID_MASK GENMASK(15, 0)
#define JEP106_BANK_CONT_CODE(x) \
(u8)(FIELD_GET(SMCCC_SOC_ID_JEP106_BANK_IDX_MASK, (x)))
#define JEP106_ID_CODE(x) \
(u8)(FIELD_GET(SMCCC_SOC_ID_JEP106_ID_CODE_MASK, (x)))
#define IMP_DEF_SOC_ID(x) \
(u16)(FIELD_GET(SMCCC_SOC_ID_IMP_DEF_SOC_ID_MASK, (x)))
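/*
 * Illustrative sketch, not part of the driver: decoding a hypothetical
 * SMCCC_ARCH_SOC_ID version word with the field macros above. The value
 * 0x036b0123 is fabricated for the example.
 */
static void __maybe_unused example_decode_soc_id(void)
{
	u32 version = 0x036b0123;

	pr_debug("bank=%u id=0x%02x soc=0x%04x\n",
		 JEP106_BANK_CONT_CODE(version),	/* 0x03 */
		 JEP106_ID_CODE(version),		/* 0x6b */
		 IMP_DEF_SOC_ID(version));		/* 0x0123 */
}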
static struct soc_device *soc_dev;
static struct soc_device_attribute *soc_dev_attr;
static int __init smccc_soc_init(void)
{
int soc_id_rev, soc_id_version;
static char soc_id_str[20], soc_id_rev_str[12];
static char soc_id_jep106_id_str[12];
if (arm_smccc_get_version() < ARM_SMCCC_VERSION_1_2)
return 0;
soc_id_version = arm_smccc_get_soc_id_version();
if (soc_id_version == SMCCC_RET_NOT_SUPPORTED) {
pr_info("ARCH_SOC_ID not implemented, skipping ....\n");
return 0;
}
if (soc_id_version < 0) {
pr_err("Invalid SoC Version: %x\n", soc_id_version);
return -EINVAL;
}
soc_id_rev = arm_smccc_get_soc_id_revision();
if (soc_id_rev < 0) {
pr_err("Invalid SoC Revision: %x\n", soc_id_rev);
return -EINVAL;
}
soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
if (!soc_dev_attr)
return -ENOMEM;
sprintf(soc_id_rev_str, "0x%08x", soc_id_rev);
sprintf(soc_id_jep106_id_str, "jep106:%02x%02x",
JEP106_BANK_CONT_CODE(soc_id_version),
JEP106_ID_CODE(soc_id_version));
sprintf(soc_id_str, "%s:%04x", soc_id_jep106_id_str,
IMP_DEF_SOC_ID(soc_id_version));
soc_dev_attr->soc_id = soc_id_str;
soc_dev_attr->revision = soc_id_rev_str;
soc_dev_attr->family = soc_id_jep106_id_str;
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
kfree(soc_dev_attr);
return PTR_ERR(soc_dev);
}
pr_info("ID = %s Revision = %s\n", soc_dev_attr->soc_id,
soc_dev_attr->revision);
return 0;
}
module_init(smccc_soc_init);
static void __exit smccc_soc_exit(void)
{
if (soc_dev)
soc_device_unregister(soc_dev);
kfree(soc_dev_attr);
}
module_exit(smccc_soc_exit);
| linux-master | drivers/firmware/smccc/soc_id.c |
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "smccc: KVM: " fmt
#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/cache.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/hypervisor.h>
static DECLARE_BITMAP(__kvm_arm_hyp_services, ARM_SMCCC_KVM_NUM_FUNCS) __ro_after_init = { };
void __init kvm_init_hyp_services(void)
{
struct arm_smccc_res res;
u32 val[4];
if (arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_HVC)
return;
arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, &res);
if (res.a0 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0 ||
res.a1 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1 ||
res.a2 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2 ||
res.a3 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3)
return;
memset(&res, 0, sizeof(res));
arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID, &res);
val[0] = lower_32_bits(res.a0);
val[1] = lower_32_bits(res.a1);
val[2] = lower_32_bits(res.a2);
val[3] = lower_32_bits(res.a3);
bitmap_from_arr32(__kvm_arm_hyp_services, val, ARM_SMCCC_KVM_NUM_FUNCS);
pr_info("hypervisor services detected (0x%08lx 0x%08lx 0x%08lx 0x%08lx)\n",
res.a3, res.a2, res.a1, res.a0);
}
bool kvm_arm_hyp_service_available(u32 func_id)
{
if (func_id >= ARM_SMCCC_KVM_NUM_FUNCS)
return false;
return test_bit(func_id, __kvm_arm_hyp_services);
}
EXPORT_SYMBOL_GPL(kvm_arm_hyp_service_available);
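/*
 * Illustrative sketch, not part of this file: how a caller might guard a
 * vendor hypercall on the bitmap filled in above. ARM_SMCCC_KVM_FUNC_FEATURES
 * is used here only as a known function ID for the example.
 */
static void __maybe_unused example_check_hyp_service(void)
{
	if (kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_FEATURES))
		pr_debug("KVM vendor hypercall features available\n");
}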
| linux-master | drivers/firmware/smccc/kvm_guest.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020 Arm Limited
*/
#define pr_fmt(fmt) "smccc: " fmt
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/arm-smccc.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <asm/archrandom.h>
static u32 smccc_version = ARM_SMCCC_VERSION_1_0;
static enum arm_smccc_conduit smccc_conduit = SMCCC_CONDUIT_NONE;
bool __ro_after_init smccc_trng_available = false;
u64 __ro_after_init smccc_has_sve_hint = false;
s32 __ro_after_init smccc_soc_id_version = SMCCC_RET_NOT_SUPPORTED;
s32 __ro_after_init smccc_soc_id_revision = SMCCC_RET_NOT_SUPPORTED;
void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit)
{
struct arm_smccc_res res;
smccc_version = version;
smccc_conduit = conduit;
smccc_trng_available = smccc_probe_trng();
if (IS_ENABLED(CONFIG_ARM64_SVE) &&
smccc_version >= ARM_SMCCC_VERSION_1_3)
smccc_has_sve_hint = true;
if ((smccc_version >= ARM_SMCCC_VERSION_1_2) &&
(smccc_conduit != SMCCC_CONDUIT_NONE)) {
arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_SOC_ID, &res);
if ((s32)res.a0 >= 0) {
arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_SOC_ID, 0, &res);
smccc_soc_id_version = (s32)res.a0;
arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_SOC_ID, 1, &res);
smccc_soc_id_revision = (s32)res.a0;
}
}
}
enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void)
{
if (smccc_version < ARM_SMCCC_VERSION_1_1)
return SMCCC_CONDUIT_NONE;
return smccc_conduit;
}
EXPORT_SYMBOL_GPL(arm_smccc_1_1_get_conduit);
u32 arm_smccc_get_version(void)
{
return smccc_version;
}
EXPORT_SYMBOL_GPL(arm_smccc_get_version);
s32 arm_smccc_get_soc_id_version(void)
{
return smccc_soc_id_version;
}
s32 arm_smccc_get_soc_id_revision(void)
{
return smccc_soc_id_revision;
}
static int __init smccc_devices_init(void)
{
struct platform_device *pdev;
if (smccc_trng_available) {
pdev = platform_device_register_simple("smccc_trng", -1,
NULL, 0);
if (IS_ERR(pdev))
pr_err("smccc_trng: could not register device: %ld\n",
PTR_ERR(pdev));
}
return 0;
}
device_initcall(smccc_devices_init);
| linux-master | drivers/firmware/smccc/smccc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
*/
#include <soc/tegra/ivc.h>
#define TEGRA_IVC_ALIGN 64
/*
* IVC channel reset protocol.
*
* Each end uses its tx_channel.state to indicate its synchronization state.
*/
enum tegra_ivc_state {
/*
* This value is zero for backwards compatibility with services that
* assume channels to be initially zeroed. Such channels are in an
* initially valid state, but cannot be asynchronously reset, and must
* maintain a valid state at all times.
*
* The transmitting end can enter the established state from the sync or
* ack state when it observes the receiving endpoint in the ack or
	 * established state, indicating that it has cleared the counters in our
* rx_channel.
*/
TEGRA_IVC_STATE_ESTABLISHED = 0,
/*
* If an endpoint is observed in the sync state, the remote endpoint is
* allowed to clear the counters it owns asynchronously with respect to
* the current endpoint. Therefore, the current endpoint is no longer
* allowed to communicate.
*/
TEGRA_IVC_STATE_SYNC,
/*
* When the transmitting end observes the receiving end in the sync
	 * state, it can clear the tx.count and rx.count and transition to the ack
* state. If the remote endpoint observes us in the ack state, it can
* return to the established state once it has cleared its counters.
*/
TEGRA_IVC_STATE_ACK
};
/*
* This structure is divided into two-cache aligned parts, the first is only
* written through the tx.channel pointer, while the second is only written
* through the rx.channel pointer. This delineates ownership of the cache
* lines, which is critical to performance and necessary in non-cache coherent
* implementations.
*/
struct tegra_ivc_header {
union {
struct {
/* fields owned by the transmitting end */
u32 count;
u32 state;
};
u8 pad[TEGRA_IVC_ALIGN];
} tx;
union {
/* fields owned by the receiving end */
u32 count;
u8 pad[TEGRA_IVC_ALIGN];
} rx;
};
#define tegra_ivc_header_read_field(hdr, field) \
iosys_map_rd_field(hdr, 0, struct tegra_ivc_header, field)
#define tegra_ivc_header_write_field(hdr, field, value) \
iosys_map_wr_field(hdr, 0, struct tegra_ivc_header, field, value)
static inline void tegra_ivc_invalidate(struct tegra_ivc *ivc, dma_addr_t phys)
{
if (!ivc->peer)
return;
dma_sync_single_for_cpu(ivc->peer, phys, TEGRA_IVC_ALIGN,
DMA_FROM_DEVICE);
}
static inline void tegra_ivc_flush(struct tegra_ivc *ivc, dma_addr_t phys)
{
if (!ivc->peer)
return;
dma_sync_single_for_device(ivc->peer, phys, TEGRA_IVC_ALIGN,
DMA_TO_DEVICE);
}
static inline bool tegra_ivc_empty(struct tegra_ivc *ivc, struct iosys_map *map)
{
/*
* This function performs multiple checks on the same values with
* security implications, so create snapshots with READ_ONCE() to
* ensure that these checks use the same values.
*/
u32 tx = tegra_ivc_header_read_field(map, tx.count);
u32 rx = tegra_ivc_header_read_field(map, rx.count);
/*
* Perform an over-full check to prevent denial of service attacks
* where a server could be easily fooled into believing that there's
* an extremely large number of frames ready, since receivers are not
* expected to check for full or over-full conditions.
*
* Although the channel isn't empty, this is an invalid case caused by
* a potentially malicious peer, so returning empty is safer, because
* it gives the impression that the channel has gone silent.
*/
if (tx - rx > ivc->num_frames)
return true;
return tx == rx;
}
static inline bool tegra_ivc_full(struct tegra_ivc *ivc, struct iosys_map *map)
{
u32 tx = tegra_ivc_header_read_field(map, tx.count);
u32 rx = tegra_ivc_header_read_field(map, rx.count);
/*
* Invalid cases where the counters indicate that the queue is over
* capacity also appear full.
*/
return tx - rx >= ivc->num_frames;
}
static inline u32 tegra_ivc_available(struct tegra_ivc *ivc, struct iosys_map *map)
{
u32 tx = tegra_ivc_header_read_field(map, tx.count);
u32 rx = tegra_ivc_header_read_field(map, rx.count);
/*
* This function isn't expected to be used in scenarios where an
* over-full situation can lead to denial of service attacks. See the
* comment in tegra_ivc_empty() for an explanation about special
* over-full considerations.
*/
return tx - rx;
}
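/*
 * Illustrative sketch, not part of the driver: the unsigned wrap-around
 * arithmetic relied upon by the helpers above. Even after tx.count has
 * wrapped past zero, the difference still gives the number of frames in
 * flight. The counter values are made up.
 */
static void __maybe_unused example_counter_wraparound(void)
{
	u32 tx = 0x00000002;	/* transmitter has wrapped around */
	u32 rx = 0xfffffffe;	/* receiver has not wrapped yet */

	/* (0x2 - 0xfffffffe) mod 2^32 == 4 frames pending */
	pr_debug("frames in flight: %u\n", tx - rx);
}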
static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
{
unsigned int count = tegra_ivc_header_read_field(&ivc->tx.map, tx.count);
tegra_ivc_header_write_field(&ivc->tx.map, tx.count, count + 1);
if (ivc->tx.position == ivc->num_frames - 1)
ivc->tx.position = 0;
else
ivc->tx.position++;
}
static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
{
unsigned int count = tegra_ivc_header_read_field(&ivc->rx.map, rx.count);
tegra_ivc_header_write_field(&ivc->rx.map, rx.count, count + 1);
if (ivc->rx.position == ivc->num_frames - 1)
ivc->rx.position = 0;
else
ivc->rx.position++;
}
static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
{
unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
unsigned int state;
/*
* tx.channel->state is set locally, so it is not synchronized with
* state from the remote peer. The remote peer cannot reset its
* transmit counters until we've acknowledged its synchronization
* request, so no additional synchronization is required because an
* asynchronous transition of rx.channel->state to
* TEGRA_IVC_STATE_ACK is not allowed.
*/
state = tegra_ivc_header_read_field(&ivc->tx.map, tx.state);
if (state != TEGRA_IVC_STATE_ESTABLISHED)
return -ECONNRESET;
/*
* Avoid unnecessary invalidations when performing repeated accesses
* to an IVC channel by checking the old queue pointers first.
*
* Synchronization is only necessary when these pointers indicate
* empty or full.
*/
if (!tegra_ivc_empty(ivc, &ivc->rx.map))
return 0;
tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
if (tegra_ivc_empty(ivc, &ivc->rx.map))
return -ENOSPC;
return 0;
}
static inline int tegra_ivc_check_write(struct tegra_ivc *ivc)
{
unsigned int offset = offsetof(struct tegra_ivc_header, rx.count);
unsigned int state;
state = tegra_ivc_header_read_field(&ivc->tx.map, tx.state);
if (state != TEGRA_IVC_STATE_ESTABLISHED)
return -ECONNRESET;
if (!tegra_ivc_full(ivc, &ivc->tx.map))
return 0;
tegra_ivc_invalidate(ivc, ivc->tx.phys + offset);
if (tegra_ivc_full(ivc, &ivc->tx.map))
return -ENOSPC;
return 0;
}
static int tegra_ivc_frame_virt(struct tegra_ivc *ivc, const struct iosys_map *header,
unsigned int frame, struct iosys_map *map)
{
size_t offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame;
if (WARN_ON(frame >= ivc->num_frames))
return -EINVAL;
*map = IOSYS_MAP_INIT_OFFSET(header, offset);
return 0;
}
static inline dma_addr_t tegra_ivc_frame_phys(struct tegra_ivc *ivc,
dma_addr_t phys,
unsigned int frame)
{
unsigned long offset;
offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame;
return phys + offset;
}
static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc,
dma_addr_t phys,
unsigned int frame,
unsigned int offset,
size_t size)
{
if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
return;
phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;
dma_sync_single_for_cpu(ivc->peer, phys, size, DMA_FROM_DEVICE);
}
static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc,
dma_addr_t phys,
unsigned int frame,
unsigned int offset,
size_t size)
{
if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
return;
phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;
dma_sync_single_for_device(ivc->peer, phys, size, DMA_TO_DEVICE);
}
/* directly peek at the next frame rx'ed */
int tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc, struct iosys_map *map)
{
int err;
if (WARN_ON(ivc == NULL))
return -EINVAL;
err = tegra_ivc_check_read(ivc);
if (err < 0)
return err;
/*
* Order observation of ivc->rx.position potentially indicating new
* data before data read.
*/
smp_rmb();
tegra_ivc_invalidate_frame(ivc, ivc->rx.phys, ivc->rx.position, 0,
ivc->frame_size);
return tegra_ivc_frame_virt(ivc, &ivc->rx.map, ivc->rx.position, map);
}
EXPORT_SYMBOL(tegra_ivc_read_get_next_frame);
int tegra_ivc_read_advance(struct tegra_ivc *ivc)
{
unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
int err;
/*
* No read barriers or synchronization here: the caller is expected to
* have already observed the channel non-empty. This check is just to
* catch programming errors.
*/
err = tegra_ivc_check_read(ivc);
if (err < 0)
return err;
tegra_ivc_advance_rx(ivc);
tegra_ivc_flush(ivc, ivc->rx.phys + rx);
/*
* Ensure our write to ivc->rx.position occurs before our read from
* ivc->tx.position.
*/
smp_mb();
/*
* Notify only upon transition from full to non-full. The available
* count can only asynchronously increase, so the worst possible
* side-effect will be a spurious notification.
*/
tegra_ivc_invalidate(ivc, ivc->rx.phys + tx);
if (tegra_ivc_available(ivc, &ivc->rx.map) == ivc->num_frames - 1)
ivc->notify(ivc, ivc->notify_data);
return 0;
}
EXPORT_SYMBOL(tegra_ivc_read_advance);
/* directly poke at the next frame to be tx'ed */
int tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc, struct iosys_map *map)
{
int err;
err = tegra_ivc_check_write(ivc);
if (err < 0)
return err;
return tegra_ivc_frame_virt(ivc, &ivc->tx.map, ivc->tx.position, map);
}
EXPORT_SYMBOL(tegra_ivc_write_get_next_frame);
/* advance the tx buffer */
int tegra_ivc_write_advance(struct tegra_ivc *ivc)
{
unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
int err;
err = tegra_ivc_check_write(ivc);
if (err < 0)
return err;
tegra_ivc_flush_frame(ivc, ivc->tx.phys, ivc->tx.position, 0,
ivc->frame_size);
/*
* Order any possible stores to the frame before update of
* ivc->tx.position.
*/
smp_wmb();
tegra_ivc_advance_tx(ivc);
tegra_ivc_flush(ivc, ivc->tx.phys + tx);
/*
* Ensure our write to ivc->tx.position occurs before our read from
* ivc->rx.position.
*/
smp_mb();
/*
* Notify only upon transition from empty to non-empty. The available
* count can only asynchronously decrease, so the worst possible
* side-effect will be a spurious notification.
*/
tegra_ivc_invalidate(ivc, ivc->tx.phys + rx);
if (tegra_ivc_available(ivc, &ivc->tx.map) == 1)
ivc->notify(ivc, ivc->notify_data);
return 0;
}
EXPORT_SYMBOL(tegra_ivc_write_advance);
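/*
 * Illustrative sketch, not part of the driver: the typical transmit sequence
 * built on the two helpers above. The caller-provided buffer, its size and
 * the use of iosys_map_memcpy_to() are assumptions made for the example.
 */
static int __maybe_unused example_ivc_send(struct tegra_ivc *ivc,
					   const void *data, size_t size)
{
	struct iosys_map frame;
	int err;

	err = tegra_ivc_write_get_next_frame(ivc, &frame);
	if (err < 0)
		return err;	/* channel full or not yet established */

	iosys_map_memcpy_to(&frame, 0, data, size);

	return tegra_ivc_write_advance(ivc);
}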
void tegra_ivc_reset(struct tegra_ivc *ivc)
{
unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
tegra_ivc_header_write_field(&ivc->tx.map, tx.state, TEGRA_IVC_STATE_SYNC);
tegra_ivc_flush(ivc, ivc->tx.phys + offset);
ivc->notify(ivc, ivc->notify_data);
}
EXPORT_SYMBOL(tegra_ivc_reset);
/*
* =======================================================
* IVC State Transition Table - see tegra_ivc_notified()
* =======================================================
*
* local remote action
* ----- ------ -----------------------------------
* SYNC EST <none>
* SYNC ACK reset counters; move to EST; notify
* SYNC SYNC reset counters; move to ACK; notify
* ACK EST move to EST; notify
* ACK ACK move to EST; notify
* ACK SYNC reset counters; move to ACK; notify
* EST EST <none>
* EST ACK <none>
* EST SYNC reset counters; move to ACK; notify
*
* ===============================================================
*/
int tegra_ivc_notified(struct tegra_ivc *ivc)
{
unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
enum tegra_ivc_state rx_state, tx_state;
/* Copy the receiver's state out of shared memory. */
tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
rx_state = tegra_ivc_header_read_field(&ivc->rx.map, tx.state);
tx_state = tegra_ivc_header_read_field(&ivc->tx.map, tx.state);
if (rx_state == TEGRA_IVC_STATE_SYNC) {
offset = offsetof(struct tegra_ivc_header, tx.count);
/*
* Order observation of TEGRA_IVC_STATE_SYNC before stores
* clearing tx.channel.
*/
smp_rmb();
/*
* Reset tx.channel counters. The remote end is in the SYNC
* state and won't make progress until we change our state,
* so the counters are not in use at this time.
*/
tegra_ivc_header_write_field(&ivc->tx.map, tx.count, 0);
tegra_ivc_header_write_field(&ivc->rx.map, rx.count, 0);
ivc->tx.position = 0;
ivc->rx.position = 0;
/*
* Ensure that counters appear cleared before new state can be
* observed.
*/
smp_wmb();
/*
* Move to ACK state. We have just cleared our counters, so it
* is now safe for the remote end to start using these values.
*/
tegra_ivc_header_write_field(&ivc->tx.map, tx.state, TEGRA_IVC_STATE_ACK);
tegra_ivc_flush(ivc, ivc->tx.phys + offset);
/*
* Notify remote end to observe state transition.
*/
ivc->notify(ivc, ivc->notify_data);
} else if (tx_state == TEGRA_IVC_STATE_SYNC &&
rx_state == TEGRA_IVC_STATE_ACK) {
offset = offsetof(struct tegra_ivc_header, tx.count);
/*
		 * Order observation of TEGRA_IVC_STATE_SYNC before stores
		 * clearing tx.channel.
*/
smp_rmb();
/*
* Reset tx.channel counters. The remote end is in the ACK
* state and won't make progress until we change our state,
* so the counters are not in use at this time.
*/
tegra_ivc_header_write_field(&ivc->tx.map, tx.count, 0);
tegra_ivc_header_write_field(&ivc->rx.map, rx.count, 0);
ivc->tx.position = 0;
ivc->rx.position = 0;
/*
* Ensure that counters appear cleared before new state can be
* observed.
*/
smp_wmb();
/*
* Move to ESTABLISHED state. We know that the remote end has
* already cleared its counters, so it is safe to start
* writing/reading on this channel.
*/
tegra_ivc_header_write_field(&ivc->tx.map, tx.state, TEGRA_IVC_STATE_ESTABLISHED);
tegra_ivc_flush(ivc, ivc->tx.phys + offset);
/*
* Notify remote end to observe state transition.
*/
ivc->notify(ivc, ivc->notify_data);
} else if (tx_state == TEGRA_IVC_STATE_ACK) {
offset = offsetof(struct tegra_ivc_header, tx.count);
/*
* At this point, we have observed the peer to be in either
* the ACK or ESTABLISHED state. Next, order observation of
* peer state before storing to tx.channel.
*/
smp_rmb();
/*
* Move to ESTABLISHED state. We know that we have previously
* cleared our counters, and we know that the remote end has
* cleared its counters, so it is safe to start writing/reading
* on this channel.
*/
tegra_ivc_header_write_field(&ivc->tx.map, tx.state, TEGRA_IVC_STATE_ESTABLISHED);
tegra_ivc_flush(ivc, ivc->tx.phys + offset);
/*
* Notify remote end to observe state transition.
*/
ivc->notify(ivc, ivc->notify_data);
} else {
/*
* There is no need to handle any further action. Either the
* channel is already fully established, or we are waiting for
* the remote end to catch up with our current state. Refer
* to the diagram in "IVC State Transition Table" above.
*/
}
if (tx_state != TEGRA_IVC_STATE_ESTABLISHED)
return -EAGAIN;
return 0;
}
EXPORT_SYMBOL(tegra_ivc_notified);
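/*
 * Illustrative sketch, not part of the driver: how a client typically drives
 * the reset handshake documented in the state transition table above. The
 * polling loop and msleep() (which would need <linux/delay.h>) are
 * assumptions; real users are normally woken by a doorbell notification
 * instead of polling.
 */
static void __maybe_unused example_ivc_establish(struct tegra_ivc *ivc)
{
	tegra_ivc_reset(ivc);

	/* step the state machine until both ends reach ESTABLISHED */
	while (tegra_ivc_notified(ivc) == -EAGAIN)
		msleep(10);
}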
size_t tegra_ivc_align(size_t size)
{
return ALIGN(size, TEGRA_IVC_ALIGN);
}
EXPORT_SYMBOL(tegra_ivc_align);
unsigned tegra_ivc_total_queue_size(unsigned queue_size)
{
if (!IS_ALIGNED(queue_size, TEGRA_IVC_ALIGN)) {
pr_err("%s: queue_size (%u) must be %u-byte aligned\n",
__func__, queue_size, TEGRA_IVC_ALIGN);
return 0;
}
return queue_size + sizeof(struct tegra_ivc_header);
}
EXPORT_SYMBOL(tegra_ivc_total_queue_size);
static int tegra_ivc_check_params(unsigned long rx, unsigned long tx,
unsigned int num_frames, size_t frame_size)
{
BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, tx.count),
TEGRA_IVC_ALIGN));
BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, rx.count),
TEGRA_IVC_ALIGN));
BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct tegra_ivc_header),
TEGRA_IVC_ALIGN));
if ((uint64_t)num_frames * (uint64_t)frame_size >= 0x100000000UL) {
pr_err("num_frames * frame_size overflows\n");
return -EINVAL;
}
if (!IS_ALIGNED(frame_size, TEGRA_IVC_ALIGN)) {
pr_err("frame size not adequately aligned: %zu\n", frame_size);
return -EINVAL;
}
/*
* The headers must at least be aligned enough for counters
* to be accessed atomically.
*/
if (!IS_ALIGNED(rx, TEGRA_IVC_ALIGN)) {
pr_err("IVC channel start not aligned: %#lx\n", rx);
return -EINVAL;
}
if (!IS_ALIGNED(tx, TEGRA_IVC_ALIGN)) {
pr_err("IVC channel start not aligned: %#lx\n", tx);
return -EINVAL;
}
if (rx < tx) {
if (rx + frame_size * num_frames > tx) {
pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
rx, frame_size * num_frames, tx);
return -EINVAL;
}
} else {
if (tx + frame_size * num_frames > rx) {
pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
tx, frame_size * num_frames, rx);
return -EINVAL;
}
}
return 0;
}
static inline void iosys_map_copy(struct iosys_map *dst, const struct iosys_map *src)
{
*dst = *src;
}
static inline unsigned long iosys_map_get_address(const struct iosys_map *map)
{
if (map->is_iomem)
return (unsigned long)map->vaddr_iomem;
return (unsigned long)map->vaddr;
}
static inline void *iosys_map_get_vaddr(const struct iosys_map *map)
{
if (WARN_ON(map->is_iomem))
return NULL;
return map->vaddr;
}
int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, const struct iosys_map *rx,
dma_addr_t rx_phys, const struct iosys_map *tx, dma_addr_t tx_phys,
unsigned int num_frames, size_t frame_size,
void (*notify)(struct tegra_ivc *ivc, void *data),
void *data)
{
size_t queue_size;
int err;
if (WARN_ON(!ivc || !notify))
return -EINVAL;
/*
* All sizes that can be returned by communication functions should
* fit in an int.
*/
if (frame_size > INT_MAX)
return -E2BIG;
err = tegra_ivc_check_params(iosys_map_get_address(rx), iosys_map_get_address(tx),
num_frames, frame_size);
if (err < 0)
return err;
queue_size = tegra_ivc_total_queue_size(num_frames * frame_size);
if (peer) {
ivc->rx.phys = dma_map_single(peer, iosys_map_get_vaddr(rx), queue_size,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(peer, ivc->rx.phys))
return -ENOMEM;
ivc->tx.phys = dma_map_single(peer, iosys_map_get_vaddr(tx), queue_size,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(peer, ivc->tx.phys)) {
dma_unmap_single(peer, ivc->rx.phys, queue_size,
DMA_BIDIRECTIONAL);
return -ENOMEM;
}
} else {
ivc->rx.phys = rx_phys;
ivc->tx.phys = tx_phys;
}
iosys_map_copy(&ivc->rx.map, rx);
iosys_map_copy(&ivc->tx.map, tx);
ivc->peer = peer;
ivc->notify = notify;
ivc->notify_data = data;
ivc->frame_size = frame_size;
ivc->num_frames = num_frames;
/*
* These values aren't necessarily correct until the channel has been
* reset.
*/
ivc->tx.position = 0;
ivc->rx.position = 0;
return 0;
}
EXPORT_SYMBOL(tegra_ivc_init);
void tegra_ivc_cleanup(struct tegra_ivc *ivc)
{
if (ivc->peer) {
size_t size = tegra_ivc_total_queue_size(ivc->num_frames *
ivc->frame_size);
dma_unmap_single(ivc->peer, ivc->rx.phys, size,
DMA_BIDIRECTIONAL);
dma_unmap_single(ivc->peer, ivc->tx.phys, size,
DMA_BIDIRECTIONAL);
}
}
EXPORT_SYMBOL(tegra_ivc_cleanup);
| linux-master | drivers/firmware/tegra/ivc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
static DEFINE_MUTEX(bpmp_debug_lock);
struct seqbuf {
char *buf;
size_t pos;
size_t size;
};
static void seqbuf_init(struct seqbuf *seqbuf, void *buf, size_t size)
{
seqbuf->buf = buf;
seqbuf->size = size;
seqbuf->pos = 0;
}
static size_t seqbuf_avail(struct seqbuf *seqbuf)
{
return seqbuf->pos < seqbuf->size ? seqbuf->size - seqbuf->pos : 0;
}
static int seqbuf_status(struct seqbuf *seqbuf)
{
return seqbuf->pos <= seqbuf->size ? 0 : -EOVERFLOW;
}
static int seqbuf_eof(struct seqbuf *seqbuf)
{
return seqbuf->pos >= seqbuf->size;
}
static int seqbuf_read(struct seqbuf *seqbuf, void *buf, size_t nbyte)
{
nbyte = min(nbyte, seqbuf_avail(seqbuf));
memcpy(buf, seqbuf->buf + seqbuf->pos, nbyte);
seqbuf->pos += nbyte;
return seqbuf_status(seqbuf);
}
static int seqbuf_read_u32(struct seqbuf *seqbuf, u32 *v)
{
return seqbuf_read(seqbuf, v, 4);
}
static int seqbuf_read_str(struct seqbuf *seqbuf, const char **str)
{
*str = seqbuf->buf + seqbuf->pos;
seqbuf->pos += strnlen(*str, seqbuf_avail(seqbuf));
seqbuf->pos++;
return seqbuf_status(seqbuf);
}
static void seqbuf_seek(struct seqbuf *seqbuf, ssize_t offset)
{
seqbuf->pos += offset;
}
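/*
 * Illustrative sketch, not part of the driver: the directory listing parsed
 * with the seqbuf helpers above is a sequence of (u32 attributes,
 * NUL-terminated name) records. The buffer below is fabricated (little-endian
 * attribute words, arbitrary attribute values).
 */
static void __maybe_unused example_parse_listing(void)
{
	static const char listing[] = {
		0x01, 0x00, 0x00, 0x00, 'c', 'l', 'k', '\0',		/* "clk" */
		0x04, 0x00, 0x00, 0x00, 'h', 'e', 'l', 'p', '\0',	/* "help" */
	};
	struct seqbuf seqbuf;
	const char *name;
	u32 attrs;

	seqbuf_init(&seqbuf, (void *)listing, sizeof(listing));
	while (!seqbuf_eof(&seqbuf)) {
		if (seqbuf_read_u32(&seqbuf, &attrs) ||
		    seqbuf_read_str(&seqbuf, &name))
			break;
		pr_debug("attrs=%#x name=%s\n", attrs, name);
	}
}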
/* map a filename in Linux debugfs to the corresponding entry in the BPMP */
static const char *get_filename(struct tegra_bpmp *bpmp,
const struct file *file, char *buf, int size)
{
const char *root_path, *filename = NULL;
char *root_path_buf;
size_t root_len;
size_t root_path_buf_len = 512;
root_path_buf = kzalloc(root_path_buf_len, GFP_KERNEL);
if (!root_path_buf)
goto out;
root_path = dentry_path(bpmp->debugfs_mirror, root_path_buf,
root_path_buf_len);
if (IS_ERR(root_path))
goto out;
root_len = strlen(root_path);
filename = dentry_path(file->f_path.dentry, buf, size);
if (IS_ERR(filename)) {
filename = NULL;
goto out;
}
if (strlen(filename) < root_len || strncmp(filename, root_path, root_len)) {
filename = NULL;
goto out;
}
filename += root_len;
out:
kfree(root_path_buf);
return filename;
}
static int mrq_debug_open(struct tegra_bpmp *bpmp, const char *name,
u32 *fd, u32 *len, bool write)
{
struct mrq_debug_request req = {
.cmd = write ? CMD_DEBUG_OPEN_WO : CMD_DEBUG_OPEN_RO,
};
struct mrq_debug_response resp;
struct tegra_bpmp_message msg = {
.mrq = MRQ_DEBUG,
.tx = {
.data = &req,
.size = sizeof(req),
},
.rx = {
.data = &resp,
.size = sizeof(resp),
},
};
ssize_t sz_name;
int err = 0;
sz_name = strscpy(req.fop.name, name, sizeof(req.fop.name));
if (sz_name < 0) {
pr_err("File name too large: %s\n", name);
return -EINVAL;
}
err = tegra_bpmp_transfer(bpmp, &msg);
if (err < 0)
return err;
else if (msg.rx.ret < 0)
return -EINVAL;
*len = resp.fop.datalen;
*fd = resp.fop.fd;
return 0;
}
static int mrq_debug_close(struct tegra_bpmp *bpmp, u32 fd)
{
struct mrq_debug_request req = {
.cmd = CMD_DEBUG_CLOSE,
.frd = {
.fd = fd,
},
};
struct mrq_debug_response resp;
struct tegra_bpmp_message msg = {
.mrq = MRQ_DEBUG,
.tx = {
.data = &req,
.size = sizeof(req),
},
.rx = {
.data = &resp,
.size = sizeof(resp),
},
};
int err = 0;
err = tegra_bpmp_transfer(bpmp, &msg);
if (err < 0)
return err;
else if (msg.rx.ret < 0)
return -EINVAL;
return 0;
}
static int mrq_debug_read(struct tegra_bpmp *bpmp, const char *name,
char *data, size_t sz_data, u32 *nbytes)
{
struct mrq_debug_request req = {
.cmd = CMD_DEBUG_READ,
};
struct mrq_debug_response resp;
struct tegra_bpmp_message msg = {
.mrq = MRQ_DEBUG,
.tx = {
.data = &req,
.size = sizeof(req),
},
.rx = {
.data = &resp,
.size = sizeof(resp),
},
};
u32 fd = 0, len = 0;
int remaining, err, close_err;
mutex_lock(&bpmp_debug_lock);
err = mrq_debug_open(bpmp, name, &fd, &len, 0);
if (err)
goto out;
if (len > sz_data) {
err = -EFBIG;
goto close;
}
req.frd.fd = fd;
remaining = len;
while (remaining > 0) {
err = tegra_bpmp_transfer(bpmp, &msg);
if (err < 0) {
goto close;
} else if (msg.rx.ret < 0) {
err = -EINVAL;
goto close;
}
if (resp.frd.readlen > remaining) {
pr_err("%s: read data length invalid\n", __func__);
err = -EINVAL;
goto close;
}
memcpy(data, resp.frd.data, resp.frd.readlen);
data += resp.frd.readlen;
remaining -= resp.frd.readlen;
}
*nbytes = len;
close:
close_err = mrq_debug_close(bpmp, fd);
if (!err)
err = close_err;
out:
mutex_unlock(&bpmp_debug_lock);
return err;
}
static int mrq_debug_write(struct tegra_bpmp *bpmp, const char *name,
uint8_t *data, size_t sz_data)
{
struct mrq_debug_request req = {
.cmd = CMD_DEBUG_WRITE
};
struct mrq_debug_response resp;
struct tegra_bpmp_message msg = {
.mrq = MRQ_DEBUG,
.tx = {
.data = &req,
.size = sizeof(req),
},
.rx = {
.data = &resp,
.size = sizeof(resp),
},
};
u32 fd = 0, len = 0;
size_t remaining;
int err;
mutex_lock(&bpmp_debug_lock);
err = mrq_debug_open(bpmp, name, &fd, &len, 1);
if (err)
goto out;
if (sz_data > len) {
err = -EINVAL;
goto close;
}
req.fwr.fd = fd;
remaining = sz_data;
while (remaining > 0) {
len = min(remaining, sizeof(req.fwr.data));
memcpy(req.fwr.data, data, len);
req.fwr.datalen = len;
err = tegra_bpmp_transfer(bpmp, &msg);
if (err < 0) {
goto close;
} else if (msg.rx.ret < 0) {
err = -EINVAL;
goto close;
}
data += req.fwr.datalen;
remaining -= req.fwr.datalen;
}
close:
err = mrq_debug_close(bpmp, fd);
out:
mutex_unlock(&bpmp_debug_lock);
return err;
}
static int bpmp_debug_show(struct seq_file *m, void *p)
{
struct file *file = m->private;
struct inode *inode = file_inode(file);
struct tegra_bpmp *bpmp = inode->i_private;
char fnamebuf[256];
const char *filename;
struct mrq_debug_request req = {
.cmd = CMD_DEBUG_READ,
};
struct mrq_debug_response resp;
struct tegra_bpmp_message msg = {
.mrq = MRQ_DEBUG,
.tx = {
.data = &req,
.size = sizeof(req),
},
.rx = {
.data = &resp,
.size = sizeof(resp),
},
};
u32 fd = 0, len = 0;
int remaining, err, close_err;
filename = get_filename(bpmp, file, fnamebuf, sizeof(fnamebuf));
if (!filename)
return -ENOENT;
mutex_lock(&bpmp_debug_lock);
err = mrq_debug_open(bpmp, filename, &fd, &len, 0);
if (err)
goto out;
req.frd.fd = fd;
remaining = len;
while (remaining > 0) {
err = tegra_bpmp_transfer(bpmp, &msg);
if (err < 0) {
goto close;
} else if (msg.rx.ret < 0) {
err = -EINVAL;
goto close;
}
if (resp.frd.readlen > remaining) {
pr_err("%s: read data length invalid\n", __func__);
err = -EINVAL;
goto close;
}
seq_write(m, resp.frd.data, resp.frd.readlen);
remaining -= resp.frd.readlen;
}
close:
close_err = mrq_debug_close(bpmp, fd);
if (!err)
err = close_err;
out:
mutex_unlock(&bpmp_debug_lock);
return err;
}
static ssize_t bpmp_debug_store(struct file *file, const char __user *buf,
size_t count, loff_t *f_pos)
{
struct inode *inode = file_inode(file);
struct tegra_bpmp *bpmp = inode->i_private;
char *databuf = NULL;
char fnamebuf[256];
const char *filename;
ssize_t err;
filename = get_filename(bpmp, file, fnamebuf, sizeof(fnamebuf));
if (!filename)
return -ENOENT;
databuf = memdup_user(buf, count);
if (IS_ERR(databuf))
return PTR_ERR(databuf);
err = mrq_debug_write(bpmp, filename, databuf, count);
kfree(databuf);
return err ?: count;
}
static int bpmp_debug_open(struct inode *inode, struct file *file)
{
return single_open_size(file, bpmp_debug_show, file, SZ_256K);
}
static const struct file_operations bpmp_debug_fops = {
.open = bpmp_debug_open,
.read = seq_read,
.llseek = seq_lseek,
.write = bpmp_debug_store,
.release = single_release,
};
static int bpmp_populate_debugfs_inband(struct tegra_bpmp *bpmp,
struct dentry *parent,
char *ppath)
{
const size_t pathlen = SZ_256;
const size_t bufsize = SZ_16K;
struct dentry *dentry;
u32 dsize, attrs = 0;
struct seqbuf seqbuf;
char *buf, *pathbuf;
const char *name;
int err = 0;
if (!bpmp || !parent || !ppath)
return -EINVAL;
buf = kmalloc(bufsize, GFP_KERNEL);
if (!buf)
return -ENOMEM;
pathbuf = kzalloc(pathlen, GFP_KERNEL);
if (!pathbuf) {
kfree(buf);
return -ENOMEM;
}
err = mrq_debug_read(bpmp, ppath, buf, bufsize, &dsize);
if (err)
goto out;
seqbuf_init(&seqbuf, buf, dsize);
while (!seqbuf_eof(&seqbuf)) {
err = seqbuf_read_u32(&seqbuf, &attrs);
if (err)
goto out;
err = seqbuf_read_str(&seqbuf, &name);
if (err < 0)
goto out;
if (attrs & DEBUGFS_S_ISDIR) {
size_t len;
dentry = debugfs_create_dir(name, parent);
if (IS_ERR(dentry)) {
err = PTR_ERR(dentry);
goto out;
}
len = snprintf(pathbuf, pathlen, "%s%s/", ppath, name);
if (len >= pathlen) {
err = -EINVAL;
goto out;
}
err = bpmp_populate_debugfs_inband(bpmp, dentry,
pathbuf);
if (err < 0)
goto out;
} else {
umode_t mode;
mode = attrs & DEBUGFS_S_IRUSR ? 0400 : 0;
mode |= attrs & DEBUGFS_S_IWUSR ? 0200 : 0;
dentry = debugfs_create_file(name, mode, parent, bpmp,
&bpmp_debug_fops);
if (IS_ERR(dentry)) {
err = -ENOMEM;
goto out;
}
}
}
out:
kfree(pathbuf);
kfree(buf);
return err;
}
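/*
 * The functions below implement the older MRQ_DEBUGFS interface, where the
 * filename and file data are exchanged with the BPMP through DMA-addressable
 * buffers instead of in-band message payloads.
 */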
static int mrq_debugfs_read(struct tegra_bpmp *bpmp,
dma_addr_t name, size_t sz_name,
dma_addr_t data, size_t sz_data,
size_t *nbytes)
{
struct mrq_debugfs_request req = {
.cmd = CMD_DEBUGFS_READ,
.fop = {
.fnameaddr = (u32)name,
.fnamelen = (u32)sz_name,
.dataaddr = (u32)data,
.datalen = (u32)sz_data,
},
};
struct mrq_debugfs_response resp;
struct tegra_bpmp_message msg = {
.mrq = MRQ_DEBUGFS,
.tx = {
.data = &req,
.size = sizeof(req),
},
.rx = {
.data = &resp,
.size = sizeof(resp),
},
};
int err;
err = tegra_bpmp_transfer(bpmp, &msg);
if (err < 0)
return err;
else if (msg.rx.ret < 0)
return -EINVAL;
*nbytes = (size_t)resp.fop.nbytes;
return 0;
}
static int mrq_debugfs_write(struct tegra_bpmp *bpmp,
dma_addr_t name, size_t sz_name,
dma_addr_t data, size_t sz_data)
{
const struct mrq_debugfs_request req = {
.cmd = CMD_DEBUGFS_WRITE,
.fop = {
.fnameaddr = (u32)name,
.fnamelen = (u32)sz_name,
.dataaddr = (u32)data,
.datalen = (u32)sz_data,
},
};
struct tegra_bpmp_message msg = {
.mrq = MRQ_DEBUGFS,
.tx = {
.data = &req,
.size = sizeof(req),
},
};
return tegra_bpmp_transfer(bpmp, &msg);
}
static int mrq_debugfs_dumpdir(struct tegra_bpmp *bpmp, dma_addr_t addr,
size_t size, size_t *nbytes)
{
const struct mrq_debugfs_request req = {
.cmd = CMD_DEBUGFS_DUMPDIR,
.dumpdir = {
.dataaddr = (u32)addr,
.datalen = (u32)size,
},
};
struct mrq_debugfs_response resp;
struct tegra_bpmp_message msg = {
.mrq = MRQ_DEBUGFS,
.tx = {
.data = &req,
.size = sizeof(req),
},
.rx = {
.data = &resp,
.size = sizeof(resp),
},
};
int err;
err = tegra_bpmp_transfer(bpmp, &msg);
if (err < 0)
return err;
else if (msg.rx.ret < 0)
return -EINVAL;
*nbytes = (size_t)resp.dumpdir.nbytes;
return 0;
}
static int debugfs_show(struct seq_file *m, void *p)
{
struct file *file = m->private;
struct inode *inode = file_inode(file);
struct tegra_bpmp *bpmp = inode->i_private;
const size_t datasize = m->size;
const size_t namesize = SZ_256;
void *datavirt, *namevirt;
dma_addr_t dataphys, namephys;
char buf[256];
const char *filename;
size_t len, nbytes;
int err;
filename = get_filename(bpmp, file, buf, sizeof(buf));
if (!filename)
return -ENOENT;
namevirt = dma_alloc_coherent(bpmp->dev, namesize, &namephys,
GFP_KERNEL | GFP_DMA32);
if (!namevirt)
return -ENOMEM;
datavirt = dma_alloc_coherent(bpmp->dev, datasize, &dataphys,
GFP_KERNEL | GFP_DMA32);
if (!datavirt) {
err = -ENOMEM;
goto free_namebuf;
}
len = strlen(filename);
strncpy(namevirt, filename, namesize);
err = mrq_debugfs_read(bpmp, namephys, len, dataphys, datasize,
&nbytes);
if (!err)
seq_write(m, datavirt, nbytes);
dma_free_coherent(bpmp->dev, datasize, datavirt, dataphys);
free_namebuf:
dma_free_coherent(bpmp->dev, namesize, namevirt, namephys);
return err;
}
static int debugfs_open(struct inode *inode, struct file *file)
{
return single_open_size(file, debugfs_show, file, SZ_128K);
}
static ssize_t debugfs_store(struct file *file, const char __user *buf,
size_t count, loff_t *f_pos)
{
struct inode *inode = file_inode(file);
struct tegra_bpmp *bpmp = inode->i_private;
const size_t datasize = count;
const size_t namesize = SZ_256;
void *datavirt, *namevirt;
dma_addr_t dataphys, namephys;
char fnamebuf[256];
const char *filename;
size_t len;
int err;
filename = get_filename(bpmp, file, fnamebuf, sizeof(fnamebuf));
if (!filename)
return -ENOENT;
namevirt = dma_alloc_coherent(bpmp->dev, namesize, &namephys,
GFP_KERNEL | GFP_DMA32);
if (!namevirt)
return -ENOMEM;
datavirt = dma_alloc_coherent(bpmp->dev, datasize, &dataphys,
GFP_KERNEL | GFP_DMA32);
if (!datavirt) {
err = -ENOMEM;
goto free_namebuf;
}
len = strlen(filename);
strncpy(namevirt, filename, namesize);
if (copy_from_user(datavirt, buf, count)) {
err = -EFAULT;
goto free_databuf;
}
err = mrq_debugfs_write(bpmp, namephys, len, dataphys,
count);
free_databuf:
dma_free_coherent(bpmp->dev, datasize, datavirt, dataphys);
free_namebuf:
dma_free_coherent(bpmp->dev, namesize, namevirt, namephys);
return err ?: count;
}
static const struct file_operations debugfs_fops = {
.open = debugfs_open,
.read = seq_read,
.llseek = seq_lseek,
.write = debugfs_store,
.release = single_release,
};
static int bpmp_populate_dir(struct tegra_bpmp *bpmp, struct seqbuf *seqbuf,
struct dentry *parent, u32 depth)
{
int err;
u32 d, t;
const char *name;
struct dentry *dentry;
while (!seqbuf_eof(seqbuf)) {
err = seqbuf_read_u32(seqbuf, &d);
if (err < 0)
return err;
if (d < depth) {
seqbuf_seek(seqbuf, -4);
/* go up a level */
return 0;
} else if (d != depth) {
/* malformed data received from BPMP */
return -EIO;
}
err = seqbuf_read_u32(seqbuf, &t);
if (err < 0)
return err;
err = seqbuf_read_str(seqbuf, &name);
if (err < 0)
return err;
if (t & DEBUGFS_S_ISDIR) {
dentry = debugfs_create_dir(name, parent);
if (IS_ERR(dentry))
return -ENOMEM;
err = bpmp_populate_dir(bpmp, seqbuf, dentry, depth+1);
if (err < 0)
return err;
} else {
umode_t mode;
mode = t & DEBUGFS_S_IRUSR ? S_IRUSR : 0;
mode |= t & DEBUGFS_S_IWUSR ? S_IWUSR : 0;
dentry = debugfs_create_file(name, mode,
parent, bpmp,
&debugfs_fops);
if (IS_ERR(dentry))
return -ENOMEM;
}
}
return 0;
}
static int bpmp_populate_debugfs_shmem(struct tegra_bpmp *bpmp)
{
struct seqbuf seqbuf;
const size_t sz = SZ_512K;
dma_addr_t phys;
size_t nbytes;
void *virt;
int err;
virt = dma_alloc_coherent(bpmp->dev, sz, &phys,
GFP_KERNEL | GFP_DMA32);
if (!virt)
return -ENOMEM;
err = mrq_debugfs_dumpdir(bpmp, phys, sz, &nbytes);
if (err < 0) {
goto free;
} else if (nbytes > sz) {
err = -EINVAL;
goto free;
}
seqbuf_init(&seqbuf, virt, nbytes);
err = bpmp_populate_dir(bpmp, &seqbuf, bpmp->debugfs_mirror, 0);
free:
dma_free_coherent(bpmp->dev, sz, virt, phys);
return err;
}
int tegra_bpmp_init_debugfs(struct tegra_bpmp *bpmp)
{
struct dentry *root;
bool inband;
int err;
inband = tegra_bpmp_mrq_is_supported(bpmp, MRQ_DEBUG);
if (!inband && !tegra_bpmp_mrq_is_supported(bpmp, MRQ_DEBUGFS))
return 0;
root = debugfs_create_dir("bpmp", NULL);
if (IS_ERR(root))
return -ENOMEM;
bpmp->debugfs_mirror = debugfs_create_dir("debug", root);
if (IS_ERR(bpmp->debugfs_mirror)) {
err = -ENOMEM;
goto out;
}
if (inband)
err = bpmp_populate_debugfs_inband(bpmp, bpmp->debugfs_mirror,
"/");
else
err = bpmp_populate_debugfs_shmem(bpmp);
out:
if (err < 0)
debugfs_remove_recursive(root);
return err;
}
| linux-master | drivers/firmware/tegra/bpmp-debugfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*/
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <soc/tegra/bpmp.h>
#include "bpmp-private.h"
#define TRIGGER_OFFSET 0x000
#define RESULT_OFFSET(id)	(0xc00 + (id) * 4)
#define TRIGGER_ID_SHIFT 16
#define TRIGGER_CMD_GET 4
#define STA_OFFSET 0
#define SET_OFFSET 4
#define CLR_OFFSET 8
#define CH_MASK(ch) (0x3 << ((ch) * 2))
#define SL_SIGL(ch) (0x0 << ((ch) * 2))
#define SL_QUED(ch) (0x1 << ((ch) * 2))
#define MA_FREE(ch) (0x2 << ((ch) * 2))
#define MA_ACKD(ch) (0x3 << ((ch) * 2))
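/*
 * Illustrative note (editorial addition, not part of the original source):
 * each channel occupies a 2-bit field of the arbitration semaphore register,
 * located at bit position (channel * 2). For a hypothetical channel index of
 * 3, the macros above evaluate to:
 *   CH_MASK(3) = 0x3 << 6 = 0xc0
 *   SL_SIGL(3) = 0x0 << 6 = 0x00
 *   SL_QUED(3) = 0x1 << 6 = 0x40
 *   MA_FREE(3) = 0x2 << 6 = 0x80
 *   MA_ACKD(3) = 0x3 << 6 = 0xc0
 */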
struct tegra210_bpmp {
void __iomem *atomics;
void __iomem *arb_sema;
struct irq_data *tx_irq_data;
};
static u32 bpmp_channel_status(struct tegra_bpmp *bpmp, unsigned int index)
{
struct tegra210_bpmp *priv = bpmp->priv;
return __raw_readl(priv->arb_sema + STA_OFFSET) & CH_MASK(index);
}
static bool tegra210_bpmp_is_response_ready(struct tegra_bpmp_channel *channel)
{
unsigned int index = channel->index;
return bpmp_channel_status(channel->bpmp, index) == MA_ACKD(index);
}
static bool tegra210_bpmp_is_request_ready(struct tegra_bpmp_channel *channel)
{
unsigned int index = channel->index;
return bpmp_channel_status(channel->bpmp, index) == SL_SIGL(index);
}
static bool
tegra210_bpmp_is_request_channel_free(struct tegra_bpmp_channel *channel)
{
unsigned int index = channel->index;
return bpmp_channel_status(channel->bpmp, index) == MA_FREE(index);
}
static bool
tegra210_bpmp_is_response_channel_free(struct tegra_bpmp_channel *channel)
{
unsigned int index = channel->index;
return bpmp_channel_status(channel->bpmp, index) == SL_QUED(index);
}
static int tegra210_bpmp_post_request(struct tegra_bpmp_channel *channel)
{
struct tegra210_bpmp *priv = channel->bpmp->priv;
__raw_writel(CH_MASK(channel->index), priv->arb_sema + CLR_OFFSET);
return 0;
}
static int tegra210_bpmp_post_response(struct tegra_bpmp_channel *channel)
{
struct tegra210_bpmp *priv = channel->bpmp->priv;
__raw_writel(MA_ACKD(channel->index), priv->arb_sema + SET_OFFSET);
return 0;
}
static int tegra210_bpmp_ack_response(struct tegra_bpmp_channel *channel)
{
struct tegra210_bpmp *priv = channel->bpmp->priv;
__raw_writel(MA_ACKD(channel->index) ^ MA_FREE(channel->index),
priv->arb_sema + CLR_OFFSET);
return 0;
}
static int tegra210_bpmp_ack_request(struct tegra_bpmp_channel *channel)
{
struct tegra210_bpmp *priv = channel->bpmp->priv;
__raw_writel(SL_QUED(channel->index), priv->arb_sema + SET_OFFSET);
return 0;
}
static int tegra210_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
{
struct tegra210_bpmp *priv = bpmp->priv;
struct irq_data *irq_data = priv->tx_irq_data;
/*
* Tegra Legacy Interrupt Controller (LIC) is used to notify BPMP of
* available messages
*/
if (irq_data->chip->irq_retrigger)
return irq_data->chip->irq_retrigger(irq_data);
return -EINVAL;
}
static irqreturn_t rx_irq(int irq, void *data)
{
struct tegra_bpmp *bpmp = data;
tegra_bpmp_handle_rx(bpmp);
return IRQ_HANDLED;
}
static int tegra210_bpmp_channel_init(struct tegra_bpmp_channel *channel,
struct tegra_bpmp *bpmp,
unsigned int index)
{
struct tegra210_bpmp *priv = bpmp->priv;
void __iomem *p;
u32 address;
/* Retrieve channel base address from BPMP */
writel(index << TRIGGER_ID_SHIFT | TRIGGER_CMD_GET,
priv->atomics + TRIGGER_OFFSET);
address = readl(priv->atomics + RESULT_OFFSET(index));
p = devm_ioremap(bpmp->dev, address, 0x80);
if (!p)
return -ENOMEM;
iosys_map_set_vaddr_iomem(&channel->ib, p);
iosys_map_set_vaddr_iomem(&channel->ob, p);
channel->index = index;
init_completion(&channel->completion);
channel->bpmp = bpmp;
return 0;
}
static int tegra210_bpmp_init(struct tegra_bpmp *bpmp)
{
struct platform_device *pdev = to_platform_device(bpmp->dev);
struct tegra210_bpmp *priv;
unsigned int i;
int err;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
bpmp->priv = priv;
priv->atomics = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->atomics))
return PTR_ERR(priv->atomics);
priv->arb_sema = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(priv->arb_sema))
return PTR_ERR(priv->arb_sema);
err = tegra210_bpmp_channel_init(bpmp->tx_channel, bpmp,
bpmp->soc->channels.cpu_tx.offset);
if (err < 0)
return err;
err = tegra210_bpmp_channel_init(bpmp->rx_channel, bpmp,
bpmp->soc->channels.cpu_rx.offset);
if (err < 0)
return err;
for (i = 0; i < bpmp->threaded.count; i++) {
unsigned int index = bpmp->soc->channels.thread.offset + i;
err = tegra210_bpmp_channel_init(&bpmp->threaded_channels[i],
bpmp, index);
if (err < 0)
return err;
}
err = platform_get_irq_byname(pdev, "tx");
if (err < 0)
return err;
priv->tx_irq_data = irq_get_irq_data(err);
if (!priv->tx_irq_data) {
dev_err(&pdev->dev, "failed to get IRQ data for TX IRQ\n");
return -ENOENT;
}
err = platform_get_irq_byname(pdev, "rx");
if (err < 0)
return err;
err = devm_request_irq(&pdev->dev, err, rx_irq,
IRQF_NO_SUSPEND, dev_name(&pdev->dev), bpmp);
if (err < 0) {
dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
return err;
}
return 0;
}
const struct tegra_bpmp_ops tegra210_bpmp_ops = {
.init = tegra210_bpmp_init,
.is_response_ready = tegra210_bpmp_is_response_ready,
.is_request_ready = tegra210_bpmp_is_request_ready,
.ack_response = tegra210_bpmp_ack_response,
.ack_request = tegra210_bpmp_ack_request,
.is_response_channel_free = tegra210_bpmp_is_response_channel_free,
.is_request_channel_free = tegra210_bpmp_is_request_channel_free,
.post_response = tegra210_bpmp_post_response,
.post_request = tegra210_bpmp_post_request,
.ring_doorbell = tegra210_bpmp_ring_doorbell,
};
| linux-master | drivers/firmware/tegra/bpmp-tegra210.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/clk/tegra.h>
#include <linux/genalloc.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/semaphore.h>
#include <linux/sched/clock.h>
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
#include <soc/tegra/ivc.h>
#include "bpmp-private.h"
#define MSG_ACK BIT(0)
#define MSG_RING BIT(1)
#define TAG_SZ 32
static inline struct tegra_bpmp *
mbox_client_to_bpmp(struct mbox_client *client)
{
return container_of(client, struct tegra_bpmp, mbox.client);
}
static inline const struct tegra_bpmp_ops *
channel_to_ops(struct tegra_bpmp_channel *channel)
{
struct tegra_bpmp *bpmp = channel->bpmp;
return bpmp->soc->ops;
}
struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
{
struct platform_device *pdev;
struct tegra_bpmp *bpmp;
struct device_node *np;
np = of_parse_phandle(dev->of_node, "nvidia,bpmp", 0);
if (!np)
return ERR_PTR(-ENOENT);
pdev = of_find_device_by_node(np);
if (!pdev) {
bpmp = ERR_PTR(-ENODEV);
goto put;
}
bpmp = platform_get_drvdata(pdev);
if (!bpmp) {
bpmp = ERR_PTR(-EPROBE_DEFER);
put_device(&pdev->dev);
goto put;
}
put:
of_node_put(np);
return bpmp;
}
EXPORT_SYMBOL_GPL(tegra_bpmp_get);
void tegra_bpmp_put(struct tegra_bpmp *bpmp)
{
if (bpmp)
put_device(bpmp->dev);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_put);
static int
tegra_bpmp_channel_get_thread_index(struct tegra_bpmp_channel *channel)
{
struct tegra_bpmp *bpmp = channel->bpmp;
unsigned int count;
int index;
count = bpmp->soc->channels.thread.count;
index = channel - channel->bpmp->threaded_channels;
if (index < 0 || index >= count)
return -EINVAL;
return index;
}
static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg)
{
return (msg->tx.size <= MSG_DATA_MIN_SZ) &&
(msg->rx.size <= MSG_DATA_MIN_SZ) &&
(msg->tx.size == 0 || msg->tx.data) &&
(msg->rx.size == 0 || msg->rx.data);
}
static bool tegra_bpmp_is_response_ready(struct tegra_bpmp_channel *channel)
{
const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
return ops->is_response_ready(channel);
}
static bool tegra_bpmp_is_request_ready(struct tegra_bpmp_channel *channel)
{
const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
return ops->is_request_ready(channel);
}
static int tegra_bpmp_wait_response(struct tegra_bpmp_channel *channel)
{
unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
ktime_t end;
end = ktime_add_us(ktime_get(), timeout);
do {
if (tegra_bpmp_is_response_ready(channel))
return 0;
} while (ktime_before(ktime_get(), end));
return -ETIMEDOUT;
}
static int tegra_bpmp_ack_response(struct tegra_bpmp_channel *channel)
{
const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
return ops->ack_response(channel);
}
static int tegra_bpmp_ack_request(struct tegra_bpmp_channel *channel)
{
const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
return ops->ack_request(channel);
}
static bool
tegra_bpmp_is_request_channel_free(struct tegra_bpmp_channel *channel)
{
const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
return ops->is_request_channel_free(channel);
}
static bool
tegra_bpmp_is_response_channel_free(struct tegra_bpmp_channel *channel)
{
const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
return ops->is_response_channel_free(channel);
}
static int
tegra_bpmp_wait_request_channel_free(struct tegra_bpmp_channel *channel)
{
unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
ktime_t start, now;
start = ns_to_ktime(local_clock());
do {
if (tegra_bpmp_is_request_channel_free(channel))
return 0;
now = ns_to_ktime(local_clock());
} while (ktime_us_delta(now, start) < timeout);
return -ETIMEDOUT;
}
static int tegra_bpmp_post_request(struct tegra_bpmp_channel *channel)
{
const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
return ops->post_request(channel);
}
static int tegra_bpmp_post_response(struct tegra_bpmp_channel *channel)
{
const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
return ops->post_response(channel);
}
static int tegra_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
{
return bpmp->soc->ops->ring_doorbell(bpmp);
}
static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
void *data, size_t size, int *ret)
{
int err;
if (data && size > 0)
tegra_bpmp_mb_read(data, &channel->ib, size);
err = tegra_bpmp_ack_response(channel);
if (err < 0)
return err;
*ret = tegra_bpmp_mb_read_field(&channel->ib, code);
return 0;
}
static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
void *data, size_t size, int *ret)
{
struct tegra_bpmp *bpmp = channel->bpmp;
unsigned long flags;
ssize_t err;
int index;
index = tegra_bpmp_channel_get_thread_index(channel);
if (index < 0) {
err = index;
goto unlock;
}
spin_lock_irqsave(&bpmp->lock, flags);
err = __tegra_bpmp_channel_read(channel, data, size, ret);
clear_bit(index, bpmp->threaded.allocated);
spin_unlock_irqrestore(&bpmp->lock, flags);
unlock:
up(&bpmp->threaded.lock);
return err;
}
static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
unsigned int mrq, unsigned long flags,
const void *data, size_t size)
{
tegra_bpmp_mb_write_field(&channel->ob, code, mrq);
tegra_bpmp_mb_write_field(&channel->ob, flags, flags);
if (data && size > 0)
tegra_bpmp_mb_write(&channel->ob, data, size);
return tegra_bpmp_post_request(channel);
}
static struct tegra_bpmp_channel *
tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq,
const void *data, size_t size)
{
unsigned long timeout = bpmp->soc->channels.thread.timeout;
unsigned int count = bpmp->soc->channels.thread.count;
struct tegra_bpmp_channel *channel;
unsigned long flags;
unsigned int index;
int err;
err = down_timeout(&bpmp->threaded.lock, usecs_to_jiffies(timeout));
if (err < 0)
return ERR_PTR(err);
spin_lock_irqsave(&bpmp->lock, flags);
index = find_first_zero_bit(bpmp->threaded.allocated, count);
if (index == count) {
err = -EBUSY;
goto unlock;
}
channel = &bpmp->threaded_channels[index];
if (!tegra_bpmp_is_request_channel_free(channel)) {
err = -EBUSY;
goto unlock;
}
set_bit(index, bpmp->threaded.allocated);
err = __tegra_bpmp_channel_write(channel, mrq, MSG_ACK | MSG_RING,
data, size);
if (err < 0)
goto clear_allocated;
set_bit(index, bpmp->threaded.busy);
spin_unlock_irqrestore(&bpmp->lock, flags);
return channel;
clear_allocated:
clear_bit(index, bpmp->threaded.allocated);
unlock:
spin_unlock_irqrestore(&bpmp->lock, flags);
up(&bpmp->threaded.lock);
return ERR_PTR(err);
}
static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
unsigned int mrq, unsigned long flags,
const void *data, size_t size)
{
int err;
err = tegra_bpmp_wait_request_channel_free(channel);
if (err < 0)
return err;
return __tegra_bpmp_channel_write(channel, mrq, flags, data, size);
}
int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
struct tegra_bpmp_message *msg)
{
struct tegra_bpmp_channel *channel;
int err;
if (WARN_ON(!irqs_disabled()))
return -EPERM;
if (!tegra_bpmp_message_valid(msg))
return -EINVAL;
channel = bpmp->tx_channel;
spin_lock(&bpmp->atomic_tx_lock);
err = tegra_bpmp_channel_write(channel, msg->mrq, MSG_ACK,
msg->tx.data, msg->tx.size);
if (err < 0) {
spin_unlock(&bpmp->atomic_tx_lock);
return err;
}
spin_unlock(&bpmp->atomic_tx_lock);
err = tegra_bpmp_ring_doorbell(bpmp);
if (err < 0)
return err;
err = tegra_bpmp_wait_response(channel);
if (err < 0)
return err;
return __tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
&msg->rx.ret);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer_atomic);
int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
struct tegra_bpmp_message *msg)
{
struct tegra_bpmp_channel *channel;
unsigned long timeout;
int err;
if (WARN_ON(irqs_disabled()))
return -EPERM;
if (!tegra_bpmp_message_valid(msg))
return -EINVAL;
channel = tegra_bpmp_write_threaded(bpmp, msg->mrq, msg->tx.data,
msg->tx.size);
if (IS_ERR(channel))
return PTR_ERR(channel);
err = tegra_bpmp_ring_doorbell(bpmp);
if (err < 0)
return err;
timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout);
err = wait_for_completion_timeout(&channel->completion, timeout);
if (err == 0)
return -ETIMEDOUT;
return tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
&msg->rx.ret);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer);
static struct tegra_bpmp_mrq *tegra_bpmp_find_mrq(struct tegra_bpmp *bpmp,
unsigned int mrq)
{
struct tegra_bpmp_mrq *entry;
list_for_each_entry(entry, &bpmp->mrqs, list)
if (entry->mrq == mrq)
return entry;
return NULL;
}
void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code,
const void *data, size_t size)
{
unsigned long flags = tegra_bpmp_mb_read_field(&channel->ib, flags);
struct tegra_bpmp *bpmp = channel->bpmp;
int err;
if (WARN_ON(size > MSG_DATA_MIN_SZ))
return;
err = tegra_bpmp_ack_request(channel);
if (WARN_ON(err < 0))
return;
if ((flags & MSG_ACK) == 0)
return;
if (WARN_ON(!tegra_bpmp_is_response_channel_free(channel)))
return;
tegra_bpmp_mb_write_field(&channel->ob, code, code);
if (data && size > 0)
tegra_bpmp_mb_write(&channel->ob, data, size);
err = tegra_bpmp_post_response(channel);
if (WARN_ON(err < 0))
return;
if (flags & MSG_RING) {
err = tegra_bpmp_ring_doorbell(bpmp);
if (WARN_ON(err < 0))
return;
}
}
EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_return);
static void tegra_bpmp_handle_mrq(struct tegra_bpmp *bpmp,
unsigned int mrq,
struct tegra_bpmp_channel *channel)
{
struct tegra_bpmp_mrq *entry;
u32 zero = 0;
spin_lock(&bpmp->lock);
entry = tegra_bpmp_find_mrq(bpmp, mrq);
if (!entry) {
spin_unlock(&bpmp->lock);
tegra_bpmp_mrq_return(channel, -EINVAL, &zero, sizeof(zero));
return;
}
entry->handler(mrq, channel, entry->data);
spin_unlock(&bpmp->lock);
}
int tegra_bpmp_request_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
tegra_bpmp_mrq_handler_t handler, void *data)
{
struct tegra_bpmp_mrq *entry;
unsigned long flags;
if (!handler)
return -EINVAL;
entry = devm_kzalloc(bpmp->dev, sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
spin_lock_irqsave(&bpmp->lock, flags);
entry->mrq = mrq;
entry->handler = handler;
entry->data = data;
list_add(&entry->list, &bpmp->mrqs);
spin_unlock_irqrestore(&bpmp->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(tegra_bpmp_request_mrq);
void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp, unsigned int mrq, void *data)
{
struct tegra_bpmp_mrq *entry;
unsigned long flags;
spin_lock_irqsave(&bpmp->lock, flags);
entry = tegra_bpmp_find_mrq(bpmp, mrq);
if (!entry)
goto unlock;
list_del(&entry->list);
devm_kfree(bpmp->dev, entry);
unlock:
spin_unlock_irqrestore(&bpmp->lock, flags);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_free_mrq);
bool tegra_bpmp_mrq_is_supported(struct tegra_bpmp *bpmp, unsigned int mrq)
{
struct mrq_query_abi_request req = { .mrq = mrq };
struct mrq_query_abi_response resp;
struct tegra_bpmp_message msg = {
.mrq = MRQ_QUERY_ABI,
.tx = {
.data = &req,
.size = sizeof(req),
},
.rx = {
.data = &resp,
.size = sizeof(resp),
},
};
int err;
err = tegra_bpmp_transfer(bpmp, &msg);
if (err || msg.rx.ret)
return false;
return resp.status == 0;
}
EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_is_supported);
static void tegra_bpmp_mrq_handle_ping(unsigned int mrq,
struct tegra_bpmp_channel *channel,
void *data)
{
struct mrq_ping_request request;
struct mrq_ping_response response;
tegra_bpmp_mb_read(&request, &channel->ib, sizeof(request));
memset(&response, 0, sizeof(response));
response.reply = request.challenge << 1;
tegra_bpmp_mrq_return(channel, 0, &response, sizeof(response));
}
static int tegra_bpmp_ping(struct tegra_bpmp *bpmp)
{
struct mrq_ping_response response;
struct mrq_ping_request request;
struct tegra_bpmp_message msg;
unsigned long flags;
ktime_t start, end;
int err;
memset(&request, 0, sizeof(request));
request.challenge = 1;
memset(&response, 0, sizeof(response));
memset(&msg, 0, sizeof(msg));
msg.mrq = MRQ_PING;
msg.tx.data = &request;
msg.tx.size = sizeof(request);
msg.rx.data = &response;
msg.rx.size = sizeof(response);
local_irq_save(flags);
start = ktime_get();
err = tegra_bpmp_transfer_atomic(bpmp, &msg);
end = ktime_get();
local_irq_restore(flags);
if (!err)
dev_dbg(bpmp->dev,
"ping ok: challenge: %u, response: %u, time: %lld\n",
request.challenge, response.reply,
ktime_to_us(ktime_sub(end, start)));
return err;
}
/* deprecated version of tag query */
static int tegra_bpmp_get_firmware_tag_old(struct tegra_bpmp *bpmp, char *tag,
size_t size)
{
struct mrq_query_tag_request request;
struct tegra_bpmp_message msg;
unsigned long flags;
dma_addr_t phys;
void *virt;
int err;
if (size != TAG_SZ)
return -EINVAL;
virt = dma_alloc_coherent(bpmp->dev, TAG_SZ, &phys,
GFP_KERNEL | GFP_DMA32);
if (!virt)
return -ENOMEM;
memset(&request, 0, sizeof(request));
request.addr = phys;
memset(&msg, 0, sizeof(msg));
msg.mrq = MRQ_QUERY_TAG;
msg.tx.data = &request;
msg.tx.size = sizeof(request);
local_irq_save(flags);
err = tegra_bpmp_transfer_atomic(bpmp, &msg);
local_irq_restore(flags);
if (err == 0)
memcpy(tag, virt, TAG_SZ);
dma_free_coherent(bpmp->dev, TAG_SZ, virt, phys);
return err;
}
static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp *bpmp, char *tag,
size_t size)
{
if (tegra_bpmp_mrq_is_supported(bpmp, MRQ_QUERY_FW_TAG)) {
struct mrq_query_fw_tag_response resp;
struct tegra_bpmp_message msg = {
.mrq = MRQ_QUERY_FW_TAG,
.rx = {
.data = &resp,
.size = sizeof(resp),
},
};
int err;
if (size != sizeof(resp.tag))
return -EINVAL;
err = tegra_bpmp_transfer(bpmp, &msg);
if (err)
return err;
if (msg.rx.ret < 0)
return -EINVAL;
memcpy(tag, resp.tag, sizeof(resp.tag));
return 0;
}
return tegra_bpmp_get_firmware_tag_old(bpmp, tag, size);
}
static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel)
{
unsigned long flags = tegra_bpmp_mb_read_field(&channel->ob, flags);
if ((flags & MSG_RING) == 0)
return;
complete(&channel->completion);
}
void tegra_bpmp_handle_rx(struct tegra_bpmp *bpmp)
{
struct tegra_bpmp_channel *channel;
unsigned int i, count;
unsigned long *busy;
channel = bpmp->rx_channel;
count = bpmp->soc->channels.thread.count;
busy = bpmp->threaded.busy;
if (tegra_bpmp_is_request_ready(channel)) {
unsigned int mrq = tegra_bpmp_mb_read_field(&channel->ib, code);
tegra_bpmp_handle_mrq(bpmp, mrq, channel);
}
spin_lock(&bpmp->lock);
for_each_set_bit(i, busy, count) {
struct tegra_bpmp_channel *channel;
channel = &bpmp->threaded_channels[i];
if (tegra_bpmp_is_response_ready(channel)) {
tegra_bpmp_channel_signal(channel);
clear_bit(i, busy);
}
}
spin_unlock(&bpmp->lock);
}
static int tegra_bpmp_probe(struct platform_device *pdev)
{
struct tegra_bpmp *bpmp;
char tag[TAG_SZ];
size_t size;
int err;
bpmp = devm_kzalloc(&pdev->dev, sizeof(*bpmp), GFP_KERNEL);
if (!bpmp)
return -ENOMEM;
bpmp->soc = of_device_get_match_data(&pdev->dev);
bpmp->dev = &pdev->dev;
INIT_LIST_HEAD(&bpmp->mrqs);
spin_lock_init(&bpmp->lock);
bpmp->threaded.count = bpmp->soc->channels.thread.count;
sema_init(&bpmp->threaded.lock, bpmp->threaded.count);
size = BITS_TO_LONGS(bpmp->threaded.count) * sizeof(long);
bpmp->threaded.allocated = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
if (!bpmp->threaded.allocated)
return -ENOMEM;
bpmp->threaded.busy = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
if (!bpmp->threaded.busy)
return -ENOMEM;
spin_lock_init(&bpmp->atomic_tx_lock);
bpmp->tx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->tx_channel),
GFP_KERNEL);
if (!bpmp->tx_channel)
return -ENOMEM;
bpmp->rx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->rx_channel),
GFP_KERNEL);
if (!bpmp->rx_channel)
return -ENOMEM;
bpmp->threaded_channels = devm_kcalloc(&pdev->dev, bpmp->threaded.count,
sizeof(*bpmp->threaded_channels),
GFP_KERNEL);
if (!bpmp->threaded_channels)
return -ENOMEM;
platform_set_drvdata(pdev, bpmp);
err = bpmp->soc->ops->init(bpmp);
if (err < 0)
return err;
err = tegra_bpmp_request_mrq(bpmp, MRQ_PING,
tegra_bpmp_mrq_handle_ping, bpmp);
if (err < 0)
goto deinit;
err = tegra_bpmp_ping(bpmp);
if (err < 0) {
dev_err(&pdev->dev, "failed to ping BPMP: %d\n", err);
goto free_mrq;
}
err = tegra_bpmp_get_firmware_tag(bpmp, tag, sizeof(tag));
if (err < 0) {
dev_err(&pdev->dev, "failed to get firmware tag: %d\n", err);
goto free_mrq;
}
dev_info(&pdev->dev, "firmware: %.*s\n", (int)sizeof(tag), tag);
err = of_platform_default_populate(pdev->dev.of_node, NULL, &pdev->dev);
if (err < 0)
goto free_mrq;
if (of_property_present(pdev->dev.of_node, "#clock-cells")) {
err = tegra_bpmp_init_clocks(bpmp);
if (err < 0)
goto free_mrq;
}
if (of_property_present(pdev->dev.of_node, "#reset-cells")) {
err = tegra_bpmp_init_resets(bpmp);
if (err < 0)
goto free_mrq;
}
if (of_property_present(pdev->dev.of_node, "#power-domain-cells")) {
err = tegra_bpmp_init_powergates(bpmp);
if (err < 0)
goto free_mrq;
}
err = tegra_bpmp_init_debugfs(bpmp);
if (err < 0)
dev_err(&pdev->dev, "debugfs initialization failed: %d\n", err);
return 0;
free_mrq:
tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp);
deinit:
if (bpmp->soc->ops->deinit)
bpmp->soc->ops->deinit(bpmp);
return err;
}
static int __maybe_unused tegra_bpmp_resume(struct device *dev)
{
struct tegra_bpmp *bpmp = dev_get_drvdata(dev);
if (bpmp->soc->ops->resume)
return bpmp->soc->ops->resume(bpmp);
else
return 0;
}
static const struct dev_pm_ops tegra_bpmp_pm_ops = {
.resume_noirq = tegra_bpmp_resume,
};
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
static const struct tegra_bpmp_soc tegra186_soc = {
.channels = {
.cpu_tx = {
.offset = 3,
.timeout = 60 * USEC_PER_SEC,
},
.thread = {
.offset = 0,
.count = 3,
.timeout = 600 * USEC_PER_SEC,
},
.cpu_rx = {
.offset = 13,
.timeout = 0,
},
},
.ops = &tegra186_bpmp_ops,
.num_resets = 193,
};
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
static const struct tegra_bpmp_soc tegra210_soc = {
.channels = {
.cpu_tx = {
.offset = 0,
.count = 1,
.timeout = 60 * USEC_PER_SEC,
},
.thread = {
.offset = 4,
.count = 1,
.timeout = 600 * USEC_PER_SEC,
},
.cpu_rx = {
.offset = 8,
.count = 1,
.timeout = 0,
},
},
.ops = &tegra210_bpmp_ops,
};
#endif
static const struct of_device_id tegra_bpmp_match[] = {
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
{ .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
{ .compatible = "nvidia,tegra210-bpmp", .data = &tegra210_soc },
#endif
{ }
};
static struct platform_driver tegra_bpmp_driver = {
.driver = {
.name = "tegra-bpmp",
.of_match_table = tegra_bpmp_match,
.pm = &tegra_bpmp_pm_ops,
.suppress_bind_attrs = true,
},
.probe = tegra_bpmp_probe,
};
builtin_platform_driver(tegra_bpmp_driver);
| linux-master | drivers/firmware/tegra/bpmp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*/
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
#include <soc/tegra/ivc.h>
#include "bpmp-private.h"
struct tegra186_bpmp {
struct tegra_bpmp *parent;
struct {
struct gen_pool *pool;
union {
void __iomem *sram;
void *dram;
};
dma_addr_t phys;
} tx, rx;
struct {
struct mbox_client client;
struct mbox_chan *channel;
} mbox;
};
static inline struct tegra_bpmp *
mbox_client_to_bpmp(struct mbox_client *client)
{
struct tegra186_bpmp *priv;
priv = container_of(client, struct tegra186_bpmp, mbox.client);
return priv->parent;
}
static bool tegra186_bpmp_is_message_ready(struct tegra_bpmp_channel *channel)
{
int err;
err = tegra_ivc_read_get_next_frame(channel->ivc, &channel->ib);
if (err) {
iosys_map_clear(&channel->ib);
return false;
}
return true;
}
static bool tegra186_bpmp_is_channel_free(struct tegra_bpmp_channel *channel)
{
int err;
err = tegra_ivc_write_get_next_frame(channel->ivc, &channel->ob);
if (err) {
iosys_map_clear(&channel->ob);
return false;
}
return true;
}
static int tegra186_bpmp_ack_message(struct tegra_bpmp_channel *channel)
{
return tegra_ivc_read_advance(channel->ivc);
}
static int tegra186_bpmp_post_message(struct tegra_bpmp_channel *channel)
{
return tegra_ivc_write_advance(channel->ivc);
}
static int tegra186_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
{
struct tegra186_bpmp *priv = bpmp->priv;
int err;
err = mbox_send_message(priv->mbox.channel, NULL);
if (err < 0)
return err;
mbox_client_txdone(priv->mbox.channel, 0);
return 0;
}
static void tegra186_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data)
{
struct tegra_bpmp *bpmp = data;
struct tegra186_bpmp *priv = bpmp->priv;
if (WARN_ON(priv->mbox.channel == NULL))
return;
tegra186_bpmp_ring_doorbell(bpmp);
}
static int tegra186_bpmp_channel_init(struct tegra_bpmp_channel *channel,
struct tegra_bpmp *bpmp,
unsigned int index)
{
struct tegra186_bpmp *priv = bpmp->priv;
size_t message_size, queue_size;
struct iosys_map rx, tx;
unsigned int offset;
int err;
channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc),
GFP_KERNEL);
if (!channel->ivc)
return -ENOMEM;
message_size = tegra_ivc_align(MSG_MIN_SZ);
queue_size = tegra_ivc_total_queue_size(message_size);
offset = queue_size * index;
if (priv->rx.pool) {
iosys_map_set_vaddr_iomem(&rx, priv->rx.sram + offset);
iosys_map_set_vaddr_iomem(&tx, priv->tx.sram + offset);
} else {
iosys_map_set_vaddr(&rx, priv->rx.dram + offset);
iosys_map_set_vaddr(&tx, priv->tx.dram + offset);
}
err = tegra_ivc_init(channel->ivc, NULL, &rx, priv->rx.phys + offset, &tx,
priv->tx.phys + offset, 1, message_size, tegra186_bpmp_ivc_notify,
bpmp);
if (err < 0) {
dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n",
index, err);
return err;
}
init_completion(&channel->completion);
channel->bpmp = bpmp;
return 0;
}
static void tegra186_bpmp_channel_reset(struct tegra_bpmp_channel *channel)
{
/* reset the channel state */
tegra_ivc_reset(channel->ivc);
/* sync the channel state with BPMP */
while (tegra_ivc_notified(channel->ivc))
;
}
static void tegra186_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel)
{
tegra_ivc_cleanup(channel->ivc);
}
static void mbox_handle_rx(struct mbox_client *client, void *data)
{
struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client);
tegra_bpmp_handle_rx(bpmp);
}
static void tegra186_bpmp_teardown_channels(struct tegra_bpmp *bpmp)
{
struct tegra186_bpmp *priv = bpmp->priv;
unsigned int i;
for (i = 0; i < bpmp->threaded.count; i++) {
if (!bpmp->threaded_channels[i].bpmp)
continue;
tegra186_bpmp_channel_cleanup(&bpmp->threaded_channels[i]);
}
tegra186_bpmp_channel_cleanup(bpmp->rx_channel);
tegra186_bpmp_channel_cleanup(bpmp->tx_channel);
if (priv->tx.pool) {
gen_pool_free(priv->tx.pool, (unsigned long)priv->tx.sram, 4096);
gen_pool_free(priv->rx.pool, (unsigned long)priv->rx.sram, 4096);
}
}
static int tegra186_bpmp_dram_init(struct tegra_bpmp *bpmp)
{
struct tegra186_bpmp *priv = bpmp->priv;
struct device_node *np;
struct resource res;
size_t size;
int err;
np = of_parse_phandle(bpmp->dev->of_node, "memory-region", 0);
if (!np)
return -ENODEV;
err = of_address_to_resource(np, 0, &res);
if (err < 0) {
dev_warn(bpmp->dev, "failed to parse memory region: %d\n", err);
return err;
}
size = resource_size(&res);
if (size < SZ_8K) {
dev_warn(bpmp->dev, "DRAM region must be larger than 8 KiB\n");
return -EINVAL;
}
priv->tx.phys = res.start;
priv->rx.phys = res.start + SZ_4K;
priv->tx.dram = devm_memremap(bpmp->dev, priv->tx.phys, size,
MEMREMAP_WC);
if (IS_ERR(priv->tx.dram)) {
err = PTR_ERR(priv->tx.dram);
dev_warn(bpmp->dev, "failed to map DRAM region: %d\n", err);
return err;
}
priv->rx.dram = priv->tx.dram + SZ_4K;
return 0;
}
static int tegra186_bpmp_sram_init(struct tegra_bpmp *bpmp)
{
struct tegra186_bpmp *priv = bpmp->priv;
int err;
priv->tx.pool = of_gen_pool_get(bpmp->dev->of_node, "shmem", 0);
if (!priv->tx.pool) {
dev_err(bpmp->dev, "TX shmem pool not found\n");
return -EPROBE_DEFER;
}
priv->tx.sram = (void __iomem *)gen_pool_dma_alloc(priv->tx.pool, 4096,
&priv->tx.phys);
if (!priv->tx.sram) {
dev_err(bpmp->dev, "failed to allocate from TX pool\n");
return -ENOMEM;
}
priv->rx.pool = of_gen_pool_get(bpmp->dev->of_node, "shmem", 1);
if (!priv->rx.pool) {
dev_err(bpmp->dev, "RX shmem pool not found\n");
err = -EPROBE_DEFER;
goto free_tx;
}
priv->rx.sram = (void __iomem *)gen_pool_dma_alloc(priv->rx.pool, 4096,
&priv->rx.phys);
if (!priv->rx.sram) {
dev_err(bpmp->dev, "failed to allocate from RX pool\n");
err = -ENOMEM;
goto free_tx;
}
return 0;
free_tx:
gen_pool_free(priv->tx.pool, (unsigned long)priv->tx.sram, 4096);
return err;
}
static int tegra186_bpmp_setup_channels(struct tegra_bpmp *bpmp)
{
unsigned int i;
int err;
err = tegra186_bpmp_dram_init(bpmp);
	if (err == -ENODEV) {
		err = tegra186_bpmp_sram_init(bpmp);
		if (err < 0)
			return err;
	} else if (err < 0) {
		/* propagate any other DRAM setup failure instead of continuing */
		return err;
	}
err = tegra186_bpmp_channel_init(bpmp->tx_channel, bpmp,
bpmp->soc->channels.cpu_tx.offset);
if (err < 0)
return err;
err = tegra186_bpmp_channel_init(bpmp->rx_channel, bpmp,
bpmp->soc->channels.cpu_rx.offset);
if (err < 0) {
tegra186_bpmp_channel_cleanup(bpmp->tx_channel);
return err;
}
for (i = 0; i < bpmp->threaded.count; i++) {
unsigned int index = bpmp->soc->channels.thread.offset + i;
err = tegra186_bpmp_channel_init(&bpmp->threaded_channels[i],
bpmp, index);
if (err < 0)
break;
}
if (err < 0)
tegra186_bpmp_teardown_channels(bpmp);
return err;
}
static void tegra186_bpmp_reset_channels(struct tegra_bpmp *bpmp)
{
unsigned int i;
/* reset message channels */
tegra186_bpmp_channel_reset(bpmp->tx_channel);
tegra186_bpmp_channel_reset(bpmp->rx_channel);
for (i = 0; i < bpmp->threaded.count; i++)
tegra186_bpmp_channel_reset(&bpmp->threaded_channels[i]);
}
static int tegra186_bpmp_init(struct tegra_bpmp *bpmp)
{
struct tegra186_bpmp *priv;
int err;
priv = devm_kzalloc(bpmp->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->parent = bpmp;
bpmp->priv = priv;
err = tegra186_bpmp_setup_channels(bpmp);
if (err < 0)
return err;
/* mbox registration */
priv->mbox.client.dev = bpmp->dev;
priv->mbox.client.rx_callback = mbox_handle_rx;
priv->mbox.client.tx_block = false;
priv->mbox.client.knows_txdone = false;
priv->mbox.channel = mbox_request_channel(&priv->mbox.client, 0);
if (IS_ERR(priv->mbox.channel)) {
err = PTR_ERR(priv->mbox.channel);
dev_err(bpmp->dev, "failed to get HSP mailbox: %d\n", err);
tegra186_bpmp_teardown_channels(bpmp);
return err;
}
tegra186_bpmp_reset_channels(bpmp);
return 0;
}
static void tegra186_bpmp_deinit(struct tegra_bpmp *bpmp)
{
struct tegra186_bpmp *priv = bpmp->priv;
mbox_free_channel(priv->mbox.channel);
tegra186_bpmp_teardown_channels(bpmp);
}
static int tegra186_bpmp_resume(struct tegra_bpmp *bpmp)
{
tegra186_bpmp_reset_channels(bpmp);
return 0;
}
const struct tegra_bpmp_ops tegra186_bpmp_ops = {
.init = tegra186_bpmp_init,
.deinit = tegra186_bpmp_deinit,
.is_response_ready = tegra186_bpmp_is_message_ready,
.is_request_ready = tegra186_bpmp_is_message_ready,
.ack_response = tegra186_bpmp_ack_message,
.ack_request = tegra186_bpmp_ack_message,
.is_response_channel_free = tegra186_bpmp_is_channel_free,
.is_request_channel_free = tegra186_bpmp_is_channel_free,
.post_response = tegra186_bpmp_post_message,
.post_request = tegra186_bpmp_post_message,
.ring_doorbell = tegra186_bpmp_ring_doorbell,
.resume = tegra186_bpmp_resume,
};
| linux-master | drivers/firmware/tegra/bpmp-tegra186.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Amlogic Secure Monitor driver
*
* Copyright (C) 2016 Endless Mobile, Inc.
* Author: Carlo Caione <[email protected]>
*/
#define pr_fmt(fmt) "meson-sm: " fmt
#include <linux/arm-smccc.h>
#include <linux/bug.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/firmware/meson/meson_sm.h>
struct meson_sm_cmd {
unsigned int index;
u32 smc_id;
};
#define CMD(d, s) { .index = (d), .smc_id = (s), }
struct meson_sm_chip {
unsigned int shmem_size;
u32 cmd_shmem_in_base;
u32 cmd_shmem_out_base;
struct meson_sm_cmd cmd[];
};
static const struct meson_sm_chip gxbb_chip = {
.shmem_size = SZ_4K,
.cmd_shmem_in_base = 0x82000020,
.cmd_shmem_out_base = 0x82000021,
.cmd = {
CMD(SM_EFUSE_READ, 0x82000030),
CMD(SM_EFUSE_WRITE, 0x82000031),
CMD(SM_EFUSE_USER_MAX, 0x82000033),
CMD(SM_GET_CHIP_ID, 0x82000044),
CMD(SM_A1_PWRC_SET, 0x82000093),
CMD(SM_A1_PWRC_GET, 0x82000095),
{ /* sentinel */ },
},
};
struct meson_sm_firmware {
const struct meson_sm_chip *chip;
void __iomem *sm_shmem_in_base;
void __iomem *sm_shmem_out_base;
};
static u32 meson_sm_get_cmd(const struct meson_sm_chip *chip,
unsigned int cmd_index)
{
const struct meson_sm_cmd *cmd = chip->cmd;
while (cmd->smc_id && cmd->index != cmd_index)
cmd++;
return cmd->smc_id;
}
static u32 __meson_sm_call(u32 cmd, u32 arg0, u32 arg1, u32 arg2,
u32 arg3, u32 arg4)
{
struct arm_smccc_res res;
arm_smccc_smc(cmd, arg0, arg1, arg2, arg3, arg4, 0, 0, &res);
return res.a0;
}
static void __iomem *meson_sm_map_shmem(u32 cmd_shmem, unsigned int size)
{
u32 sm_phy_base;
sm_phy_base = __meson_sm_call(cmd_shmem, 0, 0, 0, 0, 0);
if (!sm_phy_base)
return NULL;
return ioremap_cache(sm_phy_base, size);
}
/**
* meson_sm_call - generic SMC32 call to the secure-monitor
*
* @fw: Pointer to secure-monitor firmware
* @cmd_index: Index of the SMC32 function ID
* @ret: Returned value
* @arg0: SMC32 Argument 0
* @arg1: SMC32 Argument 1
* @arg2: SMC32 Argument 2
* @arg3: SMC32 Argument 3
* @arg4: SMC32 Argument 4
*
* Return: 0 on success, a negative value on error
*/
int meson_sm_call(struct meson_sm_firmware *fw, unsigned int cmd_index,
u32 *ret, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4)
{
u32 cmd, lret;
if (!fw->chip)
return -ENOENT;
cmd = meson_sm_get_cmd(fw->chip, cmd_index);
if (!cmd)
return -EINVAL;
lret = __meson_sm_call(cmd, arg0, arg1, arg2, arg3, arg4);
if (ret)
*ret = lret;
return 0;
}
EXPORT_SYMBOL(meson_sm_call);
/**
* meson_sm_call_read - retrieve data from secure-monitor
*
* @fw: Pointer to secure-monitor firmware
* @buffer: Buffer to store the retrieved data
* @bsize: Size of the buffer
* @cmd_index: Index of the SMC32 function ID
* @arg0: SMC32 Argument 0
* @arg1: SMC32 Argument 1
* @arg2: SMC32 Argument 2
* @arg3: SMC32 Argument 3
* @arg4: SMC32 Argument 4
*
 * Return: size of read data on success, a negative value on error.
 *         When 0 is returned there is no guarantee about the amount of
 *         data read, and bsize bytes are copied into the buffer.
*/
int meson_sm_call_read(struct meson_sm_firmware *fw, void *buffer,
unsigned int bsize, unsigned int cmd_index, u32 arg0,
u32 arg1, u32 arg2, u32 arg3, u32 arg4)
{
u32 size;
int ret;
if (!fw->chip)
return -ENOENT;
if (!fw->chip->cmd_shmem_out_base)
return -EINVAL;
if (bsize > fw->chip->shmem_size)
return -EINVAL;
if (meson_sm_call(fw, cmd_index, &size, arg0, arg1, arg2, arg3, arg4) < 0)
return -EINVAL;
if (size > bsize)
return -EINVAL;
ret = size;
if (!size)
size = bsize;
if (buffer)
memcpy(buffer, fw->sm_shmem_out_base, size);
return ret;
}
EXPORT_SYMBOL(meson_sm_call_read);
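/*
 * Illustrative sketch only (editorial addition, not part of the driver): a
 * hypothetical caller using meson_sm_call_read(). The firmware handle would
 * normally come from meson_sm_get(); the command arguments and buffer size
 * here are made up for the example.
 */
static int __maybe_unused meson_sm_example_read(struct meson_sm_firmware *fw)
{
	u8 buf[64];
	int ret;
	ret = meson_sm_call_read(fw, buf, sizeof(buf), SM_EFUSE_READ,
				 0, sizeof(buf), 0, 0, 0);
	if (ret < 0)
		return ret;
	/*
	 * A return value of 0 gives no guarantee about how much data was
	 * produced; in that case the whole buffer was copied.
	 */
	if (ret == 0)
		ret = sizeof(buf);
	return ret;
}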
/**
* meson_sm_call_write - send data to secure-monitor
*
* @fw: Pointer to secure-monitor firmware
* @buffer: Buffer containing data to send
* @size: Size of the data to send
* @cmd_index: Index of the SMC32 function ID
* @arg0: SMC32 Argument 0
* @arg1: SMC32 Argument 1
* @arg2: SMC32 Argument 2
* @arg3: SMC32 Argument 3
* @arg4: SMC32 Argument 4
*
* Return: size of sent data on success, a negative value on error
*/
int meson_sm_call_write(struct meson_sm_firmware *fw, void *buffer,
unsigned int size, unsigned int cmd_index, u32 arg0,
u32 arg1, u32 arg2, u32 arg3, u32 arg4)
{
u32 written;
if (!fw->chip)
return -ENOENT;
if (size > fw->chip->shmem_size)
return -EINVAL;
if (!fw->chip->cmd_shmem_in_base)
return -EINVAL;
memcpy(fw->sm_shmem_in_base, buffer, size);
if (meson_sm_call(fw, cmd_index, &written, arg0, arg1, arg2, arg3, arg4) < 0)
return -EINVAL;
if (!written)
return -EINVAL;
return written;
}
EXPORT_SYMBOL(meson_sm_call_write);
/**
* meson_sm_get - get pointer to meson_sm_firmware structure.
*
* @sm_node: Pointer to the secure-monitor Device Tree node.
*
 * Return: pointer to the meson_sm_firmware structure, or NULL if the
 *         secure-monitor device is not ready.
*/
struct meson_sm_firmware *meson_sm_get(struct device_node *sm_node)
{
struct platform_device *pdev = of_find_device_by_node(sm_node);
if (!pdev)
return NULL;
return platform_get_drvdata(pdev);
}
EXPORT_SYMBOL_GPL(meson_sm_get);
#define SM_CHIP_ID_LENGTH 119
#define SM_CHIP_ID_OFFSET 4
#define SM_CHIP_ID_SIZE 12
static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct platform_device *pdev = to_platform_device(dev);
struct meson_sm_firmware *fw;
uint8_t *id_buf;
int ret;
fw = platform_get_drvdata(pdev);
id_buf = kmalloc(SM_CHIP_ID_LENGTH, GFP_KERNEL);
if (!id_buf)
return -ENOMEM;
ret = meson_sm_call_read(fw, id_buf, SM_CHIP_ID_LENGTH, SM_GET_CHIP_ID,
0, 0, 0, 0, 0);
if (ret < 0) {
kfree(id_buf);
return ret;
}
ret = sprintf(buf, "%12phN\n", &id_buf[SM_CHIP_ID_OFFSET]);
kfree(id_buf);
return ret;
}
static DEVICE_ATTR_RO(serial);
static struct attribute *meson_sm_sysfs_attributes[] = {
&dev_attr_serial.attr,
NULL,
};
static const struct attribute_group meson_sm_sysfs_attr_group = {
.attrs = meson_sm_sysfs_attributes,
};
static const struct of_device_id meson_sm_ids[] = {
{ .compatible = "amlogic,meson-gxbb-sm", .data = &gxbb_chip },
{ /* sentinel */ },
};
static int __init meson_sm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct meson_sm_chip *chip;
struct meson_sm_firmware *fw;
fw = devm_kzalloc(dev, sizeof(*fw), GFP_KERNEL);
if (!fw)
return -ENOMEM;
chip = of_match_device(meson_sm_ids, dev)->data;
if (!chip)
return -EINVAL;
if (chip->cmd_shmem_in_base) {
fw->sm_shmem_in_base = meson_sm_map_shmem(chip->cmd_shmem_in_base,
chip->shmem_size);
if (WARN_ON(!fw->sm_shmem_in_base))
goto out;
}
if (chip->cmd_shmem_out_base) {
fw->sm_shmem_out_base = meson_sm_map_shmem(chip->cmd_shmem_out_base,
chip->shmem_size);
if (WARN_ON(!fw->sm_shmem_out_base))
goto out_in_base;
}
fw->chip = chip;
platform_set_drvdata(pdev, fw);
if (devm_of_platform_populate(dev))
goto out_in_base;
if (sysfs_create_group(&pdev->dev.kobj, &meson_sm_sysfs_attr_group))
goto out_in_base;
pr_info("secure-monitor enabled\n");
return 0;
out_in_base:
iounmap(fw->sm_shmem_in_base);
out:
return -EINVAL;
}
static struct platform_driver meson_sm_driver = {
.driver = {
.name = "meson-sm",
.of_match_table = of_match_ptr(meson_sm_ids),
},
};
module_platform_driver_probe(meson_sm_driver, meson_sm_probe);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/firmware/meson/meson_sm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* System Control and Management Interface (SCMI) Message SMC/HVC
* Transport driver
*
* Copyright 2020 NXP
*/
#include <linux/arm-smccc.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/processor.h>
#include <linux/slab.h>
#include "common.h"
/*
 * The shmem address is split into a 4K page number and an offset within
 * that page. This is to make sure the parameters fit in the 32-bit
 * arguments of the smc/hvc call and to keep it uniform across the
 * smc32/smc64 conventions. This, however, limits the shmem address to 44 bits.
*
* These optional parameters can be used to distinguish among multiple
* scmi instances that are using the same smc-id.
* The page parameter is passed in r1/x1/w1 register and the offset parameter
* is passed in r2/x2/w2 register.
*/
#define SHMEM_SIZE (SZ_4K)
#define SHMEM_SHIFT 12
#define SHMEM_PAGE(x) (_UL((x) >> SHMEM_SHIFT))
#define SHMEM_OFFSET(x) ((x) & (SHMEM_SIZE - 1))
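/*
 * Illustrative sketch (editorial addition, not part of the driver): how a
 * hypothetical shmem address is decomposed by the macros above before being
 * passed in the r1/x1/w1 and r2/x2/w2 arguments of the SMC/HVC call. The
 * address value is made up for the example.
 */
static void __maybe_unused smc_shmem_split_example(void)
{
	u64 addr = 0xfe0001080ULL;			/* hypothetical 44-bit address */
	unsigned long page = SHMEM_PAGE(addr);		/* 0xfe0001 */
	unsigned long offset = SHMEM_OFFSET(addr);	/* 0x080 */
	pr_debug("shmem page %#lx, offset %#lx\n", page, offset);
}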
/**
* struct scmi_smc - Structure representing a SCMI smc transport
*
* @irq: An optional IRQ for completion
* @cinfo: SCMI channel info
* @shmem: Transmit/Receive shared memory area
* @shmem_lock: Lock to protect access to Tx/Rx shared memory area.
* Used when NOT operating in atomic mode.
* @inflight: Atomic flag to protect access to Tx/Rx shared memory area.
* Used when operating in atomic mode.
* @func_id: smc/hvc call function id
* @param_page: 4K page number of the shmem channel
* @param_offset: Offset within the 4K page of the shmem channel
*/
struct scmi_smc {
int irq;
struct scmi_chan_info *cinfo;
struct scmi_shared_mem __iomem *shmem;
/* Protect access to shmem area */
struct mutex shmem_lock;
#define INFLIGHT_NONE MSG_TOKEN_MAX
atomic_t inflight;
u32 func_id;
u32 param_page;
u32 param_offset;
};
static irqreturn_t smc_msg_done_isr(int irq, void *data)
{
struct scmi_smc *scmi_info = data;
scmi_rx_callback(scmi_info->cinfo,
shmem_read_header(scmi_info->shmem), NULL);
return IRQ_HANDLED;
}
static bool smc_chan_available(struct device_node *of_node, int idx)
{
struct device_node *np = of_parse_phandle(of_node, "shmem", 0);
if (!np)
return false;
of_node_put(np);
return true;
}
static inline void smc_channel_lock_init(struct scmi_smc *scmi_info)
{
if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
atomic_set(&scmi_info->inflight, INFLIGHT_NONE);
else
mutex_init(&scmi_info->shmem_lock);
}
static bool smc_xfer_inflight(struct scmi_xfer *xfer, atomic_t *inflight)
{
int ret;
ret = atomic_cmpxchg(inflight, INFLIGHT_NONE, xfer->hdr.seq);
return ret == INFLIGHT_NONE;
}
static inline void
smc_channel_lock_acquire(struct scmi_smc *scmi_info,
struct scmi_xfer *xfer __maybe_unused)
{
if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
spin_until_cond(smc_xfer_inflight(xfer, &scmi_info->inflight));
else
mutex_lock(&scmi_info->shmem_lock);
}
static inline void smc_channel_lock_release(struct scmi_smc *scmi_info)
{
if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
atomic_set(&scmi_info->inflight, INFLIGHT_NONE);
else
mutex_unlock(&scmi_info->shmem_lock);
}
static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
bool tx)
{
struct device *cdev = cinfo->dev;
struct scmi_smc *scmi_info;
resource_size_t size;
struct resource res;
struct device_node *np;
u32 func_id;
int ret;
if (!tx)
return -ENODEV;
scmi_info = devm_kzalloc(dev, sizeof(*scmi_info), GFP_KERNEL);
if (!scmi_info)
return -ENOMEM;
np = of_parse_phandle(cdev->of_node, "shmem", 0);
if (!of_device_is_compatible(np, "arm,scmi-shmem")) {
of_node_put(np);
return -ENXIO;
}
ret = of_address_to_resource(np, 0, &res);
of_node_put(np);
if (ret) {
dev_err(cdev, "failed to get SCMI Tx shared memory\n");
return ret;
}
size = resource_size(&res);
scmi_info->shmem = devm_ioremap(dev, res.start, size);
if (!scmi_info->shmem) {
dev_err(dev, "failed to ioremap SCMI Tx shared memory\n");
return -EADDRNOTAVAIL;
}
ret = of_property_read_u32(dev->of_node, "arm,smc-id", &func_id);
if (ret < 0)
return ret;
if (of_device_is_compatible(dev->of_node, "arm,scmi-smc-param")) {
scmi_info->param_page = SHMEM_PAGE(res.start);
scmi_info->param_offset = SHMEM_OFFSET(res.start);
}
/*
* If there is an interrupt named "a2p", then the service and
* completion of a message is signaled by an interrupt rather than by
* the return of the SMC call.
*/
scmi_info->irq = of_irq_get_byname(cdev->of_node, "a2p");
if (scmi_info->irq > 0) {
ret = request_irq(scmi_info->irq, smc_msg_done_isr,
IRQF_NO_SUSPEND, dev_name(dev), scmi_info);
if (ret) {
dev_err(dev, "failed to setup SCMI smc irq\n");
return ret;
}
} else {
cinfo->no_completion_irq = true;
}
scmi_info->func_id = func_id;
scmi_info->cinfo = cinfo;
smc_channel_lock_init(scmi_info);
cinfo->transport_info = scmi_info;
return 0;
}
static int smc_chan_free(int id, void *p, void *data)
{
struct scmi_chan_info *cinfo = p;
struct scmi_smc *scmi_info = cinfo->transport_info;
/* Ignore any possible further reception on the IRQ path */
if (scmi_info->irq > 0)
free_irq(scmi_info->irq, scmi_info);
cinfo->transport_info = NULL;
scmi_info->cinfo = NULL;
return 0;
}
static int smc_send_message(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer)
{
struct scmi_smc *scmi_info = cinfo->transport_info;
struct arm_smccc_res res;
unsigned long page = scmi_info->param_page;
unsigned long offset = scmi_info->param_offset;
/*
	 * The channel is released only once the response has been fully
	 * retrieved, i.e. after .mark_txdone().
*/
smc_channel_lock_acquire(scmi_info, xfer);
shmem_tx_prepare(scmi_info->shmem, xfer, cinfo);
arm_smccc_1_1_invoke(scmi_info->func_id, page, offset, 0, 0, 0, 0, 0,
&res);
	/* Only SMCCC_RET_NOT_SUPPORTED is a valid error code */
if (res.a0) {
smc_channel_lock_release(scmi_info);
return -EOPNOTSUPP;
}
return 0;
}
static void smc_fetch_response(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer)
{
struct scmi_smc *scmi_info = cinfo->transport_info;
shmem_fetch_response(scmi_info->shmem, xfer);
}
static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret,
struct scmi_xfer *__unused)
{
struct scmi_smc *scmi_info = cinfo->transport_info;
smc_channel_lock_release(scmi_info);
}
static const struct scmi_transport_ops scmi_smc_ops = {
.chan_available = smc_chan_available,
.chan_setup = smc_chan_setup,
.chan_free = smc_chan_free,
.send_message = smc_send_message,
.mark_txdone = smc_mark_txdone,
.fetch_response = smc_fetch_response,
};
const struct scmi_desc scmi_smc_desc = {
.ops = &scmi_smc_ops,
.max_rx_timeout_ms = 30,
.max_msg = 20,
.max_msg_size = 128,
/*
	 * Setting .sync_cmds_completed_on_ret to true for SMC assumes that,
	 * once the SMC instruction has completed successfully, the issued
	 * SCMI command would have been already fully processed by the SCMI
	 * platform firmware and so any possible response value expected
	 * for the issued command will be immediately ready to be fetched
* from the shared memory area.
*/
.sync_cmds_completed_on_ret = true,
.atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE),
};
| linux-master | drivers/firmware/arm_scmi/smc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* For transports using message passing.
*
* Derived from shm.c.
*
* Copyright (C) 2019-2021 ARM Ltd.
* Copyright (C) 2020-2021 OpenSynergy GmbH
*/
#include <linux/types.h>
#include "common.h"
/*
* struct scmi_msg_payld - Transport SDU layout
*
* The SCMI specification requires all parameters, message headers, return
* arguments or any protocol data to be expressed in little endian format only.
*/
struct scmi_msg_payld {
__le32 msg_header;
__le32 msg_payload[];
};
/**
* msg_command_size() - Actual size of transport SDU for command.
*
* @xfer: message which core has prepared for sending
*
* Return: transport SDU size.
*/
size_t msg_command_size(struct scmi_xfer *xfer)
{
return sizeof(struct scmi_msg_payld) + xfer->tx.len;
}
/**
* msg_response_size() - Maximum size of transport SDU for response.
*
* @xfer: message which core has prepared for sending
*
* Return: transport SDU size.
*/
size_t msg_response_size(struct scmi_xfer *xfer)
{
return sizeof(struct scmi_msg_payld) + sizeof(__le32) + xfer->rx.len;
}
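/*
 * Worked example (editorial addition): for a hypothetical command carrying an
 * 8-byte payload and expecting an 8-byte reply, the helpers above evaluate to
 *   msg_command_size()  = 4 (msg_header) + 8              = 12 bytes
 *   msg_response_size() = 4 (msg_header) + 4 (status) + 8 = 16 bytes
 */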
/**
* msg_tx_prepare() - Set up transport SDU for command.
*
* @msg: transport SDU for command
* @xfer: message which is being sent
*/
void msg_tx_prepare(struct scmi_msg_payld *msg, struct scmi_xfer *xfer)
{
msg->msg_header = cpu_to_le32(pack_scmi_header(&xfer->hdr));
if (xfer->tx.buf)
memcpy(msg->msg_payload, xfer->tx.buf, xfer->tx.len);
}
/**
* msg_read_header() - Read SCMI header from transport SDU.
*
* @msg: transport SDU
*
* Return: SCMI header
*/
u32 msg_read_header(struct scmi_msg_payld *msg)
{
return le32_to_cpu(msg->msg_header);
}
/**
* msg_fetch_response() - Fetch response SCMI payload from transport SDU.
*
* @msg: transport SDU with response
* @len: transport SDU size
* @xfer: message being responded to
*/
void msg_fetch_response(struct scmi_msg_payld *msg, size_t len,
struct scmi_xfer *xfer)
{
size_t prefix_len = sizeof(*msg) + sizeof(msg->msg_payload[0]);
xfer->hdr.status = le32_to_cpu(msg->msg_payload[0]);
xfer->rx.len = min_t(size_t, xfer->rx.len,
len >= prefix_len ? len - prefix_len : 0);
	/* Take a copy into the rx buffer. */
memcpy(xfer->rx.buf, &msg->msg_payload[1], xfer->rx.len);
}
/**
* msg_fetch_notification() - Fetch notification payload from transport SDU.
*
* @msg: transport SDU with notification
* @len: transport SDU size
* @max_len: maximum SCMI payload size to fetch
* @xfer: notification message
*/
void msg_fetch_notification(struct scmi_msg_payld *msg, size_t len,
size_t max_len, struct scmi_xfer *xfer)
{
xfer->rx.len = min_t(size_t, max_len,
len >= sizeof(*msg) ? len - sizeof(*msg) : 0);
	/* Take a copy to the rx buffer. */
memcpy(xfer->rx.buf, msg->msg_payload, xfer->rx.len);
}
| linux-master | drivers/firmware/arm_scmi/msg.c |
// SPDX-License-Identifier: GPL-2.0
/*
* System Control and Management Interface (SCMI) Raw mode support
*
* Copyright (C) 2022 ARM Ltd.
*/
/**
* DOC: Theory of operation
*
 * When enabled, the SCMI Raw mode support exposes a userspace API which
 * allows a user application to send and receive SCMI commands, replies and
 * notifications through injection and snooping of bare SCMI messages in
 * binary little-endian format.
*
* Such injected SCMI transactions will then be routed through the SCMI core
* stack towards the SCMI backend server using whatever SCMI transport is
* currently configured on the system under test.
*
* It is meant to help in running any sort of SCMI backend server testing, no
* matter where the server is placed, as long as it is normally reachable via
* the transport configured on the system.
*
* It is activated by a Kernel configuration option since it is NOT meant to
* be used in production but only during development and in CI deployments.
*
* In order to avoid possible interferences between the SCMI Raw transactions
* originated from a test-suite and the normal operations of the SCMI drivers,
* when Raw mode is enabled, by default, all the regular SCMI drivers are
* inhibited, unless CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX is enabled: in this
* latter case the regular SCMI stack drivers will be loaded as usual and it is
* up to the user of this interface to take care of manually inhibiting the
* regular SCMI drivers in order to avoid interferences during the test runs.
*
* The exposed API is as follows.
*
 * All SCMI Raw entries are rooted under a common /raw debugfs top directory
* which in turn is rooted under the corresponding underlying SCMI instance.
*
* /sys/kernel/debug/scmi/
* `-- 0
* |-- atomic_threshold_us
* |-- instance_name
* |-- raw
* | |-- channels
* | | |-- 0x10
* | | | |-- message
* | | | `-- message_async
* | | `-- 0x13
* | | |-- message
* | | `-- message_async
* | |-- errors
* | |-- message
* | |-- message_async
* | |-- notification
* | `-- reset
* `-- transport
* |-- is_atomic
* |-- max_msg_size
* |-- max_rx_timeout_ms
* |-- rx_max_msg
* |-- tx_max_msg
* `-- type
*
* where:
*
* - errors: used to read back timed-out and unexpected replies
* - message*: used to send sync/async commands and read back immediate and
 *               delayed responses (if any)
* - notification: used to read any notification being emitted by the system
* (if previously enabled by the user app)
* - reset: used to flush the queues of messages (of any kind) still pending
* to be read; this is useful at test-suite start/stop to get
* rid of any unread messages from the previous run.
*
* with the per-channel entries rooted at /channels being present only on a
* system where multiple transport channels have been configured.
*
* Such per-channel entries can be used to explicitly choose a specific channel
* for SCMI bare message injection, in contrast with the general entries above
* where, instead, the selection of the proper channel to use is automatically
 * performed based on the protocol embedded in the injected message and on how the
* transport is configured on the system.
*
 * Note that other common general entries are available under transport/ to let
 * the user applications properly set their expectations in terms of timeouts
 * and message characteristics.
*
* Each write to the message* entries causes one command request to be built
 * and sent while the replies or delayed responses are read back from those
 * same entries one message at a time (receiving an EOF at each message
 * boundary).
*
* The user application running the test is in charge of handling timeouts
* on replies and properly choosing SCMI sequence numbers for the outgoing
* requests (using the same sequence number is supported but discouraged).
*
* Injection of multiple in-flight requests is supported as long as the user
* application uses properly distinct sequence numbers for concurrent requests
* and takes care to properly manage all the related issues about concurrency
* and command/reply pairing. Keep in mind that, anyway, the real level of
 * parallelism attainable in such a scenario is dependent on the characteristics
* of the underlying transport being used.
*
* Since the SCMI core regular stack is partially used to deliver and collect
 * the messages, late replies arriving after timeouts and any other sort of
 * unexpected message can be identified by the SCMI core as usual and will
 * be reported as messages under "errors" for later analysis.
*/
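/*
 * A minimal userspace usage sketch of the interface described above; this is
 * illustrative only and not part of the kernel build. The debugfs mount
 * point, the SCMI instance number (0) and the example header value below are
 * assumptions, not values mandated by this driver.
 *
 * Inject one bare little-endian SCMI command (header only, no payload) and
 * then block reading back its reply from the very same entry:
 *
 *	int fd = open("/sys/kernel/debug/scmi/0/raw/message", O_RDWR);
 *	uint32_t hdr = 0x00400000;
 *	uint8_t reply[256];
 *	ssize_t n;
 *
 *	write(fd, &hdr, sizeof(hdr));
 *	n = read(fd, reply, sizeof(reply));
 *	close(fd);
 *
 * Each write() injects exactly one command, while each read() returns at most
 * one queued message, with EOF delimiting message boundaries as noted above.
 */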
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/xarray.h>
#include "common.h"
#include "raw_mode.h"
#include <trace/events/scmi.h>
#define SCMI_XFER_RAW_MAX_RETRIES 10
/**
* struct scmi_raw_queue - Generic Raw queue descriptor
*
 * @free_bufs: A freelist listhead used to keep unused raw buffers
* @free_bufs_lock: Spinlock used to protect access to @free_bufs
* @msg_q: A listhead to a queue of snooped messages waiting to be read out
* @msg_q_lock: Spinlock used to protect access to @msg_q
* @wq: A waitqueue used to wait and poll on related @msg_q
*/
struct scmi_raw_queue {
struct list_head free_bufs;
/* Protect free_bufs[] lists */
spinlock_t free_bufs_lock;
struct list_head msg_q;
/* Protect msg_q[] lists */
spinlock_t msg_q_lock;
wait_queue_head_t wq;
};
/**
* struct scmi_raw_mode_info - Structure holding SCMI Raw instance data
*
* @id: Sequential Raw instance ID.
* @handle: Pointer to SCMI entity handle to use
* @desc: Pointer to the transport descriptor to use
* @tx_max_msg: Maximum number of concurrent TX in-flight messages
* @q: An array of Raw queue descriptors
* @chans_q: An XArray mapping optional additional per-channel queues
* @free_waiters: Head of freelist for unused waiters
* @free_mtx: A mutex to protect the waiters freelist
* @active_waiters: Head of list for currently active and used waiters
* @active_mtx: A mutex to protect the active waiters list
* @waiters_work: A work descriptor to be used with the workqueue machinery
* @wait_wq: A workqueue reference to the created workqueue
* @dentry: Top debugfs root dentry for SCMI Raw
* @gid: A group ID used for devres accounting
*
* Note that this descriptor is passed back to the core after SCMI Raw is
 * initialized as an opaque handle to be used by subsequent SCMI Raw call hooks.
*
*/
struct scmi_raw_mode_info {
unsigned int id;
const struct scmi_handle *handle;
const struct scmi_desc *desc;
int tx_max_msg;
struct scmi_raw_queue *q[SCMI_RAW_MAX_QUEUE];
struct xarray chans_q;
struct list_head free_waiters;
/* Protect free_waiters list */
struct mutex free_mtx;
struct list_head active_waiters;
/* Protect active_waiters list */
struct mutex active_mtx;
struct work_struct waiters_work;
struct workqueue_struct *wait_wq;
struct dentry *dentry;
void *gid;
};
/**
* struct scmi_xfer_raw_waiter - Structure to describe an xfer to be waited for
*
* @start_jiffies: The timestamp in jiffies of when this structure was queued.
* @cinfo: A reference to the channel to use for this transaction
* @xfer: A reference to the xfer to be waited for
* @async_response: A completion to be, optionally, used for async waits: it
 *                  will be set up by @scmi_do_xfer_raw_start, if needed, to be
* pointed at by xfer->async_done.
* @node: A list node.
*/
struct scmi_xfer_raw_waiter {
unsigned long start_jiffies;
struct scmi_chan_info *cinfo;
struct scmi_xfer *xfer;
struct completion async_response;
struct list_head node;
};
/**
* struct scmi_raw_buffer - Structure to hold a full SCMI message
*
* @max_len: The maximum allowed message size (header included) that can be
* stored into @msg
* @msg: A message buffer used to collect a full message grabbed from an xfer.
* @node: A list node.
*/
struct scmi_raw_buffer {
size_t max_len;
struct scmi_msg msg;
struct list_head node;
};
/**
* struct scmi_dbg_raw_data - Structure holding data needed by the debugfs
* layer
*
* @chan_id: The preferred channel to use: if zero the channel is automatically
* selected based on protocol.
* @raw: A reference to the Raw instance.
* @tx: A message buffer used to collect TX message on write.
* @tx_size: The effective size of the TX message.
* @tx_req_size: The final expected size of the complete TX message.
* @rx: A message buffer to collect RX message on read.
* @rx_size: The effective size of the RX message.
*/
struct scmi_dbg_raw_data {
u8 chan_id;
struct scmi_raw_mode_info *raw;
struct scmi_msg tx;
size_t tx_size;
size_t tx_req_size;
struct scmi_msg rx;
size_t rx_size;
};
static struct scmi_raw_queue *
scmi_raw_queue_select(struct scmi_raw_mode_info *raw, unsigned int idx,
unsigned int chan_id)
{
if (!chan_id)
return raw->q[idx];
return xa_load(&raw->chans_q, chan_id);
}
static struct scmi_raw_buffer *scmi_raw_buffer_get(struct scmi_raw_queue *q)
{
unsigned long flags;
struct scmi_raw_buffer *rb = NULL;
struct list_head *head = &q->free_bufs;
spin_lock_irqsave(&q->free_bufs_lock, flags);
if (!list_empty(head)) {
rb = list_first_entry(head, struct scmi_raw_buffer, node);
list_del_init(&rb->node);
}
spin_unlock_irqrestore(&q->free_bufs_lock, flags);
return rb;
}
static void scmi_raw_buffer_put(struct scmi_raw_queue *q,
struct scmi_raw_buffer *rb)
{
unsigned long flags;
/* Reset to full buffer length */
rb->msg.len = rb->max_len;
spin_lock_irqsave(&q->free_bufs_lock, flags);
list_add_tail(&rb->node, &q->free_bufs);
spin_unlock_irqrestore(&q->free_bufs_lock, flags);
}
static void scmi_raw_buffer_enqueue(struct scmi_raw_queue *q,
struct scmi_raw_buffer *rb)
{
unsigned long flags;
spin_lock_irqsave(&q->msg_q_lock, flags);
list_add_tail(&rb->node, &q->msg_q);
spin_unlock_irqrestore(&q->msg_q_lock, flags);
wake_up_interruptible(&q->wq);
}
static struct scmi_raw_buffer*
scmi_raw_buffer_dequeue_unlocked(struct scmi_raw_queue *q)
{
struct scmi_raw_buffer *rb = NULL;
if (!list_empty(&q->msg_q)) {
rb = list_first_entry(&q->msg_q, struct scmi_raw_buffer, node);
list_del_init(&rb->node);
}
return rb;
}
static struct scmi_raw_buffer *scmi_raw_buffer_dequeue(struct scmi_raw_queue *q)
{
unsigned long flags;
struct scmi_raw_buffer *rb;
spin_lock_irqsave(&q->msg_q_lock, flags);
rb = scmi_raw_buffer_dequeue_unlocked(q);
spin_unlock_irqrestore(&q->msg_q_lock, flags);
return rb;
}
static void scmi_raw_buffer_queue_flush(struct scmi_raw_queue *q)
{
struct scmi_raw_buffer *rb;
do {
rb = scmi_raw_buffer_dequeue(q);
if (rb)
scmi_raw_buffer_put(q, rb);
} while (rb);
}
static struct scmi_xfer_raw_waiter *
scmi_xfer_raw_waiter_get(struct scmi_raw_mode_info *raw, struct scmi_xfer *xfer,
struct scmi_chan_info *cinfo, bool async)
{
struct scmi_xfer_raw_waiter *rw = NULL;
mutex_lock(&raw->free_mtx);
if (!list_empty(&raw->free_waiters)) {
rw = list_first_entry(&raw->free_waiters,
struct scmi_xfer_raw_waiter, node);
list_del_init(&rw->node);
if (async) {
reinit_completion(&rw->async_response);
xfer->async_done = &rw->async_response;
}
rw->cinfo = cinfo;
rw->xfer = xfer;
}
mutex_unlock(&raw->free_mtx);
return rw;
}
static void scmi_xfer_raw_waiter_put(struct scmi_raw_mode_info *raw,
struct scmi_xfer_raw_waiter *rw)
{
if (rw->xfer) {
rw->xfer->async_done = NULL;
rw->xfer = NULL;
}
mutex_lock(&raw->free_mtx);
list_add_tail(&rw->node, &raw->free_waiters);
mutex_unlock(&raw->free_mtx);
}
static void scmi_xfer_raw_waiter_enqueue(struct scmi_raw_mode_info *raw,
struct scmi_xfer_raw_waiter *rw)
{
/* A timestamp for the deferred worker to know how much this has aged */
rw->start_jiffies = jiffies;
trace_scmi_xfer_response_wait(rw->xfer->transfer_id, rw->xfer->hdr.id,
rw->xfer->hdr.protocol_id,
rw->xfer->hdr.seq,
raw->desc->max_rx_timeout_ms,
rw->xfer->hdr.poll_completion);
mutex_lock(&raw->active_mtx);
list_add_tail(&rw->node, &raw->active_waiters);
mutex_unlock(&raw->active_mtx);
/* kick waiter work */
queue_work(raw->wait_wq, &raw->waiters_work);
}
static struct scmi_xfer_raw_waiter *
scmi_xfer_raw_waiter_dequeue(struct scmi_raw_mode_info *raw)
{
struct scmi_xfer_raw_waiter *rw = NULL;
mutex_lock(&raw->active_mtx);
if (!list_empty(&raw->active_waiters)) {
rw = list_first_entry(&raw->active_waiters,
struct scmi_xfer_raw_waiter, node);
list_del_init(&rw->node);
}
mutex_unlock(&raw->active_mtx);
return rw;
}
/**
* scmi_xfer_raw_worker - Work function to wait for Raw xfers completions
*
* @work: A reference to the work.
*
* In SCMI Raw mode, once a user-provided injected SCMI message is sent, we
* cannot wait to receive its response (if any) in the context of the injection
* routines so as not to leave the userspace write syscall, which delivered the
* SCMI message to send, pending till eventually a reply is received.
* Userspace should and will poll/wait instead on the read syscalls which will
* be in charge of reading a received reply (if any).
*
 * Even though reply messages are collected and reported into the SCMI Raw
 * layer on the RX path, we nonetheless have to properly wait for their
 * completion as usual (and for async_completion too if needed) in order to
 * properly release the xfer structure at the end: to do this out of the
 * context of the write/send, these waiting jobs are delegated to this
 * deferred worker.
*
* Any sent xfer, to be waited for, is timestamped and queued for later
* consumption by this worker: queue aging is accounted for while choosing a
* timeout for the completion, BUT we do not really care here if we end up
* accidentally waiting for a bit too long.
*/
static void scmi_xfer_raw_worker(struct work_struct *work)
{
struct scmi_raw_mode_info *raw;
struct device *dev;
unsigned long max_tmo;
raw = container_of(work, struct scmi_raw_mode_info, waiters_work);
dev = raw->handle->dev;
max_tmo = msecs_to_jiffies(raw->desc->max_rx_timeout_ms);
do {
int ret = 0;
unsigned int timeout_ms;
unsigned long aging;
struct scmi_xfer *xfer;
struct scmi_xfer_raw_waiter *rw;
struct scmi_chan_info *cinfo;
rw = scmi_xfer_raw_waiter_dequeue(raw);
if (!rw)
return;
cinfo = rw->cinfo;
xfer = rw->xfer;
/*
* Waiters are queued by wait-deadline at the end, so some of
		 * them could have already expired when processed, BUT we
* have to check the completion status anyway just in case a
* virtually expired (aged) transaction was indeed completed
* fine and we'll have to wait for the asynchronous part (if
* any): for this reason a 1 ms timeout is used for already
* expired/aged xfers.
*/
aging = jiffies - rw->start_jiffies;
timeout_ms = max_tmo > aging ?
jiffies_to_msecs(max_tmo - aging) : 1;
ret = scmi_xfer_raw_wait_for_message_response(cinfo, xfer,
timeout_ms);
if (!ret && xfer->hdr.status)
ret = scmi_to_linux_errno(xfer->hdr.status);
if (raw->desc->ops->mark_txdone)
raw->desc->ops->mark_txdone(rw->cinfo, ret, xfer);
trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq, ret);
/* Wait also for an async delayed response if needed */
if (!ret && xfer->async_done) {
unsigned long tmo = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
if (!wait_for_completion_timeout(xfer->async_done, tmo))
dev_err(dev,
"timed out in RAW delayed resp - HDR:%08X\n",
pack_scmi_header(&xfer->hdr));
}
/* Release waiter and xfer */
scmi_xfer_raw_put(raw->handle, xfer);
scmi_xfer_raw_waiter_put(raw, rw);
} while (1);
}
static void scmi_xfer_raw_reset(struct scmi_raw_mode_info *raw)
{
int i;
dev_info(raw->handle->dev, "Resetting SCMI Raw stack.\n");
for (i = 0; i < SCMI_RAW_MAX_QUEUE; i++)
scmi_raw_buffer_queue_flush(raw->q[i]);
}
/**
 * scmi_xfer_raw_get_init - A helper to build a valid xfer from the provided
* bare SCMI message.
*
* @raw: A reference to the Raw instance.
* @buf: A buffer containing the whole SCMI message to send (including the
 *       header) in little-endian binary format.
* @len: Length of the message in @buf.
* @p: A pointer to return the initialized Raw xfer.
*
* After an xfer is picked from the TX pool and filled in with the message
* content, the xfer is registered as pending with the core in the usual way
* using the original sequence number provided by the user with the message.
*
* Note that, in case the testing user application is NOT using distinct
 * sequence-numbers between successive SCMI messages, such registration could
 * fail temporarily if the previous message, using the same sequence number,
 * had still not been released; in such a case we just wait and retry.
*
* Return: 0 on Success
*/
static int scmi_xfer_raw_get_init(struct scmi_raw_mode_info *raw, void *buf,
size_t len, struct scmi_xfer **p)
{
u32 msg_hdr;
size_t tx_size;
struct scmi_xfer *xfer;
int ret, retry = SCMI_XFER_RAW_MAX_RETRIES;
struct device *dev = raw->handle->dev;
if (!buf || len < sizeof(u32))
return -EINVAL;
tx_size = len - sizeof(u32);
/* Ensure we have sane transfer sizes */
if (tx_size > raw->desc->max_msg_size)
return -ERANGE;
xfer = scmi_xfer_raw_get(raw->handle);
if (IS_ERR(xfer)) {
dev_warn(dev, "RAW - Cannot get a free RAW xfer !\n");
return PTR_ERR(xfer);
}
/* Build xfer from the provided SCMI bare LE message */
msg_hdr = le32_to_cpu(*((__le32 *)buf));
unpack_scmi_header(msg_hdr, &xfer->hdr);
xfer->hdr.seq = (u16)MSG_XTRACT_TOKEN(msg_hdr);
/* Polling not supported */
xfer->hdr.poll_completion = false;
xfer->hdr.status = SCMI_SUCCESS;
xfer->tx.len = tx_size;
xfer->rx.len = raw->desc->max_msg_size;
/* Clear the whole TX buffer */
memset(xfer->tx.buf, 0x00, raw->desc->max_msg_size);
if (xfer->tx.len)
memcpy(xfer->tx.buf, (u8 *)buf + sizeof(msg_hdr), xfer->tx.len);
*p = xfer;
/*
* In flight registration can temporarily fail in case of Raw messages
* if the user injects messages without using monotonically increasing
* sequence numbers since, in Raw mode, the xfer (and the token) is
* finally released later by a deferred worker. Just retry for a while.
*/
do {
ret = scmi_xfer_raw_inflight_register(raw->handle, xfer);
if (ret) {
dev_dbg(dev,
"...retrying[%d] inflight registration\n",
retry);
msleep(raw->desc->max_rx_timeout_ms /
SCMI_XFER_RAW_MAX_RETRIES);
}
} while (ret && --retry);
if (ret) {
dev_warn(dev,
"RAW - Could NOT register xfer %d in-flight HDR:0x%08X\n",
xfer->hdr.seq, msg_hdr);
scmi_xfer_raw_put(raw->handle, xfer);
}
return ret;
}
/**
 * scmi_do_xfer_raw_start - A helper to send a valid raw xfer
*
* @raw: A reference to the Raw instance.
* @xfer: The xfer to send
 * @chan_id: The channel ID to use; if zero, the channel is automatically
* selected based on the protocol used.
* @async: A flag stating if an asynchronous command is required.
*
 * This function sends a previously built raw xfer using an appropriate channel
* and queues the related waiting work.
*
* Note that we need to know explicitly if the required command is meant to be
 * asynchronous in kind since we have to properly set up the waiter.
 * (Deducing this from the payload is weak and does not scale given there is
 * NOT a common header-flag stating if the command is asynchronous or not.)
*
* Return: 0 on Success
*/
static int scmi_do_xfer_raw_start(struct scmi_raw_mode_info *raw,
struct scmi_xfer *xfer, u8 chan_id,
bool async)
{
int ret;
struct scmi_chan_info *cinfo;
struct scmi_xfer_raw_waiter *rw;
struct device *dev = raw->handle->dev;
if (!chan_id)
chan_id = xfer->hdr.protocol_id;
else
xfer->flags |= SCMI_XFER_FLAG_CHAN_SET;
cinfo = scmi_xfer_raw_channel_get(raw->handle, chan_id);
if (IS_ERR(cinfo))
return PTR_ERR(cinfo);
rw = scmi_xfer_raw_waiter_get(raw, xfer, cinfo, async);
if (!rw) {
dev_warn(dev, "RAW - Cannot get a free waiter !\n");
return -ENOMEM;
}
/* True ONLY if also supported by transport. */
if (is_polling_enabled(cinfo, raw->desc))
xfer->hdr.poll_completion = true;
reinit_completion(&xfer->done);
/* Make sure xfer state update is visible before sending */
smp_store_mb(xfer->state, SCMI_XFER_SENT_OK);
trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq,
xfer->hdr.poll_completion);
ret = raw->desc->ops->send_message(rw->cinfo, xfer);
if (ret) {
dev_err(dev, "Failed to send RAW message %d\n", ret);
scmi_xfer_raw_waiter_put(raw, rw);
return ret;
}
trace_scmi_msg_dump(raw->id, cinfo->id, xfer->hdr.protocol_id,
xfer->hdr.id, "cmnd", xfer->hdr.seq,
xfer->hdr.status,
xfer->tx.buf, xfer->tx.len);
scmi_xfer_raw_waiter_enqueue(raw, rw);
return ret;
}
/**
 * scmi_raw_message_send - A helper to build and send an SCMI command using
* the provided SCMI bare message buffer
*
* @raw: A reference to the Raw instance.
* @buf: A buffer containing the whole SCMI message to send (including the
* header) in little-endian binary format.
* @len: Length of the message in @buf.
* @chan_id: The channel ID to use.
* @async: A flag stating if an asynchronous command is required.
*
* Return: 0 on Success
*/
static int scmi_raw_message_send(struct scmi_raw_mode_info *raw,
void *buf, size_t len, u8 chan_id, bool async)
{
int ret;
struct scmi_xfer *xfer;
ret = scmi_xfer_raw_get_init(raw, buf, len, &xfer);
if (ret)
return ret;
ret = scmi_do_xfer_raw_start(raw, xfer, chan_id, async);
if (ret)
scmi_xfer_raw_put(raw->handle, xfer);
return ret;
}
static struct scmi_raw_buffer *
scmi_raw_message_dequeue(struct scmi_raw_queue *q, bool o_nonblock)
{
unsigned long flags;
struct scmi_raw_buffer *rb;
spin_lock_irqsave(&q->msg_q_lock, flags);
while (list_empty(&q->msg_q)) {
spin_unlock_irqrestore(&q->msg_q_lock, flags);
if (o_nonblock)
return ERR_PTR(-EAGAIN);
if (wait_event_interruptible(q->wq, !list_empty(&q->msg_q)))
return ERR_PTR(-ERESTARTSYS);
spin_lock_irqsave(&q->msg_q_lock, flags);
}
rb = scmi_raw_buffer_dequeue_unlocked(q);
spin_unlock_irqrestore(&q->msg_q_lock, flags);
return rb;
}
/**
 * scmi_raw_message_receive - A helper to dequeue and report the next
* available enqueued raw message payload that has been collected.
*
* @raw: A reference to the Raw instance.
* @buf: A buffer to get hold of the whole SCMI message received and represented
* in little-endian binary format.
* @len: Length of @buf.
* @size: The effective size of the message copied into @buf
* @idx: The index of the queue to pick the next queued message from.
* @chan_id: The channel ID to use.
* @o_nonblock: A flag to request a non-blocking message dequeue.
*
* Return: 0 on Success
*/
static int scmi_raw_message_receive(struct scmi_raw_mode_info *raw,
void *buf, size_t len, size_t *size,
unsigned int idx, unsigned int chan_id,
bool o_nonblock)
{
int ret = 0;
struct scmi_raw_buffer *rb;
struct scmi_raw_queue *q;
q = scmi_raw_queue_select(raw, idx, chan_id);
if (!q)
return -ENODEV;
rb = scmi_raw_message_dequeue(q, o_nonblock);
if (IS_ERR(rb)) {
dev_dbg(raw->handle->dev, "RAW - No message available!\n");
return PTR_ERR(rb);
}
if (rb->msg.len <= len) {
memcpy(buf, rb->msg.buf, rb->msg.len);
*size = rb->msg.len;
} else {
ret = -ENOSPC;
}
scmi_raw_buffer_put(q, rb);
return ret;
}
/* SCMI Raw debugfs helpers */
static ssize_t scmi_dbg_raw_mode_common_read(struct file *filp,
char __user *buf,
size_t count, loff_t *ppos,
unsigned int idx)
{
ssize_t cnt;
struct scmi_dbg_raw_data *rd = filp->private_data;
if (!rd->rx_size) {
int ret;
ret = scmi_raw_message_receive(rd->raw, rd->rx.buf, rd->rx.len,
&rd->rx_size, idx, rd->chan_id,
filp->f_flags & O_NONBLOCK);
if (ret) {
rd->rx_size = 0;
return ret;
}
/* Reset any previous filepos change, including writes */
*ppos = 0;
} else if (*ppos == rd->rx_size) {
		/* Return EOF once the whole message has been read out */
rd->rx_size = 0;
return 0;
}
cnt = simple_read_from_buffer(buf, count, ppos,
rd->rx.buf, rd->rx_size);
return cnt;
}
static ssize_t scmi_dbg_raw_mode_common_write(struct file *filp,
const char __user *buf,
size_t count, loff_t *ppos,
bool async)
{
int ret;
struct scmi_dbg_raw_data *rd = filp->private_data;
if (count > rd->tx.len - rd->tx_size)
return -ENOSPC;
/* On first write attempt @count carries the total full message size. */
if (!rd->tx_size)
rd->tx_req_size = count;
/*
	 * Gather a full message, possibly across multiple interrupted writes,
* before sending it with a single RAW xfer.
*/
if (rd->tx_size < rd->tx_req_size) {
ssize_t cnt;
cnt = simple_write_to_buffer(rd->tx.buf, rd->tx.len, ppos,
buf, count);
if (cnt < 0)
return cnt;
rd->tx_size += cnt;
if (cnt < count)
return cnt;
}
ret = scmi_raw_message_send(rd->raw, rd->tx.buf, rd->tx_size,
rd->chan_id, async);
/* Reset ppos for next message ... */
rd->tx_size = 0;
*ppos = 0;
return ret ?: count;
}
static __poll_t scmi_test_dbg_raw_common_poll(struct file *filp,
struct poll_table_struct *wait,
unsigned int idx)
{
unsigned long flags;
struct scmi_dbg_raw_data *rd = filp->private_data;
struct scmi_raw_queue *q;
__poll_t mask = 0;
q = scmi_raw_queue_select(rd->raw, idx, rd->chan_id);
if (!q)
return mask;
poll_wait(filp, &q->wq, wait);
spin_lock_irqsave(&q->msg_q_lock, flags);
if (!list_empty(&q->msg_q))
mask = EPOLLIN | EPOLLRDNORM;
spin_unlock_irqrestore(&q->msg_q_lock, flags);
return mask;
}
static ssize_t scmi_dbg_raw_mode_message_read(struct file *filp,
char __user *buf,
size_t count, loff_t *ppos)
{
return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos,
SCMI_RAW_REPLY_QUEUE);
}
static ssize_t scmi_dbg_raw_mode_message_write(struct file *filp,
const char __user *buf,
size_t count, loff_t *ppos)
{
return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, false);
}
static __poll_t scmi_dbg_raw_mode_message_poll(struct file *filp,
struct poll_table_struct *wait)
{
return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_REPLY_QUEUE);
}
static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp)
{
u8 id;
struct scmi_raw_mode_info *raw;
struct scmi_dbg_raw_data *rd;
const char *id_str = filp->f_path.dentry->d_parent->d_name.name;
if (!inode->i_private)
return -ENODEV;
raw = inode->i_private;
rd = kzalloc(sizeof(*rd), GFP_KERNEL);
if (!rd)
return -ENOMEM;
rd->rx.len = raw->desc->max_msg_size + sizeof(u32);
rd->rx.buf = kzalloc(rd->rx.len, GFP_KERNEL);
if (!rd->rx.buf) {
kfree(rd);
return -ENOMEM;
}
rd->tx.len = raw->desc->max_msg_size + sizeof(u32);
rd->tx.buf = kzalloc(rd->tx.len, GFP_KERNEL);
if (!rd->tx.buf) {
kfree(rd->rx.buf);
kfree(rd);
return -ENOMEM;
}
/* Grab channel ID from debugfs entry naming if any */
if (!kstrtou8(id_str, 16, &id))
rd->chan_id = id;
rd->raw = raw;
filp->private_data = rd;
return 0;
}
static int scmi_dbg_raw_mode_release(struct inode *inode, struct file *filp)
{
struct scmi_dbg_raw_data *rd = filp->private_data;
kfree(rd->rx.buf);
kfree(rd->tx.buf);
kfree(rd);
return 0;
}
static ssize_t scmi_dbg_raw_mode_reset_write(struct file *filp,
const char __user *buf,
size_t count, loff_t *ppos)
{
struct scmi_dbg_raw_data *rd = filp->private_data;
scmi_xfer_raw_reset(rd->raw);
return count;
}
static const struct file_operations scmi_dbg_raw_mode_reset_fops = {
.open = scmi_dbg_raw_mode_open,
.release = scmi_dbg_raw_mode_release,
.write = scmi_dbg_raw_mode_reset_write,
.owner = THIS_MODULE,
};
static const struct file_operations scmi_dbg_raw_mode_message_fops = {
.open = scmi_dbg_raw_mode_open,
.release = scmi_dbg_raw_mode_release,
.read = scmi_dbg_raw_mode_message_read,
.write = scmi_dbg_raw_mode_message_write,
.poll = scmi_dbg_raw_mode_message_poll,
.owner = THIS_MODULE,
};
static ssize_t scmi_dbg_raw_mode_message_async_write(struct file *filp,
const char __user *buf,
size_t count, loff_t *ppos)
{
return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, true);
}
static const struct file_operations scmi_dbg_raw_mode_message_async_fops = {
.open = scmi_dbg_raw_mode_open,
.release = scmi_dbg_raw_mode_release,
.read = scmi_dbg_raw_mode_message_read,
.write = scmi_dbg_raw_mode_message_async_write,
.poll = scmi_dbg_raw_mode_message_poll,
.owner = THIS_MODULE,
};
static ssize_t scmi_test_dbg_raw_mode_notif_read(struct file *filp,
char __user *buf,
size_t count, loff_t *ppos)
{
return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos,
SCMI_RAW_NOTIF_QUEUE);
}
static __poll_t
scmi_test_dbg_raw_mode_notif_poll(struct file *filp,
struct poll_table_struct *wait)
{
return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_NOTIF_QUEUE);
}
static const struct file_operations scmi_dbg_raw_mode_notification_fops = {
.open = scmi_dbg_raw_mode_open,
.release = scmi_dbg_raw_mode_release,
.read = scmi_test_dbg_raw_mode_notif_read,
.poll = scmi_test_dbg_raw_mode_notif_poll,
.owner = THIS_MODULE,
};
static ssize_t scmi_test_dbg_raw_mode_errors_read(struct file *filp,
char __user *buf,
size_t count, loff_t *ppos)
{
return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos,
SCMI_RAW_ERRS_QUEUE);
}
static __poll_t
scmi_test_dbg_raw_mode_errors_poll(struct file *filp,
struct poll_table_struct *wait)
{
return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_ERRS_QUEUE);
}
static const struct file_operations scmi_dbg_raw_mode_errors_fops = {
.open = scmi_dbg_raw_mode_open,
.release = scmi_dbg_raw_mode_release,
.read = scmi_test_dbg_raw_mode_errors_read,
.poll = scmi_test_dbg_raw_mode_errors_poll,
.owner = THIS_MODULE,
};
static struct scmi_raw_queue *
scmi_raw_queue_init(struct scmi_raw_mode_info *raw)
{
int i;
struct scmi_raw_buffer *rb;
struct device *dev = raw->handle->dev;
struct scmi_raw_queue *q;
q = devm_kzalloc(dev, sizeof(*q), GFP_KERNEL);
if (!q)
return ERR_PTR(-ENOMEM);
rb = devm_kcalloc(dev, raw->tx_max_msg, sizeof(*rb), GFP_KERNEL);
if (!rb)
return ERR_PTR(-ENOMEM);
spin_lock_init(&q->free_bufs_lock);
INIT_LIST_HEAD(&q->free_bufs);
for (i = 0; i < raw->tx_max_msg; i++, rb++) {
rb->max_len = raw->desc->max_msg_size + sizeof(u32);
rb->msg.buf = devm_kzalloc(dev, rb->max_len, GFP_KERNEL);
if (!rb->msg.buf)
return ERR_PTR(-ENOMEM);
scmi_raw_buffer_put(q, rb);
}
spin_lock_init(&q->msg_q_lock);
INIT_LIST_HEAD(&q->msg_q);
init_waitqueue_head(&q->wq);
return q;
}
static int scmi_xfer_raw_worker_init(struct scmi_raw_mode_info *raw)
{
int i;
struct scmi_xfer_raw_waiter *rw;
struct device *dev = raw->handle->dev;
rw = devm_kcalloc(dev, raw->tx_max_msg, sizeof(*rw), GFP_KERNEL);
if (!rw)
return -ENOMEM;
raw->wait_wq = alloc_workqueue("scmi-raw-wait-wq-%d",
WQ_UNBOUND | WQ_FREEZABLE |
WQ_HIGHPRI | WQ_SYSFS, 0, raw->id);
if (!raw->wait_wq)
return -ENOMEM;
mutex_init(&raw->free_mtx);
INIT_LIST_HEAD(&raw->free_waiters);
mutex_init(&raw->active_mtx);
INIT_LIST_HEAD(&raw->active_waiters);
for (i = 0; i < raw->tx_max_msg; i++, rw++) {
init_completion(&rw->async_response);
scmi_xfer_raw_waiter_put(raw, rw);
}
INIT_WORK(&raw->waiters_work, scmi_xfer_raw_worker);
return 0;
}
static int scmi_raw_mode_setup(struct scmi_raw_mode_info *raw,
u8 *channels, int num_chans)
{
int ret, idx;
void *gid;
struct device *dev = raw->handle->dev;
gid = devres_open_group(dev, NULL, GFP_KERNEL);
if (!gid)
return -ENOMEM;
for (idx = 0; idx < SCMI_RAW_MAX_QUEUE; idx++) {
raw->q[idx] = scmi_raw_queue_init(raw);
if (IS_ERR(raw->q[idx])) {
ret = PTR_ERR(raw->q[idx]);
goto err;
}
}
xa_init(&raw->chans_q);
if (num_chans > 1) {
int i;
for (i = 0; i < num_chans; i++) {
void *xret;
struct scmi_raw_queue *q;
q = scmi_raw_queue_init(raw);
if (IS_ERR(q)) {
ret = PTR_ERR(q);
goto err_xa;
}
xret = xa_store(&raw->chans_q, channels[i], q,
GFP_KERNEL);
if (xa_err(xret)) {
dev_err(dev,
"Fail to allocate Raw queue 0x%02X\n",
channels[i]);
ret = xa_err(xret);
goto err_xa;
}
}
}
ret = scmi_xfer_raw_worker_init(raw);
if (ret)
goto err_xa;
devres_close_group(dev, gid);
raw->gid = gid;
return 0;
err_xa:
xa_destroy(&raw->chans_q);
err:
devres_release_group(dev, gid);
return ret;
}
/**
* scmi_raw_mode_init - Function to initialize the SCMI Raw stack
*
* @handle: Pointer to SCMI entity handle
* @top_dentry: A reference to the top Raw debugfs dentry
* @instance_id: The ID of the underlying SCMI platform instance represented by
* this Raw instance
* @channels: The list of the existing channels
* @num_chans: The number of entries in @channels
* @desc: Reference to the transport operations
* @tx_max_msg: Max number of in-flight messages allowed by the transport
*
 * This function prepares the SCMI Raw stack and creates the debugfs API.
*
* Return: An opaque handle to the Raw instance on Success, an ERR_PTR otherwise
*/
void *scmi_raw_mode_init(const struct scmi_handle *handle,
struct dentry *top_dentry, int instance_id,
u8 *channels, int num_chans,
const struct scmi_desc *desc, int tx_max_msg)
{
int ret;
struct scmi_raw_mode_info *raw;
struct device *dev;
if (!handle || !desc)
return ERR_PTR(-EINVAL);
dev = handle->dev;
raw = devm_kzalloc(dev, sizeof(*raw), GFP_KERNEL);
if (!raw)
return ERR_PTR(-ENOMEM);
raw->handle = handle;
raw->desc = desc;
raw->tx_max_msg = tx_max_msg;
raw->id = instance_id;
ret = scmi_raw_mode_setup(raw, channels, num_chans);
if (ret) {
devm_kfree(dev, raw);
return ERR_PTR(ret);
}
raw->dentry = debugfs_create_dir("raw", top_dentry);
debugfs_create_file("reset", 0200, raw->dentry, raw,
&scmi_dbg_raw_mode_reset_fops);
debugfs_create_file("message", 0600, raw->dentry, raw,
&scmi_dbg_raw_mode_message_fops);
debugfs_create_file("message_async", 0600, raw->dentry, raw,
&scmi_dbg_raw_mode_message_async_fops);
debugfs_create_file("notification", 0400, raw->dentry, raw,
&scmi_dbg_raw_mode_notification_fops);
debugfs_create_file("errors", 0400, raw->dentry, raw,
&scmi_dbg_raw_mode_errors_fops);
/*
* Expose per-channel entries if multiple channels available.
* Just ignore errors while setting up these interfaces since we
	 * already have working core Raw support anyway.
*/
if (num_chans > 1) {
int i;
struct dentry *top_chans;
top_chans = debugfs_create_dir("channels", raw->dentry);
for (i = 0; i < num_chans; i++) {
char cdir[8];
struct dentry *chd;
snprintf(cdir, 8, "0x%02X", channels[i]);
chd = debugfs_create_dir(cdir, top_chans);
debugfs_create_file("message", 0600, chd, raw,
&scmi_dbg_raw_mode_message_fops);
debugfs_create_file("message_async", 0600, chd, raw,
&scmi_dbg_raw_mode_message_async_fops);
}
}
dev_info(dev, "SCMI RAW Mode initialized for instance %d\n", raw->id);
return raw;
}
/**
* scmi_raw_mode_cleanup - Function to cleanup the SCMI Raw stack
*
* @r: An opaque handle to an initialized SCMI Raw instance
*/
void scmi_raw_mode_cleanup(void *r)
{
struct scmi_raw_mode_info *raw = r;
if (!raw)
return;
debugfs_remove_recursive(raw->dentry);
cancel_work_sync(&raw->waiters_work);
destroy_workqueue(raw->wait_wq);
xa_destroy(&raw->chans_q);
}
static int scmi_xfer_raw_collect(void *msg, size_t *msg_len,
struct scmi_xfer *xfer)
{
__le32 *m;
size_t msg_size;
if (!xfer || !msg || !msg_len)
return -EINVAL;
/* Account for hdr ...*/
msg_size = xfer->rx.len + sizeof(u32);
/* ... and status if needed */
if (xfer->hdr.type != MSG_TYPE_NOTIFICATION)
msg_size += sizeof(u32);
if (msg_size > *msg_len)
return -ENOSPC;
m = msg;
*m = cpu_to_le32(pack_scmi_header(&xfer->hdr));
if (xfer->hdr.type != MSG_TYPE_NOTIFICATION)
*++m = cpu_to_le32(xfer->hdr.status);
memcpy(++m, xfer->rx.buf, xfer->rx.len);
*msg_len = msg_size;
return 0;
}
/**
 * scmi_raw_message_report - Helper to report back valid responses/notifications
* to raw message requests.
*
* @r: An opaque reference to the raw instance configuration
* @xfer: The xfer containing the message to be reported
* @idx: The index of the queue.
* @chan_id: The channel ID to use.
*
* If Raw mode is enabled, this is called from the SCMI core on the regular RX
* path to save and enqueue the response/notification payload carried by this
* xfer into a dedicated scmi_raw_buffer for later consumption by the user.
*
* This way the caller can free the related xfer immediately afterwards and the
* user can read back the raw message payload at its own pace (if ever) without
* holding an xfer for too long.
*/
void scmi_raw_message_report(void *r, struct scmi_xfer *xfer,
unsigned int idx, unsigned int chan_id)
{
int ret;
unsigned long flags;
struct scmi_raw_buffer *rb;
struct device *dev;
struct scmi_raw_queue *q;
struct scmi_raw_mode_info *raw = r;
if (!raw || (idx == SCMI_RAW_REPLY_QUEUE && !SCMI_XFER_IS_RAW(xfer)))
return;
dev = raw->handle->dev;
q = scmi_raw_queue_select(raw, idx,
SCMI_XFER_IS_CHAN_SET(xfer) ? chan_id : 0);
/*
* Grab the msg_q_lock upfront to avoid a possible race between
* realizing the free list was empty and effectively picking the next
* buffer to use from the oldest one enqueued and still unread on this
* msg_q.
*
	 * Note that nowhere else are these locks taken together, so there is
	 * no risk of deadlocks due to inversion.
*/
spin_lock_irqsave(&q->msg_q_lock, flags);
rb = scmi_raw_buffer_get(q);
if (!rb) {
/*
* Immediate and delayed replies to previously injected Raw
* commands MUST be read back from userspace to free the buffers:
		 * if this is not happening, something is seriously broken and
		 * must be fixed at the application level: complain loudly.
*/
if (idx == SCMI_RAW_REPLY_QUEUE) {
spin_unlock_irqrestore(&q->msg_q_lock, flags);
dev_warn(dev,
"RAW[%d] - Buffers exhausted. Dropping report.\n",
idx);
return;
}
/*
		 * Notification and error queues are instead handled in a
* circular manner: unread old buffers are just overwritten by
* newer ones.
*
* The main reason for this is that notifications originated
* by Raw requests cannot be distinguished from normal ones, so
		 * the Raw buffer queues risk being flooded and depleted by
		 * notifications if they are mistakenly left enabled or when in
		 * coexistence mode.
*/
rb = scmi_raw_buffer_dequeue_unlocked(q);
if (WARN_ON(!rb)) {
spin_unlock_irqrestore(&q->msg_q_lock, flags);
return;
}
/* Reset to full buffer length */
rb->msg.len = rb->max_len;
dev_warn_once(dev,
"RAW[%d] - Buffers exhausted. Re-using oldest.\n",
idx);
}
spin_unlock_irqrestore(&q->msg_q_lock, flags);
ret = scmi_xfer_raw_collect(rb->msg.buf, &rb->msg.len, xfer);
if (ret) {
dev_warn(dev, "RAW - Cannot collect xfer into buffer !\n");
scmi_raw_buffer_put(q, rb);
return;
}
scmi_raw_buffer_enqueue(q, rb);
}
static void scmi_xfer_raw_fill(struct scmi_raw_mode_info *raw,
struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer, u32 msg_hdr)
{
/* Unpack received HDR as it is */
unpack_scmi_header(msg_hdr, &xfer->hdr);
xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr);
memset(xfer->rx.buf, 0x00, xfer->rx.len);
raw->desc->ops->fetch_response(cinfo, xfer);
}
/**
* scmi_raw_error_report - Helper to report back timed-out or generally
* unexpected replies.
*
* @r: An opaque reference to the raw instance configuration
* @cinfo: A reference to the channel to use to retrieve the broken xfer
* @msg_hdr: The SCMI message header of the message to fetch and report
* @priv: Any private data related to the xfer.
*
* If Raw mode is enabled, this is called from the SCMI core on the RX path in
* case of errors to save and enqueue the bad message payload carried by the
* message that has just been received.
*
* Note that we have to manually fetch any available payload into a temporary
* xfer to be able to save and enqueue the message, since the regular RX error
 * path which called this would not have fetched the message payload, having
 * classified it as an error.
*/
void scmi_raw_error_report(void *r, struct scmi_chan_info *cinfo,
u32 msg_hdr, void *priv)
{
struct scmi_xfer xfer;
struct scmi_raw_mode_info *raw = r;
if (!raw)
return;
xfer.rx.len = raw->desc->max_msg_size;
xfer.rx.buf = kzalloc(xfer.rx.len, GFP_ATOMIC);
if (!xfer.rx.buf) {
dev_info(raw->handle->dev,
"Cannot report Raw error for HDR:0x%X - ENOMEM\n",
msg_hdr);
return;
}
/* Any transport-provided priv must be passed back down to transport */
if (priv)
/* Ensure priv is visible */
smp_store_mb(xfer.priv, priv);
scmi_xfer_raw_fill(raw, cinfo, &xfer, msg_hdr);
scmi_raw_message_report(raw, &xfer, SCMI_RAW_ERRS_QUEUE, 0);
kfree(xfer.rx.buf);
}
| linux-master | drivers/firmware/arm_scmi/raw_mode.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Virtio Transport driver for Arm System Control and Management Interface
* (SCMI).
*
* Copyright (C) 2020-2022 OpenSynergy.
* Copyright (C) 2021-2022 ARM Ltd.
*/
/**
* DOC: Theory of Operation
*
* The scmi-virtio transport implements a driver for the virtio SCMI device.
*
* There is one Tx channel (virtio cmdq, A2P channel) and at most one Rx
* channel (virtio eventq, P2A channel). Each channel is implemented through a
* virtqueue. Access to each virtqueue is protected by spinlocks.
*/
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <uapi/linux/virtio_ids.h>
#include <uapi/linux/virtio_scmi.h>
#include "common.h"
#define VIRTIO_MAX_RX_TIMEOUT_MS 60000
#define VIRTIO_SCMI_MAX_MSG_SIZE 128 /* Value may be increased. */
#define VIRTIO_SCMI_MAX_PDU_SIZE \
(VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD)
#define DESCRIPTORS_PER_TX_MSG 2
/**
* struct scmi_vio_channel - Transport channel information
*
* @vqueue: Associated virtqueue
* @cinfo: SCMI Tx or Rx channel
* @free_lock: Protects access to the @free_list.
* @free_list: List of unused scmi_vio_msg, maintained for Tx channels only
* @deferred_tx_work: Worker for TX deferred replies processing
* @deferred_tx_wq: Workqueue for TX deferred replies
* @pending_lock: Protects access to the @pending_cmds_list.
 * @pending_cmds_list: List of pre-fetched commands queued for later processing
* @is_rx: Whether channel is an Rx channel
* @max_msg: Maximum number of pending messages for this channel.
* @lock: Protects access to all members except users, free_list and
* pending_cmds_list.
* @shutdown_done: A reference to a completion used when freeing this channel.
* @users: A reference count to currently active users of this channel.
*/
struct scmi_vio_channel {
struct virtqueue *vqueue;
struct scmi_chan_info *cinfo;
/* lock to protect access to the free list. */
spinlock_t free_lock;
struct list_head free_list;
/* lock to protect access to the pending list. */
spinlock_t pending_lock;
struct list_head pending_cmds_list;
struct work_struct deferred_tx_work;
struct workqueue_struct *deferred_tx_wq;
bool is_rx;
unsigned int max_msg;
/*
* Lock to protect access to all members except users, free_list and
* pending_cmds_list
*/
spinlock_t lock;
struct completion *shutdown_done;
refcount_t users;
};
enum poll_states {
VIO_MSG_NOT_POLLED,
VIO_MSG_POLL_TIMEOUT,
VIO_MSG_POLLING,
VIO_MSG_POLL_DONE,
};
/**
* struct scmi_vio_msg - Transport PDU information
*
* @request: SDU used for commands
* @input: SDU used for (delayed) responses and notifications
* @list: List which scmi_vio_msg may be part of
* @rx_len: Input SDU size in bytes, once input has been received
* @poll_idx: Last used index registered for polling purposes if this message
* transaction reply was configured for polling.
* @poll_status: Polling state for this message.
* @poll_lock: A lock to protect @poll_status
 * @users: A reference count to track this message's users and avoid premature
* freeing (and reuse) when polling and IRQ execution paths interleave.
*/
struct scmi_vio_msg {
struct scmi_msg_payld *request;
struct scmi_msg_payld *input;
struct list_head list;
unsigned int rx_len;
unsigned int poll_idx;
enum poll_states poll_status;
/* Lock to protect access to poll_status */
spinlock_t poll_lock;
refcount_t users;
};
/* Only one SCMI VirtIO device can possibly exist */
static struct virtio_device *scmi_vdev;
static void scmi_vio_channel_ready(struct scmi_vio_channel *vioch,
struct scmi_chan_info *cinfo)
{
unsigned long flags;
spin_lock_irqsave(&vioch->lock, flags);
cinfo->transport_info = vioch;
	/* Indirectly mark the channel as no longer available */
vioch->cinfo = cinfo;
spin_unlock_irqrestore(&vioch->lock, flags);
refcount_set(&vioch->users, 1);
}
static inline bool scmi_vio_channel_acquire(struct scmi_vio_channel *vioch)
{
return refcount_inc_not_zero(&vioch->users);
}
static inline void scmi_vio_channel_release(struct scmi_vio_channel *vioch)
{
if (refcount_dec_and_test(&vioch->users)) {
unsigned long flags;
spin_lock_irqsave(&vioch->lock, flags);
if (vioch->shutdown_done) {
vioch->cinfo = NULL;
complete(vioch->shutdown_done);
}
spin_unlock_irqrestore(&vioch->lock, flags);
}
}
static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch)
{
unsigned long flags;
DECLARE_COMPLETION_ONSTACK(vioch_shutdown_done);
/*
* Prepare to wait for the last release if not already released
* or in progress.
*/
spin_lock_irqsave(&vioch->lock, flags);
if (!vioch->cinfo || vioch->shutdown_done) {
spin_unlock_irqrestore(&vioch->lock, flags);
return;
}
vioch->shutdown_done = &vioch_shutdown_done;
if (!vioch->is_rx && vioch->deferred_tx_wq)
/* Cannot be kicked anymore after this...*/
vioch->deferred_tx_wq = NULL;
spin_unlock_irqrestore(&vioch->lock, flags);
scmi_vio_channel_release(vioch);
/* Let any possibly concurrent RX path release the channel */
wait_for_completion(vioch->shutdown_done);
}
/* Assumed to be called with the vio channel already acquired */
static struct scmi_vio_msg *
scmi_virtio_get_free_msg(struct scmi_vio_channel *vioch)
{
unsigned long flags;
struct scmi_vio_msg *msg;
spin_lock_irqsave(&vioch->free_lock, flags);
if (list_empty(&vioch->free_list)) {
spin_unlock_irqrestore(&vioch->free_lock, flags);
return NULL;
}
msg = list_first_entry(&vioch->free_list, typeof(*msg), list);
list_del_init(&msg->list);
spin_unlock_irqrestore(&vioch->free_lock, flags);
/* Still no users, no need to acquire poll_lock */
msg->poll_status = VIO_MSG_NOT_POLLED;
refcount_set(&msg->users, 1);
return msg;
}
static inline bool scmi_vio_msg_acquire(struct scmi_vio_msg *msg)
{
return refcount_inc_not_zero(&msg->users);
}
/* Assumed to be called with the vio channel already acquired */
static inline bool scmi_vio_msg_release(struct scmi_vio_channel *vioch,
struct scmi_vio_msg *msg)
{
bool ret;
ret = refcount_dec_and_test(&msg->users);
if (ret) {
unsigned long flags;
spin_lock_irqsave(&vioch->free_lock, flags);
list_add_tail(&msg->list, &vioch->free_list);
spin_unlock_irqrestore(&vioch->free_lock, flags);
}
return ret;
}
static bool scmi_vio_have_vq_rx(struct virtio_device *vdev)
{
return virtio_has_feature(vdev, VIRTIO_SCMI_F_P2A_CHANNELS);
}
static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
struct scmi_vio_msg *msg)
{
struct scatterlist sg_in;
int rc;
unsigned long flags;
struct device *dev = &vioch->vqueue->vdev->dev;
sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE);
spin_lock_irqsave(&vioch->lock, flags);
rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC);
if (rc)
dev_err(dev, "failed to add to RX virtqueue (%d)\n", rc);
else
virtqueue_kick(vioch->vqueue);
spin_unlock_irqrestore(&vioch->lock, flags);
return rc;
}
/*
 * Assumed to be called with the channel already acquired or not ready at all;
* vioch->lock MUST NOT have been already acquired.
*/
static void scmi_finalize_message(struct scmi_vio_channel *vioch,
struct scmi_vio_msg *msg)
{
if (vioch->is_rx)
scmi_vio_feed_vq_rx(vioch, msg);
else
scmi_vio_msg_release(vioch, msg);
}
static void scmi_vio_complete_cb(struct virtqueue *vqueue)
{
unsigned long flags;
unsigned int length;
struct scmi_vio_channel *vioch;
struct scmi_vio_msg *msg;
bool cb_enabled = true;
if (WARN_ON_ONCE(!vqueue->vdev->priv))
return;
vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index];
for (;;) {
if (!scmi_vio_channel_acquire(vioch))
return;
spin_lock_irqsave(&vioch->lock, flags);
if (cb_enabled) {
virtqueue_disable_cb(vqueue);
cb_enabled = false;
}
msg = virtqueue_get_buf(vqueue, &length);
if (!msg) {
if (virtqueue_enable_cb(vqueue)) {
spin_unlock_irqrestore(&vioch->lock, flags);
scmi_vio_channel_release(vioch);
return;
}
cb_enabled = true;
}
spin_unlock_irqrestore(&vioch->lock, flags);
if (msg) {
msg->rx_len = length;
scmi_rx_callback(vioch->cinfo,
msg_read_header(msg->input), msg);
scmi_finalize_message(vioch, msg);
}
/*
* Release vio channel between loop iterations to allow
* virtio_chan_free() to eventually fully release it when
* shutting down; in such a case, any outstanding message will
* be ignored since this loop will bail out at the next
* iteration.
*/
scmi_vio_channel_release(vioch);
}
}
static void scmi_vio_deferred_tx_worker(struct work_struct *work)
{
unsigned long flags;
struct scmi_vio_channel *vioch;
struct scmi_vio_msg *msg, *tmp;
vioch = container_of(work, struct scmi_vio_channel, deferred_tx_work);
if (!scmi_vio_channel_acquire(vioch))
return;
/*
* Process pre-fetched messages: these could be non-polled messages or
* late timed-out replies to polled messages dequeued by chance while
	 * polling for some other messages: this worker is in charge of processing
	 * the valid non-expired messages and, in any case, of finally freeing all
	 * of them.
*/
spin_lock_irqsave(&vioch->pending_lock, flags);
/* Scan the list of possibly pre-fetched messages during polling. */
list_for_each_entry_safe(msg, tmp, &vioch->pending_cmds_list, list) {
list_del(&msg->list);
/*
* Channel is acquired here (cannot vanish) and this message
* is no more processed elsewhere so no poll_lock needed.
*/
if (msg->poll_status == VIO_MSG_NOT_POLLED)
scmi_rx_callback(vioch->cinfo,
msg_read_header(msg->input), msg);
/* Free the processed message once done */
scmi_vio_msg_release(vioch, msg);
}
spin_unlock_irqrestore(&vioch->pending_lock, flags);
/* Process possibly still pending messages */
scmi_vio_complete_cb(vioch->vqueue);
scmi_vio_channel_release(vioch);
}
static const char *const scmi_vio_vqueue_names[] = { "tx", "rx" };
static vq_callback_t *scmi_vio_complete_callbacks[] = {
scmi_vio_complete_cb,
scmi_vio_complete_cb
};
static unsigned int virtio_get_max_msg(struct scmi_chan_info *base_cinfo)
{
struct scmi_vio_channel *vioch = base_cinfo->transport_info;
return vioch->max_msg;
}
static int virtio_link_supplier(struct device *dev)
{
if (!scmi_vdev) {
dev_notice(dev,
"Deferring probe after not finding a bound scmi-virtio device\n");
return -EPROBE_DEFER;
}
if (!device_link_add(dev, &scmi_vdev->dev,
DL_FLAG_AUTOREMOVE_CONSUMER)) {
dev_err(dev, "Adding link to supplier virtio device failed\n");
return -ECANCELED;
}
return 0;
}
static bool virtio_chan_available(struct device_node *of_node, int idx)
{
struct scmi_vio_channel *channels, *vioch = NULL;
if (WARN_ON_ONCE(!scmi_vdev))
return false;
channels = (struct scmi_vio_channel *)scmi_vdev->priv;
switch (idx) {
case VIRTIO_SCMI_VQ_TX:
vioch = &channels[VIRTIO_SCMI_VQ_TX];
break;
case VIRTIO_SCMI_VQ_RX:
if (scmi_vio_have_vq_rx(scmi_vdev))
vioch = &channels[VIRTIO_SCMI_VQ_RX];
break;
default:
return false;
}
return vioch && !vioch->cinfo;
}
static void scmi_destroy_tx_workqueue(void *deferred_tx_wq)
{
destroy_workqueue(deferred_tx_wq);
}
static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
bool tx)
{
struct scmi_vio_channel *vioch;
int index = tx ? VIRTIO_SCMI_VQ_TX : VIRTIO_SCMI_VQ_RX;
int i;
if (!scmi_vdev)
return -EPROBE_DEFER;
vioch = &((struct scmi_vio_channel *)scmi_vdev->priv)[index];
	/* Set up a deferred worker for polling. */
if (tx && !vioch->deferred_tx_wq) {
int ret;
vioch->deferred_tx_wq =
alloc_workqueue(dev_name(&scmi_vdev->dev),
WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
0);
if (!vioch->deferred_tx_wq)
return -ENOMEM;
ret = devm_add_action_or_reset(dev, scmi_destroy_tx_workqueue,
vioch->deferred_tx_wq);
if (ret)
return ret;
INIT_WORK(&vioch->deferred_tx_work,
scmi_vio_deferred_tx_worker);
}
for (i = 0; i < vioch->max_msg; i++) {
struct scmi_vio_msg *msg;
msg = devm_kzalloc(dev, sizeof(*msg), GFP_KERNEL);
if (!msg)
return -ENOMEM;
if (tx) {
msg->request = devm_kzalloc(dev,
VIRTIO_SCMI_MAX_PDU_SIZE,
GFP_KERNEL);
if (!msg->request)
return -ENOMEM;
spin_lock_init(&msg->poll_lock);
refcount_set(&msg->users, 1);
}
msg->input = devm_kzalloc(dev, VIRTIO_SCMI_MAX_PDU_SIZE,
GFP_KERNEL);
if (!msg->input)
return -ENOMEM;
scmi_finalize_message(vioch, msg);
}
scmi_vio_channel_ready(vioch, cinfo);
return 0;
}
static int virtio_chan_free(int id, void *p, void *data)
{
struct scmi_chan_info *cinfo = p;
struct scmi_vio_channel *vioch = cinfo->transport_info;
/*
* Break device to inhibit further traffic flowing while shutting down
	 * the channels: doing it later while holding vioch->lock creates unsafe
* locking dependency chains as reported by LOCKDEP.
*/
virtio_break_device(vioch->vqueue->vdev);
scmi_vio_channel_cleanup_sync(vioch);
return 0;
}
static int virtio_send_message(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer)
{
struct scmi_vio_channel *vioch = cinfo->transport_info;
struct scatterlist sg_out;
struct scatterlist sg_in;
struct scatterlist *sgs[DESCRIPTORS_PER_TX_MSG] = { &sg_out, &sg_in };
unsigned long flags;
int rc;
struct scmi_vio_msg *msg;
if (!scmi_vio_channel_acquire(vioch))
return -EINVAL;
msg = scmi_virtio_get_free_msg(vioch);
if (!msg) {
scmi_vio_channel_release(vioch);
return -EBUSY;
}
msg_tx_prepare(msg->request, xfer);
sg_init_one(&sg_out, msg->request, msg_command_size(xfer));
sg_init_one(&sg_in, msg->input, msg_response_size(xfer));
spin_lock_irqsave(&vioch->lock, flags);
/*
* If polling was requested for this transaction:
* - retrieve last used index (will be used as polling reference)
* - bind the polled message to the xfer via .priv
* - grab an additional msg refcount for the poll-path
*/
if (xfer->hdr.poll_completion) {
msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue);
/* Still no users, no need to acquire poll_lock */
msg->poll_status = VIO_MSG_POLLING;
scmi_vio_msg_acquire(msg);
/* Ensure initialized msg is visibly bound to xfer */
smp_store_mb(xfer->priv, msg);
}
rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC);
if (rc)
dev_err(vioch->cinfo->dev,
"failed to add to TX virtqueue (%d)\n", rc);
else
virtqueue_kick(vioch->vqueue);
spin_unlock_irqrestore(&vioch->lock, flags);
if (rc) {
/* Ensure order between xfer->priv clear and vq feeding */
smp_store_mb(xfer->priv, NULL);
if (xfer->hdr.poll_completion)
scmi_vio_msg_release(vioch, msg);
scmi_vio_msg_release(vioch, msg);
}
scmi_vio_channel_release(vioch);
return rc;
}
static void virtio_fetch_response(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer)
{
struct scmi_vio_msg *msg = xfer->priv;
if (msg)
msg_fetch_response(msg->input, msg->rx_len, xfer);
}
static void virtio_fetch_notification(struct scmi_chan_info *cinfo,
size_t max_len, struct scmi_xfer *xfer)
{
struct scmi_vio_msg *msg = xfer->priv;
if (msg)
msg_fetch_notification(msg->input, msg->rx_len, max_len, xfer);
}
/**
* virtio_mark_txdone - Mark transmission done
*
* Free only completed polling transfer messages.
*
* Note that in the SCMI VirtIO transport we never explicitly release still
* outstanding but timed-out messages by forcibly re-adding them to the
* free-list inside the TX code path; we instead let IRQ/RX callbacks, or the
* TX deferred worker, eventually clean up such messages once, finally, a late
* reply is received and discarded (if ever).
*
* This approach was deemed preferable since those pending timed-out buffers are
* still effectively owned by the SCMI platform VirtIO device even after timeout
* expiration: forcibly freeing and reusing them before they had been returned
* explicitly by the SCMI platform could lead to subtle bugs due to message
* corruption.
* An SCMI platform VirtIO device which never returns message buffers is
* anyway broken and it will quickly lead to exhaustion of available messages.
*
* For this same reason, here, we take care to free only the polled messages
* that had been somehow replied (only if not by chance already processed on the
* IRQ path - the initial scmi_vio_msg_release() takes care of this) and also
* any timed-out polled message if that indeed appears to have been at least
* dequeued from the virtqueues (VIO_MSG_POLL_DONE): this is needed since such
* messages won't be freed elsewhere. Any other polled message is marked as
* VIO_MSG_POLL_TIMEOUT.
*
* Possible late replies to timed-out polled messages will be eventually freed
* by RX callbacks if delivered on the IRQ path or by the deferred TX worker if
* dequeued on some other polling path.
*
* @cinfo: SCMI channel info
* @ret: Transmission return code
* @xfer: Transfer descriptor
*/
static void virtio_mark_txdone(struct scmi_chan_info *cinfo, int ret,
struct scmi_xfer *xfer)
{
unsigned long flags;
struct scmi_vio_channel *vioch = cinfo->transport_info;
struct scmi_vio_msg *msg = xfer->priv;
if (!msg || !scmi_vio_channel_acquire(vioch))
return;
/* Ensure msg is unbound from xfer anyway at this point */
smp_store_mb(xfer->priv, NULL);
/* Must be a polled xfer and not already freed on the IRQ path */
if (!xfer->hdr.poll_completion || scmi_vio_msg_release(vioch, msg)) {
scmi_vio_channel_release(vioch);
return;
}
spin_lock_irqsave(&msg->poll_lock, flags);
	/* Do not free timed-out polled messages if still in-flight */
if (ret != -ETIMEDOUT || msg->poll_status == VIO_MSG_POLL_DONE)
scmi_vio_msg_release(vioch, msg);
else if (msg->poll_status == VIO_MSG_POLLING)
msg->poll_status = VIO_MSG_POLL_TIMEOUT;
spin_unlock_irqrestore(&msg->poll_lock, flags);
scmi_vio_channel_release(vioch);
}
/**
* virtio_poll_done - Provide polling support for VirtIO transport
*
* @cinfo: SCMI channel info
 * @xfer: Reference to the transfer being polled for.
*
* VirtIO core provides a polling mechanism based only on last used indexes:
* this means that it is possible to poll the virtqueues waiting for something
* new to arrive from the host side, but the only way to check if the freshly
* arrived buffer was indeed what we were waiting for is to compare the newly
* arrived message descriptor with the one we are polling on.
*
* As a consequence we may happen to dequeue something different from the buffer
* we were poll-waiting for: if that is the case, such early fetched buffers are
* then added to the @pending_cmds_list for later processing by a
* dedicated deferred worker.
*
* So, basically, once something new is spotted we proceed to de-queue all the
* freshly received used buffers until we find the one we were polling on, or
* we have 'seemingly' emptied the virtqueue; if some buffers are still pending
* in the vqueue at the end of the polling loop (possible due to inherent races
* in virtqueues handling mechanisms), we similarly kick the deferred worker
* and let it process those, to avoid indefinitely looping in the .poll_done
* busy-waiting helper.
*
* Finally, we delegate to the deferred worker also the final free of any timed
* out reply to a polled message that we should dequeue.
*
* Note that, since we do NOT have per-message suppress notification mechanism,
* the message we are polling for could be alternatively delivered via usual
* IRQs callbacks on another core which happened to have IRQs enabled while we
* are actively polling for it here: in such a case it will be handled as such
* by scmi_rx_callback() and the polling loop in the SCMI Core TX path will be
* transparently terminated anyway.
*
* Return: True once polling has successfully completed.
*/
static bool virtio_poll_done(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer)
{
bool pending, found = false;
unsigned int length, any_prefetched = 0;
unsigned long flags;
struct scmi_vio_msg *next_msg, *msg = xfer->priv;
struct scmi_vio_channel *vioch = cinfo->transport_info;
if (!msg)
return true;
/*
* Already processed by another polling loop on another CPU?
*
* Note that this message is acquired on the poll path so cannot vanish
* while inside this loop iteration even if concurrently processed on
* the IRQ path.
*
* Avoid acquiring poll_lock since poll_status can be changed
* in a relevant manner only later in this same thread of execution:
* any other possible changes made concurrently by other polling loops
* or by a reply delivered on the IRQ path have no meaningful impact on
* this loop iteration; in other words it is harmless to allow this
* possible race, but let us avoid spinlocking with IRQs off in this
* initial part of the polling loop.
*/
if (msg->poll_status == VIO_MSG_POLL_DONE)
return true;
if (!scmi_vio_channel_acquire(vioch))
return true;
/* Has the cmdq index moved at all? */
pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
if (!pending) {
scmi_vio_channel_release(vioch);
return false;
}
spin_lock_irqsave(&vioch->lock, flags);
virtqueue_disable_cb(vioch->vqueue);
/*
* Process all new messages till the polled-for message is found OR
* the vqueue is empty.
*/
while ((next_msg = virtqueue_get_buf(vioch->vqueue, &length))) {
bool next_msg_done = false;
/*
* Mark any dequeued buffer message as VIO_MSG_POLL_DONE so
* that it can be properly freed even on timeout in virtio_mark_txdone().
*/
spin_lock(&next_msg->poll_lock);
if (next_msg->poll_status == VIO_MSG_POLLING) {
next_msg->poll_status = VIO_MSG_POLL_DONE;
next_msg_done = true;
}
spin_unlock(&next_msg->poll_lock);
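/* Record the used buffer length reported back by the virtqueue */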
next_msg->rx_len = length;
/* Is this the message we were polling for? */
if (next_msg == msg) {
found = true;
break;
} else if (next_msg_done) {
/* Skip the rest if this was another polled msg */
continue;
}
/*
* Enqueue for later processing any non-polled message and any
* timed-out polled one that we happen to have dequeued.
*/
spin_lock(&next_msg->poll_lock);
if (next_msg->poll_status == VIO_MSG_NOT_POLLED ||
next_msg->poll_status == VIO_MSG_POLL_TIMEOUT) {
spin_unlock(&next_msg->poll_lock);
any_prefetched++;
spin_lock(&vioch->pending_lock);
list_add_tail(&next_msg->list,
&vioch->pending_cmds_list);
spin_unlock(&vioch->pending_lock);
} else {
spin_unlock(&next_msg->poll_lock);
}
}
/*
* When the polling loop has successfully terminated, anything else
* queued in the meantime will be served by the deferred worker OR by
* the normal IRQ/callback OR by other poll loops.
*
* If we are still looking for the polled reply, the polling index has
* to be updated to the current vqueue last used index.
*/
if (found) {
pending = !virtqueue_enable_cb(vioch->vqueue);
} else {
msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue);
pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
}
if (vioch->deferred_tx_wq && (any_prefetched || pending))
queue_work(vioch->deferred_tx_wq, &vioch->deferred_tx_work);
spin_unlock_irqrestore(&vioch->lock, flags);
scmi_vio_channel_release(vioch);
return found;
}
static const struct scmi_transport_ops scmi_virtio_ops = {
.link_supplier = virtio_link_supplier,
.chan_available = virtio_chan_available,
.chan_setup = virtio_chan_setup,
.chan_free = virtio_chan_free,
.get_max_msg = virtio_get_max_msg,
.send_message = virtio_send_message,
.fetch_response = virtio_fetch_response,
.fetch_notification = virtio_fetch_notification,
.mark_txdone = virtio_mark_txdone,
.poll_done = virtio_poll_done,
};
static int scmi_vio_probe(struct virtio_device *vdev)
{
struct device *dev = &vdev->dev;
struct scmi_vio_channel *channels;
bool have_vq_rx;
int vq_cnt;
int i;
int ret;
struct virtqueue *vqs[VIRTIO_SCMI_VQ_MAX_CNT];
/* Only one SCMI VirtIO device allowed */
if (scmi_vdev) {
dev_err(dev,
"One SCMI Virtio device was already initialized: only one allowed.\n");
return -EBUSY;
}
have_vq_rx = scmi_vio_have_vq_rx(vdev);
vq_cnt = have_vq_rx ? VIRTIO_SCMI_VQ_MAX_CNT : 1;
channels = devm_kcalloc(dev, vq_cnt, sizeof(*channels), GFP_KERNEL);
if (!channels)
return -ENOMEM;
if (have_vq_rx)
channels[VIRTIO_SCMI_VQ_RX].is_rx = true;
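/* Allocate the TX virtqueue and, when advertised, the RX one too */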
ret = virtio_find_vqs(vdev, vq_cnt, vqs, scmi_vio_complete_callbacks,
scmi_vio_vqueue_names, NULL);
if (ret) {
dev_err(dev, "Failed to get %d virtqueue(s)\n", vq_cnt);
return ret;
}
for (i = 0; i < vq_cnt; i++) {
unsigned int sz;
spin_lock_init(&channels[i].lock);
spin_lock_init(&channels[i].free_lock);
INIT_LIST_HEAD(&channels[i].free_list);
spin_lock_init(&channels[i].pending_lock);
INIT_LIST_HEAD(&channels[i].pending_cmds_list);
channels[i].vqueue = vqs[i];
sz = virtqueue_get_vring_size(channels[i].vqueue);
/* Tx messages need multiple descriptors. */
if (!channels[i].is_rx)
sz /= DESCRIPTORS_PER_TX_MSG;
if (sz > MSG_TOKEN_MAX) {
dev_info(dev,
"%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n",
channels[i].is_rx ? "rx" : "tx",
sz, MSG_TOKEN_MAX);
sz = MSG_TOKEN_MAX;
}
channels[i].max_msg = sz;
}
vdev->priv = channels;
/* Ensure initialized scmi_vdev is visible */
smp_store_mb(scmi_vdev, vdev);
return 0;
}
static void scmi_vio_remove(struct virtio_device *vdev)
{
/*
* Once we get here, virtio_chan_free() will have already been called by
* the SCMI core for any existing channel and, as a consequence, all the
* virtio channels will have been already marked NOT ready, causing any
* outstanding message on any vqueue to be ignored by complete_cb: now
* we can just stop processing buffers and destroy the vqueues.
*/
virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
/* Ensure scmi_vdev is visible as NULL */
smp_store_mb(scmi_vdev, NULL);
}
static int scmi_vio_validate(struct virtio_device *vdev)
{
#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE
if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
dev_err(&vdev->dev,
"device does not comply with spec version 1.x\n");
return -EINVAL;
}
#endif
return 0;
}
static unsigned int features[] = {
VIRTIO_SCMI_F_P2A_CHANNELS,
};
static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_SCMI, VIRTIO_DEV_ANY_ID },
{ 0 }
};
static struct virtio_driver virtio_scmi_driver = {
.driver.name = "scmi-virtio",
.driver.owner = THIS_MODULE,
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.id_table = id_table,
.probe = scmi_vio_probe,
.remove = scmi_vio_remove,
.validate = scmi_vio_validate,
};
static int __init virtio_scmi_init(void)
{
return register_virtio_driver(&virtio_scmi_driver);
}
static void virtio_scmi_exit(void)
{
unregister_virtio_driver(&virtio_scmi_driver);
}
const struct scmi_desc scmi_virtio_desc = {
.transport_init = virtio_scmi_init,
.transport_exit = virtio_scmi_exit,
.ops = &scmi_virtio_ops,
/* for non-realtime virtio devices */
.max_rx_timeout_ms = VIRTIO_MAX_RX_TIMEOUT_MS,
.max_msg = 0, /* overridden by virtio_get_max_msg() */
.max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE,
.atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE),
};
| linux-master | drivers/firmware/arm_scmi/virtio.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019-2021 Linaro Ltd.
*/
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/uuid.h>
#include <uapi/linux/tee.h>
#include "common.h"
#define SCMI_OPTEE_MAX_MSG_SIZE 128
enum scmi_optee_pta_cmd {
/*
* PTA_SCMI_CMD_CAPABILITIES - Get channel capabilities
*
* [out] value[0].a: Capability bit mask (enum pta_scmi_caps)
* [out] value[0].b: Extended capabilities or 0
*/
PTA_SCMI_CMD_CAPABILITIES = 0,
/*
* PTA_SCMI_CMD_PROCESS_SMT_CHANNEL - Process SCMI message in SMT buffer
*
* [in] value[0].a: Channel handle
*
* Shared memory used for SCMI message/response exchange is expected
* to be already identified and bound to the channel handle in both the
* SCMI agent and the SCMI server (OP-TEE) parts.
* The memory uses SMT header to carry SCMI meta-data (protocol ID and
* protocol message ID).
*/
PTA_SCMI_CMD_PROCESS_SMT_CHANNEL = 1,
/*
* PTA_SCMI_CMD_PROCESS_SMT_CHANNEL_MESSAGE - Process SMT/SCMI message
*
* [in] value[0].a: Channel handle
* [in/out] memref[1]: Message/response buffer (SMT and SCMI payload)
*
* Shared memory used for SCMI message/response is an SMT buffer
* referenced by param[1]. It shall be 128 bytes large to fit the
* response payload whatever the message payload size.
* The memory uses SMT header to carry SCMI meta-data (protocol ID and
* protocol message ID).
*/
PTA_SCMI_CMD_PROCESS_SMT_CHANNEL_MESSAGE = 2,
/*
* PTA_SCMI_CMD_GET_CHANNEL - Get channel handle
*
* SCMI shm information is 0 if the agent expects to use OP-TEE regular SHM
*
* [in] value[0].a: Channel identifier
* [out] value[0].a: Returned channel handle
* [in] value[0].b: Requested capabilities mask (enum pta_scmi_caps)
*/
PTA_SCMI_CMD_GET_CHANNEL = 3,
/*
* PTA_SCMI_CMD_PROCESS_MSG_CHANNEL - Process SCMI message in a MSG
* buffer pointed by memref parameters
*
* [in] value[0].a: Channel handle
* [in] memref[1]: Message buffer (MSG and SCMI payload)
* [out] memref[2]: Response buffer (MSG and SCMI payload)
*
* Shared memories used for SCMI message/response are MSG buffers
* referenced by param[1] and param[2]. MSG transport protocol
* uses a 32bit header to carry SCMI meta-data (protocol ID and
* protocol message ID) followed by the effective SCMI message
* payload.
*/
PTA_SCMI_CMD_PROCESS_MSG_CHANNEL = 4,
};
/*
* OP-TEE SCMI service capabilities bit flags (32bit)
*
* PTA_SCMI_CAPS_SMT_HEADER
* When set, OP-TEE supports commands using the SMT header protocol (SCMI shmem) in
* shared memory buffers to carry SCMI protocol synchronisation information.
*
* PTA_SCMI_CAPS_MSG_HEADER
* When set, OP-TEE supports commands using the MSG header protocol in an OP-TEE
* shared memory to carry SCMI protocol synchronisation information and SCMI
* message payload.
*/
#define PTA_SCMI_CAPS_NONE 0
#define PTA_SCMI_CAPS_SMT_HEADER BIT(0)
#define PTA_SCMI_CAPS_MSG_HEADER BIT(1)
#define PTA_SCMI_CAPS_MASK (PTA_SCMI_CAPS_SMT_HEADER | \
PTA_SCMI_CAPS_MSG_HEADER)
/**
* struct scmi_optee_channel - Description of an OP-TEE SCMI channel
*
* @channel_id: OP-TEE channel ID used for this transport
* @tee_session: TEE session identifier
* @caps: OP-TEE SCMI channel capabilities
* @rx_len: Response size
* @mu: Mutex protection on channel access
* @cinfo: SCMI channel information
* @shmem: Virtual base address of the shared memory
* @req: Shared memory protocol handle for SCMI request and synchronous response
* @tee_shm: TEE shared memory handle backing @req, or NULL if using IOMEM shmem
* @link: Reference in agent's channel list
*/
struct scmi_optee_channel {
u32 channel_id;
u32 tee_session;
u32 caps;
u32 rx_len;
struct mutex mu;
struct scmi_chan_info *cinfo;
union {
struct scmi_shared_mem __iomem *shmem;
struct scmi_msg_payld *msg;
} req;
struct tee_shm *tee_shm;
struct list_head link;
};
/**
* struct scmi_optee_agent - OP-TEE transport private data
*
* @dev: Device used for communication with TEE
* @tee_ctx: TEE context used for communication
* @caps: Supported channel capabilities
* @mu: Mutex for protection of @channel_list
* @channel_list: List of all created channels for the agent
*/
struct scmi_optee_agent {
struct device *dev;
struct tee_context *tee_ctx;
u32 caps;
struct mutex mu;
struct list_head channel_list;
};
/* There can be only one OP-TEE SCMI service that we connect to */
static struct scmi_optee_agent *scmi_optee_private;
/* Forward reference to scmi_optee transport initialization */
static int scmi_optee_init(void);
/* Open a session toward SCMI OP-TEE service with REE_KERNEL identity */
static int open_session(struct scmi_optee_agent *agent, u32 *tee_session)
{
struct device *dev = agent->dev;
struct tee_client_device *scmi_pta = to_tee_client_device(dev);
struct tee_ioctl_open_session_arg arg = { };
int ret;
memcpy(arg.uuid, scmi_pta->id.uuid.b, TEE_IOCTL_UUID_LEN);
arg.clnt_login = TEE_IOCTL_LOGIN_REE_KERNEL;
ret = tee_client_open_session(agent->tee_ctx, &arg, NULL);
if (ret < 0 || arg.ret) {
dev_err(dev, "Can't open tee session: %d / %#x\n", ret, arg.ret);
return -EOPNOTSUPP;
}
*tee_session = arg.session;
return 0;
}
static void close_session(struct scmi_optee_agent *agent, u32 tee_session)
{
tee_client_close_session(agent->tee_ctx, tee_session);
}
static int get_capabilities(struct scmi_optee_agent *agent)
{
struct tee_ioctl_invoke_arg arg = { };
struct tee_param param[1] = { };
u32 caps;
u32 tee_session;
int ret;
ret = open_session(agent, &tee_session);
if (ret)
return ret;
arg.func = PTA_SCMI_CMD_CAPABILITIES;
arg.session = tee_session;
arg.num_params = 1;
param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT;
ret = tee_client_invoke_func(agent->tee_ctx, &arg, param);
close_session(agent, tee_session);
if (ret < 0 || arg.ret) {
dev_err(agent->dev, "Can't get capabilities: %d / %#x\n", ret, arg.ret);
return -EOPNOTSUPP;
}
caps = param[0].u.value.a;
if (!(caps & (PTA_SCMI_CAPS_SMT_HEADER | PTA_SCMI_CAPS_MSG_HEADER))) {
dev_err(agent->dev, "OP-TEE SCMI PTA doesn't support SMT and MSG\n");
return -EOPNOTSUPP;
}
agent->caps = caps;
return 0;
}
static int get_channel(struct scmi_optee_channel *channel)
{
struct device *dev = scmi_optee_private->dev;
struct tee_ioctl_invoke_arg arg = { };
struct tee_param param[1] = { };
unsigned int caps = 0;
int ret;
if (channel->tee_shm)
caps = PTA_SCMI_CAPS_MSG_HEADER;
else
caps = PTA_SCMI_CAPS_SMT_HEADER;
arg.func = PTA_SCMI_CMD_GET_CHANNEL;
arg.session = channel->tee_session;
arg.num_params = 1;
param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
param[0].u.value.a = channel->channel_id;
param[0].u.value.b = caps;
ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param);
if (ret || arg.ret) {
dev_err(dev, "Can't get channel with caps %#x: %d / %#x\n", caps, ret, arg.ret);
return -EOPNOTSUPP;
}
/* From now on use the channel identifier provided by the OP-TEE SCMI service */
channel->channel_id = param[0].u.value.a;
channel->caps = caps;
return 0;
}
static int invoke_process_smt_channel(struct scmi_optee_channel *channel)
{
struct tee_ioctl_invoke_arg arg = {
.func = PTA_SCMI_CMD_PROCESS_SMT_CHANNEL,
.session = channel->tee_session,
.num_params = 1,
};
struct tee_param param[1] = { };
int ret;
param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
param[0].u.value.a = channel->channel_id;
ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param);
if (ret < 0 || arg.ret) {
dev_err(scmi_optee_private->dev, "Can't invoke channel %u: %d / %#x\n",
channel->channel_id, ret, arg.ret);
return -EIO;
}
return 0;
}
static int invoke_process_msg_channel(struct scmi_optee_channel *channel, size_t msg_size)
{
struct tee_ioctl_invoke_arg arg = {
.func = PTA_SCMI_CMD_PROCESS_MSG_CHANNEL,
.session = channel->tee_session,
.num_params = 3,
};
struct tee_param param[3] = { };
int ret;
param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
param[0].u.value.a = channel->channel_id;
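/*
 * memref[1] carries the MSG request and memref[2] receives the response:
 * both reference the same dynamic TEE shared memory buffer.
 */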
param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
param[1].u.memref.shm = channel->tee_shm;
param[1].u.memref.size = msg_size;
param[2].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
param[2].u.memref.shm = channel->tee_shm;
param[2].u.memref.size = SCMI_OPTEE_MAX_MSG_SIZE;
ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param);
if (ret < 0 || arg.ret) {
dev_err(scmi_optee_private->dev, "Can't invoke channel %u: %d / %#x\n",
channel->channel_id, ret, arg.ret);
return -EIO;
}
/* Save response size */
channel->rx_len = param[2].u.memref.size;
return 0;
}
static int scmi_optee_link_supplier(struct device *dev)
{
if (!scmi_optee_private) {
if (scmi_optee_init())
dev_dbg(dev, "Optee bus not yet ready\n");
/* Wait for optee bus */
return -EPROBE_DEFER;
}
if (!device_link_add(dev, scmi_optee_private->dev, DL_FLAG_AUTOREMOVE_CONSUMER)) {
dev_err(dev, "Adding link to supplier optee device failed\n");
return -ECANCELED;
}
return 0;
}
static bool scmi_optee_chan_available(struct device_node *of_node, int idx)
{
u32 channel_id;
return !of_property_read_u32_index(of_node, "linaro,optee-channel-id",
idx, &channel_id);
}
static void scmi_optee_clear_channel(struct scmi_chan_info *cinfo)
{
struct scmi_optee_channel *channel = cinfo->transport_info;
if (!channel->tee_shm)
shmem_clear_channel(channel->req.shmem);
}
static int setup_dynamic_shmem(struct device *dev, struct scmi_optee_channel *channel)
{
const size_t msg_size = SCMI_OPTEE_MAX_MSG_SIZE;
void *shbuf;
channel->tee_shm = tee_shm_alloc_kernel_buf(scmi_optee_private->tee_ctx, msg_size);
if (IS_ERR(channel->tee_shm)) {
dev_err(channel->cinfo->dev, "shmem allocation failed\n");
return -ENOMEM;
}
shbuf = tee_shm_get_va(channel->tee_shm, 0);
memset(shbuf, 0, msg_size);
channel->req.msg = shbuf;
channel->rx_len = msg_size;
return 0;
}
static int setup_static_shmem(struct device *dev, struct scmi_chan_info *cinfo,
struct scmi_optee_channel *channel)
{
struct device_node *np;
resource_size_t size;
struct resource res;
int ret;
np = of_parse_phandle(cinfo->dev->of_node, "shmem", 0);
if (!of_device_is_compatible(np, "arm,scmi-shmem")) {
ret = -ENXIO;
goto out;
}
ret = of_address_to_resource(np, 0, &res);
if (ret) {
dev_err(dev, "Failed to get SCMI Tx shared memory\n");
goto out;
}
size = resource_size(&res);
channel->req.shmem = devm_ioremap(dev, res.start, size);
if (!channel->req.shmem) {
dev_err(dev, "Failed to ioremap SCMI Tx shared memory\n");
ret = -EADDRNOTAVAIL;
goto out;
}
ret = 0;
out:
of_node_put(np);
return ret;
}
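/*
 * Use the statically described IOMEM shmem when a "shmem" property is
 * present, otherwise fall back to dynamically allocated TEE shared memory.
 */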
static int setup_shmem(struct device *dev, struct scmi_chan_info *cinfo,
struct scmi_optee_channel *channel)
{
if (of_property_present(cinfo->dev->of_node, "shmem"))
return setup_static_shmem(dev, cinfo, channel);
else
return setup_dynamic_shmem(dev, channel);
}
static int scmi_optee_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, bool tx)
{
struct scmi_optee_channel *channel;
uint32_t channel_id;
int ret;
if (!tx)
return -ENODEV;
channel = devm_kzalloc(dev, sizeof(*channel), GFP_KERNEL);
if (!channel)
return -ENOMEM;
ret = of_property_read_u32_index(cinfo->dev->of_node, "linaro,optee-channel-id",
0, &channel_id);
if (ret)
return ret;
cinfo->transport_info = channel;
channel->cinfo = cinfo;
channel->channel_id = channel_id;
mutex_init(&channel->mu);
ret = setup_shmem(dev, cinfo, channel);
if (ret)
return ret;
ret = open_session(scmi_optee_private, &channel->tee_session);
if (ret)
goto err_free_shm;
ret = get_channel(channel);
if (ret)
goto err_close_sess;
/* Enable polling */
cinfo->no_completion_irq = true;
mutex_lock(&scmi_optee_private->mu);
list_add(&channel->link, &scmi_optee_private->channel_list);
mutex_unlock(&scmi_optee_private->mu);
return 0;
err_close_sess:
close_session(scmi_optee_private, channel->tee_session);
err_free_shm:
if (channel->tee_shm)
tee_shm_free(channel->tee_shm);
return ret;
}
static int scmi_optee_chan_free(int id, void *p, void *data)
{
struct scmi_chan_info *cinfo = p;
struct scmi_optee_channel *channel = cinfo->transport_info;
mutex_lock(&scmi_optee_private->mu);
list_del(&channel->link);
mutex_unlock(&scmi_optee_private->mu);
close_session(scmi_optee_private, channel->tee_session);
if (channel->tee_shm) {
tee_shm_free(channel->tee_shm);
channel->tee_shm = NULL;
}
cinfo->transport_info = NULL;
channel->cinfo = NULL;
return 0;
}
static int scmi_optee_send_message(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer)
{
struct scmi_optee_channel *channel = cinfo->transport_info;
int ret;
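/*
 * The channel mutex is held across the whole transaction: it is
 * released by scmi_optee_mark_txdone() on success, or right below on
 * send failure.
 */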
mutex_lock(&channel->mu);
if (channel->tee_shm) {
msg_tx_prepare(channel->req.msg, xfer);
ret = invoke_process_msg_channel(channel, msg_command_size(xfer));
} else {
shmem_tx_prepare(channel->req.shmem, xfer, cinfo);
ret = invoke_process_smt_channel(channel);
}
if (ret)
mutex_unlock(&channel->mu);
return ret;
}
static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer)
{
struct scmi_optee_channel *channel = cinfo->transport_info;
if (channel->tee_shm)
msg_fetch_response(channel->req.msg, channel->rx_len, xfer);
else
shmem_fetch_response(channel->req.shmem, xfer);
}
static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret,
struct scmi_xfer *__unused)
{
struct scmi_optee_channel *channel = cinfo->transport_info;
mutex_unlock(&channel->mu);
}
static struct scmi_transport_ops scmi_optee_ops = {
.link_supplier = scmi_optee_link_supplier,
.chan_available = scmi_optee_chan_available,
.chan_setup = scmi_optee_chan_setup,
.chan_free = scmi_optee_chan_free,
.send_message = scmi_optee_send_message,
.mark_txdone = scmi_optee_mark_txdone,
.fetch_response = scmi_optee_fetch_response,
.clear_channel = scmi_optee_clear_channel,
};
static int scmi_optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
{
return ver->impl_id == TEE_IMPL_ID_OPTEE;
}
static int scmi_optee_service_probe(struct device *dev)
{
struct scmi_optee_agent *agent;
struct tee_context *tee_ctx;
int ret;
/* Only one SCMI OP-TEE device allowed */
if (scmi_optee_private) {
dev_err(dev, "An SCMI OP-TEE device was already initialized: only one allowed\n");
return -EBUSY;
}
tee_ctx = tee_client_open_context(NULL, scmi_optee_ctx_match, NULL, NULL);
if (IS_ERR(tee_ctx))
return -ENODEV;
agent = devm_kzalloc(dev, sizeof(*agent), GFP_KERNEL);
if (!agent) {
ret = -ENOMEM;
goto err;
}
agent->dev = dev;
agent->tee_ctx = tee_ctx;
INIT_LIST_HEAD(&agent->channel_list);
mutex_init(&agent->mu);
ret = get_capabilities(agent);
if (ret)
goto err;
/* Ensure agent resources are all visible before scmi_optee_private is */
smp_mb();
scmi_optee_private = agent;
return 0;
err:
tee_client_close_context(tee_ctx);
return ret;
}
static int scmi_optee_service_remove(struct device *dev)
{
struct scmi_optee_agent *agent = scmi_optee_private;
if (!scmi_optee_private)
return -EINVAL;
if (!list_empty(&scmi_optee_private->channel_list))
return -EBUSY;
/* Ensure cleared reference is visible before resources are released */
smp_store_mb(scmi_optee_private, NULL);
tee_client_close_context(agent->tee_ctx);
return 0;
}
static const struct tee_client_device_id scmi_optee_service_id[] = {
{
UUID_INIT(0xa8cfe406, 0xd4f5, 0x4a2e,
0x9f, 0x8d, 0xa2, 0x5d, 0xc7, 0x54, 0xc0, 0x99)
},
{ }
};
MODULE_DEVICE_TABLE(tee, scmi_optee_service_id);
static struct tee_client_driver scmi_optee_driver = {
.id_table = scmi_optee_service_id,
.driver = {
.name = "scmi-optee",
.bus = &tee_bus_type,
.probe = scmi_optee_service_probe,
.remove = scmi_optee_service_remove,
},
};
static int scmi_optee_init(void)
{
return driver_register(&scmi_optee_driver.driver);
}
static void scmi_optee_exit(void)
{
if (scmi_optee_private)
driver_unregister(&scmi_optee_driver.driver);
}
const struct scmi_desc scmi_optee_desc = {
.transport_exit = scmi_optee_exit,
.ops = &scmi_optee_ops,
.max_rx_timeout_ms = 30,
.max_msg = 20,
.max_msg_size = SCMI_OPTEE_MAX_MSG_SIZE,
.sync_cmds_completed_on_ret = true,
};
| linux-master | drivers/firmware/arm_scmi/optee.c |
// SPDX-License-Identifier: GPL-2.0
/*
* For transport using shared mem structure.
*
* Copyright (C) 2019 ARM Ltd.
*/
#include <linux/ktime.h>
#include <linux/io.h>
#include <linux/processor.h>
#include <linux/types.h>
#include <asm-generic/bug.h>
#include "common.h"
/*
* SCMI specification requires all parameters, message headers, return
* arguments or any protocol data to be expressed in little endian
* format only.
*/
struct scmi_shared_mem {
__le32 reserved;
__le32 channel_status;
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR BIT(1)
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE BIT(0)
__le32 reserved1[2];
__le32 flags;
#define SCMI_SHMEM_FLAG_INTR_ENABLED BIT(0)
__le32 length;
__le32 msg_header;
u8 msg_payload[];
};
void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer, struct scmi_chan_info *cinfo)
{
ktime_t stop;
/*
* Ideally the channel must be free by now, unless the OS timed out the
* last request while the platform continued to process it; in that case
* wait until it releases the shared memory, otherwise we may end up
* overwriting its response with a new message payload or vice-versa.
* Give up anyway after twice the expected channel timeout so as
* not to bail-out on intermittent issues where the platform is
* occasionally a bit slower to answer.
*
* Note that after a timeout is detected we bail-out and carry on but
* the transport functionality is probably permanently compromised:
* this is just to ease debugging and avoid complete hangs on boot
* due to a misbehaving SCMI firmware.
*/
stop = ktime_add_ms(ktime_get(), 2 * cinfo->rx_timeout_ms);
spin_until_cond((ioread32(&shmem->channel_status) &
SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE) ||
ktime_after(ktime_get(), stop));
if (!(ioread32(&shmem->channel_status) &
SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE)) {
WARN_ON_ONCE(1);
dev_err(cinfo->dev,
"Timeout waiting for a free TX channel !\n");
return;
}
/* Mark channel busy + clear error */
iowrite32(0x0, &shmem->channel_status);
iowrite32(xfer->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
&shmem->flags);
iowrite32(sizeof(shmem->msg_header) + xfer->tx.len, &shmem->length);
iowrite32(pack_scmi_header(&xfer->hdr), &shmem->msg_header);
if (xfer->tx.buf)
memcpy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len);
}
u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
{
return ioread32(&shmem->msg_header);
}
void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer)
{
size_t len = ioread32(&shmem->length);
xfer->hdr.status = ioread32(shmem->msg_payload);
/* Skip the length of the header and status in the shmem area, i.e. 8 bytes */
xfer->rx.len = min_t(size_t, xfer->rx.len, len > 8 ? len - 8 : 0);
/* Take a copy to the rx buffer.. */
memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
}
void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
size_t max_len, struct scmi_xfer *xfer)
{
size_t len = ioread32(&shmem->length);
/* Skip only the length of the header in the shmem area, i.e. 4 bytes */
xfer->rx.len = min_t(size_t, max_len, len > 4 ? len - 4 : 0);
/* Take a copy to the rx buffer.. */
memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len);
}
void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem)
{
iowrite32(SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE, &shmem->channel_status);
}
bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer)
{
u16 xfer_id;
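/* The channel is done only for the transfer whose token is in the header */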
xfer_id = MSG_XTRACT_TOKEN(ioread32(&shmem->msg_header));
if (xfer->hdr.seq != xfer_id)
return false;
return ioread32(&shmem->channel_status) &
(SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
}
| linux-master | drivers/firmware/arm_scmi/shmem.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SCMI Generic power domain support.
*
* Copyright (C) 2018-2021 ARM Ltd.
*/
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pm_domain.h>
#include <linux/scmi_protocol.h>
static const struct scmi_power_proto_ops *power_ops;
struct scmi_pm_domain {
struct generic_pm_domain genpd;
const struct scmi_protocol_handle *ph;
const char *name;
u32 domain;
};
#define to_scmi_pd(gpd) container_of(gpd, struct scmi_pm_domain, genpd)
static int scmi_pd_power(struct generic_pm_domain *domain, bool power_on)
{
int ret;
u32 state, ret_state;
struct scmi_pm_domain *pd = to_scmi_pd(domain);
if (power_on)
state = SCMI_POWER_STATE_GENERIC_ON;
else
state = SCMI_POWER_STATE_GENERIC_OFF;
ret = power_ops->state_set(pd->ph, pd->domain, state);
if (!ret)
ret = power_ops->state_get(pd->ph, pd->domain, &ret_state);
if (!ret && state != ret_state)
return -EIO;
return ret;
}
static int scmi_pd_power_on(struct generic_pm_domain *domain)
{
return scmi_pd_power(domain, true);
}
static int scmi_pd_power_off(struct generic_pm_domain *domain)
{
return scmi_pd_power(domain, false);
}
static int scmi_pm_domain_probe(struct scmi_device *sdev)
{
int num_domains, i;
struct device *dev = &sdev->dev;
struct device_node *np = dev->of_node;
struct scmi_pm_domain *scmi_pd;
struct genpd_onecell_data *scmi_pd_data;
struct generic_pm_domain **domains;
const struct scmi_handle *handle = sdev->handle;
struct scmi_protocol_handle *ph;
if (!handle)
return -ENODEV;
power_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_POWER, &ph);
if (IS_ERR(power_ops))
return PTR_ERR(power_ops);
num_domains = power_ops->num_domains_get(ph);
if (num_domains < 0) {
dev_err(dev, "number of domains not found\n");
return num_domains;
}
scmi_pd = devm_kcalloc(dev, num_domains, sizeof(*scmi_pd), GFP_KERNEL);
if (!scmi_pd)
return -ENOMEM;
scmi_pd_data = devm_kzalloc(dev, sizeof(*scmi_pd_data), GFP_KERNEL);
if (!scmi_pd_data)
return -ENOMEM;
domains = devm_kcalloc(dev, num_domains, sizeof(*domains), GFP_KERNEL);
if (!domains)
return -ENOMEM;
for (i = 0; i < num_domains; i++, scmi_pd++) {
u32 state;
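/* Skip domains whose current power state cannot be retrieved */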
if (power_ops->state_get(ph, i, &state)) {
dev_warn(dev, "failed to get state for domain %d\n", i);
continue;
}
scmi_pd->domain = i;
scmi_pd->ph = ph;
scmi_pd->name = power_ops->name_get(ph, i);
scmi_pd->genpd.name = scmi_pd->name;
scmi_pd->genpd.power_off = scmi_pd_power_off;
scmi_pd->genpd.power_on = scmi_pd_power_on;
pm_genpd_init(&scmi_pd->genpd, NULL,
state == SCMI_POWER_STATE_GENERIC_OFF);
domains[i] = &scmi_pd->genpd;
}
scmi_pd_data->domains = domains;
scmi_pd_data->num_domains = num_domains;
dev_set_drvdata(dev, scmi_pd_data);
return of_genpd_add_provider_onecell(np, scmi_pd_data);
}
static void scmi_pm_domain_remove(struct scmi_device *sdev)
{
int i;
struct genpd_onecell_data *scmi_pd_data;
struct device *dev = &sdev->dev;
struct device_node *np = dev->of_node;
of_genpd_del_provider(np);
scmi_pd_data = dev_get_drvdata(dev);
for (i = 0; i < scmi_pd_data->num_domains; i++) {
if (!scmi_pd_data->domains[i])
continue;
pm_genpd_remove(scmi_pd_data->domains[i]);
}
}
static const struct scmi_device_id scmi_id_table[] = {
{ SCMI_PROTOCOL_POWER, "genpd" },
{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);
static struct scmi_driver scmi_power_domain_driver = {
.name = "scmi-power-domain",
.probe = scmi_pm_domain_probe,
.remove = scmi_pm_domain_remove,
.id_table = scmi_id_table,
};
module_scmi_driver(scmi_power_domain_driver);
MODULE_AUTHOR("Sudeep Holla <[email protected]>");
MODULE_DESCRIPTION("ARM SCMI power domain driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/firmware/arm_scmi/scmi_pm_domain.c |
// SPDX-License-Identifier: GPL-2.0
/*
* System Control and Management Interface (SCMI) Performance Protocol
*
* Copyright (C) 2018-2023 ARM Ltd.
*/
#define pr_fmt(fmt) "SCMI Notifications PERF - " fmt
#include <linux/bits.h>
#include <linux/hashtable.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/scmi_protocol.h>
#include <linux/sort.h>
#include <linux/xarray.h>
#include <trace/events/scmi.h>
#include "protocols.h"
#include "notify.h"
#define MAX_OPPS 16
enum scmi_performance_protocol_cmd {
PERF_DOMAIN_ATTRIBUTES = 0x3,
PERF_DESCRIBE_LEVELS = 0x4,
PERF_LIMITS_SET = 0x5,
PERF_LIMITS_GET = 0x6,
PERF_LEVEL_SET = 0x7,
PERF_LEVEL_GET = 0x8,
PERF_NOTIFY_LIMITS = 0x9,
PERF_NOTIFY_LEVEL = 0xa,
PERF_DESCRIBE_FASTCHANNEL = 0xb,
PERF_DOMAIN_NAME_GET = 0xc,
};
enum {
PERF_FC_LEVEL,
PERF_FC_LIMIT,
PERF_FC_MAX,
};
struct scmi_opp {
u32 perf;
u32 power;
u32 trans_latency_us;
u32 indicative_freq;
u32 level_index;
struct hlist_node hash;
};
struct scmi_msg_resp_perf_attributes {
__le16 num_domains;
__le16 flags;
#define POWER_SCALE_IN_MILLIWATT(x) ((x) & BIT(0))
#define POWER_SCALE_IN_MICROWATT(x) ((x) & BIT(1))
__le32 stats_addr_low;
__le32 stats_addr_high;
__le32 stats_size;
};
struct scmi_msg_resp_perf_domain_attributes {
__le32 flags;
#define SUPPORTS_SET_LIMITS(x) ((x) & BIT(31))
#define SUPPORTS_SET_PERF_LVL(x) ((x) & BIT(30))
#define SUPPORTS_PERF_LIMIT_NOTIFY(x) ((x) & BIT(29))
#define SUPPORTS_PERF_LEVEL_NOTIFY(x) ((x) & BIT(28))
#define SUPPORTS_PERF_FASTCHANNELS(x) ((x) & BIT(27))
#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(26))
#define SUPPORTS_LEVEL_INDEXING(x) ((x) & BIT(25))
__le32 rate_limit_us;
__le32 sustained_freq_khz;
__le32 sustained_perf_level;
u8 name[SCMI_SHORT_NAME_MAX_SIZE];
};
struct scmi_msg_perf_describe_levels {
__le32 domain;
__le32 level_index;
};
struct scmi_perf_set_limits {
__le32 domain;
__le32 max_level;
__le32 min_level;
};
struct scmi_perf_get_limits {
__le32 max_level;
__le32 min_level;
};
struct scmi_perf_set_level {
__le32 domain;
__le32 level;
};
struct scmi_perf_notify_level_or_limits {
__le32 domain;
__le32 notify_enable;
};
struct scmi_perf_limits_notify_payld {
__le32 agent_id;
__le32 domain_id;
__le32 range_max;
__le32 range_min;
};
struct scmi_perf_level_notify_payld {
__le32 agent_id;
__le32 domain_id;
__le32 performance_level;
};
struct scmi_msg_resp_perf_describe_levels {
__le16 num_returned;
__le16 num_remaining;
struct {
__le32 perf_val;
__le32 power;
__le16 transition_latency_us;
__le16 reserved;
} opp[];
};
struct scmi_msg_resp_perf_describe_levels_v4 {
__le16 num_returned;
__le16 num_remaining;
struct {
__le32 perf_val;
__le32 power;
__le16 transition_latency_us;
__le16 reserved;
__le32 indicative_freq;
__le32 level_index;
} opp[];
};
struct perf_dom_info {
u32 id;
bool set_limits;
bool set_perf;
bool perf_limit_notify;
bool perf_level_notify;
bool perf_fastchannels;
bool level_indexing_mode;
u32 opp_count;
u32 sustained_freq_khz;
u32 sustained_perf_level;
u32 mult_factor;
char name[SCMI_MAX_STR_SIZE];
struct scmi_opp opp[MAX_OPPS];
struct scmi_fc_info *fc_info;
struct xarray opps_by_idx;
struct xarray opps_by_lvl;
DECLARE_HASHTABLE(opps_by_freq, ilog2(MAX_OPPS));
};
#define LOOKUP_BY_FREQ(__htp, __freq) \
({ \
/* u32 cast is needed to pick right hash func */ \
u32 f_ = (u32)(__freq); \
struct scmi_opp *_opp; \
\
hash_for_each_possible((__htp), _opp, hash, f_) \
if (_opp->indicative_freq == f_) \
break; \
_opp; \
})
struct scmi_perf_info {
u32 version;
u16 num_domains;
enum scmi_power_scale power_scale;
u64 stats_addr;
u32 stats_size;
struct perf_dom_info *dom_info;
};
static enum scmi_performance_protocol_cmd evt_2_cmd[] = {
PERF_NOTIFY_LIMITS,
PERF_NOTIFY_LEVEL,
};
static int scmi_perf_attributes_get(const struct scmi_protocol_handle *ph,
struct scmi_perf_info *pi)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_perf_attributes *attr;
ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
sizeof(*attr), &t);
if (ret)
return ret;
attr = t->rx.buf;
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
u16 flags = le16_to_cpu(attr->flags);
pi->num_domains = le16_to_cpu(attr->num_domains);
if (POWER_SCALE_IN_MILLIWATT(flags))
pi->power_scale = SCMI_POWER_MILLIWATTS;
if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3)
if (POWER_SCALE_IN_MICROWATT(flags))
pi->power_scale = SCMI_POWER_MICROWATTS;
pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
(u64)le32_to_cpu(attr->stats_addr_high) << 32;
pi->stats_size = le32_to_cpu(attr->stats_size);
}
ph->xops->xfer_put(ph, t);
return ret;
}
static void scmi_perf_xa_destroy(void *data)
{
int domain;
struct scmi_perf_info *pinfo = data;
for (domain = 0; domain < pinfo->num_domains; domain++) {
xa_destroy(&((pinfo->dom_info + domain)->opps_by_idx));
xa_destroy(&((pinfo->dom_info + domain)->opps_by_lvl));
}
}
static int
scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
struct perf_dom_info *dom_info,
u32 version)
{
int ret;
u32 flags;
struct scmi_xfer *t;
struct scmi_msg_resp_perf_domain_attributes *attr;
ret = ph->xops->xfer_get_init(ph, PERF_DOMAIN_ATTRIBUTES,
sizeof(dom_info->id), sizeof(*attr), &t);
if (ret)
return ret;
put_unaligned_le32(dom_info->id, t->tx.buf);
attr = t->rx.buf;
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
flags = le32_to_cpu(attr->flags);
dom_info->set_limits = SUPPORTS_SET_LIMITS(flags);
dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags);
dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags);
dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags);
dom_info->perf_fastchannels = SUPPORTS_PERF_FASTCHANNELS(flags);
if (PROTOCOL_REV_MAJOR(version) >= 0x4)
dom_info->level_indexing_mode =
SUPPORTS_LEVEL_INDEXING(flags);
dom_info->sustained_freq_khz =
le32_to_cpu(attr->sustained_freq_khz);
dom_info->sustained_perf_level =
le32_to_cpu(attr->sustained_perf_level);
if (!dom_info->sustained_freq_khz ||
!dom_info->sustained_perf_level)
/* CPUFreq converts to kHz, hence default 1000 */
dom_info->mult_factor = 1000;
else
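/* Multiplier converting an abstract perf level into a frequency in Hz */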
dom_info->mult_factor =
(dom_info->sustained_freq_khz * 1000) /
dom_info->sustained_perf_level;
strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
}
ph->xops->xfer_put(ph, t);
/*
* If supported overwrite short name with the extended one;
* on error just carry on and use the already provided short name.
*/
if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 &&
SUPPORTS_EXTENDED_NAMES(flags))
ph->hops->extended_name_get(ph, PERF_DOMAIN_NAME_GET,
dom_info->id, dom_info->name,
SCMI_MAX_STR_SIZE);
if (dom_info->level_indexing_mode) {
xa_init(&dom_info->opps_by_idx);
xa_init(&dom_info->opps_by_lvl);
hash_init(dom_info->opps_by_freq);
}
return ret;
}
static int opp_cmp_func(const void *opp1, const void *opp2)
{
const struct scmi_opp *t1 = opp1, *t2 = opp2;
return t1->perf - t2->perf;
}
struct scmi_perf_ipriv {
u32 version;
struct perf_dom_info *perf_dom;
};
static void iter_perf_levels_prepare_message(void *message,
unsigned int desc_index,
const void *priv)
{
struct scmi_msg_perf_describe_levels *msg = message;
const struct scmi_perf_ipriv *p = priv;
msg->domain = cpu_to_le32(p->perf_dom->id);
/* Set the number of OPPs to be skipped/already read */
msg->level_index = cpu_to_le32(desc_index);
}
static int iter_perf_levels_update_state(struct scmi_iterator_state *st,
const void *response, void *priv)
{
const struct scmi_msg_resp_perf_describe_levels *r = response;
st->num_returned = le16_to_cpu(r->num_returned);
st->num_remaining = le16_to_cpu(r->num_remaining);
return 0;
}
static inline void
process_response_opp(struct scmi_opp *opp, unsigned int loop_idx,
const struct scmi_msg_resp_perf_describe_levels *r)
{
opp->perf = le32_to_cpu(r->opp[loop_idx].perf_val);
opp->power = le32_to_cpu(r->opp[loop_idx].power);
opp->trans_latency_us =
le16_to_cpu(r->opp[loop_idx].transition_latency_us);
}
static inline void
process_response_opp_v4(struct perf_dom_info *dom, struct scmi_opp *opp,
unsigned int loop_idx,
const struct scmi_msg_resp_perf_describe_levels_v4 *r)
{
opp->perf = le32_to_cpu(r->opp[loop_idx].perf_val);
opp->power = le32_to_cpu(r->opp[loop_idx].power);
opp->trans_latency_us =
le16_to_cpu(r->opp[loop_idx].transition_latency_us);
/* Note that PERF v4 always reports five 32-bit words */
opp->indicative_freq = le32_to_cpu(r->opp[loop_idx].indicative_freq);
if (dom->level_indexing_mode) {
opp->level_index = le32_to_cpu(r->opp[loop_idx].level_index);
xa_store(&dom->opps_by_idx, opp->level_index, opp, GFP_KERNEL);
xa_store(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
hash_add(dom->opps_by_freq, &opp->hash, opp->indicative_freq);
}
}
static int
iter_perf_levels_process_response(const struct scmi_protocol_handle *ph,
const void *response,
struct scmi_iterator_state *st, void *priv)
{
struct scmi_opp *opp;
struct scmi_perf_ipriv *p = priv;
opp = &p->perf_dom->opp[st->desc_index + st->loop_idx];
if (PROTOCOL_REV_MAJOR(p->version) <= 0x3)
process_response_opp(opp, st->loop_idx, response);
else
process_response_opp_v4(p->perf_dom, opp, st->loop_idx,
response);
p->perf_dom->opp_count++;
dev_dbg(ph->dev, "Level %d Power %d Latency %dus Ifreq %d Index %d\n",
opp->perf, opp->power, opp->trans_latency_us,
opp->indicative_freq, opp->level_index);
return 0;
}
static int
scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph,
struct perf_dom_info *perf_dom, u32 version)
{
int ret;
void *iter;
struct scmi_iterator_ops ops = {
.prepare_message = iter_perf_levels_prepare_message,
.update_state = iter_perf_levels_update_state,
.process_response = iter_perf_levels_process_response,
};
struct scmi_perf_ipriv ppriv = {
.version = version,
.perf_dom = perf_dom,
};
iter = ph->hops->iter_response_init(ph, &ops, MAX_OPPS,
PERF_DESCRIBE_LEVELS,
sizeof(struct scmi_msg_perf_describe_levels),
&ppriv);
if (IS_ERR(iter))
return PTR_ERR(iter);
ret = ph->hops->iter_response_run(iter);
if (ret)
return ret;
if (perf_dom->opp_count)
sort(perf_dom->opp, perf_dom->opp_count,
sizeof(struct scmi_opp), opp_cmp_func, NULL);
return ret;
}
static int scmi_perf_msg_limits_set(const struct scmi_protocol_handle *ph,
u32 domain, u32 max_perf, u32 min_perf)
{
int ret;
struct scmi_xfer *t;
struct scmi_perf_set_limits *limits;
ret = ph->xops->xfer_get_init(ph, PERF_LIMITS_SET,
sizeof(*limits), 0, &t);
if (ret)
return ret;
limits = t->tx.buf;
limits->domain = cpu_to_le32(domain);
limits->max_level = cpu_to_le32(max_perf);
limits->min_level = cpu_to_le32(min_perf);
ret = ph->xops->do_xfer(ph, t);
ph->xops->xfer_put(ph, t);
return ret;
}
static inline struct perf_dom_info *
scmi_perf_domain_lookup(const struct scmi_protocol_handle *ph, u32 domain)
{
struct scmi_perf_info *pi = ph->get_priv(ph);
if (domain >= pi->num_domains)
return ERR_PTR(-EINVAL);
return pi->dom_info + domain;
}
static int __scmi_perf_limits_set(const struct scmi_protocol_handle *ph,
struct perf_dom_info *dom, u32 max_perf,
u32 min_perf)
{
if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].set_addr) {
struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT];
trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_SET,
dom->id, min_perf, max_perf);
iowrite32(max_perf, fci->set_addr);
iowrite32(min_perf, fci->set_addr + 4);
ph->hops->fastchannel_db_ring(fci->set_db);
return 0;
}
return scmi_perf_msg_limits_set(ph, dom->id, max_perf, min_perf);
}
static int scmi_perf_limits_set(const struct scmi_protocol_handle *ph,
u32 domain, u32 max_perf, u32 min_perf)
{
struct scmi_perf_info *pi = ph->get_priv(ph);
struct perf_dom_info *dom;
dom = scmi_perf_domain_lookup(ph, domain);
if (IS_ERR(dom))
return PTR_ERR(dom);
if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3 && !max_perf && !min_perf)
return -EINVAL;
if (dom->level_indexing_mode) {
struct scmi_opp *opp;
if (min_perf) {
opp = xa_load(&dom->opps_by_lvl, min_perf);
if (!opp)
return -EIO;
min_perf = opp->level_index;
}
if (max_perf) {
opp = xa_load(&dom->opps_by_lvl, max_perf);
if (!opp)
return -EIO;
max_perf = opp->level_index;
}
}
return __scmi_perf_limits_set(ph, dom, max_perf, min_perf);
}
static int scmi_perf_msg_limits_get(const struct scmi_protocol_handle *ph,
u32 domain, u32 *max_perf, u32 *min_perf)
{
int ret;
struct scmi_xfer *t;
struct scmi_perf_get_limits *limits;
ret = ph->xops->xfer_get_init(ph, PERF_LIMITS_GET,
sizeof(__le32), 0, &t);
if (ret)
return ret;
put_unaligned_le32(domain, t->tx.buf);
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
limits = t->rx.buf;
*max_perf = le32_to_cpu(limits->max_level);
*min_perf = le32_to_cpu(limits->min_level);
}
ph->xops->xfer_put(ph, t);
return ret;
}
static int __scmi_perf_limits_get(const struct scmi_protocol_handle *ph,
struct perf_dom_info *dom, u32 *max_perf,
u32 *min_perf)
{
if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].get_addr) {
struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT];
*max_perf = ioread32(fci->get_addr);
*min_perf = ioread32(fci->get_addr + 4);
trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_GET,
dom->id, *min_perf, *max_perf);
return 0;
}
return scmi_perf_msg_limits_get(ph, dom->id, max_perf, min_perf);
}
static int scmi_perf_limits_get(const struct scmi_protocol_handle *ph,
u32 domain, u32 *max_perf, u32 *min_perf)
{
int ret;
struct perf_dom_info *dom;
dom = scmi_perf_domain_lookup(ph, domain);
if (IS_ERR(dom))
return PTR_ERR(dom);
ret = __scmi_perf_limits_get(ph, dom, max_perf, min_perf);
if (ret)
return ret;
if (dom->level_indexing_mode) {
struct scmi_opp *opp;
opp = xa_load(&dom->opps_by_idx, *min_perf);
if (!opp)
return -EIO;
*min_perf = opp->perf;
opp = xa_load(&dom->opps_by_idx, *max_perf);
if (!opp)
return -EIO;
*max_perf = opp->perf;
}
return 0;
}
static int scmi_perf_msg_level_set(const struct scmi_protocol_handle *ph,
u32 domain, u32 level, bool poll)
{
int ret;
struct scmi_xfer *t;
struct scmi_perf_set_level *lvl;
ret = ph->xops->xfer_get_init(ph, PERF_LEVEL_SET, sizeof(*lvl), 0, &t);
if (ret)
return ret;
t->hdr.poll_completion = poll;
lvl = t->tx.buf;
lvl->domain = cpu_to_le32(domain);
lvl->level = cpu_to_le32(level);
ret = ph->xops->do_xfer(ph, t);
ph->xops->xfer_put(ph, t);
return ret;
}
static int __scmi_perf_level_set(const struct scmi_protocol_handle *ph,
struct perf_dom_info *dom, u32 level,
bool poll)
{
if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr) {
struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LEVEL];
trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_SET,
dom->id, level, 0);
iowrite32(level, fci->set_addr);
ph->hops->fastchannel_db_ring(fci->set_db);
return 0;
}
return scmi_perf_msg_level_set(ph, dom->id, level, poll);
}
static int scmi_perf_level_set(const struct scmi_protocol_handle *ph,
u32 domain, u32 level, bool poll)
{
struct perf_dom_info *dom;
dom = scmi_perf_domain_lookup(ph, domain);
if (IS_ERR(dom))
return PTR_ERR(dom);
if (dom->level_indexing_mode) {
struct scmi_opp *opp;
opp = xa_load(&dom->opps_by_lvl, level);
if (!opp)
return -EIO;
level = opp->level_index;
}
return __scmi_perf_level_set(ph, dom, level, poll);
}
static int scmi_perf_msg_level_get(const struct scmi_protocol_handle *ph,
u32 domain, u32 *level, bool poll)
{
int ret;
struct scmi_xfer *t;
ret = ph->xops->xfer_get_init(ph, PERF_LEVEL_GET,
sizeof(u32), sizeof(u32), &t);
if (ret)
return ret;
t->hdr.poll_completion = poll;
put_unaligned_le32(domain, t->tx.buf);
ret = ph->xops->do_xfer(ph, t);
if (!ret)
*level = get_unaligned_le32(t->rx.buf);
ph->xops->xfer_put(ph, t);
return ret;
}
static int __scmi_perf_level_get(const struct scmi_protocol_handle *ph,
struct perf_dom_info *dom, u32 *level,
bool poll)
{
if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].get_addr) {
*level = ioread32(dom->fc_info[PERF_FC_LEVEL].get_addr);
trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_GET,
dom->id, *level, 0);
return 0;
}
return scmi_perf_msg_level_get(ph, dom->id, level, poll);
}
static int scmi_perf_level_get(const struct scmi_protocol_handle *ph,
u32 domain, u32 *level, bool poll)
{
int ret;
struct perf_dom_info *dom;
dom = scmi_perf_domain_lookup(ph, domain);
if (IS_ERR(dom))
return PTR_ERR(dom);
ret = __scmi_perf_level_get(ph, dom, level, poll);
if (ret)
return ret;
if (dom->level_indexing_mode) {
struct scmi_opp *opp;
opp = xa_load(&dom->opps_by_idx, *level);
if (!opp)
return -EIO;
*level = opp->perf;
}
return 0;
}
static int scmi_perf_level_limits_notify(const struct scmi_protocol_handle *ph,
u32 domain, int message_id,
bool enable)
{
int ret;
struct scmi_xfer *t;
struct scmi_perf_notify_level_or_limits *notify;
ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t);
if (ret)
return ret;
notify = t->tx.buf;
notify->domain = cpu_to_le32(domain);
notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
ret = ph->xops->do_xfer(ph, t);
ph->xops->xfer_put(ph, t);
return ret;
}
static void scmi_perf_domain_init_fc(const struct scmi_protocol_handle *ph,
u32 domain, struct scmi_fc_info **p_fc)
{
struct scmi_fc_info *fc;
fc = devm_kcalloc(ph->dev, PERF_FC_MAX, sizeof(*fc), GFP_KERNEL);
if (!fc)
return;
ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
PERF_LEVEL_SET, 4, domain,
&fc[PERF_FC_LEVEL].set_addr,
&fc[PERF_FC_LEVEL].set_db);
ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
PERF_LEVEL_GET, 4, domain,
&fc[PERF_FC_LEVEL].get_addr, NULL);
ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
PERF_LIMITS_SET, 8, domain,
&fc[PERF_FC_LIMIT].set_addr,
&fc[PERF_FC_LIMIT].set_db);
ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
PERF_LIMITS_GET, 8, domain,
&fc[PERF_FC_LIMIT].get_addr, NULL);
*p_fc = fc;
}
/* Device specific ops */
static int scmi_dev_domain_id(struct device *dev)
{
struct of_phandle_args clkspec;
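/* The perf domain id is carried in the first cell of the device's 'clocks' specifier */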
if (of_parse_phandle_with_args(dev->of_node, "clocks", "#clock-cells",
0, &clkspec))
return -EINVAL;
return clkspec.args[0];
}
static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph,
struct device *dev)
{
int idx, ret, domain;
unsigned long freq;
struct scmi_opp *opp;
struct perf_dom_info *dom;
domain = scmi_dev_domain_id(dev);
if (domain < 0)
return -EINVAL;
dom = scmi_perf_domain_lookup(ph, domain);
if (IS_ERR(dom))
return PTR_ERR(dom);
for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
if (!dom->level_indexing_mode)
freq = opp->perf * dom->mult_factor;
else
freq = opp->indicative_freq * 1000;
ret = dev_pm_opp_add(dev, freq, 0);
if (ret) {
dev_warn(dev, "failed to add opp %luHz\n", freq);
while (idx-- > 0) {
if (!dom->level_indexing_mode)
freq = (--opp)->perf * dom->mult_factor;
else
freq = (--opp)->indicative_freq * 1000;
dev_pm_opp_remove(dev, freq);
}
return ret;
}
dev_dbg(dev, "[%d][%s]:: Registered OPP[%d] %lu\n",
domain, dom->name, idx, freq);
}
return 0;
}
static int
scmi_dvfs_transition_latency_get(const struct scmi_protocol_handle *ph,
struct device *dev)
{
int domain;
struct perf_dom_info *dom;
domain = scmi_dev_domain_id(dev);
if (domain < 0)
return -EINVAL;
dom = scmi_perf_domain_lookup(ph, domain);
if (IS_ERR(dom))
return PTR_ERR(dom);
/* us to ns */
return dom->opp[dom->opp_count - 1].trans_latency_us * 1000;
}
static int scmi_dvfs_freq_set(const struct scmi_protocol_handle *ph, u32 domain,
unsigned long freq, bool poll)
{
unsigned int level;
struct perf_dom_info *dom;
dom = scmi_perf_domain_lookup(ph, domain);
if (IS_ERR(dom))
return PTR_ERR(dom);
if (!dom->level_indexing_mode) {
level = freq / dom->mult_factor;
} else {
struct scmi_opp *opp;
opp = LOOKUP_BY_FREQ(dom->opps_by_freq, freq / 1000);
if (!opp)
return -EIO;
level = opp->level_index;
}
return __scmi_perf_level_set(ph, dom, level, poll);
}
static int scmi_dvfs_freq_get(const struct scmi_protocol_handle *ph, u32 domain,
unsigned long *freq, bool poll)
{
int ret;
u32 level;
struct perf_dom_info *dom;
dom = scmi_perf_domain_lookup(ph, domain);
if (IS_ERR(dom))
return PTR_ERR(dom);
ret = __scmi_perf_level_get(ph, dom, &level, poll);
if (ret)
return ret;
if (!dom->level_indexing_mode) {
*freq = level * dom->mult_factor;
} else {
struct scmi_opp *opp;
opp = xa_load(&dom->opps_by_idx, level);
if (!opp)
return -EIO;
*freq = opp->indicative_freq * 1000;
}
return ret;
}
static int scmi_dvfs_est_power_get(const struct scmi_protocol_handle *ph,
u32 domain, unsigned long *freq,
unsigned long *power)
{
struct perf_dom_info *dom;
unsigned long opp_freq;
int idx, ret = -EINVAL;
struct scmi_opp *opp;
dom = scmi_perf_domain_lookup(ph, domain);
if (IS_ERR(dom))
return PTR_ERR(dom);
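/*
 * Scan the OPPs in ascending perf order and pick the first one whose
 * frequency is not below the requested one.
 */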
for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
if (!dom->level_indexing_mode)
opp_freq = opp->perf * dom->mult_factor;
else
opp_freq = opp->indicative_freq * 1000;
if (opp_freq < *freq)
continue;
*freq = opp_freq;
*power = opp->power;
ret = 0;
break;
}
return ret;
}
static bool scmi_fast_switch_possible(const struct scmi_protocol_handle *ph,
struct device *dev)
{
int domain;
struct perf_dom_info *dom;
domain = scmi_dev_domain_id(dev);
if (domain < 0)
return false;
dom = scmi_perf_domain_lookup(ph, domain);
if (IS_ERR(dom))
return false;
return dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr;
}
static enum scmi_power_scale
scmi_power_scale_get(const struct scmi_protocol_handle *ph)
{
struct scmi_perf_info *pi = ph->get_priv(ph);
return pi->power_scale;
}
static const struct scmi_perf_proto_ops perf_proto_ops = {
.limits_set = scmi_perf_limits_set,
.limits_get = scmi_perf_limits_get,
.level_set = scmi_perf_level_set,
.level_get = scmi_perf_level_get,
.device_domain_id = scmi_dev_domain_id,
.transition_latency_get = scmi_dvfs_transition_latency_get,
.device_opps_add = scmi_dvfs_device_opps_add,
.freq_set = scmi_dvfs_freq_set,
.freq_get = scmi_dvfs_freq_get,
.est_power_get = scmi_dvfs_est_power_get,
.fast_switch_possible = scmi_fast_switch_possible,
.power_scale_get = scmi_power_scale_get,
};
static int scmi_perf_set_notify_enabled(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enable)
{
int ret, cmd_id;
if (evt_id >= ARRAY_SIZE(evt_2_cmd))
return -EINVAL;
cmd_id = evt_2_cmd[evt_id];
ret = scmi_perf_level_limits_notify(ph, src_id, cmd_id, enable);
if (ret)
pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
evt_id, src_id, ret);
return ret;
}
static void *scmi_perf_fill_custom_report(const struct scmi_protocol_handle *ph,
u8 evt_id, ktime_t timestamp,
const void *payld, size_t payld_sz,
void *report, u32 *src_id)
{
void *rep = NULL;
switch (evt_id) {
case SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED:
{
const struct scmi_perf_limits_notify_payld *p = payld;
struct scmi_perf_limits_report *r = report;
if (sizeof(*p) != payld_sz)
break;
r->timestamp = timestamp;
r->agent_id = le32_to_cpu(p->agent_id);
r->domain_id = le32_to_cpu(p->domain_id);
r->range_max = le32_to_cpu(p->range_max);
r->range_min = le32_to_cpu(p->range_min);
*src_id = r->domain_id;
rep = r;
break;
}
case SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED:
{
const struct scmi_perf_level_notify_payld *p = payld;
struct scmi_perf_level_report *r = report;
if (sizeof(*p) != payld_sz)
break;
r->timestamp = timestamp;
r->agent_id = le32_to_cpu(p->agent_id);
r->domain_id = le32_to_cpu(p->domain_id);
r->performance_level = le32_to_cpu(p->performance_level);
*src_id = r->domain_id;
rep = r;
break;
}
default:
break;
}
return rep;
}
static int scmi_perf_get_num_sources(const struct scmi_protocol_handle *ph)
{
struct scmi_perf_info *pi = ph->get_priv(ph);
if (!pi)
return -EINVAL;
return pi->num_domains;
}
static const struct scmi_event perf_events[] = {
{
.id = SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
.max_payld_sz = sizeof(struct scmi_perf_limits_notify_payld),
.max_report_sz = sizeof(struct scmi_perf_limits_report),
},
{
.id = SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED,
.max_payld_sz = sizeof(struct scmi_perf_level_notify_payld),
.max_report_sz = sizeof(struct scmi_perf_level_report),
},
};
static const struct scmi_event_ops perf_event_ops = {
.get_num_sources = scmi_perf_get_num_sources,
.set_notify_enabled = scmi_perf_set_notify_enabled,
.fill_custom_report = scmi_perf_fill_custom_report,
};
static const struct scmi_protocol_events perf_protocol_events = {
.queue_sz = SCMI_PROTO_QUEUE_SZ,
.ops = &perf_event_ops,
.evts = perf_events,
.num_events = ARRAY_SIZE(perf_events),
};
static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
{
int domain, ret;
u32 version;
struct scmi_perf_info *pinfo;
ret = ph->xops->version_get(ph, &version);
if (ret)
return ret;
dev_dbg(ph->dev, "Performance Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
if (!pinfo)
return -ENOMEM;
ret = scmi_perf_attributes_get(ph, pinfo);
if (ret)
return ret;
pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains,
sizeof(*pinfo->dom_info), GFP_KERNEL);
if (!pinfo->dom_info)
return -ENOMEM;
for (domain = 0; domain < pinfo->num_domains; domain++) {
struct perf_dom_info *dom = pinfo->dom_info + domain;
dom->id = domain;
scmi_perf_domain_attributes_get(ph, dom, version);
scmi_perf_describe_levels_get(ph, dom, version);
if (dom->perf_fastchannels)
scmi_perf_domain_init_fc(ph, dom->id, &dom->fc_info);
}
ret = devm_add_action_or_reset(ph->dev, scmi_perf_xa_destroy, pinfo);
if (ret)
return ret;
pinfo->version = version;
return ph->set_priv(ph, pinfo);
}
static const struct scmi_protocol scmi_perf = {
.id = SCMI_PROTOCOL_PERF,
.owner = THIS_MODULE,
.instance_init = &scmi_perf_protocol_init,
.ops = &perf_proto_ops,
.events = &perf_protocol_events,
};
DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(perf, scmi_perf)
| linux-master | drivers/firmware/arm_scmi/perf.c |
// SPDX-License-Identifier: GPL-2.0
/*
* System Control and Management Interface (SCMI) System Power Protocol
*
* Copyright (C) 2020-2022 ARM Ltd.
*/
#define pr_fmt(fmt) "SCMI Notifications SYSTEM - " fmt
#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include "protocols.h"
#include "notify.h"
#define SCMI_SYSTEM_NUM_SOURCES 1
enum scmi_system_protocol_cmd {
SYSTEM_POWER_STATE_NOTIFY = 0x5,
};
struct scmi_system_power_state_notify {
__le32 notify_enable;
};
struct scmi_system_power_state_notifier_payld {
__le32 agent_id;
__le32 flags;
__le32 system_state;
__le32 timeout;
};
struct scmi_system_info {
u32 version;
bool graceful_timeout_supported;
};
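/*
 * Issue a SYSTEM_POWER_STATE_NOTIFY command to subscribe to, or unsubscribe
 * from, platform originated system power state notifications.
 */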
static int scmi_system_request_notify(const struct scmi_protocol_handle *ph,
bool enable)
{
int ret;
struct scmi_xfer *t;
struct scmi_system_power_state_notify *notify;
ret = ph->xops->xfer_get_init(ph, SYSTEM_POWER_STATE_NOTIFY,
sizeof(*notify), 0, &t);
if (ret)
return ret;
notify = t->tx.buf;
notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
ret = ph->xops->do_xfer(ph, t);
ph->xops->xfer_put(ph, t);
return ret;
}
static int scmi_system_set_notify_enabled(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enable)
{
int ret;
ret = scmi_system_request_notify(ph, enable);
if (ret)
pr_debug("FAIL_ENABLE - evt[%X] - ret:%d\n", evt_id, ret);
return ret;
}
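/*
 * The SystemPower notification payload carries a trailing timeout word only
 * when the platform supports graceful timeouts (protocol v2.0 onwards), so
 * the expected payload size is derived from what was discovered at init
 * time and the timeout is propagated only for graceful shutdown requests.
 */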
static void *
scmi_system_fill_custom_report(const struct scmi_protocol_handle *ph,
u8 evt_id, ktime_t timestamp,
const void *payld, size_t payld_sz,
void *report, u32 *src_id)
{
size_t expected_sz;
const struct scmi_system_power_state_notifier_payld *p = payld;
struct scmi_system_power_state_notifier_report *r = report;
struct scmi_system_info *pinfo = ph->get_priv(ph);
expected_sz = pinfo->graceful_timeout_supported ?
sizeof(*p) : sizeof(*p) - sizeof(__le32);
if (evt_id != SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER ||
payld_sz != expected_sz)
return NULL;
r->timestamp = timestamp;
r->agent_id = le32_to_cpu(p->agent_id);
r->flags = le32_to_cpu(p->flags);
r->system_state = le32_to_cpu(p->system_state);
if (pinfo->graceful_timeout_supported &&
r->system_state == SCMI_SYSTEM_SHUTDOWN &&
SCMI_SYSPOWER_IS_REQUEST_GRACEFUL(r->flags))
r->timeout = le32_to_cpu(p->timeout);
else
r->timeout = 0x00;
*src_id = 0;
return r;
}
static const struct scmi_event system_events[] = {
{
.id = SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER,
.max_payld_sz =
sizeof(struct scmi_system_power_state_notifier_payld),
.max_report_sz =
sizeof(struct scmi_system_power_state_notifier_report),
},
};
static const struct scmi_event_ops system_event_ops = {
.set_notify_enabled = scmi_system_set_notify_enabled,
.fill_custom_report = scmi_system_fill_custom_report,
};
static const struct scmi_protocol_events system_protocol_events = {
.queue_sz = SCMI_PROTO_QUEUE_SZ,
.ops = &system_event_ops,
.evts = system_events,
.num_events = ARRAY_SIZE(system_events),
.num_sources = SCMI_SYSTEM_NUM_SOURCES,
};
static int scmi_system_protocol_init(const struct scmi_protocol_handle *ph)
{
int ret;
u32 version;
struct scmi_system_info *pinfo;
ret = ph->xops->version_get(ph, &version);
if (ret)
return ret;
dev_dbg(ph->dev, "System Power Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
if (!pinfo)
return -ENOMEM;
pinfo->version = version;
if (PROTOCOL_REV_MAJOR(pinfo->version) >= 0x2)
pinfo->graceful_timeout_supported = true;
return ph->set_priv(ph, pinfo);
}
static const struct scmi_protocol scmi_system = {
.id = SCMI_PROTOCOL_SYSTEM,
.owner = THIS_MODULE,
.instance_init = &scmi_system_protocol_init,
.ops = NULL,
.events = &system_protocol_events,
};
DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(system, scmi_system)
| linux-master | drivers/firmware/arm_scmi/system.c |
// SPDX-License-Identifier: GPL-2.0
/*
* System Control and Management Interface (SCMI) Base Protocol
*
* Copyright (C) 2018-2021 ARM Ltd.
*/
#define pr_fmt(fmt) "SCMI Notifications BASE - " fmt
#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include "common.h"
#include "notify.h"
#define SCMI_BASE_NUM_SOURCES 1
#define SCMI_BASE_MAX_CMD_ERR_COUNT 1024
enum scmi_base_protocol_cmd {
BASE_DISCOVER_VENDOR = 0x3,
BASE_DISCOVER_SUB_VENDOR = 0x4,
BASE_DISCOVER_IMPLEMENT_VERSION = 0x5,
BASE_DISCOVER_LIST_PROTOCOLS = 0x6,
BASE_DISCOVER_AGENT = 0x7,
BASE_NOTIFY_ERRORS = 0x8,
BASE_SET_DEVICE_PERMISSIONS = 0x9,
BASE_SET_PROTOCOL_PERMISSIONS = 0xa,
BASE_RESET_AGENT_CONFIGURATION = 0xb,
};
struct scmi_msg_resp_base_attributes {
u8 num_protocols;
u8 num_agents;
__le16 reserved;
};
struct scmi_msg_resp_base_discover_agent {
__le32 agent_id;
u8 name[SCMI_SHORT_NAME_MAX_SIZE];
};
struct scmi_msg_base_error_notify {
__le32 event_control;
#define BASE_TP_NOTIFY_ALL BIT(0)
};
struct scmi_base_error_notify_payld {
__le32 agent_id;
__le32 error_status;
#define IS_FATAL_ERROR(x) ((x) & BIT(31))
#define ERROR_CMD_COUNT(x) FIELD_GET(GENMASK(9, 0), (x))
__le64 msg_reports[SCMI_BASE_MAX_CMD_ERR_COUNT];
};
/**
* scmi_base_attributes_get() - gets the implementation details
* that are associated with the base protocol.
*
* @ph: SCMI protocol handle
*
* Return: 0 on success, else appropriate SCMI error.
*/
static int scmi_base_attributes_get(const struct scmi_protocol_handle *ph)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_base_attributes *attr_info;
struct scmi_revision_info *rev = ph->get_priv(ph);
ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
0, sizeof(*attr_info), &t);
if (ret)
return ret;
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
attr_info = t->rx.buf;
rev->num_protocols = attr_info->num_protocols;
rev->num_agents = attr_info->num_agents;
}
ph->xops->xfer_put(ph, t);
return ret;
}
/**
* scmi_base_vendor_id_get() - gets vendor/subvendor identifier ASCII string.
*
* @ph: SCMI protocol handle
* @sub_vendor: specify true if sub-vendor ID is needed
*
* Return: 0 on success, else appropriate SCMI error.
*/
static int
scmi_base_vendor_id_get(const struct scmi_protocol_handle *ph, bool sub_vendor)
{
u8 cmd;
int ret, size;
char *vendor_id;
struct scmi_xfer *t;
struct scmi_revision_info *rev = ph->get_priv(ph);
if (sub_vendor) {
cmd = BASE_DISCOVER_SUB_VENDOR;
vendor_id = rev->sub_vendor_id;
size = ARRAY_SIZE(rev->sub_vendor_id);
} else {
cmd = BASE_DISCOVER_VENDOR;
vendor_id = rev->vendor_id;
size = ARRAY_SIZE(rev->vendor_id);
}
ret = ph->xops->xfer_get_init(ph, cmd, 0, size, &t);
if (ret)
return ret;
ret = ph->xops->do_xfer(ph, t);
if (!ret)
strscpy(vendor_id, t->rx.buf, size);
ph->xops->xfer_put(ph, t);
return ret;
}
/**
* scmi_base_implementation_version_get() - gets a vendor-specific
* implementation 32-bit version. The format of the version number is
 * vendor-specific.
*
* @ph: SCMI protocol handle
*
* Return: 0 on success, else appropriate SCMI error.
*/
static int
scmi_base_implementation_version_get(const struct scmi_protocol_handle *ph)
{
int ret;
__le32 *impl_ver;
struct scmi_xfer *t;
struct scmi_revision_info *rev = ph->get_priv(ph);
ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_IMPLEMENT_VERSION,
0, sizeof(*impl_ver), &t);
if (ret)
return ret;
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
impl_ver = t->rx.buf;
rev->impl_ver = le32_to_cpu(*impl_ver);
}
ph->xops->xfer_put(ph, t);
return ret;
}
/**
 * scmi_base_implementation_list_get() - gets the list of protocols the
 * OSPM is allowed to access
*
* @ph: SCMI protocol handle
* @protocols_imp: pointer to hold the list of protocol identifiers
*
* Return: 0 on success, else appropriate SCMI error.
*/
static int
scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph,
u8 *protocols_imp)
{
u8 *list;
int ret, loop;
struct scmi_xfer *t;
__le32 *num_skip, *num_ret;
u32 tot_num_ret = 0, loop_num_ret;
struct device *dev = ph->dev;
struct scmi_revision_info *rev = ph->get_priv(ph);
ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_LIST_PROTOCOLS,
sizeof(*num_skip), 0, &t);
if (ret)
return ret;
num_skip = t->tx.buf;
num_ret = t->rx.buf;
list = t->rx.buf + sizeof(*num_ret);
do {
size_t real_list_sz;
u32 calc_list_sz;
/* Set the number of protocols to be skipped/already read */
*num_skip = cpu_to_le32(tot_num_ret);
ret = ph->xops->do_xfer(ph, t);
if (ret)
break;
loop_num_ret = le32_to_cpu(*num_ret);
if (!loop_num_ret)
break;
if (loop_num_ret > rev->num_protocols - tot_num_ret) {
dev_err(dev,
"No. Returned protocols > Total protocols.\n");
break;
}
if (t->rx.len < (sizeof(u32) * 2)) {
dev_err(dev, "Truncated reply - rx.len:%zd\n",
t->rx.len);
ret = -EPROTO;
break;
}
real_list_sz = t->rx.len - sizeof(u32);
calc_list_sz = (1 + (loop_num_ret - 1) / sizeof(u32)) *
sizeof(u32);
if (calc_list_sz != real_list_sz) {
dev_warn(dev,
"Malformed reply - real_sz:%zd calc_sz:%u (loop_num_ret:%d)\n",
real_list_sz, calc_list_sz, loop_num_ret);
/*
* Bail out if the expected list size is bigger than the
* total payload size of the received reply.
*/
if (calc_list_sz > real_list_sz) {
ret = -EPROTO;
break;
}
}
for (loop = 0; loop < loop_num_ret; loop++)
protocols_imp[tot_num_ret + loop] = *(list + loop);
tot_num_ret += loop_num_ret;
ph->xops->reset_rx_to_maxsz(ph, t);
} while (tot_num_ret < rev->num_protocols);
ph->xops->xfer_put(ph, t);
return ret;
}
/**
* scmi_base_discover_agent_get() - discover the name of an agent
*
* @ph: SCMI protocol handle
* @id: Agent identifier
* @name: Agent identifier ASCII string
*
* An agent id of 0 is reserved to identify the platform itself.
 * Generally, the operating system is represented as "OSPM".
*
* Return: 0 on success, else appropriate SCMI error.
*/
static int scmi_base_discover_agent_get(const struct scmi_protocol_handle *ph,
int id, char *name)
{
int ret;
struct scmi_msg_resp_base_discover_agent *agent_info;
struct scmi_xfer *t;
ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_AGENT,
sizeof(__le32), sizeof(*agent_info), &t);
if (ret)
return ret;
put_unaligned_le32(id, t->tx.buf);
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
agent_info = t->rx.buf;
strscpy(name, agent_info->name, SCMI_SHORT_NAME_MAX_SIZE);
}
ph->xops->xfer_put(ph, t);
return ret;
}
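/*
 * Ask the platform to start (or stop) reporting errors via the
 * BASE_NOTIFY_ERRORS command, setting the event control to
 * BASE_TP_NOTIFY_ALL when enabling.
 */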
static int scmi_base_error_notify(const struct scmi_protocol_handle *ph,
bool enable)
{
int ret;
u32 evt_cntl = enable ? BASE_TP_NOTIFY_ALL : 0;
struct scmi_xfer *t;
struct scmi_msg_base_error_notify *cfg;
ret = ph->xops->xfer_get_init(ph, BASE_NOTIFY_ERRORS,
sizeof(*cfg), 0, &t);
if (ret)
return ret;
cfg = t->tx.buf;
cfg->event_control = cpu_to_le32(evt_cntl);
ret = ph->xops->do_xfer(ph, t);
ph->xops->xfer_put(ph, t);
return ret;
}
static int scmi_base_set_notify_enabled(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enable)
{
int ret;
ret = scmi_base_error_notify(ph, enable);
if (ret)
pr_debug("FAIL_ENABLED - evt[%X] ret:%d\n", evt_id, ret);
return ret;
}
static void *scmi_base_fill_custom_report(const struct scmi_protocol_handle *ph,
u8 evt_id, ktime_t timestamp,
const void *payld, size_t payld_sz,
void *report, u32 *src_id)
{
int i;
const struct scmi_base_error_notify_payld *p = payld;
struct scmi_base_error_report *r = report;
/*
 * The BaseError notification payload is variable in size, up to a
 * maximum length bounded by the structure pointed to by p, while
 * payld_sz is the effective length of this specific notification
 * payload: payld_sz can therefore never be greater than the maximum
 * size described by p.
 */
if (evt_id != SCMI_EVENT_BASE_ERROR_EVENT || sizeof(*p) < payld_sz)
return NULL;
r->timestamp = timestamp;
r->agent_id = le32_to_cpu(p->agent_id);
r->fatal = IS_FATAL_ERROR(le32_to_cpu(p->error_status));
r->cmd_count = ERROR_CMD_COUNT(le32_to_cpu(p->error_status));
for (i = 0; i < r->cmd_count; i++)
r->reports[i] = le64_to_cpu(p->msg_reports[i]);
*src_id = 0;
return r;
}
static const struct scmi_event base_events[] = {
{
.id = SCMI_EVENT_BASE_ERROR_EVENT,
.max_payld_sz = sizeof(struct scmi_base_error_notify_payld),
.max_report_sz = sizeof(struct scmi_base_error_report) +
SCMI_BASE_MAX_CMD_ERR_COUNT * sizeof(u64),
},
};
static const struct scmi_event_ops base_event_ops = {
.set_notify_enabled = scmi_base_set_notify_enabled,
.fill_custom_report = scmi_base_fill_custom_report,
};
static const struct scmi_protocol_events base_protocol_events = {
.queue_sz = 4 * SCMI_PROTO_QUEUE_SZ,
.ops = &base_event_ops,
.evts = base_events,
.num_events = ARRAY_SIZE(base_events),
.num_sources = SCMI_BASE_NUM_SOURCES,
};
static int scmi_base_protocol_init(const struct scmi_protocol_handle *ph)
{
int id, ret;
u8 *prot_imp;
u32 version;
char name[SCMI_SHORT_NAME_MAX_SIZE];
struct device *dev = ph->dev;
struct scmi_revision_info *rev = scmi_revision_area_get(ph);
ret = ph->xops->version_get(ph, &version);
if (ret)
return ret;
rev->major_ver = PROTOCOL_REV_MAJOR(version);
rev->minor_ver = PROTOCOL_REV_MINOR(version);
ph->set_priv(ph, rev);
ret = scmi_base_attributes_get(ph);
if (ret)
return ret;
prot_imp = devm_kcalloc(dev, rev->num_protocols, sizeof(u8),
GFP_KERNEL);
if (!prot_imp)
return -ENOMEM;
scmi_base_vendor_id_get(ph, false);
scmi_base_vendor_id_get(ph, true);
scmi_base_implementation_version_get(ph);
scmi_base_implementation_list_get(ph, prot_imp);
scmi_setup_protocol_implemented(ph, prot_imp);
dev_info(dev, "SCMI Protocol v%d.%d '%s:%s' Firmware version 0x%x\n",
rev->major_ver, rev->minor_ver, rev->vendor_id,
rev->sub_vendor_id, rev->impl_ver);
dev_dbg(dev, "Found %d protocol(s) %d agent(s)\n", rev->num_protocols,
rev->num_agents);
for (id = 0; id < rev->num_agents; id++) {
scmi_base_discover_agent_get(ph, id, name);
dev_dbg(dev, "Agent %d: %s\n", id, name);
}
return 0;
}
static const struct scmi_protocol scmi_base = {
.id = SCMI_PROTOCOL_BASE,
.owner = NULL,
.instance_init = &scmi_base_protocol_init,
.ops = NULL,
.events = &base_protocol_events,
};
DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(base, scmi_base)
| linux-master | drivers/firmware/arm_scmi/base.c |
// SPDX-License-Identifier: GPL-2.0
/*
* System Control and Management Interface (SCMI) Clock Protocol
*
* Copyright (C) 2018-2022 ARM Ltd.
*/
#include <linux/module.h>
#include <linux/limits.h>
#include <linux/sort.h>
#include "protocols.h"
#include "notify.h"
enum scmi_clock_protocol_cmd {
CLOCK_ATTRIBUTES = 0x3,
CLOCK_DESCRIBE_RATES = 0x4,
CLOCK_RATE_SET = 0x5,
CLOCK_RATE_GET = 0x6,
CLOCK_CONFIG_SET = 0x7,
CLOCK_NAME_GET = 0x8,
CLOCK_RATE_NOTIFY = 0x9,
CLOCK_RATE_CHANGE_REQUESTED_NOTIFY = 0xA,
};
struct scmi_msg_resp_clock_protocol_attributes {
__le16 num_clocks;
u8 max_async_req;
u8 reserved;
};
struct scmi_msg_resp_clock_attributes {
__le32 attributes;
#define CLOCK_ENABLE BIT(0)
#define SUPPORTS_RATE_CHANGED_NOTIF(x) ((x) & BIT(31))
#define SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(x) ((x) & BIT(30))
#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(29))
u8 name[SCMI_SHORT_NAME_MAX_SIZE];
__le32 clock_enable_latency;
};
struct scmi_clock_set_config {
__le32 id;
__le32 attributes;
};
struct scmi_msg_clock_describe_rates {
__le32 id;
__le32 rate_index;
};
struct scmi_msg_resp_clock_describe_rates {
__le32 num_rates_flags;
#define NUM_RETURNED(x) ((x) & 0xfff)
#define RATE_DISCRETE(x) !((x) & BIT(12))
#define NUM_REMAINING(x) ((x) >> 16)
struct {
__le32 value_low;
__le32 value_high;
} rate[];
#define RATE_TO_U64(X) \
({ \
typeof(X) x = (X); \
le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
})
};
struct scmi_clock_set_rate {
__le32 flags;
#define CLOCK_SET_ASYNC BIT(0)
#define CLOCK_SET_IGNORE_RESP BIT(1)
#define CLOCK_SET_ROUND_UP BIT(2)
#define CLOCK_SET_ROUND_AUTO BIT(3)
__le32 id;
__le32 value_low;
__le32 value_high;
};
struct scmi_msg_resp_set_rate_complete {
__le32 id;
__le32 rate_low;
__le32 rate_high;
};
struct scmi_msg_clock_rate_notify {
__le32 clk_id;
__le32 notify_enable;
};
struct scmi_clock_rate_notify_payld {
__le32 agent_id;
__le32 clock_id;
__le32 rate_low;
__le32 rate_high;
};
struct clock_info {
u32 version;
int num_clocks;
int max_async_req;
atomic_t cur_async_req;
struct scmi_clock_info *clk;
};
static enum scmi_clock_protocol_cmd evt_2_cmd[] = {
CLOCK_RATE_NOTIFY,
CLOCK_RATE_CHANGE_REQUESTED_NOTIFY,
};
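/*
 * Read the clock protocol attributes: the number of clocks exposed by the
 * platform and the maximum number of asynchronous rate-set requests that
 * can be pending at any time.
 */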
static int
scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
struct clock_info *ci)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_clock_protocol_attributes *attr;
ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
0, sizeof(*attr), &t);
if (ret)
return ret;
attr = t->rx.buf;
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
ci->num_clocks = le16_to_cpu(attr->num_clocks);
ci->max_async_req = attr->max_async_req;
}
ph->xops->xfer_put(ph, t);
return ret;
}
static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
u32 clk_id, struct scmi_clock_info *clk,
u32 version)
{
int ret;
u32 attributes;
struct scmi_xfer *t;
struct scmi_msg_resp_clock_attributes *attr;
ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
sizeof(clk_id), sizeof(*attr), &t);
if (ret)
return ret;
put_unaligned_le32(clk_id, t->tx.buf);
attr = t->rx.buf;
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
u32 latency = 0;
attributes = le32_to_cpu(attr->attributes);
strscpy(clk->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
/* clock_enable_latency field is present only since SCMI v3.1 */
if (PROTOCOL_REV_MAJOR(version) >= 0x2)
latency = le32_to_cpu(attr->clock_enable_latency);
clk->enable_latency = latency ? : U32_MAX;
}
ph->xops->xfer_put(ph, t);
/*
* If supported overwrite short name with the extended one;
* on error just carry on and use already provided short name.
*/
if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x2) {
if (SUPPORTS_EXTENDED_NAMES(attributes))
ph->hops->extended_name_get(ph, CLOCK_NAME_GET, clk_id,
clk->name,
SCMI_MAX_STR_SIZE);
if (SUPPORTS_RATE_CHANGED_NOTIF(attributes))
clk->rate_changed_notifications = true;
if (SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(attributes))
clk->rate_change_requested_notifications = true;
}
return ret;
}
static int rate_cmp_func(const void *_r1, const void *_r2)
{
const u64 *r1 = _r1, *r2 = _r2;
if (*r1 < *r2)
return -1;
else if (*r1 == *r2)
return 0;
else
return 1;
}
struct scmi_clk_ipriv {
struct device *dev;
u32 clk_id;
struct scmi_clock_info *clk;
};
static void iter_clk_describe_prepare_message(void *message,
const unsigned int desc_index,
const void *priv)
{
struct scmi_msg_clock_describe_rates *msg = message;
const struct scmi_clk_ipriv *p = priv;
msg->id = cpu_to_le32(p->clk_id);
/* Set the number of rates to be skipped/already read */
msg->rate_index = cpu_to_le32(desc_index);
}
static int
iter_clk_describe_update_state(struct scmi_iterator_state *st,
const void *response, void *priv)
{
u32 flags;
struct scmi_clk_ipriv *p = priv;
const struct scmi_msg_resp_clock_describe_rates *r = response;
flags = le32_to_cpu(r->num_rates_flags);
st->num_remaining = NUM_REMAINING(flags);
st->num_returned = NUM_RETURNED(flags);
p->clk->rate_discrete = RATE_DISCRETE(flags);
/* Warn about out of spec replies ... */
if (!p->clk->rate_discrete &&
(st->num_returned != 3 || st->num_remaining != 0)) {
dev_warn(p->dev,
"Out-of-spec CLOCK_DESCRIBE_RATES reply for %s - returned:%d remaining:%d rx_len:%zd\n",
p->clk->name, st->num_returned, st->num_remaining,
st->rx_len);
/*
* A known quirk: a triplet is returned but num_returned != 3
* Check for a safe payload size and fix.
*/
if (st->num_returned != 3 && st->num_remaining == 0 &&
st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) {
st->num_returned = 3;
st->num_remaining = 0;
} else {
dev_err(p->dev,
"Cannot fix out-of-spec reply !\n");
return -EPROTO;
}
}
return 0;
}
static int
iter_clk_describe_process_response(const struct scmi_protocol_handle *ph,
const void *response,
struct scmi_iterator_state *st, void *priv)
{
int ret = 0;
struct scmi_clk_ipriv *p = priv;
const struct scmi_msg_resp_clock_describe_rates *r = response;
if (!p->clk->rate_discrete) {
switch (st->desc_index + st->loop_idx) {
case 0:
p->clk->range.min_rate = RATE_TO_U64(r->rate[0]);
break;
case 1:
p->clk->range.max_rate = RATE_TO_U64(r->rate[1]);
break;
case 2:
p->clk->range.step_size = RATE_TO_U64(r->rate[2]);
break;
default:
ret = -EINVAL;
break;
}
} else {
u64 *rate = &p->clk->list.rates[st->desc_index + st->loop_idx];
*rate = RATE_TO_U64(r->rate[st->loop_idx]);
p->clk->list.num_rates++;
}
return ret;
}
static int
scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
struct scmi_clock_info *clk)
{
int ret;
void *iter;
struct scmi_iterator_ops ops = {
.prepare_message = iter_clk_describe_prepare_message,
.update_state = iter_clk_describe_update_state,
.process_response = iter_clk_describe_process_response,
};
struct scmi_clk_ipriv cpriv = {
.clk_id = clk_id,
.clk = clk,
.dev = ph->dev,
};
iter = ph->hops->iter_response_init(ph, &ops, SCMI_MAX_NUM_RATES,
CLOCK_DESCRIBE_RATES,
sizeof(struct scmi_msg_clock_describe_rates),
&cpriv);
if (IS_ERR(iter))
return PTR_ERR(iter);
ret = ph->hops->iter_response_run(iter);
if (ret)
return ret;
if (!clk->rate_discrete) {
dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n",
clk->range.min_rate, clk->range.max_rate,
clk->range.step_size);
} else if (clk->list.num_rates) {
sort(clk->list.rates, clk->list.num_rates,
sizeof(clk->list.rates[0]), rate_cmp_func, NULL);
}
return ret;
}
static int
scmi_clock_rate_get(const struct scmi_protocol_handle *ph,
u32 clk_id, u64 *value)
{
int ret;
struct scmi_xfer *t;
ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_GET,
sizeof(__le32), sizeof(u64), &t);
if (ret)
return ret;
put_unaligned_le32(clk_id, t->tx.buf);
ret = ph->xops->do_xfer(ph, t);
if (!ret)
*value = get_unaligned_le64(t->rx.buf);
ph->xops->xfer_put(ph, t);
return ret;
}
static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
u32 clk_id, u64 rate)
{
int ret;
u32 flags = 0;
struct scmi_xfer *t;
struct scmi_clock_set_rate *cfg;
struct clock_info *ci = ph->get_priv(ph);
ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_SET, sizeof(*cfg), 0, &t);
if (ret)
return ret;
if (ci->max_async_req &&
atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
flags |= CLOCK_SET_ASYNC;
cfg = t->tx.buf;
cfg->flags = cpu_to_le32(flags);
cfg->id = cpu_to_le32(clk_id);
cfg->value_low = cpu_to_le32(rate & 0xffffffff);
cfg->value_high = cpu_to_le32(rate >> 32);
if (flags & CLOCK_SET_ASYNC) {
ret = ph->xops->do_xfer_with_response(ph, t);
if (!ret) {
struct scmi_msg_resp_set_rate_complete *resp;
resp = t->rx.buf;
if (le32_to_cpu(resp->id) == clk_id)
dev_dbg(ph->dev,
"Clk ID %d set async to %llu\n", clk_id,
get_unaligned_le64(&resp->rate_low));
else
ret = -EPROTO;
}
} else {
ret = ph->xops->do_xfer(ph, t);
}
if (ci->max_async_req)
atomic_dec(&ci->cur_async_req);
ph->xops->xfer_put(ph, t);
return ret;
}
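/*
 * Set the clock configuration (enable/disable). When @atomic is true the
 * transfer is marked for polled completion so that it can be carried out
 * without sleeping.
 */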
static int
scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
u32 config, bool atomic)
{
int ret;
struct scmi_xfer *t;
struct scmi_clock_set_config *cfg;
ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
sizeof(*cfg), 0, &t);
if (ret)
return ret;
t->hdr.poll_completion = atomic;
cfg = t->tx.buf;
cfg->id = cpu_to_le32(clk_id);
cfg->attributes = cpu_to_le32(config);
ret = ph->xops->do_xfer(ph, t);
ph->xops->xfer_put(ph, t);
return ret;
}
static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id)
{
return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, false);
}
static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id)
{
return scmi_clock_config_set(ph, clk_id, 0, false);
}
static int scmi_clock_enable_atomic(const struct scmi_protocol_handle *ph,
u32 clk_id)
{
return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, true);
}
static int scmi_clock_disable_atomic(const struct scmi_protocol_handle *ph,
u32 clk_id)
{
return scmi_clock_config_set(ph, clk_id, 0, true);
}
static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
{
struct clock_info *ci = ph->get_priv(ph);
return ci->num_clocks;
}
static const struct scmi_clock_info *
scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
{
struct scmi_clock_info *clk;
struct clock_info *ci = ph->get_priv(ph);
if (clk_id >= ci->num_clocks)
return NULL;
clk = ci->clk + clk_id;
if (!clk->name[0])
return NULL;
return clk;
}
static const struct scmi_clk_proto_ops clk_proto_ops = {
.count_get = scmi_clock_count_get,
.info_get = scmi_clock_info_get,
.rate_get = scmi_clock_rate_get,
.rate_set = scmi_clock_rate_set,
.enable = scmi_clock_enable,
.disable = scmi_clock_disable,
.enable_atomic = scmi_clock_enable_atomic,
.disable_atomic = scmi_clock_disable_atomic,
};
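/*
 * Usage sketch (illustrative only): a consumer driver typically reaches
 * these operations through the SCMI core handle, roughly:
 *
 *    const struct scmi_clk_proto_ops *ops;
 *    struct scmi_protocol_handle *cph;
 *
 *    ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK, &cph);
 *    if (!IS_ERR(ops))
 *            ops->rate_set(cph, clk_id, rate_hz);
 *
 * where sdev is the consumer's scmi_device and clk_id/rate_hz are
 * placeholders; see e.g. drivers/clk/clk-scmi.c for real usage.
 */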
static int scmi_clk_rate_notify(const struct scmi_protocol_handle *ph,
u32 clk_id, int message_id, bool enable)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_clock_rate_notify *notify;
ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t);
if (ret)
return ret;
notify = t->tx.buf;
notify->clk_id = cpu_to_le32(clk_id);
notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
ret = ph->xops->do_xfer(ph, t);
ph->xops->xfer_put(ph, t);
return ret;
}
static int scmi_clk_set_notify_enabled(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enable)
{
int ret, cmd_id;
if (evt_id >= ARRAY_SIZE(evt_2_cmd))
return -EINVAL;
cmd_id = evt_2_cmd[evt_id];
ret = scmi_clk_rate_notify(ph, src_id, cmd_id, enable);
if (ret)
pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
evt_id, src_id, ret);
return ret;
}
static void *scmi_clk_fill_custom_report(const struct scmi_protocol_handle *ph,
u8 evt_id, ktime_t timestamp,
const void *payld, size_t payld_sz,
void *report, u32 *src_id)
{
const struct scmi_clock_rate_notify_payld *p = payld;
struct scmi_clock_rate_notif_report *r = report;
if (sizeof(*p) != payld_sz ||
(evt_id != SCMI_EVENT_CLOCK_RATE_CHANGED &&
evt_id != SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED))
return NULL;
r->timestamp = timestamp;
r->agent_id = le32_to_cpu(p->agent_id);
r->clock_id = le32_to_cpu(p->clock_id);
r->rate = get_unaligned_le64(&p->rate_low);
*src_id = r->clock_id;
return r;
}
static int scmi_clk_get_num_sources(const struct scmi_protocol_handle *ph)
{
struct clock_info *ci = ph->get_priv(ph);
if (!ci)
return -EINVAL;
return ci->num_clocks;
}
static const struct scmi_event clk_events[] = {
{
.id = SCMI_EVENT_CLOCK_RATE_CHANGED,
.max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
.max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
},
{
.id = SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED,
.max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
.max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
},
};
static const struct scmi_event_ops clk_event_ops = {
.get_num_sources = scmi_clk_get_num_sources,
.set_notify_enabled = scmi_clk_set_notify_enabled,
.fill_custom_report = scmi_clk_fill_custom_report,
};
static const struct scmi_protocol_events clk_protocol_events = {
.queue_sz = SCMI_PROTO_QUEUE_SZ,
.ops = &clk_event_ops,
.evts = clk_events,
.num_events = ARRAY_SIZE(clk_events),
};
static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
{
u32 version;
int clkid, ret;
struct clock_info *cinfo;
ret = ph->xops->version_get(ph, &version);
if (ret)
return ret;
dev_dbg(ph->dev, "Clock Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
cinfo = devm_kzalloc(ph->dev, sizeof(*cinfo), GFP_KERNEL);
if (!cinfo)
return -ENOMEM;
ret = scmi_clock_protocol_attributes_get(ph, cinfo);
if (ret)
return ret;
cinfo->clk = devm_kcalloc(ph->dev, cinfo->num_clocks,
sizeof(*cinfo->clk), GFP_KERNEL);
if (!cinfo->clk)
return -ENOMEM;
for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
struct scmi_clock_info *clk = cinfo->clk + clkid;
ret = scmi_clock_attributes_get(ph, clkid, clk, version);
if (!ret)
scmi_clock_describe_rates_get(ph, clkid, clk);
}
cinfo->version = version;
return ph->set_priv(ph, cinfo);
}
static const struct scmi_protocol scmi_clock = {
.id = SCMI_PROTOCOL_CLOCK,
.owner = THIS_MODULE,
.instance_init = &scmi_clock_protocol_init,
.ops = &clk_proto_ops,
.events = &clk_protocol_events,
};
DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(clock, scmi_clock)
| linux-master | drivers/firmware/arm_scmi/clock.c |
// SPDX-License-Identifier: GPL-2.0
/*
* System Control and Management Interface (SCMI) Reset Protocol
*
* Copyright (C) 2019-2022 ARM Ltd.
*/
#define pr_fmt(fmt) "SCMI Notifications RESET - " fmt
#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include "protocols.h"
#include "notify.h"
enum scmi_reset_protocol_cmd {
RESET_DOMAIN_ATTRIBUTES = 0x3,
RESET = 0x4,
RESET_NOTIFY = 0x5,
RESET_DOMAIN_NAME_GET = 0x6,
};
#define NUM_RESET_DOMAIN_MASK 0xffff
#define RESET_NOTIFY_ENABLE BIT(0)
struct scmi_msg_resp_reset_domain_attributes {
__le32 attributes;
#define SUPPORTS_ASYNC_RESET(x) ((x) & BIT(31))
#define SUPPORTS_NOTIFY_RESET(x) ((x) & BIT(30))
#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(29))
__le32 latency;
u8 name[SCMI_SHORT_NAME_MAX_SIZE];
};
struct scmi_msg_reset_domain_reset {
__le32 domain_id;
__le32 flags;
#define AUTONOMOUS_RESET BIT(0)
#define EXPLICIT_RESET_ASSERT BIT(1)
#define ASYNCHRONOUS_RESET BIT(2)
__le32 reset_state;
#define ARCH_COLD_RESET 0
};
struct scmi_msg_reset_notify {
__le32 id;
__le32 event_control;
#define RESET_TP_NOTIFY_ALL BIT(0)
};
struct scmi_reset_issued_notify_payld {
__le32 agent_id;
__le32 domain_id;
__le32 reset_state;
};
struct reset_dom_info {
bool async_reset;
bool reset_notify;
u32 latency_us;
char name[SCMI_MAX_STR_SIZE];
};
struct scmi_reset_info {
u32 version;
int num_domains;
struct reset_dom_info *dom_info;
};
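/*
 * Read the reset protocol attributes: the number of reset domains is packed
 * into the lower 16 bits of the returned attributes word.
 */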
static int scmi_reset_attributes_get(const struct scmi_protocol_handle *ph,
struct scmi_reset_info *pi)
{
int ret;
struct scmi_xfer *t;
u32 attr;
ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
0, sizeof(attr), &t);
if (ret)
return ret;
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
attr = get_unaligned_le32(t->rx.buf);
pi->num_domains = attr & NUM_RESET_DOMAIN_MASK;
}
ph->xops->xfer_put(ph, t);
return ret;
}
static int
scmi_reset_domain_attributes_get(const struct scmi_protocol_handle *ph,
u32 domain, struct reset_dom_info *dom_info,
u32 version)
{
int ret;
u32 attributes;
struct scmi_xfer *t;
struct scmi_msg_resp_reset_domain_attributes *attr;
ret = ph->xops->xfer_get_init(ph, RESET_DOMAIN_ATTRIBUTES,
sizeof(domain), sizeof(*attr), &t);
if (ret)
return ret;
put_unaligned_le32(domain, t->tx.buf);
attr = t->rx.buf;
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
attributes = le32_to_cpu(attr->attributes);
dom_info->async_reset = SUPPORTS_ASYNC_RESET(attributes);
dom_info->reset_notify = SUPPORTS_NOTIFY_RESET(attributes);
dom_info->latency_us = le32_to_cpu(attr->latency);
if (dom_info->latency_us == U32_MAX)
dom_info->latency_us = 0;
strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
}
ph->xops->xfer_put(ph, t);
/*
* If supported overwrite short name with the extended one;
* on error just carry on and use already provided short name.
*/
if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 &&
SUPPORTS_EXTENDED_NAMES(attributes))
ph->hops->extended_name_get(ph, RESET_DOMAIN_NAME_GET, domain,
dom_info->name, SCMI_MAX_STR_SIZE);
return ret;
}
static int scmi_reset_num_domains_get(const struct scmi_protocol_handle *ph)
{
struct scmi_reset_info *pi = ph->get_priv(ph);
return pi->num_domains;
}
static const char *
scmi_reset_name_get(const struct scmi_protocol_handle *ph, u32 domain)
{
struct scmi_reset_info *pi = ph->get_priv(ph);
struct reset_dom_info *dom = pi->dom_info + domain;
return dom->name;
}
static int scmi_reset_latency_get(const struct scmi_protocol_handle *ph,
u32 domain)
{
struct scmi_reset_info *pi = ph->get_priv(ph);
struct reset_dom_info *dom = pi->dom_info + domain;
return dom->latency_us;
}
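/*
 * Issue a RESET command for @domain: an autonomous reset is transparently
 * turned into an asynchronous one when the domain advertises async support,
 * in which case the call waits for the platform's delayed response.
 */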
static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain,
u32 flags, u32 state)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_reset_domain_reset *dom;
struct scmi_reset_info *pi = ph->get_priv(ph);
struct reset_dom_info *rdom;
if (domain >= pi->num_domains)
return -EINVAL;
rdom = pi->dom_info + domain;
if (rdom->async_reset && flags & AUTONOMOUS_RESET)
flags |= ASYNCHRONOUS_RESET;
ret = ph->xops->xfer_get_init(ph, RESET, sizeof(*dom), 0, &t);
if (ret)
return ret;
dom = t->tx.buf;
dom->domain_id = cpu_to_le32(domain);
dom->flags = cpu_to_le32(flags);
dom->reset_state = cpu_to_le32(state);
if (flags & ASYNCHRONOUS_RESET)
ret = ph->xops->do_xfer_with_response(ph, t);
else
ret = ph->xops->do_xfer(ph, t);
ph->xops->xfer_put(ph, t);
return ret;
}
static int scmi_reset_domain_reset(const struct scmi_protocol_handle *ph,
u32 domain)
{
return scmi_domain_reset(ph, domain, AUTONOMOUS_RESET,
ARCH_COLD_RESET);
}
static int
scmi_reset_domain_assert(const struct scmi_protocol_handle *ph, u32 domain)
{
return scmi_domain_reset(ph, domain, EXPLICIT_RESET_ASSERT,
ARCH_COLD_RESET);
}
static int
scmi_reset_domain_deassert(const struct scmi_protocol_handle *ph, u32 domain)
{
return scmi_domain_reset(ph, domain, 0, ARCH_COLD_RESET);
}
static const struct scmi_reset_proto_ops reset_proto_ops = {
.num_domains_get = scmi_reset_num_domains_get,
.name_get = scmi_reset_name_get,
.latency_get = scmi_reset_latency_get,
.reset = scmi_reset_domain_reset,
.assert = scmi_reset_domain_assert,
.deassert = scmi_reset_domain_deassert,
};
static int scmi_reset_notify(const struct scmi_protocol_handle *ph,
u32 domain_id, bool enable)
{
int ret;
u32 evt_cntl = enable ? RESET_TP_NOTIFY_ALL : 0;
struct scmi_xfer *t;
struct scmi_msg_reset_notify *cfg;
ret = ph->xops->xfer_get_init(ph, RESET_NOTIFY, sizeof(*cfg), 0, &t);
if (ret)
return ret;
cfg = t->tx.buf;
cfg->id = cpu_to_le32(domain_id);
cfg->event_control = cpu_to_le32(evt_cntl);
ret = ph->xops->do_xfer(ph, t);
ph->xops->xfer_put(ph, t);
return ret;
}
static int scmi_reset_set_notify_enabled(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enable)
{
int ret;
ret = scmi_reset_notify(ph, src_id, enable);
if (ret)
pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
evt_id, src_id, ret);
return ret;
}
static void *
scmi_reset_fill_custom_report(const struct scmi_protocol_handle *ph,
u8 evt_id, ktime_t timestamp,
const void *payld, size_t payld_sz,
void *report, u32 *src_id)
{
const struct scmi_reset_issued_notify_payld *p = payld;
struct scmi_reset_issued_report *r = report;
if (evt_id != SCMI_EVENT_RESET_ISSUED || sizeof(*p) != payld_sz)
return NULL;
r->timestamp = timestamp;
r->agent_id = le32_to_cpu(p->agent_id);
r->domain_id = le32_to_cpu(p->domain_id);
r->reset_state = le32_to_cpu(p->reset_state);
*src_id = r->domain_id;
return r;
}
static int scmi_reset_get_num_sources(const struct scmi_protocol_handle *ph)
{
struct scmi_reset_info *pinfo = ph->get_priv(ph);
if (!pinfo)
return -EINVAL;
return pinfo->num_domains;
}
static const struct scmi_event reset_events[] = {
{
.id = SCMI_EVENT_RESET_ISSUED,
.max_payld_sz = sizeof(struct scmi_reset_issued_notify_payld),
.max_report_sz = sizeof(struct scmi_reset_issued_report),
},
};
static const struct scmi_event_ops reset_event_ops = {
.get_num_sources = scmi_reset_get_num_sources,
.set_notify_enabled = scmi_reset_set_notify_enabled,
.fill_custom_report = scmi_reset_fill_custom_report,
};
static const struct scmi_protocol_events reset_protocol_events = {
.queue_sz = SCMI_PROTO_QUEUE_SZ,
.ops = &reset_event_ops,
.evts = reset_events,
.num_events = ARRAY_SIZE(reset_events),
};
static int scmi_reset_protocol_init(const struct scmi_protocol_handle *ph)
{
int domain, ret;
u32 version;
struct scmi_reset_info *pinfo;
ret = ph->xops->version_get(ph, &version);
if (ret)
return ret;
dev_dbg(ph->dev, "Reset Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
if (!pinfo)
return -ENOMEM;
ret = scmi_reset_attributes_get(ph, pinfo);
if (ret)
return ret;
pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains,
sizeof(*pinfo->dom_info), GFP_KERNEL);
if (!pinfo->dom_info)
return -ENOMEM;
for (domain = 0; domain < pinfo->num_domains; domain++) {
struct reset_dom_info *dom = pinfo->dom_info + domain;
scmi_reset_domain_attributes_get(ph, domain, dom, version);
}
pinfo->version = version;
return ph->set_priv(ph, pinfo);
}
static const struct scmi_protocol scmi_reset = {
.id = SCMI_PROTOCOL_RESET,
.owner = THIS_MODULE,
.instance_init = &scmi_reset_protocol_init,
.ops = &reset_proto_ops,
.events = &reset_protocol_events,
};
DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(reset, scmi_reset)
| linux-master | drivers/firmware/arm_scmi/reset.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SCMI Generic SystemPower Control driver.
*
* Copyright (C) 2020-2022 ARM Ltd.
*/
/*
* In order to handle platform originated SCMI SystemPower requests (like
* shutdowns or cold/warm resets) we register an SCMI Notification notifier
* block to react when such SCMI SystemPower events are emitted by platform.
*
* Once such a notification is received we act accordingly to perform the
* required system transition depending on the kind of request.
*
* Graceful requests are routed to userspace through the same API methods
* (orderly_poweroff/reboot()) used by ACPI when handling ACPI Shutdown bus
* events.
*
* Direct forceful requests are not supported since are not meant to be sent
* by the SCMI platform to an OSPM like Linux.
*
* Additionally, graceful request notifications can carry an optional timeout
* field stating the maximum amount of time allowed by the platform for
* completion after which they are converted to forceful ones: the assumption
* here is that even graceful requests can be upper-bound by a maximum final
* timeout strictly enforced by the platform itself which can ultimately cut
 * the power off at will anytime; in order to avoid such an extreme scenario, we
* track progress of graceful requests through the means of a reboot notifier
* converting timed-out graceful requests to forceful ones, so at least we
* try to perform a clean sync and shutdown/restart before the power is cut.
*
* Given the peculiar nature of SCMI SystemPower protocol, that is being in
* charge of triggering system wide shutdown/reboot events, there should be
* only one SCMI platform actively emitting SystemPower events.
* For this reason the SCMI core takes care to enforce the creation of one
* single unique device associated to the SCMI System Power protocol; no matter
* how many SCMI platforms are defined on the system, only one can be designated
* to support System Power: as a consequence this driver will never be probed
* more than once.
*
* For similar reasons as soon as the first valid SystemPower is received by
* this driver and the shutdown/reboot is started, any further notification
* possibly emitted by the platform will be ignored.
*/
#include <linux/math.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/printk.h>
#include <linux/reboot.h>
#include <linux/scmi_protocol.h>
#include <linux/slab.h>
#include <linux/time64.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#ifndef MODULE
#include <linux/fs.h>
#endif
enum scmi_syspower_state {
SCMI_SYSPOWER_IDLE,
SCMI_SYSPOWER_IN_PROGRESS,
SCMI_SYSPOWER_REBOOTING
};
/**
* struct scmi_syspower_conf - Common configuration
*
* @dev: A reference device
* @state: Current SystemPower state
* @state_mtx: @state related mutex
 * @required_transition: The requested transition as described in the received
* SCMI SystemPower notification
* @userspace_nb: The notifier_block registered against the SCMI SystemPower
* notification to start the needed userspace interactions.
* @reboot_nb: A notifier_block optionally used to track reboot progress
* @forceful_work: A worker used to trigger a forceful transition once a
* graceful has timed out.
*/
struct scmi_syspower_conf {
struct device *dev;
enum scmi_syspower_state state;
/* Protect access to state */
struct mutex state_mtx;
enum scmi_system_events required_transition;
struct notifier_block userspace_nb;
struct notifier_block reboot_nb;
struct delayed_work forceful_work;
};
#define userspace_nb_to_sconf(x) \
container_of(x, struct scmi_syspower_conf, userspace_nb)
#define reboot_nb_to_sconf(x) \
container_of(x, struct scmi_syspower_conf, reboot_nb)
#define dwork_to_sconf(x) \
container_of(x, struct scmi_syspower_conf, forceful_work)
/**
* scmi_reboot_notifier - A reboot notifier to catch an ongoing successful
* system transition
* @nb: Reference to the related notifier block
* @reason: The reason for the ongoing reboot
* @__unused: The cmd being executed on a restart request (unused)
*
* When an ongoing system transition is detected, compatible with the one
* requested by SCMI, cancel the delayed work.
*
* Return: NOTIFY_OK in any case
*/
static int scmi_reboot_notifier(struct notifier_block *nb,
unsigned long reason, void *__unused)
{
struct scmi_syspower_conf *sc = reboot_nb_to_sconf(nb);
mutex_lock(&sc->state_mtx);
switch (reason) {
case SYS_HALT:
case SYS_POWER_OFF:
if (sc->required_transition == SCMI_SYSTEM_SHUTDOWN)
sc->state = SCMI_SYSPOWER_REBOOTING;
break;
case SYS_RESTART:
if (sc->required_transition == SCMI_SYSTEM_COLDRESET ||
sc->required_transition == SCMI_SYSTEM_WARMRESET)
sc->state = SCMI_SYSPOWER_REBOOTING;
break;
default:
break;
}
if (sc->state == SCMI_SYSPOWER_REBOOTING) {
dev_dbg(sc->dev, "Reboot in progress...cancel delayed work.\n");
cancel_delayed_work_sync(&sc->forceful_work);
}
mutex_unlock(&sc->state_mtx);
return NOTIFY_OK;
}
/**
* scmi_request_forceful_transition - Request forceful SystemPower transition
* @sc: A reference to the configuration data
*
* Initiates the required SystemPower transition without involving userspace:
* just trigger the action at the kernel level after issuing an emergency
 * sync (if possible at all).
*/
static inline void
scmi_request_forceful_transition(struct scmi_syspower_conf *sc)
{
dev_dbg(sc->dev, "Serving forceful request:%d\n",
sc->required_transition);
#ifndef MODULE
emergency_sync();
#endif
switch (sc->required_transition) {
case SCMI_SYSTEM_SHUTDOWN:
kernel_power_off();
break;
case SCMI_SYSTEM_COLDRESET:
case SCMI_SYSTEM_WARMRESET:
kernel_restart(NULL);
break;
default:
break;
}
}
static void scmi_forceful_work_func(struct work_struct *work)
{
struct scmi_syspower_conf *sc;
struct delayed_work *dwork;
if (system_state > SYSTEM_RUNNING)
return;
dwork = to_delayed_work(work);
sc = dwork_to_sconf(dwork);
dev_dbg(sc->dev, "Graceful request timed out...forcing !\n");
mutex_lock(&sc->state_mtx);
/* avoid deadlock by unregistering reboot notifier first */
unregister_reboot_notifier(&sc->reboot_nb);
if (sc->state == SCMI_SYSPOWER_IN_PROGRESS)
scmi_request_forceful_transition(sc);
mutex_unlock(&sc->state_mtx);
}
/**
* scmi_request_graceful_transition - Request graceful SystemPower transition
* @sc: A reference to the configuration data
* @timeout_ms: The desired timeout to wait for the shutdown to complete before
 * the system is forcibly shut down.
*
* Initiates the required SystemPower transition, requesting userspace
* co-operation: it uses the same orderly_ methods used by ACPI Shutdown event
* processing.
*
* Takes care also to register a reboot notifier and to schedule a delayed work
* in order to detect if userspace actions are taking too long and in such a
* case to trigger a forceful transition.
*/
static void scmi_request_graceful_transition(struct scmi_syspower_conf *sc,
unsigned int timeout_ms)
{
unsigned int adj_timeout_ms = 0;
if (timeout_ms) {
int ret;
sc->reboot_nb.notifier_call = &scmi_reboot_notifier;
ret = register_reboot_notifier(&sc->reboot_nb);
if (!ret) {
/* Wait only up to 75% of the advertised timeout */
adj_timeout_ms = mult_frac(timeout_ms, 3, 4);
INIT_DELAYED_WORK(&sc->forceful_work,
scmi_forceful_work_func);
schedule_delayed_work(&sc->forceful_work,
msecs_to_jiffies(adj_timeout_ms));
} else {
/* Carry on best effort even without a reboot notifier */
dev_warn(sc->dev,
"Cannot register reboot notifier !\n");
}
}
dev_dbg(sc->dev,
"Serving graceful req:%d (timeout_ms:%u adj_timeout_ms:%u)\n",
sc->required_transition, timeout_ms, adj_timeout_ms);
switch (sc->required_transition) {
case SCMI_SYSTEM_SHUTDOWN:
/*
* When triggered early at boot-time the 'orderly' call will
* partially fail due to the lack of userspace itself, but
 * the force=true argument will nevertheless start a
 * successful forced shutdown.
*/
orderly_poweroff(true);
break;
case SCMI_SYSTEM_COLDRESET:
case SCMI_SYSTEM_WARMRESET:
orderly_reboot();
break;
default:
break;
}
}
/**
* scmi_userspace_notifier - Notifier callback to act on SystemPower
* Notifications
* @nb: Reference to the related notifier block
* @event: The SystemPower notification event id
* @data: The SystemPower event report
*
* This callback is in charge of decoding the received SystemPower report
* and act accordingly triggering a graceful or forceful system transition.
*
* Note that once a valid SCMI SystemPower event starts being served, any
* other following SystemPower notification received from the same SCMI
* instance (handle) will be ignored.
*
* Return: NOTIFY_OK once a valid SystemPower event has been successfully
* processed.
*/
static int scmi_userspace_notifier(struct notifier_block *nb,
unsigned long event, void *data)
{
struct scmi_system_power_state_notifier_report *er = data;
struct scmi_syspower_conf *sc = userspace_nb_to_sconf(nb);
if (er->system_state >= SCMI_SYSTEM_POWERUP) {
dev_err(sc->dev, "Ignoring unsupported system_state: 0x%X\n",
er->system_state);
return NOTIFY_DONE;
}
if (!SCMI_SYSPOWER_IS_REQUEST_GRACEFUL(er->flags)) {
dev_err(sc->dev, "Ignoring forceful notification.\n");
return NOTIFY_DONE;
}
/*
 * Bail out if the system is already shutting down or an SCMI SystemPower
 * request is already being served.
*/
if (system_state > SYSTEM_RUNNING)
return NOTIFY_DONE;
mutex_lock(&sc->state_mtx);
if (sc->state != SCMI_SYSPOWER_IDLE) {
dev_dbg(sc->dev,
"Transition already in progress...ignore.\n");
mutex_unlock(&sc->state_mtx);
return NOTIFY_DONE;
}
sc->state = SCMI_SYSPOWER_IN_PROGRESS;
mutex_unlock(&sc->state_mtx);
sc->required_transition = er->system_state;
/* Leaving a trace in logs of who triggered the shutdown/reboot. */
dev_info(sc->dev, "Serving shutdown/reboot request: %d\n",
sc->required_transition);
scmi_request_graceful_transition(sc, er->timeout);
return NOTIFY_OK;
}
static int scmi_syspower_probe(struct scmi_device *sdev)
{
int ret;
struct scmi_syspower_conf *sc;
struct scmi_handle *handle = sdev->handle;
if (!handle)
return -ENODEV;
ret = handle->devm_protocol_acquire(sdev, SCMI_PROTOCOL_SYSTEM);
if (ret)
return ret;
sc = devm_kzalloc(&sdev->dev, sizeof(*sc), GFP_KERNEL);
if (!sc)
return -ENOMEM;
sc->state = SCMI_SYSPOWER_IDLE;
mutex_init(&sc->state_mtx);
sc->required_transition = SCMI_SYSTEM_MAX;
sc->userspace_nb.notifier_call = &scmi_userspace_notifier;
sc->dev = &sdev->dev;
return handle->notify_ops->devm_event_notifier_register(sdev,
SCMI_PROTOCOL_SYSTEM,
SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER,
NULL, &sc->userspace_nb);
}
static const struct scmi_device_id scmi_id_table[] = {
{ SCMI_PROTOCOL_SYSTEM, "syspower" },
{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);
static struct scmi_driver scmi_system_power_driver = {
.name = "scmi-system-power",
.probe = scmi_syspower_probe,
.id_table = scmi_id_table,
};
module_scmi_driver(scmi_system_power_driver);
MODULE_AUTHOR("Cristian Marussi <[email protected]>");
MODULE_DESCRIPTION("ARM SCMI SystemPower Control driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/firmware/arm_scmi/scmi_power_control.c |
// SPDX-License-Identifier: GPL-2.0
/*
* System Control and Management Interface (SCMI) Voltage Protocol
*
* Copyright (C) 2020-2022 ARM Ltd.
*/
#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include "protocols.h"
#define VOLTAGE_DOMS_NUM_MASK GENMASK(15, 0)
#define REMAINING_LEVELS_MASK GENMASK(31, 16)
#define RETURNED_LEVELS_MASK GENMASK(11, 0)
enum scmi_voltage_protocol_cmd {
VOLTAGE_DOMAIN_ATTRIBUTES = 0x3,
VOLTAGE_DESCRIBE_LEVELS = 0x4,
VOLTAGE_CONFIG_SET = 0x5,
VOLTAGE_CONFIG_GET = 0x6,
VOLTAGE_LEVEL_SET = 0x7,
VOLTAGE_LEVEL_GET = 0x8,
VOLTAGE_DOMAIN_NAME_GET = 0x09,
};
#define NUM_VOLTAGE_DOMAINS(x) ((u16)(FIELD_GET(VOLTAGE_DOMS_NUM_MASK, (x))))
struct scmi_msg_resp_domain_attributes {
__le32 attr;
#define SUPPORTS_ASYNC_LEVEL_SET(x) ((x) & BIT(31))
#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(30))
u8 name[SCMI_SHORT_NAME_MAX_SIZE];
};
struct scmi_msg_cmd_describe_levels {
__le32 domain_id;
__le32 level_index;
};
struct scmi_msg_resp_describe_levels {
__le32 flags;
#define NUM_REMAINING_LEVELS(f) ((u16)(FIELD_GET(REMAINING_LEVELS_MASK, (f))))
#define NUM_RETURNED_LEVELS(f) ((u16)(FIELD_GET(RETURNED_LEVELS_MASK, (f))))
#define SUPPORTS_SEGMENTED_LEVELS(f) ((f) & BIT(12))
__le32 voltage[];
};
struct scmi_msg_cmd_config_set {
__le32 domain_id;
__le32 config;
};
struct scmi_msg_cmd_level_set {
__le32 domain_id;
__le32 flags;
__le32 voltage_level;
};
struct scmi_resp_voltage_level_set_complete {
__le32 domain_id;
__le32 voltage_level;
};
struct voltage_info {
unsigned int version;
unsigned int num_domains;
struct scmi_voltage_info *domains;
};
static int scmi_protocol_attributes_get(const struct scmi_protocol_handle *ph,
struct voltage_info *vinfo)
{
int ret;
struct scmi_xfer *t;
ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
sizeof(__le32), &t);
if (ret)
return ret;
ret = ph->xops->do_xfer(ph, t);
if (!ret)
vinfo->num_domains =
NUM_VOLTAGE_DOMAINS(get_unaligned_le32(t->rx.buf));
ph->xops->xfer_put(ph, t);
return ret;
}
static int scmi_init_voltage_levels(struct device *dev,
struct scmi_voltage_info *v,
u32 num_returned, u32 num_remaining,
bool segmented)
{
u32 num_levels;
num_levels = num_returned + num_remaining;
/*
 * Segmented level entries are represented by a single triplet
* returned all in one go.
*/
if (!num_levels ||
(segmented && (num_remaining || num_returned != 3))) {
dev_err(dev,
"Invalid level descriptor(%d/%d/%d) for voltage dom %d\n",
num_levels, num_returned, num_remaining, v->id);
return -EINVAL;
}
v->levels_uv = devm_kcalloc(dev, num_levels, sizeof(u32), GFP_KERNEL);
if (!v->levels_uv)
return -ENOMEM;
v->num_levels = num_levels;
v->segmented = segmented;
return 0;
}
struct scmi_volt_ipriv {
struct device *dev;
struct scmi_voltage_info *v;
};
static void iter_volt_levels_prepare_message(void *message,
unsigned int desc_index,
const void *priv)
{
struct scmi_msg_cmd_describe_levels *msg = message;
const struct scmi_volt_ipriv *p = priv;
msg->domain_id = cpu_to_le32(p->v->id);
msg->level_index = cpu_to_le32(desc_index);
}
static int iter_volt_levels_update_state(struct scmi_iterator_state *st,
const void *response, void *priv)
{
int ret = 0;
u32 flags;
const struct scmi_msg_resp_describe_levels *r = response;
struct scmi_volt_ipriv *p = priv;
flags = le32_to_cpu(r->flags);
st->num_returned = NUM_RETURNED_LEVELS(flags);
st->num_remaining = NUM_REMAINING_LEVELS(flags);
/* Allocate space for num_levels if not already done */
if (!p->v->num_levels) {
ret = scmi_init_voltage_levels(p->dev, p->v, st->num_returned,
st->num_remaining,
SUPPORTS_SEGMENTED_LEVELS(flags));
if (!ret)
st->max_resources = p->v->num_levels;
}
return ret;
}
static int
iter_volt_levels_process_response(const struct scmi_protocol_handle *ph,
const void *response,
struct scmi_iterator_state *st, void *priv)
{
s32 val;
const struct scmi_msg_resp_describe_levels *r = response;
struct scmi_volt_ipriv *p = priv;
val = (s32)le32_to_cpu(r->voltage[st->loop_idx]);
p->v->levels_uv[st->desc_index + st->loop_idx] = val;
if (val < 0)
p->v->negative_volts_allowed = true;
return 0;
}
static int scmi_voltage_levels_get(const struct scmi_protocol_handle *ph,
struct scmi_voltage_info *v)
{
int ret;
void *iter;
struct scmi_iterator_ops ops = {
.prepare_message = iter_volt_levels_prepare_message,
.update_state = iter_volt_levels_update_state,
.process_response = iter_volt_levels_process_response,
};
struct scmi_volt_ipriv vpriv = {
.dev = ph->dev,
.v = v,
};
iter = ph->hops->iter_response_init(ph, &ops, v->num_levels,
VOLTAGE_DESCRIBE_LEVELS,
sizeof(struct scmi_msg_cmd_describe_levels),
&vpriv);
if (IS_ERR(iter))
return PTR_ERR(iter);
ret = ph->hops->iter_response_run(iter);
if (ret) {
v->num_levels = 0;
devm_kfree(ph->dev, v->levels_uv);
}
return ret;
}
static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph,
struct voltage_info *vinfo)
{
int ret, dom;
struct scmi_xfer *td;
struct scmi_msg_resp_domain_attributes *resp_dom;
ret = ph->xops->xfer_get_init(ph, VOLTAGE_DOMAIN_ATTRIBUTES,
sizeof(__le32), sizeof(*resp_dom), &td);
if (ret)
return ret;
resp_dom = td->rx.buf;
for (dom = 0; dom < vinfo->num_domains; dom++) {
u32 attributes;
struct scmi_voltage_info *v;
/* Retrieve domain attributes first ... */
put_unaligned_le32(dom, td->tx.buf);
/* Skip domain on comms error */
if (ph->xops->do_xfer(ph, td))
continue;
v = vinfo->domains + dom;
v->id = dom;
attributes = le32_to_cpu(resp_dom->attr);
strscpy(v->name, resp_dom->name, SCMI_SHORT_NAME_MAX_SIZE);
/*
* If supported overwrite short name with the extended one;
* on error just carry on and use already provided short name.
*/
if (PROTOCOL_REV_MAJOR(vinfo->version) >= 0x2) {
if (SUPPORTS_EXTENDED_NAMES(attributes))
ph->hops->extended_name_get(ph,
VOLTAGE_DOMAIN_NAME_GET,
v->id, v->name,
SCMI_MAX_STR_SIZE);
if (SUPPORTS_ASYNC_LEVEL_SET(attributes))
v->async_level_set = true;
}
/* Skip invalid voltage descriptors */
scmi_voltage_levels_get(ph, v);
}
ph->xops->xfer_put(ph, td);
return ret;
}
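/*
 * Common helper for commands that take a 32-bit domain id and return a
 * single 32-bit value (VOLTAGE_CONFIG_GET / VOLTAGE_LEVEL_GET).
 */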
static int __scmi_voltage_get_u32(const struct scmi_protocol_handle *ph,
u8 cmd_id, u32 domain_id, u32 *value)
{
int ret;
struct scmi_xfer *t;
struct voltage_info *vinfo = ph->get_priv(ph);
if (domain_id >= vinfo->num_domains)
return -EINVAL;
ret = ph->xops->xfer_get_init(ph, cmd_id, sizeof(__le32), 0, &t);
if (ret)
return ret;
put_unaligned_le32(domain_id, t->tx.buf);
ret = ph->xops->do_xfer(ph, t);
if (!ret)
*value = get_unaligned_le32(t->rx.buf);
ph->xops->xfer_put(ph, t);
return ret;
}
static int scmi_voltage_config_set(const struct scmi_protocol_handle *ph,
u32 domain_id, u32 config)
{
int ret;
struct scmi_xfer *t;
struct voltage_info *vinfo = ph->get_priv(ph);
struct scmi_msg_cmd_config_set *cmd;
if (domain_id >= vinfo->num_domains)
return -EINVAL;
ret = ph->xops->xfer_get_init(ph, VOLTAGE_CONFIG_SET,
sizeof(*cmd), 0, &t);
if (ret)
return ret;
cmd = t->tx.buf;
cmd->domain_id = cpu_to_le32(domain_id);
cmd->config = cpu_to_le32(config & GENMASK(3, 0));
ret = ph->xops->do_xfer(ph, t);
ph->xops->xfer_put(ph, t);
return ret;
}
static int scmi_voltage_config_get(const struct scmi_protocol_handle *ph,
u32 domain_id, u32 *config)
{
return __scmi_voltage_get_u32(ph, VOLTAGE_CONFIG_GET,
domain_id, config);
}
static int scmi_voltage_level_set(const struct scmi_protocol_handle *ph,
u32 domain_id,
enum scmi_voltage_level_mode mode,
s32 volt_uV)
{
int ret;
struct scmi_xfer *t;
struct voltage_info *vinfo = ph->get_priv(ph);
struct scmi_msg_cmd_level_set *cmd;
struct scmi_voltage_info *v;
if (domain_id >= vinfo->num_domains)
return -EINVAL;
ret = ph->xops->xfer_get_init(ph, VOLTAGE_LEVEL_SET,
sizeof(*cmd), 0, &t);
if (ret)
return ret;
v = vinfo->domains + domain_id;
cmd = t->tx.buf;
cmd->domain_id = cpu_to_le32(domain_id);
cmd->voltage_level = cpu_to_le32(volt_uV);
if (!v->async_level_set || mode != SCMI_VOLTAGE_LEVEL_SET_AUTO) {
cmd->flags = cpu_to_le32(0x0);
ret = ph->xops->do_xfer(ph, t);
} else {
cmd->flags = cpu_to_le32(0x1);
ret = ph->xops->do_xfer_with_response(ph, t);
if (!ret) {
struct scmi_resp_voltage_level_set_complete *resp;
resp = t->rx.buf;
if (le32_to_cpu(resp->domain_id) == domain_id)
dev_dbg(ph->dev,
"Voltage domain %d set async to %d\n",
v->id,
le32_to_cpu(resp->voltage_level));
else
ret = -EPROTO;
}
}
ph->xops->xfer_put(ph, t);
return ret;
}
static int scmi_voltage_level_get(const struct scmi_protocol_handle *ph,
u32 domain_id, s32 *volt_uV)
{
return __scmi_voltage_get_u32(ph, VOLTAGE_LEVEL_GET,
domain_id, (u32 *)volt_uV);
}
static const struct scmi_voltage_info * __must_check
scmi_voltage_info_get(const struct scmi_protocol_handle *ph, u32 domain_id)
{
struct voltage_info *vinfo = ph->get_priv(ph);
if (domain_id >= vinfo->num_domains ||
!vinfo->domains[domain_id].num_levels)
return NULL;
return vinfo->domains + domain_id;
}
static int scmi_voltage_domains_num_get(const struct scmi_protocol_handle *ph)
{
struct voltage_info *vinfo = ph->get_priv(ph);
return vinfo->num_domains;
}
static struct scmi_voltage_proto_ops voltage_proto_ops = {
.num_domains_get = scmi_voltage_domains_num_get,
.info_get = scmi_voltage_info_get,
.config_set = scmi_voltage_config_set,
.config_get = scmi_voltage_config_get,
.level_set = scmi_voltage_level_set,
.level_get = scmi_voltage_level_get,
};
static int scmi_voltage_protocol_init(const struct scmi_protocol_handle *ph)
{
int ret;
u32 version;
struct voltage_info *vinfo;
ret = ph->xops->version_get(ph, &version);
if (ret)
return ret;
dev_dbg(ph->dev, "Voltage Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
vinfo = devm_kzalloc(ph->dev, sizeof(*vinfo), GFP_KERNEL);
if (!vinfo)
return -ENOMEM;
vinfo->version = version;
ret = scmi_protocol_attributes_get(ph, vinfo);
if (ret)
return ret;
if (vinfo->num_domains) {
vinfo->domains = devm_kcalloc(ph->dev, vinfo->num_domains,
sizeof(*vinfo->domains),
GFP_KERNEL);
if (!vinfo->domains)
return -ENOMEM;
ret = scmi_voltage_descriptors_get(ph, vinfo);
if (ret)
return ret;
} else {
dev_warn(ph->dev, "No Voltage domains found.\n");
}
return ph->set_priv(ph, vinfo);
}
static const struct scmi_protocol scmi_voltage = {
.id = SCMI_PROTOCOL_VOLTAGE,
.owner = THIS_MODULE,
.instance_init = &scmi_voltage_protocol_init,
.ops = &voltage_proto_ops,
};
DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(voltage, scmi_voltage)
| linux-master | drivers/firmware/arm_scmi/voltage.c |
// SPDX-License-Identifier: GPL-2.0
/*
* System Control and Management Interface (SCMI) Power Protocol
*
* Copyright (C) 2018-2022 ARM Ltd.
*/
#define pr_fmt(fmt) "SCMI Notifications POWER - " fmt
#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include "protocols.h"
#include "notify.h"
enum scmi_power_protocol_cmd {
POWER_DOMAIN_ATTRIBUTES = 0x3,
POWER_STATE_SET = 0x4,
POWER_STATE_GET = 0x5,
POWER_STATE_NOTIFY = 0x6,
POWER_DOMAIN_NAME_GET = 0x8,
};
struct scmi_msg_resp_power_attributes {
__le16 num_domains;
__le16 reserved;
__le32 stats_addr_low;
__le32 stats_addr_high;
__le32 stats_size;
};
struct scmi_msg_resp_power_domain_attributes {
__le32 flags;
#define SUPPORTS_STATE_SET_NOTIFY(x) ((x) & BIT(31))
#define SUPPORTS_STATE_SET_ASYNC(x) ((x) & BIT(30))
#define SUPPORTS_STATE_SET_SYNC(x) ((x) & BIT(29))
#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(27))
u8 name[SCMI_SHORT_NAME_MAX_SIZE];
};
struct scmi_power_set_state {
__le32 flags;
#define STATE_SET_ASYNC BIT(0)
__le32 domain;
__le32 state;
};
struct scmi_power_state_notify {
__le32 domain;
__le32 notify_enable;
};
struct scmi_power_state_notify_payld {
__le32 agent_id;
__le32 domain_id;
__le32 power_state;
};
struct power_dom_info {
bool state_set_sync;
bool state_set_async;
bool state_set_notify;
char name[SCMI_MAX_STR_SIZE];
};
struct scmi_power_info {
u32 version;
int num_domains;
u64 stats_addr;
u32 stats_size;
struct power_dom_info *dom_info;
};
static int scmi_power_attributes_get(const struct scmi_protocol_handle *ph,
struct scmi_power_info *pi)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_power_attributes *attr;
ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
0, sizeof(*attr), &t);
if (ret)
return ret;
attr = t->rx.buf;
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
pi->num_domains = le16_to_cpu(attr->num_domains);
pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
(u64)le32_to_cpu(attr->stats_addr_high) << 32;
pi->stats_size = le32_to_cpu(attr->stats_size);
}
ph->xops->xfer_put(ph, t);
return ret;
}
static int
scmi_power_domain_attributes_get(const struct scmi_protocol_handle *ph,
u32 domain, struct power_dom_info *dom_info,
u32 version)
{
int ret;
u32 flags;
struct scmi_xfer *t;
struct scmi_msg_resp_power_domain_attributes *attr;
ret = ph->xops->xfer_get_init(ph, POWER_DOMAIN_ATTRIBUTES,
sizeof(domain), sizeof(*attr), &t);
if (ret)
return ret;
put_unaligned_le32(domain, t->tx.buf);
attr = t->rx.buf;
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
flags = le32_to_cpu(attr->flags);
dom_info->state_set_notify = SUPPORTS_STATE_SET_NOTIFY(flags);
dom_info->state_set_async = SUPPORTS_STATE_SET_ASYNC(flags);
dom_info->state_set_sync = SUPPORTS_STATE_SET_SYNC(flags);
strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
}
ph->xops->xfer_put(ph, t);
/*
* If supported, overwrite the short name with the extended one;
* on error just carry on and use the already provided short name.
*/
if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 &&
SUPPORTS_EXTENDED_NAMES(flags)) {
ph->hops->extended_name_get(ph, POWER_DOMAIN_NAME_GET,
domain, dom_info->name,
SCMI_MAX_STR_SIZE);
}
return ret;
}
static int scmi_power_state_set(const struct scmi_protocol_handle *ph,
u32 domain, u32 state)
{
int ret;
struct scmi_xfer *t;
struct scmi_power_set_state *st;
ret = ph->xops->xfer_get_init(ph, POWER_STATE_SET, sizeof(*st), 0, &t);
if (ret)
return ret;
st = t->tx.buf;
st->flags = cpu_to_le32(0);
st->domain = cpu_to_le32(domain);
st->state = cpu_to_le32(state);
ret = ph->xops->do_xfer(ph, t);
ph->xops->xfer_put(ph, t);
return ret;
}
static int scmi_power_state_get(const struct scmi_protocol_handle *ph,
u32 domain, u32 *state)
{
int ret;
struct scmi_xfer *t;
ret = ph->xops->xfer_get_init(ph, POWER_STATE_GET, sizeof(u32), sizeof(u32), &t);
if (ret)
return ret;
put_unaligned_le32(domain, t->tx.buf);
ret = ph->xops->do_xfer(ph, t);
if (!ret)
*state = get_unaligned_le32(t->rx.buf);
ph->xops->xfer_put(ph, t);
return ret;
}
static int scmi_power_num_domains_get(const struct scmi_protocol_handle *ph)
{
struct scmi_power_info *pi = ph->get_priv(ph);
return pi->num_domains;
}
static const char *
scmi_power_name_get(const struct scmi_protocol_handle *ph,
u32 domain)
{
struct scmi_power_info *pi = ph->get_priv(ph);
struct power_dom_info *dom = pi->dom_info + domain;
return dom->name;
}
static const struct scmi_power_proto_ops power_proto_ops = {
.num_domains_get = scmi_power_num_domains_get,
.name_get = scmi_power_name_get,
.state_set = scmi_power_state_set,
.state_get = scmi_power_state_get,
};
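/*
* Illustrative usage sketch, not part of this file: a client such as a
* generic PM domain driver would fetch these ops via the core handle and
* toggle a domain with the generic state macros from scmi_protocol.h.
* "sdev" and the domain index are hypothetical placeholders.
*
*    const struct scmi_power_proto_ops *pops;
*    struct scmi_protocol_handle *ph;
*    u32 state;
*
*    pops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_POWER, &ph);
*    if (IS_ERR(pops))
*        return PTR_ERR(pops);
*
*    pops->state_set(ph, 0, SCMI_POWER_STATE_GENERIC_OFF);
*    pops->state_get(ph, 0, &state);
*/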
static int scmi_power_request_notify(const struct scmi_protocol_handle *ph,
u32 domain, bool enable)
{
int ret;
struct scmi_xfer *t;
struct scmi_power_state_notify *notify;
ret = ph->xops->xfer_get_init(ph, POWER_STATE_NOTIFY,
sizeof(*notify), 0, &t);
if (ret)
return ret;
notify = t->tx.buf;
notify->domain = cpu_to_le32(domain);
notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
ret = ph->xops->do_xfer(ph, t);
ph->xops->xfer_put(ph, t);
return ret;
}
static int scmi_power_set_notify_enabled(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enable)
{
int ret;
ret = scmi_power_request_notify(ph, src_id, enable);
if (ret)
pr_debug("FAIL_ENABLE - evt[%X] dom[%d] - ret:%d\n",
evt_id, src_id, ret);
return ret;
}
static void *
scmi_power_fill_custom_report(const struct scmi_protocol_handle *ph,
u8 evt_id, ktime_t timestamp,
const void *payld, size_t payld_sz,
void *report, u32 *src_id)
{
const struct scmi_power_state_notify_payld *p = payld;
struct scmi_power_state_changed_report *r = report;
if (evt_id != SCMI_EVENT_POWER_STATE_CHANGED || sizeof(*p) != payld_sz)
return NULL;
r->timestamp = timestamp;
r->agent_id = le32_to_cpu(p->agent_id);
r->domain_id = le32_to_cpu(p->domain_id);
r->power_state = le32_to_cpu(p->power_state);
*src_id = r->domain_id;
return r;
}
static int scmi_power_get_num_sources(const struct scmi_protocol_handle *ph)
{
struct scmi_power_info *pinfo = ph->get_priv(ph);
if (!pinfo)
return -EINVAL;
return pinfo->num_domains;
}
static const struct scmi_event power_events[] = {
{
.id = SCMI_EVENT_POWER_STATE_CHANGED,
.max_payld_sz = sizeof(struct scmi_power_state_notify_payld),
.max_report_sz =
sizeof(struct scmi_power_state_changed_report),
},
};
static const struct scmi_event_ops power_event_ops = {
.get_num_sources = scmi_power_get_num_sources,
.set_notify_enabled = scmi_power_set_notify_enabled,
.fill_custom_report = scmi_power_fill_custom_report,
};
static const struct scmi_protocol_events power_protocol_events = {
.queue_sz = SCMI_PROTO_QUEUE_SZ,
.ops = &power_event_ops,
.evts = power_events,
.num_events = ARRAY_SIZE(power_events),
};
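/*
* Illustrative notification sketch, not part of this file: a client wanting
* the event above would register a notifier block through the core notify
* ops (an assumption on the scmi_notify_ops API from scmi_protocol.h) and
* receive a struct scmi_power_state_changed_report in its callback. "sdev",
* "nb" and the source domain are hypothetical placeholders.
*
*    u32 src_id = 0;
*
*    sdev->handle->notify_ops->devm_event_notifier_register(sdev,
*                    SCMI_PROTOCOL_POWER,
*                    SCMI_EVENT_POWER_STATE_CHANGED,
*                    &src_id, &nb);
*/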
static int scmi_power_protocol_init(const struct scmi_protocol_handle *ph)
{
int domain, ret;
u32 version;
struct scmi_power_info *pinfo;
ret = ph->xops->version_get(ph, &version);
if (ret)
return ret;
dev_dbg(ph->dev, "Power Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
if (!pinfo)
return -ENOMEM;
ret = scmi_power_attributes_get(ph, pinfo);
if (ret)
return ret;
pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains,
sizeof(*pinfo->dom_info), GFP_KERNEL);
if (!pinfo->dom_info)
return -ENOMEM;
for (domain = 0; domain < pinfo->num_domains; domain++) {
struct power_dom_info *dom = pinfo->dom_info + domain;
scmi_power_domain_attributes_get(ph, domain, dom, version);
}
pinfo->version = version;
return ph->set_priv(ph, pinfo);
}
static const struct scmi_protocol scmi_power = {
.id = SCMI_PROTOCOL_POWER,
.owner = THIS_MODULE,
.instance_init = &scmi_power_protocol_init,
.ops = &power_proto_ops,
.events = &power_protocol_events,
};
DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(power, scmi_power)
| linux-master | drivers/firmware/arm_scmi/power.c |
// SPDX-License-Identifier: GPL-2.0
/*
* System Control and Management Interface (SCMI) Powercap Protocol
*
* Copyright (C) 2022 ARM Ltd.
*/
#define pr_fmt(fmt) "SCMI Notifications POWERCAP - " fmt
#include <linux/bitfield.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include <trace/events/scmi.h>
#include "protocols.h"
#include "notify.h"
enum scmi_powercap_protocol_cmd {
POWERCAP_DOMAIN_ATTRIBUTES = 0x3,
POWERCAP_CAP_GET = 0x4,
POWERCAP_CAP_SET = 0x5,
POWERCAP_PAI_GET = 0x6,
POWERCAP_PAI_SET = 0x7,
POWERCAP_DOMAIN_NAME_GET = 0x8,
POWERCAP_MEASUREMENTS_GET = 0x9,
POWERCAP_CAP_NOTIFY = 0xa,
POWERCAP_MEASUREMENTS_NOTIFY = 0xb,
POWERCAP_DESCRIBE_FASTCHANNEL = 0xc,
};
enum {
POWERCAP_FC_CAP,
POWERCAP_FC_PAI,
POWERCAP_FC_MAX,
};
struct scmi_msg_resp_powercap_domain_attributes {
__le32 attributes;
#define SUPPORTS_POWERCAP_CAP_CHANGE_NOTIFY(x) ((x) & BIT(31))
#define SUPPORTS_POWERCAP_MEASUREMENTS_CHANGE_NOTIFY(x) ((x) & BIT(30))
#define SUPPORTS_ASYNC_POWERCAP_CAP_SET(x) ((x) & BIT(29))
#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(28))
#define SUPPORTS_POWERCAP_CAP_CONFIGURATION(x) ((x) & BIT(27))
#define SUPPORTS_POWERCAP_MONITORING(x) ((x) & BIT(26))
#define SUPPORTS_POWERCAP_PAI_CONFIGURATION(x) ((x) & BIT(25))
#define SUPPORTS_POWERCAP_FASTCHANNELS(x) ((x) & BIT(22))
#define POWERCAP_POWER_UNIT(x) \
(FIELD_GET(GENMASK(24, 23), (x)))
#define SUPPORTS_POWER_UNITS_MW(x) \
(POWERCAP_POWER_UNIT(x) == 0x2)
#define SUPPORTS_POWER_UNITS_UW(x) \
(POWERCAP_POWER_UNIT(x) == 0x1)
u8 name[SCMI_SHORT_NAME_MAX_SIZE];
__le32 min_pai;
__le32 max_pai;
__le32 pai_step;
__le32 min_power_cap;
__le32 max_power_cap;
__le32 power_cap_step;
__le32 sustainable_power;
__le32 accuracy;
__le32 parent_id;
};
struct scmi_msg_powercap_set_cap_or_pai {
__le32 domain;
__le32 flags;
#define CAP_SET_ASYNC BIT(1)
#define CAP_SET_IGNORE_DRESP BIT(0)
__le32 value;
};
struct scmi_msg_resp_powercap_cap_set_complete {
__le32 domain;
__le32 power_cap;
};
struct scmi_msg_resp_powercap_meas_get {
__le32 power;
__le32 pai;
};
struct scmi_msg_powercap_notify_cap {
__le32 domain;
__le32 notify_enable;
};
struct scmi_msg_powercap_notify_thresh {
__le32 domain;
__le32 notify_enable;
__le32 power_thresh_low;
__le32 power_thresh_high;
};
struct scmi_powercap_cap_changed_notify_payld {
__le32 agent_id;
__le32 domain_id;
__le32 power_cap;
__le32 pai;
};
struct scmi_powercap_meas_changed_notify_payld {
__le32 agent_id;
__le32 domain_id;
__le32 power;
};
struct scmi_powercap_state {
bool enabled;
u32 last_pcap;
bool meas_notif_enabled;
u64 thresholds;
#define THRESH_LOW(p, id) \
(lower_32_bits((p)->states[(id)].thresholds))
#define THRESH_HIGH(p, id) \
(upper_32_bits((p)->states[(id)].thresholds))
};
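/*
* Worked example of the thresholds encoding above (illustrative only):
* scmi_powercap_measurements_threshold_set() packs the pair as
* FIELD_PREP(GENMASK_ULL(31, 0), low) | FIELD_PREP(GENMASK_ULL(63, 32), high),
* so low = 100 and high = 500 give thresholds = (500ULL << 32) | 100, from
* which THRESH_LOW() and THRESH_HIGH() recover 100 and 500 respectively.
*/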
struct powercap_info {
u32 version;
int num_domains;
struct scmi_powercap_state *states;
struct scmi_powercap_info *powercaps;
};
static enum scmi_powercap_protocol_cmd evt_2_cmd[] = {
POWERCAP_CAP_NOTIFY,
POWERCAP_MEASUREMENTS_NOTIFY,
};
static int scmi_powercap_notify(const struct scmi_protocol_handle *ph,
u32 domain, int message_id, bool enable);
static int
scmi_powercap_attributes_get(const struct scmi_protocol_handle *ph,
struct powercap_info *pi)
{
int ret;
struct scmi_xfer *t;
ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
sizeof(u32), &t);
if (ret)
return ret;
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
u32 attributes;
attributes = get_unaligned_le32(t->rx.buf);
pi->num_domains = FIELD_GET(GENMASK(15, 0), attributes);
}
ph->xops->xfer_put(ph, t);
return ret;
}
static inline int
scmi_powercap_validate(unsigned int min_val, unsigned int max_val,
unsigned int step_val, bool configurable)
{
if (!min_val || !max_val)
return -EPROTO;
if ((configurable && min_val == max_val) ||
(!configurable && min_val != max_val))
return -EPROTO;
if (min_val != max_val && !step_val)
return -EPROTO;
return 0;
}
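/*
* Illustrative examples of the checks above, not part of the original code:
* a non-configurable resource must report a degenerate range, so
* (min=1000, max=1000, step=0, configurable=false) is accepted, while a
* configurable one must report a proper range with a non-zero step, so
* (min=1000, max=5000, step=0, configurable=true) is rejected with -EPROTO.
*/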
static int
scmi_powercap_domain_attributes_get(const struct scmi_protocol_handle *ph,
struct powercap_info *pinfo, u32 domain)
{
int ret;
u32 flags;
struct scmi_xfer *t;
struct scmi_powercap_info *dom_info = pinfo->powercaps + domain;
struct scmi_msg_resp_powercap_domain_attributes *resp;
ret = ph->xops->xfer_get_init(ph, POWERCAP_DOMAIN_ATTRIBUTES,
sizeof(domain), sizeof(*resp), &t);
if (ret)
return ret;
put_unaligned_le32(domain, t->tx.buf);
resp = t->rx.buf;
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
flags = le32_to_cpu(resp->attributes);
dom_info->id = domain;
dom_info->notify_powercap_cap_change =
SUPPORTS_POWERCAP_CAP_CHANGE_NOTIFY(flags);
dom_info->notify_powercap_measurement_change =
SUPPORTS_POWERCAP_MEASUREMENTS_CHANGE_NOTIFY(flags);
dom_info->async_powercap_cap_set =
SUPPORTS_ASYNC_POWERCAP_CAP_SET(flags);
dom_info->powercap_cap_config =
SUPPORTS_POWERCAP_CAP_CONFIGURATION(flags);
dom_info->powercap_monitoring =
SUPPORTS_POWERCAP_MONITORING(flags);
dom_info->powercap_pai_config =
SUPPORTS_POWERCAP_PAI_CONFIGURATION(flags);
dom_info->powercap_scale_mw =
SUPPORTS_POWER_UNITS_MW(flags);
dom_info->powercap_scale_uw =
SUPPORTS_POWER_UNITS_UW(flags);
dom_info->fastchannels =
SUPPORTS_POWERCAP_FASTCHANNELS(flags);
strscpy(dom_info->name, resp->name, SCMI_SHORT_NAME_MAX_SIZE);
dom_info->min_pai = le32_to_cpu(resp->min_pai);
dom_info->max_pai = le32_to_cpu(resp->max_pai);
dom_info->pai_step = le32_to_cpu(resp->pai_step);
ret = scmi_powercap_validate(dom_info->min_pai,
dom_info->max_pai,
dom_info->pai_step,
dom_info->powercap_pai_config);
if (ret) {
dev_err(ph->dev,
"Platform reported inconsistent PAI config for domain %d - %s\n",
dom_info->id, dom_info->name);
goto clean;
}
dom_info->min_power_cap = le32_to_cpu(resp->min_power_cap);
dom_info->max_power_cap = le32_to_cpu(resp->max_power_cap);
dom_info->power_cap_step = le32_to_cpu(resp->power_cap_step);
ret = scmi_powercap_validate(dom_info->min_power_cap,
dom_info->max_power_cap,
dom_info->power_cap_step,
dom_info->powercap_cap_config);
if (ret) {
dev_err(ph->dev,
"Platform reported inconsistent CAP config for domain %d - %s\n",
dom_info->id, dom_info->name);
goto clean;
}
dom_info->sustainable_power =
le32_to_cpu(resp->sustainable_power);
dom_info->accuracy = le32_to_cpu(resp->accuracy);
dom_info->parent_id = le32_to_cpu(resp->parent_id);
if (dom_info->parent_id != SCMI_POWERCAP_ROOT_ZONE_ID &&
(dom_info->parent_id >= pinfo->num_domains ||
dom_info->parent_id == dom_info->id)) {
dev_err(ph->dev,
"Platform reported inconsistent parent ID for domain %d - %s\n",
dom_info->id, dom_info->name);
ret = -ENODEV;
}
}
clean:
ph->xops->xfer_put(ph, t);
/*
* If supported, overwrite the short name with the extended one;
* on error just carry on and use the already provided short name.
*/
if (!ret && SUPPORTS_EXTENDED_NAMES(flags))
ph->hops->extended_name_get(ph, POWERCAP_DOMAIN_NAME_GET,
domain, dom_info->name,
SCMI_MAX_STR_SIZE);
return ret;
}
static int scmi_powercap_num_domains_get(const struct scmi_protocol_handle *ph)
{
struct powercap_info *pi = ph->get_priv(ph);
return pi->num_domains;
}
static const struct scmi_powercap_info *
scmi_powercap_dom_info_get(const struct scmi_protocol_handle *ph, u32 domain_id)
{
struct powercap_info *pi = ph->get_priv(ph);
if (domain_id >= pi->num_domains)
return NULL;
return pi->powercaps + domain_id;
}
static int scmi_powercap_xfer_cap_get(const struct scmi_protocol_handle *ph,
u32 domain_id, u32 *power_cap)
{
int ret;
struct scmi_xfer *t;
ret = ph->xops->xfer_get_init(ph, POWERCAP_CAP_GET, sizeof(u32),
sizeof(u32), &t);
if (ret)
return ret;
put_unaligned_le32(domain_id, t->tx.buf);
ret = ph->xops->do_xfer(ph, t);
if (!ret)
*power_cap = get_unaligned_le32(t->rx.buf);
ph->xops->xfer_put(ph, t);
return ret;
}
static int __scmi_powercap_cap_get(const struct scmi_protocol_handle *ph,
const struct scmi_powercap_info *dom,
u32 *power_cap)
{
if (dom->fc_info && dom->fc_info[POWERCAP_FC_CAP].get_addr) {
*power_cap = ioread32(dom->fc_info[POWERCAP_FC_CAP].get_addr);
trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_CAP_GET,
dom->id, *power_cap, 0);
return 0;
}
return scmi_powercap_xfer_cap_get(ph, dom->id, power_cap);
}
static int scmi_powercap_cap_get(const struct scmi_protocol_handle *ph,
u32 domain_id, u32 *power_cap)
{
const struct scmi_powercap_info *dom;
if (!power_cap)
return -EINVAL;
dom = scmi_powercap_dom_info_get(ph, domain_id);
if (!dom)
return -EINVAL;
return __scmi_powercap_cap_get(ph, dom, power_cap);
}
static int scmi_powercap_xfer_cap_set(const struct scmi_protocol_handle *ph,
const struct scmi_powercap_info *pc,
u32 power_cap, bool ignore_dresp)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_powercap_set_cap_or_pai *msg;
ret = ph->xops->xfer_get_init(ph, POWERCAP_CAP_SET,
sizeof(*msg), 0, &t);
if (ret)
return ret;
msg = t->tx.buf;
msg->domain = cpu_to_le32(pc->id);
msg->flags =
cpu_to_le32(FIELD_PREP(CAP_SET_ASYNC, !!pc->async_powercap_cap_set) |
FIELD_PREP(CAP_SET_IGNORE_DRESP, !!ignore_dresp));
msg->value = cpu_to_le32(power_cap);
if (!pc->async_powercap_cap_set || ignore_dresp) {
ret = ph->xops->do_xfer(ph, t);
} else {
ret = ph->xops->do_xfer_with_response(ph, t);
if (!ret) {
struct scmi_msg_resp_powercap_cap_set_complete *resp;
resp = t->rx.buf;
if (le32_to_cpu(resp->domain) == pc->id)
dev_dbg(ph->dev,
"Powercap ID %d CAP set async to %u\n",
pc->id,
get_unaligned_le32(&resp->power_cap));
else
ret = -EPROTO;
}
}
ph->xops->xfer_put(ph, t);
return ret;
}
static int __scmi_powercap_cap_set(const struct scmi_protocol_handle *ph,
struct powercap_info *pi, u32 domain_id,
u32 power_cap, bool ignore_dresp)
{
int ret = -EINVAL;
const struct scmi_powercap_info *pc;
pc = scmi_powercap_dom_info_get(ph, domain_id);
if (!pc || !pc->powercap_cap_config)
return ret;
if (power_cap &&
(power_cap < pc->min_power_cap || power_cap > pc->max_power_cap))
return ret;
if (pc->fc_info && pc->fc_info[POWERCAP_FC_CAP].set_addr) {
struct scmi_fc_info *fci = &pc->fc_info[POWERCAP_FC_CAP];
iowrite32(power_cap, fci->set_addr);
ph->hops->fastchannel_db_ring(fci->set_db);
trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_CAP_SET,
domain_id, power_cap, 0);
ret = 0;
} else {
ret = scmi_powercap_xfer_cap_set(ph, pc, power_cap,
ignore_dresp);
}
/* Save the last explicitly set non-zero powercap value */
if (PROTOCOL_REV_MAJOR(pi->version) >= 0x2 && !ret && power_cap)
pi->states[domain_id].last_pcap = power_cap;
return ret;
}
static int scmi_powercap_cap_set(const struct scmi_protocol_handle *ph,
u32 domain_id, u32 power_cap,
bool ignore_dresp)
{
struct powercap_info *pi = ph->get_priv(ph);
/*
* Disallow zero as a possible explicitly requested powercap:
* there are enable/disable operations for this.
*/
if (!power_cap)
return -EINVAL;
/* Just log the last set request if acting on a disabled domain */
if (PROTOCOL_REV_MAJOR(pi->version) >= 0x2 &&
!pi->states[domain_id].enabled) {
pi->states[domain_id].last_pcap = power_cap;
return 0;
}
return __scmi_powercap_cap_set(ph, pi, domain_id,
power_cap, ignore_dresp);
}
static int scmi_powercap_xfer_pai_get(const struct scmi_protocol_handle *ph,
u32 domain_id, u32 *pai)
{
int ret;
struct scmi_xfer *t;
ret = ph->xops->xfer_get_init(ph, POWERCAP_PAI_GET, sizeof(u32),
sizeof(u32), &t);
if (ret)
return ret;
put_unaligned_le32(domain_id, t->tx.buf);
ret = ph->xops->do_xfer(ph, t);
if (!ret)
*pai = get_unaligned_le32(t->rx.buf);
ph->xops->xfer_put(ph, t);
return ret;
}
static int scmi_powercap_pai_get(const struct scmi_protocol_handle *ph,
u32 domain_id, u32 *pai)
{
struct scmi_powercap_info *dom;
struct powercap_info *pi = ph->get_priv(ph);
if (!pai || domain_id >= pi->num_domains)
return -EINVAL;
dom = pi->powercaps + domain_id;
if (dom->fc_info && dom->fc_info[POWERCAP_FC_PAI].get_addr) {
*pai = ioread32(dom->fc_info[POWERCAP_FC_PAI].get_addr);
trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_PAI_GET,
domain_id, *pai, 0);
return 0;
}
return scmi_powercap_xfer_pai_get(ph, domain_id, pai);
}
static int scmi_powercap_xfer_pai_set(const struct scmi_protocol_handle *ph,
u32 domain_id, u32 pai)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_powercap_set_cap_or_pai *msg;
ret = ph->xops->xfer_get_init(ph, POWERCAP_PAI_SET,
sizeof(*msg), 0, &t);
if (ret)
return ret;
msg = t->tx.buf;
msg->domain = cpu_to_le32(domain_id);
msg->flags = cpu_to_le32(0);
msg->value = cpu_to_le32(pai);
ret = ph->xops->do_xfer(ph, t);
ph->xops->xfer_put(ph, t);
return ret;
}
static int scmi_powercap_pai_set(const struct scmi_protocol_handle *ph,
u32 domain_id, u32 pai)
{
const struct scmi_powercap_info *pc;
pc = scmi_powercap_dom_info_get(ph, domain_id);
if (!pc || !pc->powercap_pai_config || !pai ||
pai < pc->min_pai || pai > pc->max_pai)
return -EINVAL;
if (pc->fc_info && pc->fc_info[POWERCAP_FC_PAI].set_addr) {
struct scmi_fc_info *fci = &pc->fc_info[POWERCAP_FC_PAI];
trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_PAI_SET,
domain_id, pai, 0);
iowrite32(pai, fci->set_addr);
ph->hops->fastchannel_db_ring(fci->set_db);
return 0;
}
return scmi_powercap_xfer_pai_set(ph, domain_id, pai);
}
static int scmi_powercap_measurements_get(const struct scmi_protocol_handle *ph,
u32 domain_id, u32 *average_power,
u32 *pai)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_powercap_meas_get *resp;
const struct scmi_powercap_info *pc;
pc = scmi_powercap_dom_info_get(ph, domain_id);
if (!pc || !pc->powercap_monitoring || !pai || !average_power)
return -EINVAL;
ret = ph->xops->xfer_get_init(ph, POWERCAP_MEASUREMENTS_GET,
sizeof(u32), sizeof(*resp), &t);
if (ret)
return ret;
resp = t->rx.buf;
put_unaligned_le32(domain_id, t->tx.buf);
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
*average_power = le32_to_cpu(resp->power);
*pai = le32_to_cpu(resp->pai);
}
ph->xops->xfer_put(ph, t);
return ret;
}
static int
scmi_powercap_measurements_threshold_get(const struct scmi_protocol_handle *ph,
u32 domain_id, u32 *power_thresh_low,
u32 *power_thresh_high)
{
struct powercap_info *pi = ph->get_priv(ph);
if (!power_thresh_low || !power_thresh_high ||
domain_id >= pi->num_domains)
return -EINVAL;
*power_thresh_low = THRESH_LOW(pi, domain_id);
*power_thresh_high = THRESH_HIGH(pi, domain_id);
return 0;
}
static int
scmi_powercap_measurements_threshold_set(const struct scmi_protocol_handle *ph,
u32 domain_id, u32 power_thresh_low,
u32 power_thresh_high)
{
int ret = 0;
struct powercap_info *pi = ph->get_priv(ph);
if (domain_id >= pi->num_domains ||
power_thresh_low > power_thresh_high)
return -EINVAL;
/* Anything to do ? */
if (THRESH_LOW(pi, domain_id) == power_thresh_low &&
THRESH_HIGH(pi, domain_id) == power_thresh_high)
return ret;
pi->states[domain_id].thresholds =
(FIELD_PREP(GENMASK_ULL(31, 0), power_thresh_low) |
FIELD_PREP(GENMASK_ULL(63, 32), power_thresh_high));
/* Update thresholds if notification already enabled */
if (pi->states[domain_id].meas_notif_enabled)
ret = scmi_powercap_notify(ph, domain_id,
POWERCAP_MEASUREMENTS_NOTIFY,
true);
return ret;
}
static int scmi_powercap_cap_enable_set(const struct scmi_protocol_handle *ph,
u32 domain_id, bool enable)
{
int ret;
u32 power_cap;
struct powercap_info *pi = ph->get_priv(ph);
if (PROTOCOL_REV_MAJOR(pi->version) < 0x2)
return -EINVAL;
if (enable == pi->states[domain_id].enabled)
return 0;
if (enable) {
/* Cannot enable with a zero powercap. */
if (!pi->states[domain_id].last_pcap)
return -EINVAL;
ret = __scmi_powercap_cap_set(ph, pi, domain_id,
pi->states[domain_id].last_pcap,
true);
} else {
ret = __scmi_powercap_cap_set(ph, pi, domain_id, 0, true);
}
if (ret)
return ret;
/*
* Update our internal state to reflect final platform state: the SCMI
* server could have ignored a disable request and kept enforcing some
* powercap limit requested by other agents.
*/
ret = scmi_powercap_cap_get(ph, domain_id, &power_cap);
if (!ret)
pi->states[domain_id].enabled = !!power_cap;
return ret;
}
static int scmi_powercap_cap_enable_get(const struct scmi_protocol_handle *ph,
u32 domain_id, bool *enable)
{
int ret;
u32 power_cap;
struct powercap_info *pi = ph->get_priv(ph);
*enable = true;
if (PROTOCOL_REV_MAJOR(pi->version) < 0x2)
return 0;
/*
* Always report the real platform state; the platform could have
* ignored a previous disable request. Default to true on any error.
*/
ret = scmi_powercap_cap_get(ph, domain_id, &power_cap);
if (!ret)
*enable = !!power_cap;
/* Update internal state with current real platform state */
pi->states[domain_id].enabled = *enable;
return 0;
}
static const struct scmi_powercap_proto_ops powercap_proto_ops = {
.num_domains_get = scmi_powercap_num_domains_get,
.info_get = scmi_powercap_dom_info_get,
.cap_get = scmi_powercap_cap_get,
.cap_set = scmi_powercap_cap_set,
.cap_enable_set = scmi_powercap_cap_enable_set,
.cap_enable_get = scmi_powercap_cap_enable_get,
.pai_get = scmi_powercap_pai_get,
.pai_set = scmi_powercap_pai_set,
.measurements_get = scmi_powercap_measurements_get,
.measurements_threshold_set = scmi_powercap_measurements_threshold_set,
.measurements_threshold_get = scmi_powercap_measurements_threshold_get,
};
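/*
* Illustrative usage sketch, not part of this file: a consumer such as a
* powercap zone driver would grab these ops via the core handle and then
* query/limit a domain. "sdev", "new_cap" and the domain index are
* hypothetical placeholders; new_cap must fall within the advertised
* [min_power_cap, max_power_cap] range.
*
*    const struct scmi_powercap_proto_ops *pcops;
*    struct scmi_protocol_handle *ph;
*    u32 cap;
*
*    pcops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_POWERCAP, &ph);
*    if (IS_ERR(pcops))
*        return PTR_ERR(pcops);
*
*    pcops->cap_get(ph, 0, &cap);
*    pcops->cap_set(ph, 0, new_cap, false);
*/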
static void scmi_powercap_domain_init_fc(const struct scmi_protocol_handle *ph,
u32 domain, struct scmi_fc_info **p_fc)
{
struct scmi_fc_info *fc;
fc = devm_kcalloc(ph->dev, POWERCAP_FC_MAX, sizeof(*fc), GFP_KERNEL);
if (!fc)
return;
ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
POWERCAP_CAP_SET, 4, domain,
&fc[POWERCAP_FC_CAP].set_addr,
&fc[POWERCAP_FC_CAP].set_db);
ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
POWERCAP_CAP_GET, 4, domain,
&fc[POWERCAP_FC_CAP].get_addr, NULL);
ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
POWERCAP_PAI_SET, 4, domain,
&fc[POWERCAP_FC_PAI].set_addr,
&fc[POWERCAP_FC_PAI].set_db);
ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
POWERCAP_PAI_GET, 4, domain,
&fc[POWERCAP_FC_PAI].get_addr, NULL);
*p_fc = fc;
}
static int scmi_powercap_notify(const struct scmi_protocol_handle *ph,
u32 domain, int message_id, bool enable)
{
int ret;
struct scmi_xfer *t;
switch (message_id) {
case POWERCAP_CAP_NOTIFY:
{
struct scmi_msg_powercap_notify_cap *notify;
ret = ph->xops->xfer_get_init(ph, message_id,
sizeof(*notify), 0, &t);
if (ret)
return ret;
notify = t->tx.buf;
notify->domain = cpu_to_le32(domain);
notify->notify_enable = cpu_to_le32(enable ? BIT(0) : 0);
break;
}
case POWERCAP_MEASUREMENTS_NOTIFY:
{
u32 low, high;
struct scmi_msg_powercap_notify_thresh *notify;
/*
* Note that we have to pick the most recently configured
* thresholds to build a proper POWERCAP_MEASUREMENTS_NOTIFY
* enable request. If no thresholds were ever set, fail and
* complain, since that is an indication the API has been
* used wrongly.
*/
ret = scmi_powercap_measurements_threshold_get(ph, domain,
&low, &high);
if (ret)
return ret;
if (enable && !low && !high) {
dev_err(ph->dev,
"Invalid Measurements Notify thresholds: %u/%u\n",
low, high);
return -EINVAL;
}
ret = ph->xops->xfer_get_init(ph, message_id,
sizeof(*notify), 0, &t);
if (ret)
return ret;
notify = t->tx.buf;
notify->domain = cpu_to_le32(domain);
notify->notify_enable = cpu_to_le32(enable ? BIT(0) : 0);
notify->power_thresh_low = cpu_to_le32(low);
notify->power_thresh_high = cpu_to_le32(high);
break;
}
default:
return -EINVAL;
}
ret = ph->xops->do_xfer(ph, t);
ph->xops->xfer_put(ph, t);
return ret;
}
static int
scmi_powercap_set_notify_enabled(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enable)
{
int ret, cmd_id;
struct powercap_info *pi = ph->get_priv(ph);
if (evt_id >= ARRAY_SIZE(evt_2_cmd) || src_id >= pi->num_domains)
return -EINVAL;
cmd_id = evt_2_cmd[evt_id];
ret = scmi_powercap_notify(ph, src_id, cmd_id, enable);
if (ret)
pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
evt_id, src_id, ret);
else if (cmd_id == POWERCAP_MEASUREMENTS_NOTIFY)
/*
* On success save the current notification enabled state, so
* as to be able to properly update the notification thresholds
* when they are modified on a domain for which measurement
* notifications were currently enabled.
*
* This is needed because the SCMI Notification core machinery
* and API does not support passing per-notification custom
* arguments at callback registration time.
*
* Note that this can be done here with a simple flag since the
* SCMI core Notifications code takes care of keeping proper
* per-domain enables refcounting, so that this helper function
* will be called only once (for enables) when the first user
* registers a callback on this domain and once more (disable)
* when the last user de-registers its callback.
*/
pi->states[src_id].meas_notif_enabled = enable;
return ret;
}
static void *
scmi_powercap_fill_custom_report(const struct scmi_protocol_handle *ph,
u8 evt_id, ktime_t timestamp,
const void *payld, size_t payld_sz,
void *report, u32 *src_id)
{
void *rep = NULL;
switch (evt_id) {
case SCMI_EVENT_POWERCAP_CAP_CHANGED:
{
const struct scmi_powercap_cap_changed_notify_payld *p = payld;
struct scmi_powercap_cap_changed_report *r = report;
if (sizeof(*p) != payld_sz)
break;
r->timestamp = timestamp;
r->agent_id = le32_to_cpu(p->agent_id);
r->domain_id = le32_to_cpu(p->domain_id);
r->power_cap = le32_to_cpu(p->power_cap);
r->pai = le32_to_cpu(p->pai);
*src_id = r->domain_id;
rep = r;
break;
}
case SCMI_EVENT_POWERCAP_MEASUREMENTS_CHANGED:
{
const struct scmi_powercap_meas_changed_notify_payld *p = payld;
struct scmi_powercap_meas_changed_report *r = report;
if (sizeof(*p) != payld_sz)
break;
r->timestamp = timestamp;
r->agent_id = le32_to_cpu(p->agent_id);
r->domain_id = le32_to_cpu(p->domain_id);
r->power = le32_to_cpu(p->power);
*src_id = r->domain_id;
rep = r;
break;
}
default:
break;
}
return rep;
}
static int
scmi_powercap_get_num_sources(const struct scmi_protocol_handle *ph)
{
struct powercap_info *pi = ph->get_priv(ph);
if (!pi)
return -EINVAL;
return pi->num_domains;
}
static const struct scmi_event powercap_events[] = {
{
.id = SCMI_EVENT_POWERCAP_CAP_CHANGED,
.max_payld_sz =
sizeof(struct scmi_powercap_cap_changed_notify_payld),
.max_report_sz =
sizeof(struct scmi_powercap_cap_changed_report),
},
{
.id = SCMI_EVENT_POWERCAP_MEASUREMENTS_CHANGED,
.max_payld_sz =
sizeof(struct scmi_powercap_meas_changed_notify_payld),
.max_report_sz =
sizeof(struct scmi_powercap_meas_changed_report),
},
};
static const struct scmi_event_ops powercap_event_ops = {
.get_num_sources = scmi_powercap_get_num_sources,
.set_notify_enabled = scmi_powercap_set_notify_enabled,
.fill_custom_report = scmi_powercap_fill_custom_report,
};
static const struct scmi_protocol_events powercap_protocol_events = {
.queue_sz = SCMI_PROTO_QUEUE_SZ,
.ops = &powercap_event_ops,
.evts = powercap_events,
.num_events = ARRAY_SIZE(powercap_events),
};
static int
scmi_powercap_protocol_init(const struct scmi_protocol_handle *ph)
{
int domain, ret;
u32 version;
struct powercap_info *pinfo;
ret = ph->xops->version_get(ph, &version);
if (ret)
return ret;
dev_dbg(ph->dev, "Powercap Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
if (!pinfo)
return -ENOMEM;
ret = scmi_powercap_attributes_get(ph, pinfo);
if (ret)
return ret;
pinfo->powercaps = devm_kcalloc(ph->dev, pinfo->num_domains,
sizeof(*pinfo->powercaps),
GFP_KERNEL);
if (!pinfo->powercaps)
return -ENOMEM;
pinfo->states = devm_kcalloc(ph->dev, pinfo->num_domains,
sizeof(*pinfo->states), GFP_KERNEL);
if (!pinfo->states)
return -ENOMEM;
/*
* Note that any failure in retrieving a domain attribute leads to
* failure of the whole Powercap protocol initialization: this way the
* reported Powercap domains are all assured, when accessed, to be well
* formed and correlated by a sane parent-child relationship (if any).
*/
for (domain = 0; domain < pinfo->num_domains; domain++) {
ret = scmi_powercap_domain_attributes_get(ph, pinfo, domain);
if (ret)
return ret;
if (pinfo->powercaps[domain].fastchannels)
scmi_powercap_domain_init_fc(ph, domain,
&pinfo->powercaps[domain].fc_info);
/* Grab initial state when disable is supported. */
if (PROTOCOL_REV_MAJOR(version) >= 0x2) {
ret = __scmi_powercap_cap_get(ph,
&pinfo->powercaps[domain],
&pinfo->states[domain].last_pcap);
if (ret)
return ret;
pinfo->states[domain].enabled =
!!pinfo->states[domain].last_pcap;
}
}
pinfo->version = version;
return ph->set_priv(ph, pinfo);
}
static const struct scmi_protocol scmi_powercap = {
.id = SCMI_PROTOCOL_POWERCAP,
.owner = THIS_MODULE,
.instance_init = &scmi_powercap_protocol_init,
.ops = &powercap_proto_ops,
.events = &powercap_protocol_events,
};
DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(powercap, scmi_powercap)
| linux-master | drivers/firmware/arm_scmi/powercap.c |
// SPDX-License-Identifier: GPL-2.0
/*
* System Control and Management Interface (SCMI) Message Protocol bus layer
*
* Copyright (C) 2018-2021 ARM Ltd.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>
#include "common.h"
BLOCKING_NOTIFIER_HEAD(scmi_requested_devices_nh);
EXPORT_SYMBOL_GPL(scmi_requested_devices_nh);
static DEFINE_IDA(scmi_bus_id);
static DEFINE_IDR(scmi_requested_devices);
/* Protect access to scmi_requested_devices */
static DEFINE_MUTEX(scmi_requested_devices_mtx);
struct scmi_requested_dev {
const struct scmi_device_id *id_table;
struct list_head node;
};
/* Track globally the creation of SCMI SystemPower related devices */
static atomic_t scmi_syspower_registered = ATOMIC_INIT(0);
/**
* scmi_protocol_device_request - Helper to request a device
*
* @id_table: A protocol/name pair descriptor for the device to be created.
*
* This helper lets an SCMI driver request specific devices identified by the
* @id_table to be created for each active SCMI instance.
*
* The requested device name MUST NOT already exist for any protocol;
* at first the freshly requested @id_table is annotated in the IDR table
* @scmi_requested_devices and then the requested device is advertised to any
* registered party via the @scmi_requested_devices_nh notification chain.
*
* Return: 0 on Success
*/
static int scmi_protocol_device_request(const struct scmi_device_id *id_table)
{
int ret = 0;
unsigned int id = 0;
struct list_head *head, *phead = NULL;
struct scmi_requested_dev *rdev;
pr_debug("Requesting SCMI device (%s) for protocol %x\n",
id_table->name, id_table->protocol_id);
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT) &&
!IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX)) {
pr_warn("SCMI Raw mode active. Rejecting '%s'/0x%02X\n",
id_table->name, id_table->protocol_id);
return -EINVAL;
}
/*
* Search for the matching protocol rdev list and then search it for
* any existing equally named device; fail if any duplicate is found.
*/
mutex_lock(&scmi_requested_devices_mtx);
idr_for_each_entry(&scmi_requested_devices, head, id) {
if (!phead) {
/* A list found registered in the IDR is never empty */
rdev = list_first_entry(head, struct scmi_requested_dev,
node);
if (rdev->id_table->protocol_id ==
id_table->protocol_id)
phead = head;
}
list_for_each_entry(rdev, head, node) {
if (!strcmp(rdev->id_table->name, id_table->name)) {
pr_err("Ignoring duplicate request [%d] %s\n",
rdev->id_table->protocol_id,
rdev->id_table->name);
ret = -EINVAL;
goto out;
}
}
}
/*
* No duplicate found for requested id_table, so let's create a new
* requested device entry for this new valid request.
*/
rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
if (!rdev) {
ret = -ENOMEM;
goto out;
}
rdev->id_table = id_table;
/*
* Append the new requested device table descriptor to the head of the
* related protocol list, creating such a head first if it is not
* already there.
*/
if (!phead) {
phead = kzalloc(sizeof(*phead), GFP_KERNEL);
if (!phead) {
kfree(rdev);
ret = -ENOMEM;
goto out;
}
INIT_LIST_HEAD(phead);
ret = idr_alloc(&scmi_requested_devices, (void *)phead,
id_table->protocol_id,
id_table->protocol_id + 1, GFP_KERNEL);
if (ret != id_table->protocol_id) {
pr_err("Failed to save SCMI device - ret:%d\n", ret);
kfree(rdev);
kfree(phead);
ret = -EINVAL;
goto out;
}
ret = 0;
}
list_add(&rdev->node, phead);
out:
mutex_unlock(&scmi_requested_devices_mtx);
if (!ret)
blocking_notifier_call_chain(&scmi_requested_devices_nh,
SCMI_BUS_NOTIFY_DEVICE_REQUEST,
(void *)rdev->id_table);
return ret;
}
/**
* scmi_protocol_device_unrequest - Helper to unrequest a device
*
* @id_table: A protocol/name pair descriptor for the device to be unrequested.
*
* The unrequested device, described by the provided id_table, is at first
* removed from the IDR @scmi_requested_devices and then the removal is
* advertised to any registered party via the @scmi_requested_devices_nh
* notification chain.
*/
static void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table)
{
struct list_head *phead;
pr_debug("Unrequesting SCMI device (%s) for protocol %x\n",
id_table->name, id_table->protocol_id);
mutex_lock(&scmi_requested_devices_mtx);
phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
if (phead) {
struct scmi_requested_dev *victim, *tmp;
list_for_each_entry_safe(victim, tmp, phead, node) {
if (!strcmp(victim->id_table->name, id_table->name)) {
list_del(&victim->node);
mutex_unlock(&scmi_requested_devices_mtx);
blocking_notifier_call_chain(&scmi_requested_devices_nh,
SCMI_BUS_NOTIFY_DEVICE_UNREQUEST,
(void *)victim->id_table);
kfree(victim);
mutex_lock(&scmi_requested_devices_mtx);
break;
}
}
if (list_empty(phead)) {
idr_remove(&scmi_requested_devices,
id_table->protocol_id);
kfree(phead);
}
}
mutex_unlock(&scmi_requested_devices_mtx);
}
static const struct scmi_device_id *
scmi_dev_match_id(struct scmi_device *scmi_dev, struct scmi_driver *scmi_drv)
{
const struct scmi_device_id *id = scmi_drv->id_table;
if (!id)
return NULL;
for (; id->protocol_id; id++)
if (id->protocol_id == scmi_dev->protocol_id) {
if (!id->name)
return id;
else if (!strcmp(id->name, scmi_dev->name))
return id;
}
return NULL;
}
static int scmi_dev_match(struct device *dev, struct device_driver *drv)
{
struct scmi_driver *scmi_drv = to_scmi_driver(drv);
struct scmi_device *scmi_dev = to_scmi_dev(dev);
const struct scmi_device_id *id;
id = scmi_dev_match_id(scmi_dev, scmi_drv);
if (id)
return 1;
return 0;
}
static int scmi_match_by_id_table(struct device *dev, void *data)
{
struct scmi_device *sdev = to_scmi_dev(dev);
struct scmi_device_id *id_table = data;
return sdev->protocol_id == id_table->protocol_id &&
(id_table->name && !strcmp(sdev->name, id_table->name));
}
static struct scmi_device *scmi_child_dev_find(struct device *parent,
int prot_id, const char *name)
{
struct scmi_device_id id_table;
struct device *dev;
id_table.protocol_id = prot_id;
id_table.name = name;
dev = device_find_child(parent, &id_table, scmi_match_by_id_table);
if (!dev)
return NULL;
return to_scmi_dev(dev);
}
static int scmi_dev_probe(struct device *dev)
{
struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver);
struct scmi_device *scmi_dev = to_scmi_dev(dev);
if (!scmi_dev->handle)
return -EPROBE_DEFER;
return scmi_drv->probe(scmi_dev);
}
static void scmi_dev_remove(struct device *dev)
{
struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver);
struct scmi_device *scmi_dev = to_scmi_dev(dev);
if (scmi_drv->remove)
scmi_drv->remove(scmi_dev);
}
struct bus_type scmi_bus_type = {
.name = "scmi_protocol",
.match = scmi_dev_match,
.probe = scmi_dev_probe,
.remove = scmi_dev_remove,
};
EXPORT_SYMBOL_GPL(scmi_bus_type);
int scmi_driver_register(struct scmi_driver *driver, struct module *owner,
const char *mod_name)
{
int retval;
if (!driver->probe)
return -EINVAL;
retval = scmi_protocol_device_request(driver->id_table);
if (retval)
return retval;
driver->driver.bus = &scmi_bus_type;
driver->driver.name = driver->name;
driver->driver.owner = owner;
driver->driver.mod_name = mod_name;
retval = driver_register(&driver->driver);
if (!retval)
pr_debug("Registered new scmi driver %s\n", driver->name);
return retval;
}
EXPORT_SYMBOL_GPL(scmi_driver_register);
void scmi_driver_unregister(struct scmi_driver *driver)
{
driver_unregister(&driver->driver);
scmi_protocol_device_unrequest(driver->id_table);
}
EXPORT_SYMBOL_GPL(scmi_driver_unregister);
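/*
* Illustrative registration sketch, not part of this file: a client driver
* normally does not call the two helpers above directly but goes through the
* module_scmi_driver() convenience macro with a protocol/name id_table.
* All "foo_*" names below are hypothetical placeholders.
*
*    static const struct scmi_device_id foo_id_table[] = {
*        { SCMI_PROTOCOL_POWER, "foo-power" },
*        { },
*    };
*    MODULE_DEVICE_TABLE(scmi, foo_id_table);
*
*    static struct scmi_driver foo_driver = {
*        .name = "foo",
*        .probe = foo_probe,
*        .remove = foo_remove,
*        .id_table = foo_id_table,
*    };
*    module_scmi_driver(foo_driver);
*/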
static void scmi_device_release(struct device *dev)
{
kfree(to_scmi_dev(dev));
}
static void __scmi_device_destroy(struct scmi_device *scmi_dev)
{
pr_debug("(%s) Destroying SCMI device '%s' for protocol 0x%x (%s)\n",
of_node_full_name(scmi_dev->dev.parent->of_node),
dev_name(&scmi_dev->dev), scmi_dev->protocol_id,
scmi_dev->name);
if (scmi_dev->protocol_id == SCMI_PROTOCOL_SYSTEM)
atomic_set(&scmi_syspower_registered, 0);
kfree_const(scmi_dev->name);
ida_free(&scmi_bus_id, scmi_dev->id);
device_unregister(&scmi_dev->dev);
}
static struct scmi_device *
__scmi_device_create(struct device_node *np, struct device *parent,
int protocol, const char *name)
{
int id, retval;
struct scmi_device *scmi_dev;
/*
* If a device with the same protocol/name already exists under the same
* parent (i.e. SCMI instance), just return the existing device.
* This avoids any race between the SCMI driver, creating devices for
* each DT defined protocol at probe time, and the concurrent
* registration of SCMI drivers.
*/
scmi_dev = scmi_child_dev_find(parent, protocol, name);
if (scmi_dev)
return scmi_dev;
/*
* Ignore any possible subsequent failures while creating the device
* since we are doomed anyway at that point; a mutex spanning this
* whole function is not used, to keep things simple and to avoid
* serializing all the __scmi_device_create calls across possibly
* different SCMI server instances (parent).
*/
if (protocol == SCMI_PROTOCOL_SYSTEM &&
atomic_cmpxchg(&scmi_syspower_registered, 0, 1)) {
dev_warn(parent,
"SCMI SystemPower protocol device must be unique !\n");
return NULL;
}
scmi_dev = kzalloc(sizeof(*scmi_dev), GFP_KERNEL);
if (!scmi_dev)
return NULL;
scmi_dev->name = kstrdup_const(name ?: "unknown", GFP_KERNEL);
if (!scmi_dev->name) {
kfree(scmi_dev);
return NULL;
}
id = ida_alloc_min(&scmi_bus_id, 1, GFP_KERNEL);
if (id < 0) {
kfree_const(scmi_dev->name);
kfree(scmi_dev);
return NULL;
}
scmi_dev->id = id;
scmi_dev->protocol_id = protocol;
scmi_dev->dev.parent = parent;
device_set_node(&scmi_dev->dev, of_fwnode_handle(np));
scmi_dev->dev.bus = &scmi_bus_type;
scmi_dev->dev.release = scmi_device_release;
dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id);
retval = device_register(&scmi_dev->dev);
if (retval)
goto put_dev;
pr_debug("(%s) Created SCMI device '%s' for protocol 0x%x (%s)\n",
of_node_full_name(parent->of_node),
dev_name(&scmi_dev->dev), protocol, name);
return scmi_dev;
put_dev:
kfree_const(scmi_dev->name);
put_device(&scmi_dev->dev);
ida_free(&scmi_bus_id, id);
return NULL;
}
/**
* scmi_device_create - A method to create one or more SCMI devices
*
* @np: A reference to the device node to use for the new device(s)
* @parent: The parent device to use identifying a specific SCMI instance
* @protocol: The SCMI protocol to be associated with this device
* @name: The requested-name of the device to be created; this is optional
* and if no @name is provided, all the devices currently known to
* be requested on the SCMI bus for @protocol will be created.
*
* This method can be invoked to create a single well-defined device (like
* a transport device or a device requested by an SCMI driver loaded after
* the core SCMI stack has been probed), or to create all the devices currently
* known to have been requested by the loaded SCMI drivers for a specific
* protocol (typically during SCMI core protocol enumeration at probe time).
*
* Return: The created device (or one of them if @name was NOT provided and
* multiple devices were created) or NULL if no device was created;
* note that NULL indicates an error ONLY in case a specific @name
* was provided: when @name param was not provided, a number of devices
* could have been potentially created for a whole protocol, unless no
* device was found to have been requested for that specific protocol.
*/
struct scmi_device *scmi_device_create(struct device_node *np,
struct device *parent, int protocol,
const char *name)
{
struct list_head *phead;
struct scmi_requested_dev *rdev;
struct scmi_device *scmi_dev = NULL;
if (name)
return __scmi_device_create(np, parent, protocol, name);
mutex_lock(&scmi_requested_devices_mtx);
phead = idr_find(&scmi_requested_devices, protocol);
/* Nothing to do. */
if (!phead) {
mutex_unlock(&scmi_requested_devices_mtx);
return NULL;
}
/* Walk the list of requested devices for protocol and create them */
list_for_each_entry(rdev, phead, node) {
struct scmi_device *sdev;
sdev = __scmi_device_create(np, parent,
rdev->id_table->protocol_id,
rdev->id_table->name);
/* Report errors and carry on... */
if (sdev)
scmi_dev = sdev;
else
pr_err("(%s) Failed to create device for protocol 0x%x (%s)\n",
of_node_full_name(parent->of_node),
rdev->id_table->protocol_id,
rdev->id_table->name);
}
mutex_unlock(&scmi_requested_devices_mtx);
return scmi_dev;
}
EXPORT_SYMBOL_GPL(scmi_device_create);
void scmi_device_destroy(struct device *parent, int protocol, const char *name)
{
struct scmi_device *scmi_dev;
scmi_dev = scmi_child_dev_find(parent, protocol, name);
if (scmi_dev)
__scmi_device_destroy(scmi_dev);
}
EXPORT_SYMBOL_GPL(scmi_device_destroy);
static int __scmi_devices_unregister(struct device *dev, void *data)
{
struct scmi_device *scmi_dev = to_scmi_dev(dev);
__scmi_device_destroy(scmi_dev);
return 0;
}
static void scmi_devices_unregister(void)
{
bus_for_each_dev(&scmi_bus_type, NULL, NULL, __scmi_devices_unregister);
}
static int __init scmi_bus_init(void)
{
int retval;
retval = bus_register(&scmi_bus_type);
if (retval)
pr_err("SCMI protocol bus register failed (%d)\n", retval);
pr_info("SCMI protocol bus registered\n");
return retval;
}
subsys_initcall(scmi_bus_init);
static void __exit scmi_bus_exit(void)
{
/*
* Destroy all remaining devices: just in case the drivers were
* manually unbound first and then the modules unloaded.
*/
scmi_devices_unregister();
bus_unregister(&scmi_bus_type);
ida_destroy(&scmi_bus_id);
}
module_exit(scmi_bus_exit);
MODULE_ALIAS("scmi-core");
MODULE_AUTHOR("Sudeep Holla <[email protected]>");
MODULE_DESCRIPTION("ARM SCMI protocol bus");
MODULE_LICENSE("GPL");
| linux-master | drivers/firmware/arm_scmi/bus.c |
// SPDX-License-Identifier: GPL-2.0
/*
* System Control and Management Interface (SCMI) Sensor Protocol
*
* Copyright (C) 2018-2022 ARM Ltd.
*/
#define pr_fmt(fmt) "SCMI Notifications SENSOR - " fmt
#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include "protocols.h"
#include "notify.h"
#define SCMI_MAX_NUM_SENSOR_AXIS 63
#define SCMIv2_SENSOR_PROTOCOL 0x10000
enum scmi_sensor_protocol_cmd {
SENSOR_DESCRIPTION_GET = 0x3,
SENSOR_TRIP_POINT_NOTIFY = 0x4,
SENSOR_TRIP_POINT_CONFIG = 0x5,
SENSOR_READING_GET = 0x6,
SENSOR_AXIS_DESCRIPTION_GET = 0x7,
SENSOR_LIST_UPDATE_INTERVALS = 0x8,
SENSOR_CONFIG_GET = 0x9,
SENSOR_CONFIG_SET = 0xA,
SENSOR_CONTINUOUS_UPDATE_NOTIFY = 0xB,
SENSOR_NAME_GET = 0xC,
SENSOR_AXIS_NAME_GET = 0xD,
};
struct scmi_msg_resp_sensor_attributes {
__le16 num_sensors;
u8 max_requests;
u8 reserved;
__le32 reg_addr_low;
__le32 reg_addr_high;
__le32 reg_size;
};
/* v3 attributes_low macros */
#define SUPPORTS_UPDATE_NOTIFY(x) FIELD_GET(BIT(30), (x))
#define SENSOR_TSTAMP_EXP(x) FIELD_GET(GENMASK(14, 10), (x))
#define SUPPORTS_TIMESTAMP(x) FIELD_GET(BIT(9), (x))
#define SUPPORTS_EXTEND_ATTRS(x) FIELD_GET(BIT(8), (x))
/* v2 attributes_high macros */
#define SENSOR_UPDATE_BASE(x) FIELD_GET(GENMASK(31, 27), (x))
#define SENSOR_UPDATE_SCALE(x) FIELD_GET(GENMASK(26, 22), (x))
/* v3 attributes_high macros */
#define SENSOR_AXIS_NUMBER(x) FIELD_GET(GENMASK(21, 16), (x))
#define SUPPORTS_AXIS(x) FIELD_GET(BIT(8), (x))
/* v3 resolution macros */
#define SENSOR_RES(x) FIELD_GET(GENMASK(26, 0), (x))
#define SENSOR_RES_EXP(x) FIELD_GET(GENMASK(31, 27), (x))
struct scmi_msg_resp_attrs {
__le32 min_range_low;
__le32 min_range_high;
__le32 max_range_low;
__le32 max_range_high;
};
struct scmi_msg_sensor_description {
__le32 desc_index;
};
struct scmi_msg_resp_sensor_description {
__le16 num_returned;
__le16 num_remaining;
struct scmi_sensor_descriptor {
__le32 id;
__le32 attributes_low;
/* Common attributes_low macros */
#define SUPPORTS_ASYNC_READ(x) FIELD_GET(BIT(31), (x))
#define SUPPORTS_EXTENDED_NAMES(x) FIELD_GET(BIT(29), (x))
#define NUM_TRIP_POINTS(x) FIELD_GET(GENMASK(7, 0), (x))
__le32 attributes_high;
/* Common attributes_high macros */
#define SENSOR_SCALE(x) FIELD_GET(GENMASK(15, 11), (x))
#define SENSOR_SCALE_SIGN BIT(4)
#define SENSOR_SCALE_EXTEND GENMASK(31, 5)
#define SENSOR_TYPE(x) FIELD_GET(GENMASK(7, 0), (x))
u8 name[SCMI_SHORT_NAME_MAX_SIZE];
/* only for version > 2.0 */
__le32 power;
__le32 resolution;
struct scmi_msg_resp_attrs scalar_attrs;
} desc[];
};
/* Base scmi_sensor_descriptor size excluding extended attrs after name */
#define SCMI_MSG_RESP_SENS_DESCR_BASE_SZ 28
/* Sign extend to a full s32 */
#define S32_EXT(v) \
({ \
int __v = (v); \
\
if (__v & SENSOR_SCALE_SIGN) \
__v |= SENSOR_SCALE_EXTEND; \
__v; \
})
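/*
* Worked example (illustrative only): SENSOR_SCALE() extracts a 5-bit
* two's-complement field, so a raw scale of 0x1E has SENSOR_SCALE_SIGN
* (BIT(4)) set and S32_EXT() ORs in SENSOR_SCALE_EXTEND, yielding
* 0xFFFFFFFE, i.e. -2; a raw scale of 0x03 stays +3.
*/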
struct scmi_msg_sensor_axis_description_get {
__le32 id;
__le32 axis_desc_index;
};
struct scmi_msg_resp_sensor_axis_description {
__le32 num_axis_flags;
#define NUM_AXIS_RETURNED(x) FIELD_GET(GENMASK(5, 0), (x))
#define NUM_AXIS_REMAINING(x) FIELD_GET(GENMASK(31, 26), (x))
struct scmi_axis_descriptor {
__le32 id;
__le32 attributes_low;
#define SUPPORTS_EXTENDED_AXIS_NAMES(x) FIELD_GET(BIT(9), (x))
__le32 attributes_high;
u8 name[SCMI_SHORT_NAME_MAX_SIZE];
__le32 resolution;
struct scmi_msg_resp_attrs attrs;
} desc[];
};
struct scmi_msg_resp_sensor_axis_names_description {
__le32 num_axis_flags;
struct scmi_sensor_axis_name_descriptor {
__le32 axis_id;
u8 name[SCMI_MAX_STR_SIZE];
} desc[];
};
/* Base scmi_axis_descriptor size excluding extended attrs after name */
#define SCMI_MSG_RESP_AXIS_DESCR_BASE_SZ 28
struct scmi_msg_sensor_list_update_intervals {
__le32 id;
__le32 index;
};
struct scmi_msg_resp_sensor_list_update_intervals {
__le32 num_intervals_flags;
#define NUM_INTERVALS_RETURNED(x) FIELD_GET(GENMASK(11, 0), (x))
#define SEGMENTED_INTVL_FORMAT(x) FIELD_GET(BIT(12), (x))
#define NUM_INTERVALS_REMAINING(x) FIELD_GET(GENMASK(31, 16), (x))
__le32 intervals[];
};
struct scmi_msg_sensor_request_notify {
__le32 id;
__le32 event_control;
#define SENSOR_NOTIFY_ALL BIT(0)
};
struct scmi_msg_set_sensor_trip_point {
__le32 id;
__le32 event_control;
#define SENSOR_TP_EVENT_MASK (0x3)
#define SENSOR_TP_DISABLED 0x0
#define SENSOR_TP_POSITIVE 0x1
#define SENSOR_TP_NEGATIVE 0x2
#define SENSOR_TP_BOTH 0x3
#define SENSOR_TP_ID(x) (((x) & 0xff) << 4)
__le32 value_low;
__le32 value_high;
};
struct scmi_msg_sensor_config_set {
__le32 id;
__le32 sensor_config;
};
struct scmi_msg_sensor_reading_get {
__le32 id;
__le32 flags;
#define SENSOR_READ_ASYNC BIT(0)
};
struct scmi_resp_sensor_reading_complete {
__le32 id;
__le32 readings_low;
__le32 readings_high;
};
struct scmi_sensor_reading_resp {
__le32 sensor_value_low;
__le32 sensor_value_high;
__le32 timestamp_low;
__le32 timestamp_high;
};
struct scmi_resp_sensor_reading_complete_v3 {
__le32 id;
struct scmi_sensor_reading_resp readings[];
};
struct scmi_sensor_trip_notify_payld {
__le32 agent_id;
__le32 sensor_id;
__le32 trip_point_desc;
};
struct scmi_sensor_update_notify_payld {
__le32 agent_id;
__le32 sensor_id;
struct scmi_sensor_reading_resp readings[];
};
struct sensors_info {
u32 version;
int num_sensors;
int max_requests;
u64 reg_addr;
u32 reg_size;
struct scmi_sensor_info *sensors;
};
static int scmi_sensor_attributes_get(const struct scmi_protocol_handle *ph,
struct sensors_info *si)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_sensor_attributes *attr;
ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
0, sizeof(*attr), &t);
if (ret)
return ret;
attr = t->rx.buf;
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
si->num_sensors = le16_to_cpu(attr->num_sensors);
si->max_requests = attr->max_requests;
si->reg_addr = le32_to_cpu(attr->reg_addr_low) |
(u64)le32_to_cpu(attr->reg_addr_high) << 32;
si->reg_size = le32_to_cpu(attr->reg_size);
}
ph->xops->xfer_put(ph, t);
return ret;
}
static inline void scmi_parse_range_attrs(struct scmi_range_attrs *out,
const struct scmi_msg_resp_attrs *in)
{
out->min_range = get_unaligned_le64((void *)&in->min_range_low);
out->max_range = get_unaligned_le64((void *)&in->max_range_low);
}
struct scmi_sens_ipriv {
void *priv;
struct device *dev;
};
static void iter_intervals_prepare_message(void *message,
unsigned int desc_index,
const void *p)
{
struct scmi_msg_sensor_list_update_intervals *msg = message;
const struct scmi_sensor_info *s;
s = ((const struct scmi_sens_ipriv *)p)->priv;
/* Set the sensor ID and the number of intervals to be skipped/already read */
msg->id = cpu_to_le32(s->id);
msg->index = cpu_to_le32(desc_index);
}
static int iter_intervals_update_state(struct scmi_iterator_state *st,
const void *response, void *p)
{
u32 flags;
struct scmi_sensor_info *s = ((struct scmi_sens_ipriv *)p)->priv;
struct device *dev = ((struct scmi_sens_ipriv *)p)->dev;
const struct scmi_msg_resp_sensor_list_update_intervals *r = response;
flags = le32_to_cpu(r->num_intervals_flags);
st->num_returned = NUM_INTERVALS_RETURNED(flags);
st->num_remaining = NUM_INTERVALS_REMAINING(flags);
/*
* The maximum number of intervals is not declared anywhere beforehand,
* so assume it is returned+remaining on the first call.
*/
if (!st->max_resources) {
s->intervals.segmented = SEGMENTED_INTVL_FORMAT(flags);
s->intervals.count = st->num_returned + st->num_remaining;
/* segmented intervals are reported in one triplet */
if (s->intervals.segmented &&
(st->num_remaining || st->num_returned != 3)) {
dev_err(dev,
"Sensor ID:%d advertises an invalid segmented interval (%d)\n",
s->id, s->intervals.count);
s->intervals.segmented = false;
s->intervals.count = 0;
return -EINVAL;
}
/* Direct allocation when exceeding pre-allocated */
if (s->intervals.count >= SCMI_MAX_PREALLOC_POOL) {
s->intervals.desc =
devm_kcalloc(dev,
s->intervals.count,
sizeof(*s->intervals.desc),
GFP_KERNEL);
if (!s->intervals.desc) {
s->intervals.segmented = false;
s->intervals.count = 0;
return -ENOMEM;
}
}
st->max_resources = s->intervals.count;
}
return 0;
}
static int
iter_intervals_process_response(const struct scmi_protocol_handle *ph,
const void *response,
struct scmi_iterator_state *st, void *p)
{
const struct scmi_msg_resp_sensor_list_update_intervals *r = response;
struct scmi_sensor_info *s = ((struct scmi_sens_ipriv *)p)->priv;
s->intervals.desc[st->desc_index + st->loop_idx] =
le32_to_cpu(r->intervals[st->loop_idx]);
return 0;
}
static int scmi_sensor_update_intervals(const struct scmi_protocol_handle *ph,
struct scmi_sensor_info *s)
{
void *iter;
struct scmi_iterator_ops ops = {
.prepare_message = iter_intervals_prepare_message,
.update_state = iter_intervals_update_state,
.process_response = iter_intervals_process_response,
};
struct scmi_sens_ipriv upriv = {
.priv = s,
.dev = ph->dev,
};
iter = ph->hops->iter_response_init(ph, &ops, s->intervals.count,
SENSOR_LIST_UPDATE_INTERVALS,
sizeof(struct scmi_msg_sensor_list_update_intervals),
&upriv);
if (IS_ERR(iter))
return PTR_ERR(iter);
return ph->hops->iter_response_run(iter);
}
struct scmi_apriv {
bool any_axes_support_extended_names;
struct scmi_sensor_info *s;
};
static void iter_axes_desc_prepare_message(void *message,
const unsigned int desc_index,
const void *priv)
{
struct scmi_msg_sensor_axis_description_get *msg = message;
const struct scmi_apriv *apriv = priv;
/* Set the sensor ID and the number of axis descriptors to be skipped/already read */
msg->id = cpu_to_le32(apriv->s->id);
msg->axis_desc_index = cpu_to_le32(desc_index);
}
static int
iter_axes_desc_update_state(struct scmi_iterator_state *st,
const void *response, void *priv)
{
u32 flags;
const struct scmi_msg_resp_sensor_axis_description *r = response;
flags = le32_to_cpu(r->num_axis_flags);
st->num_returned = NUM_AXIS_RETURNED(flags);
st->num_remaining = NUM_AXIS_REMAINING(flags);
st->priv = (void *)&r->desc[0];
return 0;
}
static int
iter_axes_desc_process_response(const struct scmi_protocol_handle *ph,
const void *response,
struct scmi_iterator_state *st, void *priv)
{
u32 attrh, attrl;
struct scmi_sensor_axis_info *a;
size_t dsize = SCMI_MSG_RESP_AXIS_DESCR_BASE_SZ;
struct scmi_apriv *apriv = priv;
const struct scmi_axis_descriptor *adesc = st->priv;
attrl = le32_to_cpu(adesc->attributes_low);
if (SUPPORTS_EXTENDED_AXIS_NAMES(attrl))
apriv->any_axes_support_extended_names = true;
a = &apriv->s->axis[st->desc_index + st->loop_idx];
a->id = le32_to_cpu(adesc->id);
a->extended_attrs = SUPPORTS_EXTEND_ATTRS(attrl);
attrh = le32_to_cpu(adesc->attributes_high);
a->scale = S32_EXT(SENSOR_SCALE(attrh));
a->type = SENSOR_TYPE(attrh);
strscpy(a->name, adesc->name, SCMI_SHORT_NAME_MAX_SIZE);
if (a->extended_attrs) {
unsigned int ares = le32_to_cpu(adesc->resolution);
a->resolution = SENSOR_RES(ares);
a->exponent = S32_EXT(SENSOR_RES_EXP(ares));
dsize += sizeof(adesc->resolution);
scmi_parse_range_attrs(&a->attrs, &adesc->attrs);
dsize += sizeof(adesc->attrs);
}
st->priv = ((u8 *)adesc + dsize);
return 0;
}
static int
iter_axes_extended_name_update_state(struct scmi_iterator_state *st,
const void *response, void *priv)
{
u32 flags;
const struct scmi_msg_resp_sensor_axis_names_description *r = response;
flags = le32_to_cpu(r->num_axis_flags);
st->num_returned = NUM_AXIS_RETURNED(flags);
st->num_remaining = NUM_AXIS_REMAINING(flags);
st->priv = (void *)&r->desc[0];
return 0;
}
static int
iter_axes_extended_name_process_response(const struct scmi_protocol_handle *ph,
const void *response,
struct scmi_iterator_state *st,
void *priv)
{
struct scmi_sensor_axis_info *a;
const struct scmi_apriv *apriv = priv;
struct scmi_sensor_axis_name_descriptor *adesc = st->priv;
u32 axis_id = le32_to_cpu(adesc->axis_id);
if (axis_id >= st->max_resources)
return -EPROTO;
/*
* Pick the corresponding descriptor based on the axis_id embedded
* in the reply since the list of axes supporting extended names
* can be a subset of all the axes.
*/
a = &apriv->s->axis[axis_id];
strscpy(a->name, adesc->name, SCMI_MAX_STR_SIZE);
st->priv = ++adesc;
return 0;
}
static int
scmi_sensor_axis_extended_names_get(const struct scmi_protocol_handle *ph,
struct scmi_sensor_info *s)
{
int ret;
void *iter;
struct scmi_iterator_ops ops = {
.prepare_message = iter_axes_desc_prepare_message,
.update_state = iter_axes_extended_name_update_state,
.process_response = iter_axes_extended_name_process_response,
};
struct scmi_apriv apriv = {
.any_axes_support_extended_names = false,
.s = s,
};
iter = ph->hops->iter_response_init(ph, &ops, s->num_axis,
SENSOR_AXIS_NAME_GET,
sizeof(struct scmi_msg_sensor_axis_description_get),
&apriv);
if (IS_ERR(iter))
return PTR_ERR(iter);
/*
* Do not cause whole protocol initialization failure when failing to
* get extended names for axes.
*/
ret = ph->hops->iter_response_run(iter);
if (ret)
dev_warn(ph->dev,
"Failed to get axes extended names for %s (ret:%d).\n",
s->name, ret);
return 0;
}
static int scmi_sensor_axis_description(const struct scmi_protocol_handle *ph,
struct scmi_sensor_info *s,
u32 version)
{
int ret;
void *iter;
struct scmi_iterator_ops ops = {
.prepare_message = iter_axes_desc_prepare_message,
.update_state = iter_axes_desc_update_state,
.process_response = iter_axes_desc_process_response,
};
struct scmi_apriv apriv = {
.any_axes_support_extended_names = false,
.s = s,
};
s->axis = devm_kcalloc(ph->dev, s->num_axis,
sizeof(*s->axis), GFP_KERNEL);
if (!s->axis)
return -ENOMEM;
iter = ph->hops->iter_response_init(ph, &ops, s->num_axis,
SENSOR_AXIS_DESCRIPTION_GET,
sizeof(struct scmi_msg_sensor_axis_description_get),
&apriv);
if (IS_ERR(iter))
return PTR_ERR(iter);
ret = ph->hops->iter_response_run(iter);
if (ret)
return ret;
if (PROTOCOL_REV_MAJOR(version) >= 0x3 &&
apriv.any_axes_support_extended_names)
ret = scmi_sensor_axis_extended_names_get(ph, s);
return ret;
}
static void iter_sens_descr_prepare_message(void *message,
unsigned int desc_index,
const void *priv)
{
struct scmi_msg_sensor_description *msg = message;
msg->desc_index = cpu_to_le32(desc_index);
}
static int iter_sens_descr_update_state(struct scmi_iterator_state *st,
const void *response, void *priv)
{
const struct scmi_msg_resp_sensor_description *r = response;
st->num_returned = le16_to_cpu(r->num_returned);
st->num_remaining = le16_to_cpu(r->num_remaining);
st->priv = (void *)&r->desc[0];
return 0;
}
static int
iter_sens_descr_process_response(const struct scmi_protocol_handle *ph,
const void *response,
struct scmi_iterator_state *st, void *priv)
{
int ret = 0;
u32 attrh, attrl;
size_t dsize = SCMI_MSG_RESP_SENS_DESCR_BASE_SZ;
struct scmi_sensor_info *s;
struct sensors_info *si = priv;
const struct scmi_sensor_descriptor *sdesc = st->priv;
s = &si->sensors[st->desc_index + st->loop_idx];
s->id = le32_to_cpu(sdesc->id);
attrl = le32_to_cpu(sdesc->attributes_low);
/* common bitfields parsing */
s->async = SUPPORTS_ASYNC_READ(attrl);
s->num_trip_points = NUM_TRIP_POINTS(attrl);
/*
* Only SCMIv3.0-specific bitfields below.
* Such bitfields are assumed to be zeroed on firmware versions
* where they are not relevant (assuming the firmware is not buggy).
*/
s->update = SUPPORTS_UPDATE_NOTIFY(attrl);
s->timestamped = SUPPORTS_TIMESTAMP(attrl);
if (s->timestamped)
s->tstamp_scale = S32_EXT(SENSOR_TSTAMP_EXP(attrl));
s->extended_scalar_attrs = SUPPORTS_EXTEND_ATTRS(attrl);
attrh = le32_to_cpu(sdesc->attributes_high);
/* common bitfields parsing */
s->scale = S32_EXT(SENSOR_SCALE(attrh));
s->type = SENSOR_TYPE(attrh);
/* Use pre-allocated pool wherever possible */
s->intervals.desc = s->intervals.prealloc_pool;
if (si->version == SCMIv2_SENSOR_PROTOCOL) {
s->intervals.segmented = false;
s->intervals.count = 1;
/*
* Convert SCMIv2.0 update interval format to
* SCMIv3.0 to be used as the common exposed
* descriptor, accessible via common macros.
*/
s->intervals.desc[0] = (SENSOR_UPDATE_BASE(attrh) << 5) |
SENSOR_UPDATE_SCALE(attrh);
} else {
/*
* From SCMIv3.0 update intervals are retrieved
* via a dedicated (optional) command.
* Since the command is optional, on error carry
* on without any update interval.
*/
if (scmi_sensor_update_intervals(ph, s))
dev_dbg(ph->dev,
"Update Intervals not available for sensor ID:%d\n",
s->id);
}
/*
* Only bitfields specific to SCMI versions newer than v2.0 below.
* Such bitfields are assumed to be zeroed on firmware versions
* where they are not relevant (assuming the firmware is not buggy).
*/
s->num_axis = min_t(unsigned int,
SUPPORTS_AXIS(attrh) ?
SENSOR_AXIS_NUMBER(attrh) : 0,
SCMI_MAX_NUM_SENSOR_AXIS);
strscpy(s->name, sdesc->name, SCMI_SHORT_NAME_MAX_SIZE);
/*
* If supported, overwrite the short name with the extended
* one; on error just carry on and use the already provided
* short name.
*/
if (PROTOCOL_REV_MAJOR(si->version) >= 0x3 &&
SUPPORTS_EXTENDED_NAMES(attrl))
ph->hops->extended_name_get(ph, SENSOR_NAME_GET, s->id,
s->name, SCMI_MAX_STR_SIZE);
if (s->extended_scalar_attrs) {
s->sensor_power = le32_to_cpu(sdesc->power);
dsize += sizeof(sdesc->power);
/* Only for sensors reporting scalar values */
if (s->num_axis == 0) {
unsigned int sres = le32_to_cpu(sdesc->resolution);
s->resolution = SENSOR_RES(sres);
s->exponent = S32_EXT(SENSOR_RES_EXP(sres));
dsize += sizeof(sdesc->resolution);
scmi_parse_range_attrs(&s->scalar_attrs,
&sdesc->scalar_attrs);
dsize += sizeof(sdesc->scalar_attrs);
}
}
if (s->num_axis > 0)
ret = scmi_sensor_axis_description(ph, s, si->version);
st->priv = ((u8 *)sdesc + dsize);
return ret;
}
static int scmi_sensor_description_get(const struct scmi_protocol_handle *ph,
struct sensors_info *si)
{
void *iter;
struct scmi_iterator_ops ops = {
.prepare_message = iter_sens_descr_prepare_message,
.update_state = iter_sens_descr_update_state,
.process_response = iter_sens_descr_process_response,
};
iter = ph->hops->iter_response_init(ph, &ops, si->num_sensors,
SENSOR_DESCRIPTION_GET,
sizeof(__le32), si);
if (IS_ERR(iter))
return PTR_ERR(iter);
return ph->hops->iter_response_run(iter);
}
static inline int
scmi_sensor_request_notify(const struct scmi_protocol_handle *ph, u32 sensor_id,
u8 message_id, bool enable)
{
int ret;
u32 evt_cntl = enable ? SENSOR_NOTIFY_ALL : 0;
struct scmi_xfer *t;
struct scmi_msg_sensor_request_notify *cfg;
ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*cfg), 0, &t);
if (ret)
return ret;
cfg = t->tx.buf;
cfg->id = cpu_to_le32(sensor_id);
cfg->event_control = cpu_to_le32(evt_cntl);
ret = ph->xops->do_xfer(ph, t);
ph->xops->xfer_put(ph, t);
return ret;
}
static int scmi_sensor_trip_point_notify(const struct scmi_protocol_handle *ph,
u32 sensor_id, bool enable)
{
return scmi_sensor_request_notify(ph, sensor_id,
SENSOR_TRIP_POINT_NOTIFY,
enable);
}
static int
scmi_sensor_continuous_update_notify(const struct scmi_protocol_handle *ph,
u32 sensor_id, bool enable)
{
return scmi_sensor_request_notify(ph, sensor_id,
SENSOR_CONTINUOUS_UPDATE_NOTIFY,
enable);
}
static int
scmi_sensor_trip_point_config(const struct scmi_protocol_handle *ph,
u32 sensor_id, u8 trip_id, u64 trip_value)
{
int ret;
u32 evt_cntl = SENSOR_TP_BOTH;
struct scmi_xfer *t;
struct scmi_msg_set_sensor_trip_point *trip;
ret = ph->xops->xfer_get_init(ph, SENSOR_TRIP_POINT_CONFIG,
sizeof(*trip), 0, &t);
if (ret)
return ret;
trip = t->tx.buf;
trip->id = cpu_to_le32(sensor_id);
trip->event_control = cpu_to_le32(evt_cntl | SENSOR_TP_ID(trip_id));
trip->value_low = cpu_to_le32(trip_value & 0xffffffff);
trip->value_high = cpu_to_le32(trip_value >> 32);
ret = ph->xops->do_xfer(ph, t);
ph->xops->xfer_put(ph, t);
return ret;
}
static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph,
u32 sensor_id, u32 *sensor_config)
{
int ret;
struct scmi_xfer *t;
struct sensors_info *si = ph->get_priv(ph);
if (sensor_id >= si->num_sensors)
return -EINVAL;
ret = ph->xops->xfer_get_init(ph, SENSOR_CONFIG_GET,
sizeof(__le32), sizeof(__le32), &t);
if (ret)
return ret;
put_unaligned_le32(sensor_id, t->tx.buf);
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
struct scmi_sensor_info *s = si->sensors + sensor_id;
*sensor_config = get_unaligned_le64(t->rx.buf);
s->sensor_config = *sensor_config;
}
ph->xops->xfer_put(ph, t);
return ret;
}
static int scmi_sensor_config_set(const struct scmi_protocol_handle *ph,
u32 sensor_id, u32 sensor_config)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_sensor_config_set *msg;
struct sensors_info *si = ph->get_priv(ph);
if (sensor_id >= si->num_sensors)
return -EINVAL;
ret = ph->xops->xfer_get_init(ph, SENSOR_CONFIG_SET,
sizeof(*msg), 0, &t);
if (ret)
return ret;
msg = t->tx.buf;
msg->id = cpu_to_le32(sensor_id);
msg->sensor_config = cpu_to_le32(sensor_config);
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
struct scmi_sensor_info *s = si->sensors + sensor_id;
s->sensor_config = sensor_config;
}
ph->xops->xfer_put(ph, t);
return ret;
}
/**
* scmi_sensor_reading_get - Read scalar sensor value
* @ph: Protocol handle
* @sensor_id: Sensor ID
* @value: The 64bit value sensor reading
*
* This function returns a single 64 bit reading value representing the sensor
* value; if the platform SCMI Protocol implementation and the sensor support
* multiple axes and timestamped reads, this just returns the first axis while
* dropping the timestamp value.
* Use scmi_sensor_reading_get_timestamped() instead to retrieve the array of
* timestamped multi-axis values.
*
* Return: 0 on Success
*/
static int scmi_sensor_reading_get(const struct scmi_protocol_handle *ph,
u32 sensor_id, u64 *value)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_sensor_reading_get *sensor;
struct scmi_sensor_info *s;
struct sensors_info *si = ph->get_priv(ph);
if (sensor_id >= si->num_sensors)
return -EINVAL;
ret = ph->xops->xfer_get_init(ph, SENSOR_READING_GET,
sizeof(*sensor), 0, &t);
if (ret)
return ret;
sensor = t->tx.buf;
sensor->id = cpu_to_le32(sensor_id);
s = si->sensors + sensor_id;
if (s->async) {
sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC);
ret = ph->xops->do_xfer_with_response(ph, t);
if (!ret) {
struct scmi_resp_sensor_reading_complete *resp;
resp = t->rx.buf;
if (le32_to_cpu(resp->id) == sensor_id)
*value =
get_unaligned_le64(&resp->readings_low);
else
ret = -EPROTO;
}
} else {
sensor->flags = cpu_to_le32(0);
ret = ph->xops->do_xfer(ph, t);
if (!ret)
*value = get_unaligned_le64(t->rx.buf);
}
ph->xops->xfer_put(ph, t);
return ret;
}
static inline void
scmi_parse_sensor_readings(struct scmi_sensor_reading *out,
const struct scmi_sensor_reading_resp *in)
{
out->value = get_unaligned_le64((void *)&in->sensor_value_low);
out->timestamp = get_unaligned_le64((void *)&in->timestamp_low);
}
/**
* scmi_sensor_reading_get_timestamped - Read multiple-axis timestamped values
* @ph: Protocol handle
* @sensor_id: Sensor ID
* @count: The length of the provided @readings array
* @readings: An array of elements each representing a timestamped per-axis
* reading of type @struct scmi_sensor_reading.
* Returned readings are ordered as the @axis descriptors array
* included in @struct scmi_sensor_info and the max number of
* returned elements is min(@count, @num_axis); ideally the provided
* array should be of length @count equal to @num_axis.
*
* Return: 0 on Success
*/
static int
scmi_sensor_reading_get_timestamped(const struct scmi_protocol_handle *ph,
u32 sensor_id, u8 count,
struct scmi_sensor_reading *readings)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_sensor_reading_get *sensor;
struct scmi_sensor_info *s;
struct sensors_info *si = ph->get_priv(ph);
if (sensor_id >= si->num_sensors)
return -EINVAL;
s = si->sensors + sensor_id;
if (!count || !readings ||
(!s->num_axis && count > 1) || (s->num_axis && count > s->num_axis))
return -EINVAL;
ret = ph->xops->xfer_get_init(ph, SENSOR_READING_GET,
sizeof(*sensor), 0, &t);
if (ret)
return ret;
sensor = t->tx.buf;
sensor->id = cpu_to_le32(sensor_id);
if (s->async) {
sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC);
ret = ph->xops->do_xfer_with_response(ph, t);
if (!ret) {
int i;
struct scmi_resp_sensor_reading_complete_v3 *resp;
resp = t->rx.buf;
/* Retrieve only the number of requested axes anyway */
if (le32_to_cpu(resp->id) == sensor_id)
for (i = 0; i < count; i++)
scmi_parse_sensor_readings(&readings[i],
&resp->readings[i]);
else
ret = -EPROTO;
}
} else {
sensor->flags = cpu_to_le32(0);
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
int i;
struct scmi_sensor_reading_resp *resp_readings;
resp_readings = t->rx.buf;
for (i = 0; i < count; i++)
scmi_parse_sensor_readings(&readings[i],
&resp_readings[i]);
}
}
ph->xops->xfer_put(ph, t);
return ret;
}
static const struct scmi_sensor_info *
scmi_sensor_info_get(const struct scmi_protocol_handle *ph, u32 sensor_id)
{
struct sensors_info *si = ph->get_priv(ph);
if (sensor_id >= si->num_sensors)
return NULL;
return si->sensors + sensor_id;
}
static int scmi_sensor_count_get(const struct scmi_protocol_handle *ph)
{
struct sensors_info *si = ph->get_priv(ph);
return si->num_sensors;
}
static const struct scmi_sensor_proto_ops sensor_proto_ops = {
.count_get = scmi_sensor_count_get,
.info_get = scmi_sensor_info_get,
.trip_point_config = scmi_sensor_trip_point_config,
.reading_get = scmi_sensor_reading_get,
.reading_get_timestamped = scmi_sensor_reading_get_timestamped,
.config_get = scmi_sensor_config_get,
.config_set = scmi_sensor_config_set,
};
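/*
 * Illustrative sketch (not part of this driver): how a hypothetical SCMI
 * driver user could consume the ops above. The devm_protocol_get() helper
 * and the scmi_device/scmi_sensor_proto_ops layouts are assumed to match
 * include/linux/scmi_protocol.h; treat this as a usage hint under those
 * assumptions, not as authoritative API documentation.
 *
 *	static int example_read_sensor0(struct scmi_device *sdev)
 *	{
 *		u64 value;
 *		struct scmi_protocol_handle *ph;
 *		const struct scmi_sensor_proto_ops *sensor_ops;
 *		const struct scmi_sensor_info *sinfo;
 *		struct scmi_sensor_reading readings[SCMI_MAX_NUM_SENSOR_AXIS];
 *
 *		sensor_ops = sdev->handle->devm_protocol_get(sdev,
 *						SCMI_PROTOCOL_SENSOR, &ph);
 *		if (IS_ERR(sensor_ops))
 *			return PTR_ERR(sensor_ops);
 *
 *		if (sensor_ops->count_get(ph) <= 0)
 *			return -ENODEV;
 *
 *		sinfo = sensor_ops->info_get(ph, 0);
 *		if (!sinfo)
 *			return -ENODEV;
 *
 *		if (sinfo->num_axis)
 *			return sensor_ops->reading_get_timestamped(ph,
 *					sinfo->id, sinfo->num_axis, readings);
 *
 *		return sensor_ops->reading_get(ph, sinfo->id, &value);
 *	}
 */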
static int scmi_sensor_set_notify_enabled(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enable)
{
int ret;
switch (evt_id) {
case SCMI_EVENT_SENSOR_TRIP_POINT_EVENT:
ret = scmi_sensor_trip_point_notify(ph, src_id, enable);
break;
case SCMI_EVENT_SENSOR_UPDATE:
ret = scmi_sensor_continuous_update_notify(ph, src_id, enable);
break;
default:
ret = -EINVAL;
break;
}
if (ret)
pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
evt_id, src_id, ret);
return ret;
}
static void *
scmi_sensor_fill_custom_report(const struct scmi_protocol_handle *ph,
u8 evt_id, ktime_t timestamp,
const void *payld, size_t payld_sz,
void *report, u32 *src_id)
{
void *rep = NULL;
switch (evt_id) {
case SCMI_EVENT_SENSOR_TRIP_POINT_EVENT:
{
const struct scmi_sensor_trip_notify_payld *p = payld;
struct scmi_sensor_trip_point_report *r = report;
if (sizeof(*p) != payld_sz)
break;
r->timestamp = timestamp;
r->agent_id = le32_to_cpu(p->agent_id);
r->sensor_id = le32_to_cpu(p->sensor_id);
r->trip_point_desc = le32_to_cpu(p->trip_point_desc);
*src_id = r->sensor_id;
rep = r;
break;
}
case SCMI_EVENT_SENSOR_UPDATE:
{
int i;
struct scmi_sensor_info *s;
const struct scmi_sensor_update_notify_payld *p = payld;
struct scmi_sensor_update_report *r = report;
struct sensors_info *sinfo = ph->get_priv(ph);
/* payld_sz is variable for this event */
r->sensor_id = le32_to_cpu(p->sensor_id);
if (r->sensor_id >= sinfo->num_sensors)
break;
r->timestamp = timestamp;
r->agent_id = le32_to_cpu(p->agent_id);
s = &sinfo->sensors[r->sensor_id];
/*
* The generated report r (@struct scmi_sensor_update_report)
* was pre-allocated to contain up to SCMI_MAX_NUM_SENSOR_AXIS
* readings: here it is filled with the effective @num_axis
* readings defined for this sensor or 1 for scalar sensors.
*/
r->readings_count = s->num_axis ?: 1;
for (i = 0; i < r->readings_count; i++)
scmi_parse_sensor_readings(&r->readings[i],
&p->readings[i]);
*src_id = r->sensor_id;
rep = r;
break;
}
default:
break;
}
return rep;
}
static int scmi_sensor_get_num_sources(const struct scmi_protocol_handle *ph)
{
struct sensors_info *si = ph->get_priv(ph);
return si->num_sensors;
}
static const struct scmi_event sensor_events[] = {
{
.id = SCMI_EVENT_SENSOR_TRIP_POINT_EVENT,
.max_payld_sz = sizeof(struct scmi_sensor_trip_notify_payld),
.max_report_sz = sizeof(struct scmi_sensor_trip_point_report),
},
{
.id = SCMI_EVENT_SENSOR_UPDATE,
.max_payld_sz =
sizeof(struct scmi_sensor_update_notify_payld) +
SCMI_MAX_NUM_SENSOR_AXIS *
sizeof(struct scmi_sensor_reading_resp),
.max_report_sz = sizeof(struct scmi_sensor_update_report) +
SCMI_MAX_NUM_SENSOR_AXIS *
sizeof(struct scmi_sensor_reading),
},
};
static const struct scmi_event_ops sensor_event_ops = {
.get_num_sources = scmi_sensor_get_num_sources,
.set_notify_enabled = scmi_sensor_set_notify_enabled,
.fill_custom_report = scmi_sensor_fill_custom_report,
};
static const struct scmi_protocol_events sensor_protocol_events = {
.queue_sz = SCMI_PROTO_QUEUE_SZ,
.ops = &sensor_event_ops,
.evts = sensor_events,
.num_events = ARRAY_SIZE(sensor_events),
};
static int scmi_sensors_protocol_init(const struct scmi_protocol_handle *ph)
{
u32 version;
int ret;
struct sensors_info *sinfo;
ret = ph->xops->version_get(ph, &version);
if (ret)
return ret;
dev_dbg(ph->dev, "Sensor Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
sinfo = devm_kzalloc(ph->dev, sizeof(*sinfo), GFP_KERNEL);
if (!sinfo)
return -ENOMEM;
sinfo->version = version;
ret = scmi_sensor_attributes_get(ph, sinfo);
if (ret)
return ret;
sinfo->sensors = devm_kcalloc(ph->dev, sinfo->num_sensors,
sizeof(*sinfo->sensors), GFP_KERNEL);
if (!sinfo->sensors)
return -ENOMEM;
ret = scmi_sensor_description_get(ph, sinfo);
if (ret)
return ret;
return ph->set_priv(ph, sinfo);
}
static const struct scmi_protocol scmi_sensors = {
.id = SCMI_PROTOCOL_SENSOR,
.owner = THIS_MODULE,
.instance_init = &scmi_sensors_protocol_init,
.ops = &sensor_proto_ops,
.events = &sensor_protocol_events,
};
DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(sensors, scmi_sensors)
| linux-master | drivers/firmware/arm_scmi/sensors.c |
// SPDX-License-Identifier: GPL-2.0
/*
* System Control and Management Interface (SCMI) Notification support
*
* Copyright (C) 2020-2021 ARM Ltd.
*/
/**
* DOC: Theory of operation
*
* SCMI Protocol specification allows the platform to signal events to
* interested agents via notification messages: this is an implementation
* of the dispatch and delivery of such notifications to the interested users
* inside the Linux kernel.
*
* An SCMI Notification core instance is initialized for each active platform
* instance identified by means of the usual &struct scmi_handle.
*
* Each SCMI Protocol implementation, during its initialization, registers with
* this core its set of supported events using scmi_register_protocol_events():
* all the needed descriptors are stored in the &struct registered_protocols and
* &struct registered_events arrays.
*
* Kernel users interested in some specific event can register their callbacks
* providing the usual notifier_block descriptor, since this core implements
* events' delivery using the standard Kernel notification chains machinery.
*
* Given the number of possible events defined by SCMI and the extensibility
* of the SCMI Protocol itself, the underlying notification chains are created
* and destroyed dynamically on demand depending on the number of users
* effectively registered for an event, so that no support structures or chains
* are allocated until at least one user has registered a notifier_block for
* such event. Similarly, events' generation itself is enabled at the platform
* level only after at least one user has registered, and it is shutdown after
* the last user for that event has gone.
*
* All users provided callbacks and allocated notification-chains are stored in
* the @registered_events_handlers hashtable. Callbacks' registration requests
* for still to be registered events are instead kept in the dedicated common
* hashtable @pending_events_handlers.
*
* An event is identified univocally by the tuple (proto_id, evt_id, src_id)
* and is served by its own dedicated notification chain; information contained
* in such tuples is used, in a few different ways, to generate the needed
* hash-keys.
*
* Here proto_id and evt_id are simply the protocol_id and message_id numbers
* as described in the SCMI Protocol specification, while src_id represents an
* optional, protocol dependent, source identifier (like domain_id, perf_id
* or sensor_id and so forth).
*
* Upon reception of a notification message from the platform the SCMI RX ISR
* passes the received message payload and some ancillary information (including
* an arrival timestamp in nanoseconds) to the core via @scmi_notify() which
* pushes the event-data itself on a protocol-dedicated kfifo queue for further
* deferred processing as specified in @scmi_events_dispatcher().
*
* Each protocol has its own dedicated work_struct and worker which, once kicked
* by the ISR, takes care to empty its own dedicated queue, delivering the
* queued items into the proper notification-chain: notification processing can
* proceed concurrently on distinct workers only between events belonging to
* different protocols, while delivery of events within the same protocol is
* still strictly ordered by time of arrival.
*
* Events' information is then extracted from the SCMI Notification messages and
* conveyed, converted into a custom per-event report struct, as the void *data
* param to the user callback provided by the registered notifier_block, so that
* from the user's perspective the callback is invoked as:
*
* int user_cb(struct notifier_block *nb, unsigned long event_id, void *report)
*
*/
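/*
 * Illustrative sketch (not part of this file): a hypothetical SCMI driver
 * user registering a notifier for sensor trip-point events. The
 * notify_ops->event_notifier_register() name and the report layout are
 * assumed to follow include/linux/scmi_protocol.h; check the public headers
 * for the authoritative signatures before relying on this.
 *
 *	static int my_trip_cb(struct notifier_block *nb, unsigned long evt,
 *			      void *data)
 *	{
 *		struct scmi_sensor_trip_point_report *r = data;
 *
 *		pr_info("trip %u hit on sensor %u\n",
 *			r->trip_point_desc, r->sensor_id);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_trip_cb };
 *
 *	u32 sensor_id = 3;	// passing NULL instead means: all sources
 *
 *	handle->notify_ops->event_notifier_register(handle,
 *					SCMI_PROTOCOL_SENSOR,
 *					SCMI_EVENT_SENSOR_TRIP_POINT_EVENT,
 *					&sensor_id, &my_nb);
 */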
#define dev_fmt(fmt) "SCMI Notifications - " fmt
#define pr_fmt(fmt) "SCMI Notifications - " fmt
#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/kfifo.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/refcount.h>
#include <linux/scmi_protocol.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include "common.h"
#include "notify.h"
#define SCMI_MAX_PROTO 256
#define PROTO_ID_MASK GENMASK(31, 24)
#define EVT_ID_MASK GENMASK(23, 16)
#define SRC_ID_MASK GENMASK(15, 0)
/*
* Builds an unsigned 32bit key from the given input tuple to be used
* as a key in hashtables.
*/
#define MAKE_HASH_KEY(p, e, s) \
(FIELD_PREP(PROTO_ID_MASK, (p)) | \
FIELD_PREP(EVT_ID_MASK, (e)) | \
FIELD_PREP(SRC_ID_MASK, (s)))
#define MAKE_ALL_SRCS_KEY(p, e) MAKE_HASH_KEY((p), (e), SRC_ID_MASK)
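/*
 * Worked example (values assumed purely for illustration): for the Sensor
 * protocol (proto_id 0x15), evt_id 0x0 and src_id 3, the macros above yield:
 *
 *	MAKE_HASH_KEY(0x15, 0x0, 3)  == 0x15000003
 *	MAKE_ALL_SRCS_KEY(0x15, 0x0) == 0x1500FFFF
 */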
/*
* Assumes that the stored obj includes its own hash-key in a field named 'key':
* with this simplification this macro can be equally used for all the objects'
* types hashed by this implementation.
*
* @__ht: The hashtable name
* @__obj: A pointer to the object type to be retrieved from the hashtable;
* it will be used as a cursor while scanning the hashtable and it will
* be possibly left as NULL when @__k is not found
* @__k: The key to search for
*/
#define KEY_FIND(__ht, __obj, __k) \
({ \
typeof(__k) k_ = __k; \
typeof(__obj) obj_; \
\
hash_for_each_possible((__ht), obj_, hash, k_) \
if (obj_->key == k_) \
break; \
__obj = obj_; \
})
#define KEY_XTRACT_PROTO_ID(key) FIELD_GET(PROTO_ID_MASK, (key))
#define KEY_XTRACT_EVT_ID(key) FIELD_GET(EVT_ID_MASK, (key))
#define KEY_XTRACT_SRC_ID(key) FIELD_GET(SRC_ID_MASK, (key))
/*
* A set of macros used to access safely @registered_protocols and
* @registered_events arrays; these are fixed in size and each entry is possibly
* populated at protocols' registration time and then only read but NEVER
* modified or removed.
*/
#define SCMI_GET_PROTO(__ni, __pid) \
({ \
typeof(__ni) ni_ = __ni; \
struct scmi_registered_events_desc *__pd = NULL; \
\
if (ni_) \
__pd = READ_ONCE(ni_->registered_protocols[(__pid)]); \
__pd; \
})
#define SCMI_GET_REVT_FROM_PD(__pd, __eid) \
({ \
typeof(__pd) pd_ = __pd; \
typeof(__eid) eid_ = __eid; \
struct scmi_registered_event *__revt = NULL; \
\
if (pd_ && eid_ < pd_->num_events) \
__revt = READ_ONCE(pd_->registered_events[eid_]); \
__revt; \
})
#define SCMI_GET_REVT(__ni, __pid, __eid) \
({ \
struct scmi_registered_event *__revt; \
struct scmi_registered_events_desc *__pd; \
\
__pd = SCMI_GET_PROTO((__ni), (__pid)); \
__revt = SCMI_GET_REVT_FROM_PD(__pd, (__eid)); \
__revt; \
})
/* A couple of utility macros to limit cruft when calling protocols' helpers */
#define REVT_NOTIFY_SET_STATUS(revt, eid, sid, state) \
({ \
typeof(revt) r = revt; \
r->proto->ops->set_notify_enabled(r->proto->ph, \
(eid), (sid), (state)); \
})
#define REVT_NOTIFY_ENABLE(revt, eid, sid) \
REVT_NOTIFY_SET_STATUS((revt), (eid), (sid), true)
#define REVT_NOTIFY_DISABLE(revt, eid, sid) \
REVT_NOTIFY_SET_STATUS((revt), (eid), (sid), false)
#define REVT_FILL_REPORT(revt, ...) \
({ \
typeof(revt) r = revt; \
r->proto->ops->fill_custom_report(r->proto->ph, \
__VA_ARGS__); \
})
#define SCMI_PENDING_HASH_SZ 4
#define SCMI_REGISTERED_HASH_SZ 6
struct scmi_registered_events_desc;
/**
* struct scmi_notify_instance - Represents an instance of the notification
* core
* @gid: GroupID used for devres
* @handle: A reference to the platform instance
* @init_work: A work item to perform final initializations of pending handlers
* @notify_wq: A reference to the allocated Kernel cmwq
* @pending_mtx: A mutex to protect @pending_events_handlers
* @registered_protocols: A statically allocated array containing pointers to
* all the registered protocol-level specific information
* related to events' handling
* @pending_events_handlers: A hashtable containing all pending events'
* handlers descriptors
*
* Each platform instance, represented by a handle, has its own instance of
* the notification subsystem represented by this structure.
*/
struct scmi_notify_instance {
void *gid;
struct scmi_handle *handle;
struct work_struct init_work;
struct workqueue_struct *notify_wq;
/* lock to protect pending_events_handlers */
struct mutex pending_mtx;
struct scmi_registered_events_desc **registered_protocols;
DECLARE_HASHTABLE(pending_events_handlers, SCMI_PENDING_HASH_SZ);
};
/**
* struct events_queue - Describes a queue and its associated worker
* @sz: Size in bytes of the related kfifo
* @kfifo: A dedicated Kernel kfifo descriptor
* @notify_work: A custom work item bound to this queue
* @wq: A reference to the associated workqueue
*
* Each protocol has its own dedicated events_queue descriptor.
*/
struct events_queue {
size_t sz;
struct kfifo kfifo;
struct work_struct notify_work;
struct workqueue_struct *wq;
};
/**
* struct scmi_event_header - A utility header
* @timestamp: The timestamp, in nanoseconds (boottime), which was associated
* with this event as soon as it entered the SCMI RX ISR
* @payld_sz: Effective size of the embedded message payload which follows
* @evt_id: Event ID (corresponds to the Event MsgID for this Protocol)
* @payld: A reference to the embedded event payload
*
* This header is prepended to each received event message payload before
* queueing it on the related &struct events_queue.
*/
struct scmi_event_header {
ktime_t timestamp;
size_t payld_sz;
unsigned char evt_id;
unsigned char payld[];
};
struct scmi_registered_event;
/**
* struct scmi_registered_events_desc - Protocol Specific information
* @id: Protocol ID
* @ops: Protocol specific and event-related operations
* @equeue: The embedded per-protocol events_queue
* @ni: A reference to the initialized instance descriptor
* @eh: A reference to pre-allocated buffer to be used as a scratch area by the
* deferred worker when fetching data from the kfifo
* @eh_sz: Size of the pre-allocated buffer @eh
* @in_flight: A reference to an in flight &struct scmi_registered_event
* @num_events: Number of events in @registered_events
* @registered_events: A dynamically allocated array holding all the registered
* events' descriptors, whose fixed-size is determined at
* compile time.
* @registered_mtx: A mutex to protect @registered_events_handlers
* @ph: SCMI protocol handle reference
* @registered_events_handlers: A hashtable containing all events' handlers
* descriptors registered for this protocol
*
* All protocols that register at least one event have their protocol-specific
* information stored here, together with the embedded allocated events_queue.
* These descriptors are stored in the @registered_protocols array at protocol
* registration time.
*
* Once these descriptors are successfully registered, they are NEVER again
* removed or modified since protocols do not unregister ever, so that, once
* we safely grab a NON-NULL reference from the array we can keep it and use it.
*/
struct scmi_registered_events_desc {
u8 id;
const struct scmi_event_ops *ops;
struct events_queue equeue;
struct scmi_notify_instance *ni;
struct scmi_event_header *eh;
size_t eh_sz;
void *in_flight;
int num_events;
struct scmi_registered_event **registered_events;
/* mutex to protect registered_events_handlers */
struct mutex registered_mtx;
const struct scmi_protocol_handle *ph;
DECLARE_HASHTABLE(registered_events_handlers, SCMI_REGISTERED_HASH_SZ);
};
/**
* struct scmi_registered_event - Event Specific Information
* @proto: A reference to the associated protocol descriptor
* @evt: A reference to the associated event descriptor (as provided at
* registration time)
* @report: A pre-allocated buffer used by the deferred worker to fill a
* customized event report
* @num_sources: The number of possible sources for this event as stated at
* events' registration time
* @sources: A reference to a dynamically allocated array used to refcount the
* events' enable requests for all the existing sources
* @sources_mtx: A mutex to serialize the access to @sources
*
* All registered events are represented by one of these structures that are
* stored in the @registered_events array at protocol registration time.
*
* Once these descriptors are successfully registered, they are NEVER again
* removed or modified since protocols do not unregister ever, so that once we
* safely grab a NON-NULL reference from the table we can keep it and use it.
*/
struct scmi_registered_event {
struct scmi_registered_events_desc *proto;
const struct scmi_event *evt;
void *report;
u32 num_sources;
refcount_t *sources;
/* locking to serialize the access to sources */
struct mutex sources_mtx;
};
/**
* struct scmi_event_handler - Event handler information
* @key: The used hashkey
* @users: A reference count for number of active users for this handler
* @r_evt: A reference to the associated registered event; when this is NULL
* this handler is pending, which means that it identifies a set of
* callbacks intended to be attached to an event which is still not
* known nor registered by any protocol at that point in time
* @chain: The notification chain dedicated to this specific event tuple
* @hash: The hlist_node used for collision handling
* @enabled: A boolean which records if event's generation has been already
* enabled for this handler as a whole
*
* This structure collects all the information needed to process a received
* event identified by the tuple (proto_id, evt_id, src_id).
* These descriptors are stored in a per-protocol @registered_events_handlers
* table using as a key a value derived from that tuple.
*/
struct scmi_event_handler {
u32 key;
refcount_t users;
struct scmi_registered_event *r_evt;
struct blocking_notifier_head chain;
struct hlist_node hash;
bool enabled;
};
#define IS_HNDL_PENDING(hndl) (!(hndl)->r_evt)
static struct scmi_event_handler *
scmi_get_active_handler(struct scmi_notify_instance *ni, u32 evt_key);
static void scmi_put_active_handler(struct scmi_notify_instance *ni,
struct scmi_event_handler *hndl);
static bool scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
struct scmi_event_handler *hndl);
/**
* scmi_lookup_and_call_event_chain() - Lookup the proper chain and call it
* @ni: A reference to the notification instance to use
* @evt_key: The key to use to lookup the related notification chain
* @report: The customized event-specific report to pass down to the callbacks
* as their *data parameter.
*/
static inline void
scmi_lookup_and_call_event_chain(struct scmi_notify_instance *ni,
u32 evt_key, void *report)
{
int ret;
struct scmi_event_handler *hndl;
/*
* Here ensure the event handler cannot vanish while using it.
* It is legitimate, though, for a handler not to be found at all here,
* e.g. when it has been unregistered by the user after some events had
* already been queued.
*/
hndl = scmi_get_active_handler(ni, evt_key);
if (!hndl)
return;
ret = blocking_notifier_call_chain(&hndl->chain,
KEY_XTRACT_EVT_ID(evt_key),
report);
/* Notifiers are NOT supposed to cut the chain ... */
WARN_ON_ONCE(ret & NOTIFY_STOP_MASK);
scmi_put_active_handler(ni, hndl);
}
/**
* scmi_process_event_header() - Dequeue and process an event header
* @eq: The queue to use
* @pd: The protocol descriptor to use
*
* Read an event header from the protocol queue into the dedicated scratch
* buffer and look for a matching registered event; in case an anomalously
* sized read is detected, just flush the queue.
*
* Return:
* * a reference to the matching registered event when found
* * ERR_PTR(-EINVAL) when NO registered event could be found
* * NULL when the queue is empty
*/
static inline struct scmi_registered_event *
scmi_process_event_header(struct events_queue *eq,
struct scmi_registered_events_desc *pd)
{
unsigned int outs;
struct scmi_registered_event *r_evt;
outs = kfifo_out(&eq->kfifo, pd->eh,
sizeof(struct scmi_event_header));
if (!outs)
return NULL;
if (outs != sizeof(struct scmi_event_header)) {
dev_err(pd->ni->handle->dev, "corrupted EVT header. Flush.\n");
kfifo_reset_out(&eq->kfifo);
return NULL;
}
r_evt = SCMI_GET_REVT_FROM_PD(pd, pd->eh->evt_id);
if (!r_evt)
r_evt = ERR_PTR(-EINVAL);
return r_evt;
}
/**
* scmi_process_event_payload() - Dequeue and process an event payload
* @eq: The queue to use
* @pd: The protocol descriptor to use
* @r_evt: The registered event descriptor to use
*
* Read an event payload from the protocol queue into the dedicated scratch
* buffer, fill a custom report and then look for matching event handlers and
* call them; skip any unknown event (as marked by scmi_process_event_header())
* and, in case an anomalously sized read is detected, just flush the queue.
*
* Return: False when the queue is empty
*/
static inline bool
scmi_process_event_payload(struct events_queue *eq,
struct scmi_registered_events_desc *pd,
struct scmi_registered_event *r_evt)
{
u32 src_id, key;
unsigned int outs;
void *report = NULL;
outs = kfifo_out(&eq->kfifo, pd->eh->payld, pd->eh->payld_sz);
if (!outs)
return false;
/* Any in-flight event has now been officially processed */
pd->in_flight = NULL;
if (outs != pd->eh->payld_sz) {
dev_err(pd->ni->handle->dev, "corrupted EVT Payload. Flush.\n");
kfifo_reset_out(&eq->kfifo);
return false;
}
if (IS_ERR(r_evt)) {
dev_warn(pd->ni->handle->dev,
"SKIP UNKNOWN EVT - proto:%X evt:%d\n",
pd->id, pd->eh->evt_id);
return true;
}
report = REVT_FILL_REPORT(r_evt, pd->eh->evt_id, pd->eh->timestamp,
pd->eh->payld, pd->eh->payld_sz,
r_evt->report, &src_id);
if (!report) {
dev_err(pd->ni->handle->dev,
"report not available - proto:%X evt:%d\n",
pd->id, pd->eh->evt_id);
return true;
}
/* At first search for a generic ALL src_ids handler... */
key = MAKE_ALL_SRCS_KEY(pd->id, pd->eh->evt_id);
scmi_lookup_and_call_event_chain(pd->ni, key, report);
/* ...then search for any specific src_id */
key = MAKE_HASH_KEY(pd->id, pd->eh->evt_id, src_id);
scmi_lookup_and_call_event_chain(pd->ni, key, report);
return true;
}
/**
* scmi_events_dispatcher() - Common worker logic for all work items.
* @work: The work item to use, which is associated to a dedicated events_queue
*
* Logic:
* 1. dequeue one pending RX notification (queued in SCMI RX ISR context)
* 2. generate a custom event report from the received event message
* 3. lookup for any registered ALL_SRC_IDs handler:
* - > call the related notification chain passing in the report
* 4. lookup for any registered specific SRC_ID handler:
* - > call the related notification chain passing in the report
*
* Note that:
* * a dedicated per-protocol kfifo queue is used: in this way an anomalous
* flood of events cannot saturate other protocols' queues.
* * each per-protocol queue is associated to a distinct work_item, which
* means, in turn, that:
* + all protocols can process their dedicated queues concurrently
* (since notify_wq:max_active != 1)
* + in any case, at most one worker instance is allowed to run on the same queue
* concurrently: this ensures that we can have only one concurrent
* reader/writer on the associated kfifo, so that we can use it lock-less
*
* Context: Process context.
*/
static void scmi_events_dispatcher(struct work_struct *work)
{
struct events_queue *eq;
struct scmi_registered_events_desc *pd;
struct scmi_registered_event *r_evt;
eq = container_of(work, struct events_queue, notify_work);
pd = container_of(eq, struct scmi_registered_events_desc, equeue);
/*
* In order to keep the queue lock-less and the number of memcopies
* to the bare minimum needed, the dispatcher accounts for the
* possibility of per-protocol in-flight events: i.e. an event whose
* reception could end up being split across two subsequent runs of this
* worker, first the header, then the payload.
*/
do {
if (!pd->in_flight) {
r_evt = scmi_process_event_header(eq, pd);
if (!r_evt)
break;
pd->in_flight = r_evt;
} else {
r_evt = pd->in_flight;
}
} while (scmi_process_event_payload(eq, pd, r_evt));
}
/**
* scmi_notify() - Queues a notification for further deferred processing
* @handle: The handle identifying the platform instance from which the
* dispatched event is generated
* @proto_id: Protocol ID
* @evt_id: Event ID (msgID)
* @buf: Event Message Payload (without the header)
* @len: Event Message Payload size
* @ts: RX Timestamp in nanoseconds (boottime)
*
* Context: Called in interrupt context to queue a received event for
* deferred processing.
*
* Return: 0 on Success
*/
int scmi_notify(const struct scmi_handle *handle, u8 proto_id, u8 evt_id,
const void *buf, size_t len, ktime_t ts)
{
struct scmi_registered_event *r_evt;
struct scmi_event_header eh;
struct scmi_notify_instance *ni;
ni = scmi_notification_instance_data_get(handle);
if (!ni)
return 0;
r_evt = SCMI_GET_REVT(ni, proto_id, evt_id);
if (!r_evt)
return -EINVAL;
if (len > r_evt->evt->max_payld_sz) {
dev_err(handle->dev, "discard badly sized message\n");
return -EINVAL;
}
if (kfifo_avail(&r_evt->proto->equeue.kfifo) < sizeof(eh) + len) {
dev_warn(handle->dev,
"queue full, dropping proto_id:%d evt_id:%d ts:%lld\n",
proto_id, evt_id, ktime_to_ns(ts));
return -ENOMEM;
}
eh.timestamp = ts;
eh.evt_id = evt_id;
eh.payld_sz = len;
/*
* Header and payload are enqueued with two distinct kfifo_in() (so non
* atomic), but this situation is handled properly on the consumer side
* with in-flight events tracking.
*/
kfifo_in(&r_evt->proto->equeue.kfifo, &eh, sizeof(eh));
kfifo_in(&r_evt->proto->equeue.kfifo, buf, len);
/*
* Don't care about the return value here since we just want to ensure that
* a work item is queued every time some items have been pushed
* onto the kfifo:
* - if work was already queued it will simply fail to queue a new one
* since it is not needed
* - if work was not queued already it will be now, even in case work
* was in fact already running: this behavior avoids any possible race
* when this function pushes new items onto the kfifos after the
* related executing worker had already determined the kfifo to be
* empty and it was terminating.
*/
queue_work(r_evt->proto->equeue.wq,
&r_evt->proto->equeue.notify_work);
return 0;
}
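/*
 * Illustrative only (not part of this function's contract): the SCMI core RX
 * path is expected to call scmi_notify() along these lines once a
 * notification message has been fetched from the transport; the variable
 * names below are assumptions made for the sketch.
 *
 *	scmi_notify(cinfo->handle, xfer->hdr.protocol_id, xfer->hdr.id,
 *		    xfer->rx.buf, xfer->rx.len, ts);
 */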
/**
* scmi_kfifo_free() - Devres action helper to free the kfifo
* @kfifo: The kfifo to free
*/
static void scmi_kfifo_free(void *kfifo)
{
kfifo_free((struct kfifo *)kfifo);
}
/**
* scmi_initialize_events_queue() - Allocate/Initialize a kfifo buffer
* @ni: A reference to the notification instance to use
* @equeue: The events_queue to initialize
* @sz: Size of the kfifo buffer to allocate
*
* Allocate a buffer for the kfifo and initialize it.
*
* Return: 0 on Success
*/
static int scmi_initialize_events_queue(struct scmi_notify_instance *ni,
struct events_queue *equeue, size_t sz)
{
int ret;
if (kfifo_alloc(&equeue->kfifo, sz, GFP_KERNEL))
return -ENOMEM;
/* Size could have been rounded up to a power of two */
equeue->sz = kfifo_size(&equeue->kfifo);
ret = devm_add_action_or_reset(ni->handle->dev, scmi_kfifo_free,
&equeue->kfifo);
if (ret)
return ret;
INIT_WORK(&equeue->notify_work, scmi_events_dispatcher);
equeue->wq = ni->notify_wq;
return ret;
}
/**
* scmi_allocate_registered_events_desc() - Allocate a registered events'
* descriptor
* @ni: A reference to the &struct scmi_notify_instance notification instance
* to use
* @proto_id: Protocol ID
* @queue_sz: Size of the associated queue to allocate
* @eh_sz: Size of the event header scratch area to pre-allocate
* @num_events: Number of events to support (size of @registered_events)
* @ops: Pointer to a struct holding references to protocol specific helpers
* needed during events handling
*
* It is supposed to be called only once for each protocol at protocol
* initialization time, so it warns if the requested protocol is found already
* registered.
*
* Return: The allocated and registered descriptor on Success
*/
static struct scmi_registered_events_desc *
scmi_allocate_registered_events_desc(struct scmi_notify_instance *ni,
u8 proto_id, size_t queue_sz, size_t eh_sz,
int num_events,
const struct scmi_event_ops *ops)
{
int ret;
struct scmi_registered_events_desc *pd;
/* Ensure protocols are up to date */
smp_rmb();
if (WARN_ON(ni->registered_protocols[proto_id]))
return ERR_PTR(-EINVAL);
pd = devm_kzalloc(ni->handle->dev, sizeof(*pd), GFP_KERNEL);
if (!pd)
return ERR_PTR(-ENOMEM);
pd->id = proto_id;
pd->ops = ops;
pd->ni = ni;
ret = scmi_initialize_events_queue(ni, &pd->equeue, queue_sz);
if (ret)
return ERR_PTR(ret);
pd->eh = devm_kzalloc(ni->handle->dev, eh_sz, GFP_KERNEL);
if (!pd->eh)
return ERR_PTR(-ENOMEM);
pd->eh_sz = eh_sz;
pd->registered_events = devm_kcalloc(ni->handle->dev, num_events,
sizeof(char *), GFP_KERNEL);
if (!pd->registered_events)
return ERR_PTR(-ENOMEM);
pd->num_events = num_events;
/* Initialize per protocol handlers table */
mutex_init(&pd->registered_mtx);
hash_init(pd->registered_events_handlers);
return pd;
}
/**
* scmi_register_protocol_events() - Register Protocol Events with the core
* @handle: The handle identifying the platform instance against which the
* protocol's events are registered
* @proto_id: Protocol ID
* @ph: SCMI protocol handle.
* @ee: A structure describing the events supported by this protocol.
*
* Used by SCMI Protocols initialization code to register with the notification
* core the list of supported events and their descriptors: takes care to
* pre-allocate and store all needed descriptors, scratch buffers and event
* queues.
*
* Return: 0 on Success
*/
int scmi_register_protocol_events(const struct scmi_handle *handle, u8 proto_id,
const struct scmi_protocol_handle *ph,
const struct scmi_protocol_events *ee)
{
int i;
unsigned int num_sources;
size_t payld_sz = 0;
struct scmi_registered_events_desc *pd;
struct scmi_notify_instance *ni;
const struct scmi_event *evt;
if (!ee || !ee->ops || !ee->evts || !ph ||
(!ee->num_sources && !ee->ops->get_num_sources))
return -EINVAL;
ni = scmi_notification_instance_data_get(handle);
if (!ni)
return -ENOMEM;
/* num_sources cannot be <= 0 */
if (ee->num_sources) {
num_sources = ee->num_sources;
} else {
int nsrc = ee->ops->get_num_sources(ph);
if (nsrc <= 0)
return -EINVAL;
num_sources = nsrc;
}
evt = ee->evts;
for (i = 0; i < ee->num_events; i++)
payld_sz = max_t(size_t, payld_sz, evt[i].max_payld_sz);
payld_sz += sizeof(struct scmi_event_header);
pd = scmi_allocate_registered_events_desc(ni, proto_id, ee->queue_sz,
payld_sz, ee->num_events,
ee->ops);
if (IS_ERR(pd))
return PTR_ERR(pd);
pd->ph = ph;
for (i = 0; i < ee->num_events; i++, evt++) {
struct scmi_registered_event *r_evt;
r_evt = devm_kzalloc(ni->handle->dev, sizeof(*r_evt),
GFP_KERNEL);
if (!r_evt)
return -ENOMEM;
r_evt->proto = pd;
r_evt->evt = evt;
r_evt->sources = devm_kcalloc(ni->handle->dev, num_sources,
sizeof(refcount_t), GFP_KERNEL);
if (!r_evt->sources)
return -ENOMEM;
r_evt->num_sources = num_sources;
mutex_init(&r_evt->sources_mtx);
r_evt->report = devm_kzalloc(ni->handle->dev,
evt->max_report_sz, GFP_KERNEL);
if (!r_evt->report)
return -ENOMEM;
pd->registered_events[i] = r_evt;
/* Ensure events are updated */
smp_wmb();
dev_dbg(handle->dev, "registered event - %lX\n",
MAKE_ALL_SRCS_KEY(r_evt->proto->id, r_evt->evt->id));
}
/* Register protocol and events...it will never be removed */
ni->registered_protocols[proto_id] = pd;
/* Ensure protocols are updated */
smp_wmb();
/*
* Finalize any pending events' handler which could have been waiting
* for this protocol's events registration.
*/
schedule_work(&ni->init_work);
return 0;
}
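/*
 * Minimal sketch (illustrative, names and field values assumed) of what a
 * protocol passes in here; see sensor_events[] and sensor_protocol_events in
 * drivers/firmware/arm_scmi/sensors.c for a real in-tree example.
 *
 *	static const struct scmi_event my_events[] = {
 *		{
 *			.id = 0x0,
 *			.max_payld_sz = sizeof(struct my_notify_payld),
 *			.max_report_sz = sizeof(struct my_report),
 *		},
 *	};
 *
 *	static const struct scmi_protocol_events my_protocol_events = {
 *		.queue_sz = SCMI_PROTO_QUEUE_SZ,
 *		.ops = &my_event_ops,
 *		.evts = my_events,
 *		.num_events = ARRAY_SIZE(my_events),
 *	};
 */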
/**
* scmi_deregister_protocol_events - Deregister protocol events with the core
* @handle: The handle identifying the platform instance against which the
* protocol's events are registered
* @proto_id: Protocol ID
*/
void scmi_deregister_protocol_events(const struct scmi_handle *handle,
u8 proto_id)
{
struct scmi_notify_instance *ni;
struct scmi_registered_events_desc *pd;
ni = scmi_notification_instance_data_get(handle);
if (!ni)
return;
pd = ni->registered_protocols[proto_id];
if (!pd)
return;
ni->registered_protocols[proto_id] = NULL;
/* Ensure protocols are updated */
smp_wmb();
cancel_work_sync(&pd->equeue.notify_work);
}
/**
* scmi_allocate_event_handler() - Allocate Event handler
* @ni: A reference to the notification instance to use
* @evt_key: 32bit key uniquely bind to the event identified by the tuple
* (proto_id, evt_id, src_id)
*
* Allocate an event handler and related notification chain associated with
* the provided event handler key.
* Note that, at this point, a related registered_event is still to be
* associated to this handler descriptor (hndl->r_evt == NULL), so the handler
* is initialized as pending.
*
* Context: Assumes to be called with @pending_mtx already acquired.
* Return: the freshly allocated structure on Success
*/
static struct scmi_event_handler *
scmi_allocate_event_handler(struct scmi_notify_instance *ni, u32 evt_key)
{
struct scmi_event_handler *hndl;
hndl = kzalloc(sizeof(*hndl), GFP_KERNEL);
if (!hndl)
return NULL;
hndl->key = evt_key;
BLOCKING_INIT_NOTIFIER_HEAD(&hndl->chain);
refcount_set(&hndl->users, 1);
/* New handlers are created pending */
hash_add(ni->pending_events_handlers, &hndl->hash, hndl->key);
return hndl;
}
/**
* scmi_free_event_handler() - Free the provided Event handler
* @hndl: The event handler structure to free
*
* Context: Assumes to be called with proper locking acquired depending
* on the situation.
*/
static void scmi_free_event_handler(struct scmi_event_handler *hndl)
{
hash_del(&hndl->hash);
kfree(hndl);
}
/**
* scmi_bind_event_handler() - Helper to attempt binding a handler to an event
* @ni: A reference to the notification instance to use
* @hndl: The event handler to bind
*
* If an associated registered event is found, move the handler from the pending
* into the registered table.
*
* Context: Assumes to be called with @pending_mtx already acquired.
*
* Return: 0 on Success
*/
static inline int scmi_bind_event_handler(struct scmi_notify_instance *ni,
struct scmi_event_handler *hndl)
{
struct scmi_registered_event *r_evt;
r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(hndl->key),
KEY_XTRACT_EVT_ID(hndl->key));
if (!r_evt)
return -EINVAL;
/*
* Remove from pending and insert into registered while getting hold
* of protocol instance.
*/
hash_del(&hndl->hash);
/*
* Acquire protocols only for NON-pending handlers, so as NOT to trigger
* protocol initialization when a notifier is registered against a
* still-unregistered protocol: it would make little sense to force the
* initialization of protocols for which no SCMI driver user exists yet,
* since they wouldn't emit any event anyway until some SCMI driver
* starts using them.
*/
scmi_protocol_acquire(ni->handle, KEY_XTRACT_PROTO_ID(hndl->key));
hndl->r_evt = r_evt;
mutex_lock(&r_evt->proto->registered_mtx);
hash_add(r_evt->proto->registered_events_handlers,
&hndl->hash, hndl->key);
mutex_unlock(&r_evt->proto->registered_mtx);
return 0;
}
/**
* scmi_valid_pending_handler() - Helper to check pending status of handlers
* @ni: A reference to the notification instance to use
* @hndl: The event handler to check
*
* A handler is considered pending when its r_evt == NULL, because the related
* event was still unknown at handler's registration time; anyway, since all
* protocols register their supported events once and for all at protocol
* initialization time, a pending handler cannot be considered valid anymore if
* the underlying event (which it is waiting for) belongs to an already
* initialized and registered protocol.
*
* Return: 0 on Success
*/
static inline int scmi_valid_pending_handler(struct scmi_notify_instance *ni,
struct scmi_event_handler *hndl)
{
struct scmi_registered_events_desc *pd;
if (!IS_HNDL_PENDING(hndl))
return -EINVAL;
pd = SCMI_GET_PROTO(ni, KEY_XTRACT_PROTO_ID(hndl->key));
if (pd)
return -EINVAL;
return 0;
}
/**
* scmi_register_event_handler() - Register whenever possible an Event handler
* @ni: A reference to the notification instance to use
* @hndl: The event handler to register
*
* At first try to bind an event handler to its associated event, then check if
* it was at least a valid pending handler: if it was neither bound nor valid,
* return an error.
*
* Valid pending incomplete bindings will be periodically retried by a dedicated
* worker which is kicked each time a new protocol completes its own
* registration phase.
*
* Context: Assumes to be called with @pending_mtx acquired.
*
* Return: 0 on Success
*/
static int scmi_register_event_handler(struct scmi_notify_instance *ni,
struct scmi_event_handler *hndl)
{
int ret;
ret = scmi_bind_event_handler(ni, hndl);
if (!ret) {
dev_dbg(ni->handle->dev, "registered NEW handler - key:%X\n",
hndl->key);
} else {
ret = scmi_valid_pending_handler(ni, hndl);
if (!ret)
dev_dbg(ni->handle->dev,
"registered PENDING handler - key:%X\n",
hndl->key);
}
return ret;
}
/**
* __scmi_event_handler_get_ops() - Utility to get or create an event handler
* @ni: A reference to the notification instance to use
* @evt_key: The event key to use
* @create: A boolean flag to specify if a handler must be created when
* not already existent
*
* Search for the desired handler matching the key in both the per-protocol
* registered table and the common pending table:
* * if found adjust users refcount
* * if not found and @create is true, create and register the new handler:
* handler could end up being registered as pending if no matching event
* could be found.
*
* A handler is guaranteed to reside in one and only one of the tables at
* any one time; to ensure this the whole search and create is performed
* holding the @pending_mtx lock, with @registered_mtx additionally acquired
* if needed.
*
* Note that when a nested acquisition of these mutexes is needed the locking
* order is always (same as in @init_work):
* 1. pending_mtx
* 2. registered_mtx
*
* Events generation is NOT enabled right after creation within this routine
* since at creation time we usually want to have everything set up and ready
* before events really start flowing.
*
* Return: A properly refcounted handler on Success, NULL on Failure
*/
static inline struct scmi_event_handler *
__scmi_event_handler_get_ops(struct scmi_notify_instance *ni,
u32 evt_key, bool create)
{
struct scmi_registered_event *r_evt;
struct scmi_event_handler *hndl = NULL;
r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(evt_key),
KEY_XTRACT_EVT_ID(evt_key));
mutex_lock(&ni->pending_mtx);
/* Search registered events at first ... if possible at all */
if (r_evt) {
mutex_lock(&r_evt->proto->registered_mtx);
hndl = KEY_FIND(r_evt->proto->registered_events_handlers,
hndl, evt_key);
if (hndl)
refcount_inc(&hndl->users);
mutex_unlock(&r_evt->proto->registered_mtx);
}
/* ...then amongst pending. */
if (!hndl) {
hndl = KEY_FIND(ni->pending_events_handlers, hndl, evt_key);
if (hndl)
refcount_inc(&hndl->users);
}
/* Create if still not found and required */
if (!hndl && create) {
hndl = scmi_allocate_event_handler(ni, evt_key);
if (hndl && scmi_register_event_handler(ni, hndl)) {
dev_dbg(ni->handle->dev,
"purging UNKNOWN handler - key:%X\n",
hndl->key);
/* this hndl can be only a pending one */
scmi_put_handler_unlocked(ni, hndl);
hndl = NULL;
}
}
mutex_unlock(&ni->pending_mtx);
return hndl;
}
static struct scmi_event_handler *
scmi_get_handler(struct scmi_notify_instance *ni, u32 evt_key)
{
return __scmi_event_handler_get_ops(ni, evt_key, false);
}
static struct scmi_event_handler *
scmi_get_or_create_handler(struct scmi_notify_instance *ni, u32 evt_key)
{
return __scmi_event_handler_get_ops(ni, evt_key, true);
}
/**
* scmi_get_active_handler() - Helper to get active handlers only
* @ni: A reference to the notification instance to use
* @evt_key: The event key to use
*
* Search for the desired handler matching the key only in the per-protocol
* table of registered handlers: this is called only from the dispatching path,
* so we want to be as quick as possible and do not care about pending handlers.
*
* Return: A properly refcounted active handler
*/
static struct scmi_event_handler *
scmi_get_active_handler(struct scmi_notify_instance *ni, u32 evt_key)
{
struct scmi_registered_event *r_evt;
struct scmi_event_handler *hndl = NULL;
r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(evt_key),
KEY_XTRACT_EVT_ID(evt_key));
if (r_evt) {
mutex_lock(&r_evt->proto->registered_mtx);
hndl = KEY_FIND(r_evt->proto->registered_events_handlers,
hndl, evt_key);
if (hndl)
refcount_inc(&hndl->users);
mutex_unlock(&r_evt->proto->registered_mtx);
}
return hndl;
}
/**
* __scmi_enable_evt() - Enable/disable events generation
* @r_evt: The registered event to act upon
* @src_id: The src_id to act upon
* @enable: The action to perform: true->Enable, false->Disable
*
* Takes care of proper refcounting while performing enable/disable: handles
* the special case of ALL sources requests by itself.
* Returns successfully if at least one of the required src_id has been
* successfully enabled/disabled.
*
* Return: 0 on Success
*/
static inline int __scmi_enable_evt(struct scmi_registered_event *r_evt,
u32 src_id, bool enable)
{
int retvals = 0;
u32 num_sources;
refcount_t *sid;
if (src_id == SRC_ID_MASK) {
src_id = 0;
num_sources = r_evt->num_sources;
} else if (src_id < r_evt->num_sources) {
num_sources = 1;
} else {
return -EINVAL;
}
mutex_lock(&r_evt->sources_mtx);
if (enable) {
for (; num_sources; src_id++, num_sources--) {
int ret = 0;
sid = &r_evt->sources[src_id];
if (refcount_read(sid) == 0) {
ret = REVT_NOTIFY_ENABLE(r_evt, r_evt->evt->id,
src_id);
if (!ret)
refcount_set(sid, 1);
} else {
refcount_inc(sid);
}
retvals += !ret;
}
} else {
for (; num_sources; src_id++, num_sources--) {
sid = &r_evt->sources[src_id];
if (refcount_dec_and_test(sid))
REVT_NOTIFY_DISABLE(r_evt,
r_evt->evt->id, src_id);
}
retvals = 1;
}
mutex_unlock(&r_evt->sources_mtx);
return retvals ? 0 : -EINVAL;
}
static int scmi_enable_events(struct scmi_event_handler *hndl)
{
int ret = 0;
if (!hndl->enabled) {
ret = __scmi_enable_evt(hndl->r_evt,
KEY_XTRACT_SRC_ID(hndl->key), true);
if (!ret)
hndl->enabled = true;
}
return ret;
}
static int scmi_disable_events(struct scmi_event_handler *hndl)
{
int ret = 0;
if (hndl->enabled) {
ret = __scmi_enable_evt(hndl->r_evt,
KEY_XTRACT_SRC_ID(hndl->key), false);
if (!ret)
hndl->enabled = false;
}
return ret;
}
/**
* scmi_put_handler_unlocked() - Put an event handler
* @ni: A reference to the notification instance to use
* @hndl: The event handler to act upon
*
* After having got exclusive access to the registered handlers hashtable,
 * update the refcount and, if @hndl is no longer in use by anyone:
* * ask for events' generation disabling
* * unregister and free the handler itself
*
* Context: Assumes all the proper locking has been managed by the caller.
*
* Return: True if handler was freed (users dropped to zero)
*/
static bool scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
struct scmi_event_handler *hndl)
{
bool freed = false;
if (refcount_dec_and_test(&hndl->users)) {
if (!IS_HNDL_PENDING(hndl))
scmi_disable_events(hndl);
scmi_free_event_handler(hndl);
freed = true;
}
return freed;
}
static void scmi_put_handler(struct scmi_notify_instance *ni,
struct scmi_event_handler *hndl)
{
bool freed;
u8 protocol_id;
struct scmi_registered_event *r_evt = hndl->r_evt;
mutex_lock(&ni->pending_mtx);
if (r_evt) {
protocol_id = r_evt->proto->id;
mutex_lock(&r_evt->proto->registered_mtx);
}
freed = scmi_put_handler_unlocked(ni, hndl);
if (r_evt) {
mutex_unlock(&r_evt->proto->registered_mtx);
/*
 * Only registered handlers hold a protocol reference; it must be
 * released only AFTER unlocking registered_mtx, since releasing a
 * protocol can trigger its de-initialization (i.e. including r_evt
 * and registered_mtx themselves).
*/
if (freed)
scmi_protocol_release(ni->handle, protocol_id);
}
mutex_unlock(&ni->pending_mtx);
}
static void scmi_put_active_handler(struct scmi_notify_instance *ni,
struct scmi_event_handler *hndl)
{
bool freed;
struct scmi_registered_event *r_evt = hndl->r_evt;
u8 protocol_id = r_evt->proto->id;
mutex_lock(&r_evt->proto->registered_mtx);
freed = scmi_put_handler_unlocked(ni, hndl);
mutex_unlock(&r_evt->proto->registered_mtx);
if (freed)
scmi_protocol_release(ni->handle, protocol_id);
}
/**
 * scmi_event_handler_enable_events() - Enable events associated with a handler
* @hndl: The Event handler to act upon
*
* Return: 0 on Success
*/
static int scmi_event_handler_enable_events(struct scmi_event_handler *hndl)
{
if (scmi_enable_events(hndl)) {
pr_err("Failed to ENABLE events for key:%X !\n", hndl->key);
return -EINVAL;
}
return 0;
}
/**
* scmi_notifier_register() - Register a notifier_block for an event
* @handle: The handle identifying the platform instance against which the
* callback is registered
* @proto_id: Protocol ID
* @evt_id: Event ID
 * @src_id: Source ID, when NULL register for events coming from ALL possible
* sources
* @nb: A standard notifier block to register for the specified event
*
* Generic helper to register a notifier_block against a protocol event.
*
* A notifier_block @nb will be registered for each distinct event identified
* by the tuple (proto_id, evt_id, src_id) on a dedicated notification chain
* so that:
*
* (proto_X, evt_Y, src_Z) --> chain_X_Y_Z
*
* @src_id meaning is protocol specific and identifies the origin of the event
* (like domain_id, sensor_id and so forth).
*
* @src_id can be NULL to signify that the caller is interested in receiving
* notifications from ALL the available sources for that protocol OR simply that
* the protocol does not support distinct sources.
*
 * As soon as one user for the specified tuple appears, a handler is created,
 * and that specific event's generation is enabled at the platform level, unless
 * the associated registered event is found to be missing, meaning that the
 * needed protocol is still to be initialized and the handler has just been
 * registered as pending.
*
* Return: 0 on Success
*/
static int scmi_notifier_register(const struct scmi_handle *handle,
u8 proto_id, u8 evt_id, const u32 *src_id,
struct notifier_block *nb)
{
int ret = 0;
u32 evt_key;
struct scmi_event_handler *hndl;
struct scmi_notify_instance *ni;
ni = scmi_notification_instance_data_get(handle);
if (!ni)
return -ENODEV;
evt_key = MAKE_HASH_KEY(proto_id, evt_id,
src_id ? *src_id : SRC_ID_MASK);
hndl = scmi_get_or_create_handler(ni, evt_key);
if (!hndl)
return -EINVAL;
blocking_notifier_chain_register(&hndl->chain, nb);
/* Enable events for not pending handlers */
if (!IS_HNDL_PENDING(hndl)) {
ret = scmi_event_handler_enable_events(hndl);
if (ret)
scmi_put_handler(ni, hndl);
}
return ret;
}
/**
* scmi_notifier_unregister() - Unregister a notifier_block for an event
* @handle: The handle identifying the platform instance against which the
* callback is unregistered
* @proto_id: Protocol ID
* @evt_id: Event ID
* @src_id: Source ID
* @nb: The notifier_block to unregister
*
* Takes care to unregister the provided @nb from the notification chain
 * associated with the specified event and, if there are no more users for the
 * event handler, also frees the associated event handler structures.
 * (This could possibly cause the event's generation to be disabled at the
 * platform level.)
*
* Return: 0 on Success
*/
static int scmi_notifier_unregister(const struct scmi_handle *handle,
u8 proto_id, u8 evt_id, const u32 *src_id,
struct notifier_block *nb)
{
u32 evt_key;
struct scmi_event_handler *hndl;
struct scmi_notify_instance *ni;
ni = scmi_notification_instance_data_get(handle);
if (!ni)
return -ENODEV;
evt_key = MAKE_HASH_KEY(proto_id, evt_id,
src_id ? *src_id : SRC_ID_MASK);
hndl = scmi_get_handler(ni, evt_key);
if (!hndl)
return -EINVAL;
/*
 * Note that this chain unregistration call is safe on its own,
 * being internally protected by an rwsem.
*/
blocking_notifier_chain_unregister(&hndl->chain, nb);
scmi_put_handler(ni, hndl);
/*
* This balances the initial get issued in @scmi_notifier_register.
* If this notifier_block happened to be the last known user callback
* for this event, the handler is here freed and the event's generation
* stopped.
*
* Note that, an ongoing concurrent lookup on the delivery workqueue
* path could still hold the refcount to 1 even after this routine
* completes: in such a case it will be the final put on the delivery
* path which will finally free this unused handler.
*/
scmi_put_handler(ni, hndl);
return 0;
}
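/*
 * Refcount walkthrough of the double put above (assuming, as is not shown
 * here, that a freshly created handler starts with a single reference held
 * on behalf of the registered notifier chain): scmi_get_handler() bumps
 * users 1 -> 2, the first scmi_put_handler() drops it back 2 -> 1 balancing
 * that lookup, and the second one drops 1 -> 0 balancing the reference taken
 * at registration time, which triggers the actual disable/free, unless a
 * concurrent delivery-path lookup is still holding its own temporary
 * reference.
 */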
struct scmi_notifier_devres {
const struct scmi_handle *handle;
u8 proto_id;
u8 evt_id;
u32 __src_id;
u32 *src_id;
struct notifier_block *nb;
};
static void scmi_devm_release_notifier(struct device *dev, void *res)
{
struct scmi_notifier_devres *dres = res;
scmi_notifier_unregister(dres->handle, dres->proto_id, dres->evt_id,
dres->src_id, dres->nb);
}
/**
* scmi_devm_notifier_register() - Managed registration of a notifier_block
* for an event
* @sdev: A reference to an scmi_device whose embedded struct device is to
* be used for devres accounting.
* @proto_id: Protocol ID
* @evt_id: Event ID
 * @src_id: Source ID, when NULL register for events coming from ALL possible
* sources
* @nb: A standard notifier block to register for the specified event
*
* Generic devres managed helper to register a notifier_block against a
* protocol event.
*
* Return: 0 on Success
*/
static int scmi_devm_notifier_register(struct scmi_device *sdev,
u8 proto_id, u8 evt_id,
const u32 *src_id,
struct notifier_block *nb)
{
int ret;
struct scmi_notifier_devres *dres;
dres = devres_alloc(scmi_devm_release_notifier,
sizeof(*dres), GFP_KERNEL);
if (!dres)
return -ENOMEM;
ret = scmi_notifier_register(sdev->handle, proto_id,
evt_id, src_id, nb);
if (ret) {
devres_free(dres);
return ret;
}
dres->handle = sdev->handle;
dres->proto_id = proto_id;
dres->evt_id = evt_id;
dres->nb = nb;
if (src_id) {
dres->__src_id = *src_id;
dres->src_id = &dres->__src_id;
} else {
dres->src_id = NULL;
}
devres_add(&sdev->dev, dres);
return ret;
}
static int scmi_devm_notifier_match(struct device *dev, void *res, void *data)
{
struct scmi_notifier_devres *dres = res;
struct scmi_notifier_devres *xres = data;
if (WARN_ON(!dres || !xres))
return 0;
return dres->proto_id == xres->proto_id &&
dres->evt_id == xres->evt_id &&
dres->nb == xres->nb &&
((!dres->src_id && !xres->src_id) ||
(dres->src_id && xres->src_id &&
dres->__src_id == xres->__src_id));
}
/**
* scmi_devm_notifier_unregister() - Managed un-registration of a
* notifier_block for an event
* @sdev: A reference to an scmi_device whose embedded struct device is to
* be used for devres accounting.
* @proto_id: Protocol ID
* @evt_id: Event ID
 * @src_id: Source ID, when NULL unregister for events coming from ALL possible
 * sources
 * @nb: The standard notifier block previously registered for the specified event
*
* Generic devres managed helper to explicitly un-register a notifier_block
* against a protocol event, which was previously registered using the above
* @scmi_devm_notifier_register.
*
* Return: 0 on Success
*/
static int scmi_devm_notifier_unregister(struct scmi_device *sdev,
u8 proto_id, u8 evt_id,
const u32 *src_id,
struct notifier_block *nb)
{
int ret;
struct scmi_notifier_devres dres;
dres.handle = sdev->handle;
dres.proto_id = proto_id;
dres.evt_id = evt_id;
if (src_id) {
dres.__src_id = *src_id;
dres.src_id = &dres.__src_id;
} else {
dres.src_id = NULL;
}
ret = devres_release(&sdev->dev, scmi_devm_release_notifier,
scmi_devm_notifier_match, &dres);
WARN_ON(ret);
return ret;
}
/**
* scmi_protocols_late_init() - Worker for late initialization
* @work: The work item to use associated to the proper SCMI instance
*
* This kicks in whenever a new protocol has completed its own registration via
* scmi_register_protocol_events(): it is in charge of scanning the table of
* pending handlers (registered by users while the related protocol was still
* not initialized) and finalizing their initialization whenever possible;
* invalid pending handlers are purged at this point in time.
*/
static void scmi_protocols_late_init(struct work_struct *work)
{
int bkt;
struct scmi_event_handler *hndl;
struct scmi_notify_instance *ni;
struct hlist_node *tmp;
ni = container_of(work, struct scmi_notify_instance, init_work);
/* Ensure protocols and events are up to date */
smp_rmb();
mutex_lock(&ni->pending_mtx);
hash_for_each_safe(ni->pending_events_handlers, bkt, tmp, hndl, hash) {
int ret;
ret = scmi_bind_event_handler(ni, hndl);
if (!ret) {
dev_dbg(ni->handle->dev,
"finalized PENDING handler - key:%X\n",
hndl->key);
ret = scmi_event_handler_enable_events(hndl);
if (ret) {
dev_dbg(ni->handle->dev,
"purging INVALID handler - key:%X\n",
hndl->key);
scmi_put_active_handler(ni, hndl);
}
} else {
ret = scmi_valid_pending_handler(ni, hndl);
if (ret) {
dev_dbg(ni->handle->dev,
"purging PENDING handler - key:%X\n",
hndl->key);
/* this hndl can be only a pending one */
scmi_put_handler_unlocked(ni, hndl);
}
}
}
mutex_unlock(&ni->pending_mtx);
}
/*
* notify_ops are attached to the handle so that can be accessed
* directly from an scmi_driver to register its own notifiers.
*/
static const struct scmi_notify_ops notify_ops = {
.devm_event_notifier_register = scmi_devm_notifier_register,
.devm_event_notifier_unregister = scmi_devm_notifier_unregister,
.event_notifier_register = scmi_notifier_register,
.event_notifier_unregister = scmi_notifier_unregister,
};
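/*
 * Example usage from an SCMI driver (hypothetical client, for illustration
 * only): MY_PROTO_ID and MY_EVT_ID are placeholders and the report type
 * delivered in @data is protocol specific. The devres-managed variant is used
 * so that unregistration happens automatically on driver removal.
 *
 *	static int my_evt_cb(struct notifier_block *nb, unsigned long event,
 *			     void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_evt_cb,
 *	};
 *
 *	static int my_probe(struct scmi_device *sdev)
 *	{
 *		const struct scmi_notify_ops *nops = sdev->handle->notify_ops;
 *		u32 src_id = 0;
 *
 *		return nops->devm_event_notifier_register(sdev, MY_PROTO_ID,
 *							   MY_EVT_ID, &src_id,
 *							   &my_nb);
 *	}
 */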
/**
* scmi_notification_init() - Initializes Notification Core Support
* @handle: The handle identifying the platform instance to initialize
*
* This function lays out all the basic resources needed by the notification
* core instance identified by the provided handle: once done, all of the
* SCMI Protocols can register their events with the core during their own
* initializations.
*
* Note that failing to initialize the core notifications support does not
* cause the whole SCMI Protocols stack to fail its initialization.
*
* SCMI Notification Initialization happens in 2 steps:
* * initialization: basic common allocations (this function)
* * registration: protocols asynchronously come into life and registers their
* own supported list of events with the core; this causes
* further per-protocol allocations
*
 * Any user's callback registration attempt referring to a still unregistered
 * event will be registered as pending and finalized later (if possible)
* by scmi_protocols_late_init() work.
* This allows for lazy initialization of SCMI Protocols due to late (or
* missing) SCMI drivers' modules loading.
*
* Return: 0 on Success
*/
int scmi_notification_init(struct scmi_handle *handle)
{
void *gid;
struct scmi_notify_instance *ni;
gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
if (!gid)
return -ENOMEM;
ni = devm_kzalloc(handle->dev, sizeof(*ni), GFP_KERNEL);
if (!ni)
goto err;
ni->gid = gid;
ni->handle = handle;
ni->registered_protocols = devm_kcalloc(handle->dev, SCMI_MAX_PROTO,
sizeof(char *), GFP_KERNEL);
if (!ni->registered_protocols)
goto err;
ni->notify_wq = alloc_workqueue(dev_name(handle->dev),
WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
0);
if (!ni->notify_wq)
goto err;
mutex_init(&ni->pending_mtx);
hash_init(ni->pending_events_handlers);
INIT_WORK(&ni->init_work, scmi_protocols_late_init);
scmi_notification_instance_data_set(handle, ni);
handle->notify_ops = &notify_ops;
/* Ensure handle is up to date */
smp_wmb();
dev_info(handle->dev, "Core Enabled.\n");
devres_close_group(handle->dev, ni->gid);
return 0;
err:
dev_warn(handle->dev, "Initialization Failed.\n");
devres_release_group(handle->dev, gid);
return -ENOMEM;
}
/**
* scmi_notification_exit() - Shutdown and clean Notification core
* @handle: The handle identifying the platform instance to shutdown
*/
void scmi_notification_exit(struct scmi_handle *handle)
{
struct scmi_notify_instance *ni;
ni = scmi_notification_instance_data_get(handle);
if (!ni)
return;
scmi_notification_instance_data_set(handle, NULL);
/* Destroy while letting pending work complete */
destroy_workqueue(ni->notify_wq);
devres_release_group(ni->handle->dev, ni->gid);
}
| linux-master | drivers/firmware/arm_scmi/notify.c |
// SPDX-License-Identifier: GPL-2.0
/*
* System Control and Management Interface (SCMI) Message Protocol driver
*
* SCMI Message Protocol is used between the System Control Processor(SCP)
* and the Application Processors(AP). The Message Handling Unit(MHU)
* provides a mechanism for inter-processor communication between SCP's
* Cortex M3 and AP.
*
* SCP offers control and management of the core/cluster power states,
* various power domain DVFS including the core/cluster, certain system
* clocks configuration, thermal sensors and many others.
*
* Copyright (C) 2018-2021 ARM Ltd.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/hashtable.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/processor.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include "common.h"
#include "notify.h"
#include "raw_mode.h"
#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>
static DEFINE_IDA(scmi_id);
static DEFINE_IDR(scmi_protocols);
static DEFINE_SPINLOCK(protocol_lock);
/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id for the transfers for debug & profiling purpose */
static atomic_t transfer_last_id;
static struct dentry *scmi_top_dentry;
/**
* struct scmi_xfers_info - Structure to manage transfer information
*
* @xfer_alloc_table: Bitmap table for allocated messages.
* Index of this bitmap table is also used for message
* sequence identifier.
* @xfer_lock: Protection for message allocation
* @max_msg: Maximum number of messages that can be pending
 * @free_xfers: A free list of xfers available for use. It is initialized with
 *		a number of xfers equal to the maximum allowed in-flight
 *		messages.
 * @pending_xfers: A hashtable, indexed by msg_hdr.seq, used to keep all the
* currently in-flight messages.
*/
struct scmi_xfers_info {
unsigned long *xfer_alloc_table;
spinlock_t xfer_lock;
int max_msg;
struct hlist_head free_xfers;
DECLARE_HASHTABLE(pending_xfers, SCMI_PENDING_XFERS_HT_ORDER_SZ);
};
/**
* struct scmi_protocol_instance - Describe an initialized protocol instance.
* @handle: Reference to the SCMI handle associated to this protocol instance.
* @proto: A reference to the protocol descriptor.
* @gid: A reference for per-protocol devres management.
* @users: A refcount to track effective users of this protocol.
* @priv: Reference for optional protocol private data.
* @ph: An embedded protocol handle that will be passed down to protocol
* initialization code to identify this instance.
*
 * Each protocol is initialized independently once for each SCMI platform in
 * which it is defined by the DT and implemented by the SCMI server fw.
*/
struct scmi_protocol_instance {
const struct scmi_handle *handle;
const struct scmi_protocol *proto;
void *gid;
refcount_t users;
void *priv;
struct scmi_protocol_handle ph;
};
#define ph_to_pi(h) container_of(h, struct scmi_protocol_instance, ph)
/**
* struct scmi_debug_info - Debug common info
* @top_dentry: A reference to the top debugfs dentry
* @name: Name of this SCMI instance
* @type: Type of this SCMI instance
* @is_atomic: Flag to state if the transport of this instance is atomic
*/
struct scmi_debug_info {
struct dentry *top_dentry;
const char *name;
const char *type;
bool is_atomic;
};
/**
* struct scmi_info - Structure representing a SCMI instance
*
* @id: A sequence number starting from zero identifying this instance
* @dev: Device pointer
* @desc: SoC description for this instance
* @version: SCMI revision information containing protocol version,
* implementation version and (sub-)vendor identification.
* @handle: Instance of SCMI handle to send to clients
* @tx_minfo: Universal Transmit Message management info
* @rx_minfo: Universal Receive Message management info
* @tx_idr: IDR object to map protocol id to Tx channel info pointer
* @rx_idr: IDR object to map protocol id to Rx channel info pointer
* @protocols: IDR for protocols' instance descriptors initialized for
* this SCMI instance: populated on protocol's first attempted
* usage.
* @protocols_mtx: A mutex to protect protocols instances initialization.
* @protocols_imp: List of protocols implemented, currently maximum of
* scmi_revision_info.num_protocols elements allocated by the
* base protocol
* @active_protocols: IDR storing device_nodes for protocols actually defined
* in the DT and confirmed as implemented by fw.
* @atomic_threshold: Optional system wide DT-configured threshold, expressed
* in microseconds, for atomic operations.
 * Only SCMI synchronous commands reported by the platform
 * to have an execution latency less than or equal to the
 * threshold should be considered for atomic mode operation:
 * the final decision is left up to the SCMI drivers.
* @notify_priv: Pointer to private data structure specific to notifications.
* @node: List head
* @users: Number of users of this instance
* @bus_nb: A notifier to listen for device bind/unbind on the scmi bus
* @dev_req_nb: A notifier to listen for device request/unrequest on the scmi
* bus
* @devreq_mtx: A mutex to serialize device creation for this SCMI instance
* @dbg: A pointer to debugfs related data (if any)
* @raw: An opaque reference handle used by SCMI Raw mode.
*/
struct scmi_info {
int id;
struct device *dev;
const struct scmi_desc *desc;
struct scmi_revision_info version;
struct scmi_handle handle;
struct scmi_xfers_info tx_minfo;
struct scmi_xfers_info rx_minfo;
struct idr tx_idr;
struct idr rx_idr;
struct idr protocols;
/* Ensure mutual exclusive access to protocols instance array */
struct mutex protocols_mtx;
u8 *protocols_imp;
struct idr active_protocols;
unsigned int atomic_threshold;
void *notify_priv;
struct list_head node;
int users;
struct notifier_block bus_nb;
struct notifier_block dev_req_nb;
/* Serialize device creation process for this instance */
struct mutex devreq_mtx;
struct scmi_debug_info *dbg;
void *raw;
};
#define handle_to_scmi_info(h) container_of(h, struct scmi_info, handle)
#define bus_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, bus_nb)
#define req_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, dev_req_nb)
static const struct scmi_protocol *scmi_protocol_get(int protocol_id)
{
const struct scmi_protocol *proto;
proto = idr_find(&scmi_protocols, protocol_id);
if (!proto || !try_module_get(proto->owner)) {
pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id);
return NULL;
}
pr_debug("Found SCMI Protocol 0x%x\n", protocol_id);
return proto;
}
static void scmi_protocol_put(int protocol_id)
{
const struct scmi_protocol *proto;
proto = idr_find(&scmi_protocols, protocol_id);
if (proto)
module_put(proto->owner);
}
int scmi_protocol_register(const struct scmi_protocol *proto)
{
int ret;
if (!proto) {
pr_err("invalid protocol\n");
return -EINVAL;
}
if (!proto->instance_init) {
pr_err("missing init for protocol 0x%x\n", proto->id);
return -EINVAL;
}
spin_lock(&protocol_lock);
ret = idr_alloc(&scmi_protocols, (void *)proto,
proto->id, proto->id + 1, GFP_ATOMIC);
spin_unlock(&protocol_lock);
if (ret != proto->id) {
pr_err("unable to allocate SCMI idr slot for 0x%x - err %d\n",
proto->id, ret);
return ret;
}
pr_debug("Registered SCMI Protocol 0x%x\n", proto->id);
return 0;
}
EXPORT_SYMBOL_GPL(scmi_protocol_register);
void scmi_protocol_unregister(const struct scmi_protocol *proto)
{
spin_lock(&protocol_lock);
idr_remove(&scmi_protocols, proto->id);
spin_unlock(&protocol_lock);
pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id);
}
EXPORT_SYMBOL_GPL(scmi_protocol_unregister);
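/*
 * Sketch of how a protocol implementation plugs into the two registration
 * helpers above: 0x99 is just a placeholder protocol ID and the init callback
 * prototype is assumed from how it is invoked elsewhere in this stack, so
 * treat this purely as an illustration.
 *
 *	static int my_proto_init(const struct scmi_protocol_handle *ph)
 *	{
 *		u32 version;
 *
 *		return ph->xops->version_get(ph, &version);
 *	}
 *
 *	static const struct scmi_protocol my_proto = {
 *		.id = 0x99,
 *		.owner = THIS_MODULE,
 *		.instance_init = my_proto_init,
 *	};
 *
 *	scmi_protocol_register(&my_proto);	from module init
 *	scmi_protocol_unregister(&my_proto);	from module exit
 */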
/**
* scmi_create_protocol_devices - Create devices for all pending requests for
* this SCMI instance.
*
* @np: The device node describing the protocol
* @info: The SCMI instance descriptor
* @prot_id: The protocol ID
* @name: The optional name of the device to be created: if not provided this
* call will lead to the creation of all the devices currently requested
* for the specified protocol.
*/
static void scmi_create_protocol_devices(struct device_node *np,
struct scmi_info *info,
int prot_id, const char *name)
{
struct scmi_device *sdev;
mutex_lock(&info->devreq_mtx);
sdev = scmi_device_create(np, info->dev, prot_id, name);
if (name && !sdev)
dev_err(info->dev,
"failed to create device for protocol 0x%X (%s)\n",
prot_id, name);
mutex_unlock(&info->devreq_mtx);
}
static void scmi_destroy_protocol_devices(struct scmi_info *info,
int prot_id, const char *name)
{
mutex_lock(&info->devreq_mtx);
scmi_device_destroy(info->dev, prot_id, name);
mutex_unlock(&info->devreq_mtx);
}
void scmi_notification_instance_data_set(const struct scmi_handle *handle,
void *priv)
{
struct scmi_info *info = handle_to_scmi_info(handle);
info->notify_priv = priv;
/* Ensure updated protocol private data is visible */
smp_wmb();
}
void *scmi_notification_instance_data_get(const struct scmi_handle *handle)
{
struct scmi_info *info = handle_to_scmi_info(handle);
/* Ensure protocols_private_data has been updated */
smp_rmb();
return info->notify_priv;
}
/**
* scmi_xfer_token_set - Reserve and set new token for the xfer at hand
*
* @minfo: Pointer to Tx/Rx Message management info based on channel type
* @xfer: The xfer to act upon
*
* Pick the next unused monotonically increasing token and set it into
* xfer->hdr.seq: picking a monotonically increasing value avoids immediate
* reuse of freshly completed or timed-out xfers, thus mitigating the risk
* of incorrect association of a late and expired xfer with a live in-flight
* transaction, both happening to re-use the same token identifier.
*
 * Since the platform is NOT required to answer our request in order, we should
 * account for a few rare but possible scenarios:
 *
 *  - exactly 'next_token' may NOT be available, so pick xfer_id >= next_token
 *    using find_next_zero_bit() starting from the candidate next_token bit
 *
 *  - all tokens ahead up to (MSG_TOKEN_ID_MASK - 1) are used in-flight but
 *    there are plenty of free tokens at the start, so try a second pass using
 *    find_next_zero_bit() starting from 0.
*
* X = used in-flight
*
* Normal
* ------
*
* |- xfer_id picked
* -----------+----------------------------------------------------------
* | | |X|X|X| | | | | | ... ... ... ... ... ... ... ... ... ... ...|X|X|
* ----------------------------------------------------------------------
* ^
* |- next_token
*
* Out-of-order pending at start
* -----------------------------
*
* |- xfer_id picked, last_token fixed
* -----+----------------------------------------------------------------
* |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... ... ...|X| |
* ----------------------------------------------------------------------
* ^
* |- next_token
*
*
* Out-of-order pending at end
* ---------------------------
*
* |- xfer_id picked, last_token fixed
* -----+----------------------------------------------------------------
* |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... |X|X|X||X|X|
* ----------------------------------------------------------------------
* ^
* |- next_token
*
* Context: Assumes to be called with @xfer_lock already acquired.
*
* Return: 0 on Success or error
*/
static int scmi_xfer_token_set(struct scmi_xfers_info *minfo,
struct scmi_xfer *xfer)
{
unsigned long xfer_id, next_token;
/*
* Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1]
* using the pre-allocated transfer_id as a base.
* Note that the global transfer_id is shared across all message types
* so there could be holes in the allocated set of monotonic sequence
* numbers, but that is going to limit the effectiveness of the
* mitigation only in very rare limit conditions.
*/
next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1));
/* Pick the next available xfer_id >= next_token */
xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
MSG_TOKEN_MAX, next_token);
if (xfer_id == MSG_TOKEN_MAX) {
/*
* After heavily out-of-order responses, there are no free
* tokens ahead, but only at start of xfer_alloc_table so
* try again from the beginning.
*/
xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
MSG_TOKEN_MAX, 0);
/*
* Something is wrong if we got here since there can be a
* maximum number of (MSG_TOKEN_MAX - 1) in-flight messages
* but we have not found any free token [0, MSG_TOKEN_MAX - 1].
*/
if (WARN_ON_ONCE(xfer_id == MSG_TOKEN_MAX))
return -ENOMEM;
}
/* Update +/- last_token accordingly if we skipped some hole */
if (xfer_id != next_token)
atomic_add((int)(xfer_id - next_token), &transfer_last_id);
xfer->hdr.seq = (u16)xfer_id;
return 0;
}
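/*
 * Worked example of the two-pass pick above (assuming the usual 10-bit SCMI
 * token space, i.e. MSG_TOKEN_MAX == 1024): with xfer->transfer_id == 5000
 * the candidate is next_token = 5000 & 1023 = 904; if bits 904..1023 all
 * happen to be busy, the search wraps and uses the first free bit found from
 * 0 onwards, and transfer_last_id is advanced by the number of slots skipped
 * so that subsequent candidates keep growing monotonically.
 */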
/**
* scmi_xfer_token_clear - Release the token
*
* @minfo: Pointer to Tx/Rx Message management info based on channel type
* @xfer: The xfer to act upon
*/
static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo,
struct scmi_xfer *xfer)
{
clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
}
/**
* scmi_xfer_inflight_register_unlocked - Register the xfer as in-flight
*
* @xfer: The xfer to register
* @minfo: Pointer to Tx/Rx Message management info based on channel type
*
* Note that this helper assumes that the xfer to be registered as in-flight
* had been built using an xfer sequence number which still corresponds to a
* free slot in the xfer_alloc_table.
*
* Context: Assumes to be called with @xfer_lock already acquired.
*/
static inline void
scmi_xfer_inflight_register_unlocked(struct scmi_xfer *xfer,
struct scmi_xfers_info *minfo)
{
/* Set in-flight */
set_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq);
xfer->pending = true;
}
/**
* scmi_xfer_inflight_register - Try to register an xfer as in-flight
*
* @xfer: The xfer to register
* @minfo: Pointer to Tx/Rx Message management info based on channel type
*
* Note that this helper does NOT assume anything about the sequence number
* that was baked into the provided xfer, so it checks at first if it can
* be mapped to a free slot and fails with an error if another xfer with the
* same sequence number is currently still registered as in-flight.
*
 * Return: 0 on Success or -EBUSY if the sequence number embedded in the xfer
 * could not be mapped to a free slot in the xfer_alloc_table.
*/
static int scmi_xfer_inflight_register(struct scmi_xfer *xfer,
struct scmi_xfers_info *minfo)
{
int ret = 0;
unsigned long flags;
spin_lock_irqsave(&minfo->xfer_lock, flags);
if (!test_bit(xfer->hdr.seq, minfo->xfer_alloc_table))
scmi_xfer_inflight_register_unlocked(xfer, minfo);
else
ret = -EBUSY;
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
return ret;
}
/**
 * scmi_xfer_raw_inflight_register - A helper to register the given xfer as
 * in-flight on the TX channel, if possible.
*
* @handle: Pointer to SCMI entity handle
* @xfer: The xfer to register
*
* Return: 0 on Success, error otherwise
*/
int scmi_xfer_raw_inflight_register(const struct scmi_handle *handle,
struct scmi_xfer *xfer)
{
struct scmi_info *info = handle_to_scmi_info(handle);
return scmi_xfer_inflight_register(xfer, &info->tx_minfo);
}
/**
* scmi_xfer_pending_set - Pick a proper sequence number and mark the xfer
* as pending in-flight
*
* @xfer: The xfer to act upon
* @minfo: Pointer to Tx/Rx Message management info based on channel type
*
* Return: 0 on Success or error otherwise
*/
static inline int scmi_xfer_pending_set(struct scmi_xfer *xfer,
struct scmi_xfers_info *minfo)
{
int ret;
unsigned long flags;
spin_lock_irqsave(&minfo->xfer_lock, flags);
/* Set a new monotonic token as the xfer sequence number */
ret = scmi_xfer_token_set(minfo, xfer);
if (!ret)
scmi_xfer_inflight_register_unlocked(xfer, minfo);
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
return ret;
}
/**
* scmi_xfer_get() - Allocate one message
*
* @handle: Pointer to SCMI entity handle
* @minfo: Pointer to Tx/Rx Message management info based on channel type
*
* Helper function which is used by various message functions that are
* exposed to clients of this driver for allocating a message traffic event.
*
 * Picks an xfer from the free list @free_xfers (if any available) and performs
 * a basic initialization.
 *
 * Note that, at this point, no sequence number has been assigned yet to the
 * allocated xfer, nor is it registered as a pending transaction.
*
* The successfully initialized xfer is refcounted.
*
* Context: Holds @xfer_lock while manipulating @free_xfers.
*
* Return: An initialized xfer if all went fine, else pointer error.
*/
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
struct scmi_xfers_info *minfo)
{
unsigned long flags;
struct scmi_xfer *xfer;
spin_lock_irqsave(&minfo->xfer_lock, flags);
if (hlist_empty(&minfo->free_xfers)) {
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
return ERR_PTR(-ENOMEM);
}
/* grab an xfer from the free_list */
xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node);
hlist_del_init(&xfer->node);
/*
 * Allocate transfer_id early so that it can also be used as a base for
 * monotonic sequence number generation if needed.
*/
xfer->transfer_id = atomic_inc_return(&transfer_last_id);
refcount_set(&xfer->users, 1);
atomic_set(&xfer->busy, SCMI_XFER_FREE);
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
return xfer;
}
/**
* scmi_xfer_raw_get - Helper to get a bare free xfer from the TX channel
*
* @handle: Pointer to SCMI entity handle
*
* Note that xfer is taken from the TX channel structures.
*
* Return: A valid xfer on Success, or an error-pointer otherwise
*/
struct scmi_xfer *scmi_xfer_raw_get(const struct scmi_handle *handle)
{
struct scmi_xfer *xfer;
struct scmi_info *info = handle_to_scmi_info(handle);
xfer = scmi_xfer_get(handle, &info->tx_minfo);
if (!IS_ERR(xfer))
xfer->flags |= SCMI_XFER_FLAG_IS_RAW;
return xfer;
}
/**
* scmi_xfer_raw_channel_get - Helper to get a reference to the proper channel
* to use for a specific protocol_id Raw transaction.
*
* @handle: Pointer to SCMI entity handle
* @protocol_id: Identifier of the protocol
*
* Note that in a regular SCMI stack, usually, a protocol has to be defined in
* the DT to have an associated channel and be usable; but in Raw mode any
* protocol in range is allowed, re-using the Base channel, so as to enable
* fuzzing on any protocol without the need of a fully compiled DT.
*
* Return: A reference to the channel to use, or an ERR_PTR
*/
struct scmi_chan_info *
scmi_xfer_raw_channel_get(const struct scmi_handle *handle, u8 protocol_id)
{
struct scmi_chan_info *cinfo;
struct scmi_info *info = handle_to_scmi_info(handle);
cinfo = idr_find(&info->tx_idr, protocol_id);
if (!cinfo) {
if (protocol_id == SCMI_PROTOCOL_BASE)
return ERR_PTR(-EINVAL);
/* Use Base channel for protocols not defined for DT */
cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
if (!cinfo)
return ERR_PTR(-EINVAL);
dev_warn_once(handle->dev,
"Using Base channel for protocol 0x%X\n",
protocol_id);
}
return cinfo;
}
/**
* __scmi_xfer_put() - Release a message
*
* @minfo: Pointer to Tx/Rx Message management info based on channel type
* @xfer: message that was reserved by scmi_xfer_get
*
* After refcount check, possibly release an xfer, clearing the token slot,
* removing xfer from @pending_xfers and putting it back into free_xfers.
*
* This holds a spinlock to maintain integrity of internal data structures.
*/
static void
__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
{
unsigned long flags;
spin_lock_irqsave(&minfo->xfer_lock, flags);
if (refcount_dec_and_test(&xfer->users)) {
if (xfer->pending) {
scmi_xfer_token_clear(minfo, xfer);
hash_del(&xfer->node);
xfer->pending = false;
}
hlist_add_head(&xfer->node, &minfo->free_xfers);
}
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}
/**
* scmi_xfer_raw_put - Release an xfer that was taken by @scmi_xfer_raw_get
*
* @handle: Pointer to SCMI entity handle
* @xfer: A reference to the xfer to put
*
 * Note that, as with other xfer_put() handlers, the xfer is effectively
 * released only if there are no more users on the system.
*/
void scmi_xfer_raw_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
struct scmi_info *info = handle_to_scmi_info(handle);
xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW;
xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET;
return __scmi_xfer_put(&info->tx_minfo, xfer);
}
/**
* scmi_xfer_lookup_unlocked - Helper to lookup an xfer_id
*
* @minfo: Pointer to Tx/Rx Message management info based on channel type
* @xfer_id: Token ID to lookup in @pending_xfers
*
* Refcounting is untouched.
*
* Context: Assumes to be called with @xfer_lock already acquired.
*
* Return: A valid xfer on Success or error otherwise
*/
static struct scmi_xfer *
scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id)
{
struct scmi_xfer *xfer = NULL;
if (test_bit(xfer_id, minfo->xfer_alloc_table))
xfer = XFER_FIND(minfo->pending_xfers, xfer_id);
return xfer ?: ERR_PTR(-EINVAL);
}
/**
* scmi_msg_response_validate - Validate message type against state of related
* xfer
*
* @cinfo: A reference to the channel descriptor.
* @msg_type: Message type to check
* @xfer: A reference to the xfer to validate against @msg_type
*
* This function checks if @msg_type is congruent with the current state of
* a pending @xfer; if an asynchronous delayed response is received before the
* related synchronous response (Out-of-Order Delayed Response) the missing
* synchronous response is assumed to be OK and completed, carrying on with the
* Delayed Response: this is done to address the case in which the underlying
* SCMI transport can deliver such out-of-order responses.
*
* Context: Assumes to be called with xfer->lock already acquired.
*
* Return: 0 on Success, error otherwise
*/
static inline int scmi_msg_response_validate(struct scmi_chan_info *cinfo,
u8 msg_type,
struct scmi_xfer *xfer)
{
/*
* Even if a response was indeed expected on this slot at this point,
* a buggy platform could wrongly reply feeding us an unexpected
* delayed response we're not prepared to handle: bail-out safely
* blaming firmware.
*/
if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) {
dev_err(cinfo->dev,
"Delayed Response for %d not expected! Buggy F/W ?\n",
xfer->hdr.seq);
return -EINVAL;
}
switch (xfer->state) {
case SCMI_XFER_SENT_OK:
if (msg_type == MSG_TYPE_DELAYED_RESP) {
/*
* Delayed Response expected but delivered earlier.
* Assume message RESPONSE was OK and skip state.
*/
xfer->hdr.status = SCMI_SUCCESS;
xfer->state = SCMI_XFER_RESP_OK;
complete(&xfer->done);
dev_warn(cinfo->dev,
"Received valid OoO Delayed Response for %d\n",
xfer->hdr.seq);
}
break;
case SCMI_XFER_RESP_OK:
if (msg_type != MSG_TYPE_DELAYED_RESP)
return -EINVAL;
break;
case SCMI_XFER_DRESP_OK:
/* No further message expected once in SCMI_XFER_DRESP_OK */
return -EINVAL;
}
return 0;
}
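/*
 * Summary of the combinations accepted above (received msg_type vs current
 * xfer->state); note that a DELAYED_RESP additionally requires
 * xfer->async_done to be set, or it is rejected upfront:
 *
 *	state \ type	MSG_TYPE_COMMAND	MSG_TYPE_DELAYED_RESP
 *	SENT_OK		accepted		accepted (OoO, RESP assumed OK)
 *	RESP_OK		rejected		accepted
 *	DRESP_OK	rejected		rejected
 */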
/**
* scmi_xfer_state_update - Update xfer state
*
* @xfer: A reference to the xfer to update
* @msg_type: Type of message being processed.
*
* Note that this message is assumed to have been already successfully validated
* by @scmi_msg_response_validate(), so here we just update the state.
*
* Context: Assumes to be called on an xfer exclusively acquired using the
* busy flag.
*/
static inline void scmi_xfer_state_update(struct scmi_xfer *xfer, u8 msg_type)
{
xfer->hdr.type = msg_type;
/* Unknown command types were already discarded earlier */
if (xfer->hdr.type == MSG_TYPE_COMMAND)
xfer->state = SCMI_XFER_RESP_OK;
else
xfer->state = SCMI_XFER_DRESP_OK;
}
static bool scmi_xfer_acquired(struct scmi_xfer *xfer)
{
int ret;
ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY);
return ret == SCMI_XFER_FREE;
}
/**
* scmi_xfer_command_acquire - Helper to lookup and acquire a command xfer
*
* @cinfo: A reference to the channel descriptor.
* @msg_hdr: A message header to use as lookup key
*
* When a valid xfer is found for the sequence number embedded in the provided
* msg_hdr, reference counting is properly updated and exclusive access to this
* xfer is granted till released with @scmi_xfer_command_release.
*
* Return: A valid @xfer on Success or error otherwise.
*/
static inline struct scmi_xfer *
scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
int ret;
unsigned long flags;
struct scmi_xfer *xfer;
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
struct scmi_xfers_info *minfo = &info->tx_minfo;
u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
/* Are we even expecting this? */
spin_lock_irqsave(&minfo->xfer_lock, flags);
xfer = scmi_xfer_lookup_unlocked(minfo, xfer_id);
if (IS_ERR(xfer)) {
dev_err(cinfo->dev,
"Message for %d type %d is not expected!\n",
xfer_id, msg_type);
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
return xfer;
}
refcount_inc(&xfer->users);
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
spin_lock_irqsave(&xfer->lock, flags);
ret = scmi_msg_response_validate(cinfo, msg_type, xfer);
/*
* If a pending xfer was found which was also in a congruent state with
* the received message, acquire exclusive access to it setting the busy
* flag.
* Spins only on the rare limit condition of concurrent reception of
* RESP and DRESP for the same xfer.
*/
if (!ret) {
spin_until_cond(scmi_xfer_acquired(xfer));
scmi_xfer_state_update(xfer, msg_type);
}
spin_unlock_irqrestore(&xfer->lock, flags);
if (ret) {
dev_err(cinfo->dev,
"Invalid message type:%d for %d - HDR:0x%X state:%d\n",
msg_type, xfer_id, msg_hdr, xfer->state);
/* On error the refcount incremented above has to be dropped */
__scmi_xfer_put(minfo, xfer);
xfer = ERR_PTR(-EINVAL);
}
return xfer;
}
static inline void scmi_xfer_command_release(struct scmi_info *info,
struct scmi_xfer *xfer)
{
atomic_set(&xfer->busy, SCMI_XFER_FREE);
__scmi_xfer_put(&info->tx_minfo, xfer);
}
static inline void scmi_clear_channel(struct scmi_info *info,
struct scmi_chan_info *cinfo)
{
if (info->desc->ops->clear_channel)
info->desc->ops->clear_channel(cinfo);
}
static void scmi_handle_notification(struct scmi_chan_info *cinfo,
u32 msg_hdr, void *priv)
{
struct scmi_xfer *xfer;
struct device *dev = cinfo->dev;
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
struct scmi_xfers_info *minfo = &info->rx_minfo;
ktime_t ts;
ts = ktime_get_boottime();
xfer = scmi_xfer_get(cinfo->handle, minfo);
if (IS_ERR(xfer)) {
dev_err(dev, "failed to get free message slot (%ld)\n",
PTR_ERR(xfer));
scmi_clear_channel(info, cinfo);
return;
}
unpack_scmi_header(msg_hdr, &xfer->hdr);
if (priv)
/* Ensure order between xfer->priv store and following ops */
smp_store_mb(xfer->priv, priv);
info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
xfer);
trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
xfer->hdr.id, "NOTI", xfer->hdr.seq,
xfer->hdr.status, xfer->rx.buf, xfer->rx.len);
scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq,
MSG_TYPE_NOTIFICATION);
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr);
scmi_raw_message_report(info->raw, xfer, SCMI_RAW_NOTIF_QUEUE,
cinfo->id);
}
__scmi_xfer_put(minfo, xfer);
scmi_clear_channel(info, cinfo);
}
static void scmi_handle_response(struct scmi_chan_info *cinfo,
u32 msg_hdr, void *priv)
{
struct scmi_xfer *xfer;
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
if (IS_ERR(xfer)) {
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
scmi_raw_error_report(info->raw, cinfo, msg_hdr, priv);
if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP)
scmi_clear_channel(info, cinfo);
return;
}
/* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
xfer->rx.len = info->desc->max_msg_size;
if (priv)
/* Ensure order between xfer->priv store and following ops */
smp_store_mb(xfer->priv, priv);
info->desc->ops->fetch_response(cinfo, xfer);
trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
xfer->hdr.id,
xfer->hdr.type == MSG_TYPE_DELAYED_RESP ?
(!SCMI_XFER_IS_RAW(xfer) ? "DLYD" : "dlyd") :
(!SCMI_XFER_IS_RAW(xfer) ? "RESP" : "resp"),
xfer->hdr.seq, xfer->hdr.status,
xfer->rx.buf, xfer->rx.len);
trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq,
xfer->hdr.type);
if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
scmi_clear_channel(info, cinfo);
complete(xfer->async_done);
} else {
complete(&xfer->done);
}
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
/*
 * When in polling mode, avoid queueing the Raw xfer on the IRQ
 * RX path since it will already be queued at the end of the TX
 * poll loop.
*/
if (!xfer->hdr.poll_completion)
scmi_raw_message_report(info->raw, xfer,
SCMI_RAW_REPLY_QUEUE,
cinfo->id);
}
scmi_xfer_command_release(info, xfer);
}
/**
* scmi_rx_callback() - callback for receiving messages
*
* @cinfo: SCMI channel info
* @msg_hdr: Message header
* @priv: Transport specific private data.
*
 * Processes one received message, mapping it to the appropriate transfer
 * information, and signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence it should be
 * as fast as possible.
*/
void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv)
{
u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
switch (msg_type) {
case MSG_TYPE_NOTIFICATION:
scmi_handle_notification(cinfo, msg_hdr, priv);
break;
case MSG_TYPE_COMMAND:
case MSG_TYPE_DELAYED_RESP:
scmi_handle_response(cinfo, msg_hdr, priv);
break;
default:
WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
break;
}
}
/**
* xfer_put() - Release a transmit message
*
* @ph: Pointer to SCMI protocol handle
* @xfer: message that was reserved by xfer_get_init
*/
static void xfer_put(const struct scmi_protocol_handle *ph,
struct scmi_xfer *xfer)
{
const struct scmi_protocol_instance *pi = ph_to_pi(ph);
struct scmi_info *info = handle_to_scmi_info(pi->handle);
__scmi_xfer_put(&info->tx_minfo, xfer);
}
static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer, ktime_t stop)
{
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
/*
* Poll also on xfer->done so that polling can be forcibly terminated
* in case of out-of-order receptions of delayed responses
*/
return info->desc->ops->poll_done(cinfo, xfer) ||
try_wait_for_completion(&xfer->done) ||
ktime_after(ktime_get(), stop);
}
static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer, unsigned int timeout_ms)
{
int ret = 0;
if (xfer->hdr.poll_completion) {
/*
* Real polling is needed only if transport has NOT declared
* itself to support synchronous commands replies.
*/
if (!desc->sync_cmds_completed_on_ret) {
/*
* Poll on xfer using transport provided .poll_done();
* assumes no completion interrupt was available.
*/
ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);
spin_until_cond(scmi_xfer_done_no_timeout(cinfo,
xfer, stop));
if (ktime_after(ktime_get(), stop)) {
dev_err(dev,
"timed out in resp(caller: %pS) - polling\n",
(void *)_RET_IP_);
ret = -ETIMEDOUT;
}
}
if (!ret) {
unsigned long flags;
struct scmi_info *info =
handle_to_scmi_info(cinfo->handle);
/*
* Do not fetch_response if an out-of-order delayed
* response is being processed.
*/
spin_lock_irqsave(&xfer->lock, flags);
if (xfer->state == SCMI_XFER_SENT_OK) {
desc->ops->fetch_response(cinfo, xfer);
xfer->state = SCMI_XFER_RESP_OK;
}
spin_unlock_irqrestore(&xfer->lock, flags);
/* Trace polled replies. */
trace_scmi_msg_dump(info->id, cinfo->id,
xfer->hdr.protocol_id, xfer->hdr.id,
!SCMI_XFER_IS_RAW(xfer) ?
"RESP" : "resp",
xfer->hdr.seq, xfer->hdr.status,
xfer->rx.buf, xfer->rx.len);
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
struct scmi_info *info =
handle_to_scmi_info(cinfo->handle);
scmi_raw_message_report(info->raw, xfer,
SCMI_RAW_REPLY_QUEUE,
cinfo->id);
}
}
} else {
/* And we wait for the response. */
if (!wait_for_completion_timeout(&xfer->done,
msecs_to_jiffies(timeout_ms))) {
dev_err(dev, "timed out in resp(caller: %pS)\n",
(void *)_RET_IP_);
ret = -ETIMEDOUT;
}
}
return ret;
}
/**
 * scmi_wait_for_message_response - A helper to group all the possible ways of
* waiting for a synchronous message response.
*
* @cinfo: SCMI channel info
* @xfer: Reference to the transfer being waited for.
*
* Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
* configuration flags like xfer->hdr.poll_completion.
*
* Return: 0 on Success, error otherwise.
*/
static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer)
{
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
struct device *dev = info->dev;
trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq,
info->desc->max_rx_timeout_ms,
xfer->hdr.poll_completion);
return scmi_wait_for_reply(dev, info->desc, cinfo, xfer,
info->desc->max_rx_timeout_ms);
}
/**
 * scmi_xfer_raw_wait_for_message_response - A helper to wait for a message
* reply to an xfer raw request on a specific channel for the required timeout.
*
* @cinfo: SCMI channel info
* @xfer: Reference to the transfer being waited for.
* @timeout_ms: The maximum timeout in milliseconds
*
* Return: 0 on Success, error otherwise.
*/
int scmi_xfer_raw_wait_for_message_response(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer,
unsigned int timeout_ms)
{
int ret;
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
struct device *dev = info->dev;
ret = scmi_wait_for_reply(dev, info->desc, cinfo, xfer, timeout_ms);
if (ret)
dev_dbg(dev, "timed out in RAW response - HDR:%08X\n",
pack_scmi_header(&xfer->hdr));
return ret;
}
/**
* do_xfer() - Do one transfer
*
* @ph: Pointer to SCMI protocol handle
* @xfer: Transfer to initiate and wait for response
*
* Return: -ETIMEDOUT in case of no response, if transmit error,
* return corresponding error, else if all goes well,
* return 0.
*/
static int do_xfer(const struct scmi_protocol_handle *ph,
struct scmi_xfer *xfer)
{
int ret;
const struct scmi_protocol_instance *pi = ph_to_pi(ph);
struct scmi_info *info = handle_to_scmi_info(pi->handle);
struct device *dev = info->dev;
struct scmi_chan_info *cinfo;
/* Check for polling request on custom command xfers at first */
if (xfer->hdr.poll_completion &&
!is_transport_polling_capable(info->desc)) {
dev_warn_once(dev,
"Polling mode is not supported by transport.\n");
return -EINVAL;
}
cinfo = idr_find(&info->tx_idr, pi->proto->id);
if (unlikely(!cinfo))
return -EINVAL;
/* True ONLY if also supported by transport. */
if (is_polling_enabled(cinfo, info->desc))
xfer->hdr.poll_completion = true;
/*
* Initialise protocol id now from protocol handle to avoid it being
 * overridden by mistake (or malice) by protocol code tampering with
 * the scmi_xfer structure prior to this.
*/
xfer->hdr.protocol_id = pi->proto->id;
reinit_completion(&xfer->done);
trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq,
xfer->hdr.poll_completion);
/* Clear any stale status */
xfer->hdr.status = SCMI_SUCCESS;
xfer->state = SCMI_XFER_SENT_OK;
/*
* Even though spinlocking is not needed here since no race is possible
* on xfer->state due to the monotonically increasing tokens allocation,
* we must anyway ensure xfer->state initialization is not re-ordered
* after the .send_message() to be sure that on the RX path an early
* ISR calling scmi_rx_callback() cannot see an old stale xfer->state.
*/
smp_mb();
ret = info->desc->ops->send_message(cinfo, xfer);
if (ret < 0) {
dev_dbg(dev, "Failed to send message %d\n", ret);
return ret;
}
trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
xfer->hdr.id, "CMND", xfer->hdr.seq,
xfer->hdr.status, xfer->tx.buf, xfer->tx.len);
ret = scmi_wait_for_message_response(cinfo, xfer);
if (!ret && xfer->hdr.status)
ret = scmi_to_linux_errno(xfer->hdr.status);
if (info->desc->ops->mark_txdone)
info->desc->ops->mark_txdone(cinfo, ret, xfer);
trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq, ret);
return ret;
}
static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
struct scmi_xfer *xfer)
{
const struct scmi_protocol_instance *pi = ph_to_pi(ph);
struct scmi_info *info = handle_to_scmi_info(pi->handle);
xfer->rx.len = info->desc->max_msg_size;
}
/**
* do_xfer_with_response() - Do one transfer and wait until the delayed
* response is received
*
* @ph: Pointer to SCMI protocol handle
* @xfer: Transfer to initiate and wait for response
*
 * Using asynchronous commands in atomic/polling mode should be avoided since
 * it could cause long busy-waiting here, so ignore polling for the delayed
 * response and WARN if it was requested for this command transaction: upper
 * layers should refrain from issuing such kinds of requests.
 *
 * The only other option would have been to refrain from using any asynchronous
 * command, even when available, once an atomic transport is detected, and to
 * forcibly use the synchronous version instead (something that can easily be
 * attained at the protocol layer), but this would also have led to longer
 * stalls of the channel for synchronous commands and possibly to timeouts.
 * (In other words, there is usually a good reason if a platform provides an
 * asynchronous version of a command, and we should prefer to use it... just
 * not when operating in atomic/polling mode.)
*
* Return: -ETIMEDOUT in case of no delayed response, if transmit error,
* return corresponding error, else if all goes well, return 0.
*/
static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
struct scmi_xfer *xfer)
{
int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
DECLARE_COMPLETION_ONSTACK(async_response);
xfer->async_done = &async_response;
/*
* Delayed responses should not be polled, so an async command should
* not have been used when requiring an atomic/poll context; WARN and
* perform instead a sleeping wait.
* (Note Async + IgnoreDelayedResponses are sent via do_xfer)
*/
WARN_ON_ONCE(xfer->hdr.poll_completion);
ret = do_xfer(ph, xfer);
if (!ret) {
if (!wait_for_completion_timeout(xfer->async_done, timeout)) {
dev_err(ph->dev,
"timed out in delayed resp(caller: %pS)\n",
(void *)_RET_IP_);
ret = -ETIMEDOUT;
} else if (xfer->hdr.status) {
ret = scmi_to_linux_errno(xfer->hdr.status);
}
}
xfer->async_done = NULL;
return ret;
}
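/*
 * Typical caller pattern from a protocol implementation (sketch only:
 * MY_ASYNC_CMD and the u32 payload are placeholders): asynchronous commands
 * are built exactly like synchronous ones and simply issued through
 * do_xfer_with_response(), which returns only once the DELAYED_RESPONSE has
 * arrived or SCMI_MAX_RESPONSE_TIMEOUT has expired.
 *
 *	struct scmi_xfer *t;
 *	int ret;
 *
 *	ret = ph->xops->xfer_get_init(ph, MY_ASYNC_CMD, sizeof(u32), 0, &t);
 *	if (ret)
 *		return ret;
 *
 *	put_unaligned_le32(domain_id, t->tx.buf);
 *	ret = ph->xops->do_xfer_with_response(ph, t);
 *	ph->xops->xfer_put(ph, t);
 */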
/**
* xfer_get_init() - Allocate and initialise one message for transmit
*
* @ph: Pointer to SCMI protocol handle
* @msg_id: Message identifier
* @tx_size: transmit message size
* @rx_size: receive message size
* @p: pointer to the allocated and initialised message
*
 * This function allocates the message using @scmi_xfer_get and
 * initialises the header.
*
* Return: 0 if all went fine with @p pointing to message, else
* corresponding error.
*/
static int xfer_get_init(const struct scmi_protocol_handle *ph,
u8 msg_id, size_t tx_size, size_t rx_size,
struct scmi_xfer **p)
{
int ret;
struct scmi_xfer *xfer;
const struct scmi_protocol_instance *pi = ph_to_pi(ph);
struct scmi_info *info = handle_to_scmi_info(pi->handle);
struct scmi_xfers_info *minfo = &info->tx_minfo;
struct device *dev = info->dev;
/* Ensure we have sane transfer sizes */
if (rx_size > info->desc->max_msg_size ||
tx_size > info->desc->max_msg_size)
return -ERANGE;
xfer = scmi_xfer_get(pi->handle, minfo);
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
dev_err(dev, "failed to get free message slot(%d)\n", ret);
return ret;
}
/* Pick a sequence number and register this xfer as in-flight */
ret = scmi_xfer_pending_set(xfer, minfo);
if (ret) {
dev_err(pi->handle->dev,
"Failed to get monotonic token %d\n", ret);
__scmi_xfer_put(minfo, xfer);
return ret;
}
xfer->tx.len = tx_size;
xfer->rx.len = rx_size ? : info->desc->max_msg_size;
xfer->hdr.type = MSG_TYPE_COMMAND;
xfer->hdr.id = msg_id;
xfer->hdr.poll_completion = false;
*p = xfer;
return 0;
}
/**
* version_get() - command to get the revision of the SCMI entity
*
* @ph: Pointer to SCMI protocol handle
* @version: Holds returned version of protocol.
*
* Updates the SCMI information in the internal data structure.
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
{
int ret;
__le32 *rev_info;
struct scmi_xfer *t;
ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
if (ret)
return ret;
ret = do_xfer(ph, t);
if (!ret) {
rev_info = t->rx.buf;
*version = le32_to_cpu(*rev_info);
}
xfer_put(ph, t);
return ret;
}
/**
* scmi_set_protocol_priv - Set protocol specific data at init time
*
* @ph: A reference to the protocol handle.
* @priv: The private data to set.
*
* Return: 0 on Success
*/
static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
void *priv)
{
struct scmi_protocol_instance *pi = ph_to_pi(ph);
pi->priv = priv;
return 0;
}
/**
 * scmi_get_protocol_priv - Get protocol specific data set at init time
*
* @ph: A reference to the protocol handle.
*
* Return: Protocol private data if any was set.
*/
static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph)
{
const struct scmi_protocol_instance *pi = ph_to_pi(ph);
return pi->priv;
}
static const struct scmi_xfer_ops xfer_ops = {
.version_get = version_get,
.xfer_get_init = xfer_get_init,
.reset_rx_to_maxsz = reset_rx_to_maxsz,
.do_xfer = do_xfer,
.do_xfer_with_response = do_xfer_with_response,
.xfer_put = xfer_put,
};
struct scmi_msg_resp_domain_name_get {
__le32 flags;
u8 name[SCMI_MAX_STR_SIZE];
};
/**
 * scmi_common_extended_name_get - Common helper to get an extended resource name
* @ph: A protocol handle reference.
* @cmd_id: The specific command ID to use.
* @res_id: The specific resource ID to use.
* @name: A pointer to the preallocated area where the retrieved name will be
* stored as a NULL terminated string.
 * @len: The length in bytes of the @name char array.
 *
 * Return: 0 on Success
*/
static int scmi_common_extended_name_get(const struct scmi_protocol_handle *ph,
u8 cmd_id, u32 res_id, char *name,
size_t len)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_domain_name_get *resp;
ret = ph->xops->xfer_get_init(ph, cmd_id, sizeof(res_id),
sizeof(*resp), &t);
if (ret)
goto out;
put_unaligned_le32(res_id, t->tx.buf);
resp = t->rx.buf;
ret = ph->xops->do_xfer(ph, t);
if (!ret)
strscpy(name, resp->name, len);
ph->xops->xfer_put(ph, t);
out:
if (ret)
dev_warn(ph->dev,
"Failed to get extended name - id:%u (ret:%d). Using %s\n",
res_id, ret, name);
return ret;
}
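/*
 * Typical usage (sketch; MY_DOMAIN_NAME_GET and the extended-name capability
 * check are protocol-specific placeholders): a protocol that cannot fit the
 * full name in its attributes reply can retrieve the complete NULL terminated
 * string through this common helper, e.g.:
 *
 *	if (supports_extended_names(attributes))
 *		scmi_common_extended_name_get(ph, MY_DOMAIN_NAME_GET, domain_id,
 *					      dom_info->name, SCMI_MAX_STR_SIZE);
 */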
/**
* struct scmi_iterator - Iterator descriptor
* @msg: A reference to the message TX buffer; filled by @prepare_message with
* a proper custom command payload for each multi-part command request.
* @resp: A reference to the response RX buffer; used by @update_state and
* @process_response to parse the multi-part replies.
* @t: A reference to the underlying xfer initialized and used transparently by
* the iterator internal routines.
* @ph: A reference to the associated protocol handle to be used.
* @ops: A reference to the custom provided iterator operations.
 * @state: The current iterator state; used and updated in turn by the iterator's
 * internal routines and by the caller-provided @scmi_iterator_ops.
 * @priv: A reference to optional private data as provided by the caller and
 * passed back to the @scmi_iterator_ops.
*/
struct scmi_iterator {
void *msg;
void *resp;
struct scmi_xfer *t;
const struct scmi_protocol_handle *ph;
struct scmi_iterator_ops *ops;
struct scmi_iterator_state state;
void *priv;
};
static void *scmi_iterator_init(const struct scmi_protocol_handle *ph,
struct scmi_iterator_ops *ops,
unsigned int max_resources, u8 msg_id,
size_t tx_size, void *priv)
{
int ret;
struct scmi_iterator *i;
i = devm_kzalloc(ph->dev, sizeof(*i), GFP_KERNEL);
if (!i)
return ERR_PTR(-ENOMEM);
i->ph = ph;
i->ops = ops;
i->priv = priv;
ret = ph->xops->xfer_get_init(ph, msg_id, tx_size, 0, &i->t);
if (ret) {
devm_kfree(ph->dev, i);
return ERR_PTR(ret);
}
i->state.max_resources = max_resources;
i->msg = i->t->tx.buf;
i->resp = i->t->rx.buf;
return i;
}
static int scmi_iterator_run(void *iter)
{
int ret = -EINVAL;
struct scmi_iterator_ops *iops;
const struct scmi_protocol_handle *ph;
struct scmi_iterator_state *st;
struct scmi_iterator *i = iter;
if (!i || !i->ops || !i->ph)
return ret;
iops = i->ops;
ph = i->ph;
st = &i->state;
do {
iops->prepare_message(i->msg, st->desc_index, i->priv);
ret = ph->xops->do_xfer(ph, i->t);
if (ret)
break;
st->rx_len = i->t->rx.len;
ret = iops->update_state(st, i->resp, i->priv);
if (ret)
break;
if (st->num_returned > st->max_resources - st->desc_index) {
dev_err(ph->dev,
"No. of resources can't exceed %d\n",
st->max_resources);
ret = -EINVAL;
break;
}
for (st->loop_idx = 0; st->loop_idx < st->num_returned;
st->loop_idx++) {
ret = iops->process_response(ph, i->resp, st, i->priv);
if (ret)
goto out;
}
st->desc_index += st->num_returned;
ph->xops->reset_rx_to_maxsz(ph, i->t);
/*
* check for both returned and remaining to avoid infinite
* loop due to buggy firmware
*/
} while (st->num_returned && st->num_remaining);
out:
/* Finalize and destroy iterator */
ph->xops->xfer_put(ph, i->t);
devm_kfree(ph->dev, i);
return ret;
}
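/*
 * Illustrative sketch only (not part of this driver): a protocol drives a
 * multi-part command by providing the three scmi_iterator_ops callbacks and
 * letting the common iterator run the transfer loop. All foo_/FOO_ names
 * below are hypothetical.
 *
 *	static void iter_foo_prepare_message(void *message,
 *					     unsigned int desc_index,
 *					     const void *priv)
 *	{
 *		struct scmi_msg_foo *msg = message;
 *
 *		msg->desc_index = cpu_to_le32(desc_index);
 *	}
 *
 *	(iter_foo_update_state() and iter_foo_process_response() are provided
 *	 along the same lines, then:)
 *
 *	struct scmi_iterator_ops ops = {
 *		.prepare_message = iter_foo_prepare_message,
 *		.update_state = iter_foo_update_state,
 *		.process_response = iter_foo_process_response,
 *	};
 *	void *iter;
 *
 *	iter = ph->hops->iter_response_init(ph, &ops, num_resources,
 *					    FOO_DESCRIBE_LEVELS,
 *					    sizeof(struct scmi_msg_foo), priv);
 *	if (IS_ERR(iter))
 *		return PTR_ERR(iter);
 *	return ph->hops->iter_response_run(iter);
 */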
struct scmi_msg_get_fc_info {
__le32 domain;
__le32 message_id;
};
struct scmi_msg_resp_desc_fc {
__le32 attr;
#define SUPPORTS_DOORBELL(x) ((x) & BIT(0))
#define DOORBELL_REG_WIDTH(x) FIELD_GET(GENMASK(2, 1), (x))
__le32 rate_limit;
__le32 chan_addr_low;
__le32 chan_addr_high;
__le32 chan_size;
__le32 db_addr_low;
__le32 db_addr_high;
__le32 db_set_lmask;
__le32 db_set_hmask;
__le32 db_preserve_lmask;
__le32 db_preserve_hmask;
};
static void
scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
u8 describe_id, u32 message_id, u32 valid_size,
u32 domain, void __iomem **p_addr,
struct scmi_fc_db_info **p_db)
{
int ret;
u32 flags;
u64 phys_addr;
u8 size;
void __iomem *addr;
struct scmi_xfer *t;
struct scmi_fc_db_info *db = NULL;
struct scmi_msg_get_fc_info *info;
struct scmi_msg_resp_desc_fc *resp;
const struct scmi_protocol_instance *pi = ph_to_pi(ph);
if (!p_addr) {
ret = -EINVAL;
goto err_out;
}
ret = ph->xops->xfer_get_init(ph, describe_id,
sizeof(*info), sizeof(*resp), &t);
if (ret)
goto err_out;
info = t->tx.buf;
info->domain = cpu_to_le32(domain);
info->message_id = cpu_to_le32(message_id);
/*
* Bail out on error leaving fc_info addresses zeroed; this includes
* the case in which the requested domain/message_id does NOT support
* fastchannels at all.
*/
ret = ph->xops->do_xfer(ph, t);
if (ret)
goto err_xfer;
resp = t->rx.buf;
flags = le32_to_cpu(resp->attr);
size = le32_to_cpu(resp->chan_size);
if (size != valid_size) {
ret = -EINVAL;
goto err_xfer;
}
phys_addr = le32_to_cpu(resp->chan_addr_low);
phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
addr = devm_ioremap(ph->dev, phys_addr, size);
if (!addr) {
ret = -EADDRNOTAVAIL;
goto err_xfer;
}
*p_addr = addr;
if (p_db && SUPPORTS_DOORBELL(flags)) {
db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL);
if (!db) {
ret = -ENOMEM;
goto err_db;
}
size = 1 << DOORBELL_REG_WIDTH(flags);
phys_addr = le32_to_cpu(resp->db_addr_low);
phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
addr = devm_ioremap(ph->dev, phys_addr, size);
if (!addr) {
ret = -EADDRNOTAVAIL;
goto err_db_mem;
}
db->addr = addr;
db->width = size;
db->set = le32_to_cpu(resp->db_set_lmask);
db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
db->mask = le32_to_cpu(resp->db_preserve_lmask);
db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
*p_db = db;
}
ph->xops->xfer_put(ph, t);
dev_dbg(ph->dev,
"Using valid FC for protocol %X [MSG_ID:%u / RES_ID:%u]\n",
pi->proto->id, message_id, domain);
return;
err_db_mem:
devm_kfree(ph->dev, db);
err_db:
*p_addr = NULL;
err_xfer:
ph->xops->xfer_put(ph, t);
err_out:
dev_warn(ph->dev,
"Failed to get FC for protocol %X [MSG_ID:%u / RES_ID:%u] - ret:%d. Using regular messaging.\n",
pi->proto->id, message_id, domain, ret);
}
#define SCMI_PROTO_FC_RING_DB(w) \
do { \
u##w val = 0; \
\
if (db->mask) \
val = ioread##w(db->addr) & db->mask; \
iowrite##w((u##w)db->set | val, db->addr); \
} while (0)
static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
{
if (!db || !db->addr)
return;
if (db->width == 1)
SCMI_PROTO_FC_RING_DB(8);
else if (db->width == 2)
SCMI_PROTO_FC_RING_DB(16);
else if (db->width == 4)
SCMI_PROTO_FC_RING_DB(32);
else /* db->width == 8 */
#ifdef CONFIG_64BIT
SCMI_PROTO_FC_RING_DB(64);
#else
{
u64 val = 0;
if (db->mask)
val = ioread64_hi_lo(db->addr) & db->mask;
iowrite64_hi_lo(db->set | val, db->addr);
}
#endif
}
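/*
 * Illustrative sketch only (not part of this driver): a protocol probes for
 * a fastchannel with fastchannel_init() and, when one is reported, writes
 * the payload straight to the mapped area and rings the doorbell instead of
 * sending a regular message. The FOO_* identifiers are hypothetical.
 *
 *	void __iomem *set_addr = NULL;
 *	struct scmi_fc_db_info *set_db = NULL;
 *
 *	ph->hops->fastchannel_init(ph, FOO_DESCRIBE_FASTCHANNEL,
 *				   FOO_LEVEL_SET, sizeof(u32), domain,
 *				   &set_addr, &set_db);
 *
 *	Later, on the fast path, only if set_addr was populated:
 *
 *	if (set_addr) {
 *		iowrite32(level, set_addr);
 *		ph->hops->fastchannel_db_ring(set_db);
 *	}
 */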
static const struct scmi_proto_helpers_ops helpers_ops = {
.extended_name_get = scmi_common_extended_name_get,
.iter_response_init = scmi_iterator_init,
.iter_response_run = scmi_iterator_run,
.fastchannel_init = scmi_common_fastchannel_init,
.fastchannel_db_ring = scmi_common_fastchannel_db_ring,
};
/**
* scmi_revision_area_get - Retrieve version memory area.
*
* @ph: A reference to the protocol handle.
*
* A helper to grab the version memory area reference during SCMI Base protocol
* initialization.
*
 * Return: A reference to the version memory area associated with the SCMI
 * instance underlying this protocol handle.
*/
struct scmi_revision_info *
scmi_revision_area_get(const struct scmi_protocol_handle *ph)
{
const struct scmi_protocol_instance *pi = ph_to_pi(ph);
return pi->handle->version;
}
/**
* scmi_alloc_init_protocol_instance - Allocate and initialize a protocol
* instance descriptor.
* @info: The reference to the related SCMI instance.
* @proto: The protocol descriptor.
*
 * Allocate a new protocol instance descriptor, using the provided @proto
 * description, against the specified SCMI instance @info, and initialize it;
 * all resource management is handled via a dedicated per-protocol devres
 * group.
 *
 * Context: Assumed to be called with @protocols_mtx already held.
 * Return: A reference to a freshly allocated and initialized protocol instance
 * or ERR_PTR on failure. On failure the @proto reference is first
 * put using scmi_protocol_put() before the whole devres group is released.
*/
static struct scmi_protocol_instance *
scmi_alloc_init_protocol_instance(struct scmi_info *info,
const struct scmi_protocol *proto)
{
int ret = -ENOMEM;
void *gid;
struct scmi_protocol_instance *pi;
const struct scmi_handle *handle = &info->handle;
/* Protocol specific devres group */
gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
if (!gid) {
scmi_protocol_put(proto->id);
goto out;
}
pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
if (!pi)
goto clean;
pi->gid = gid;
pi->proto = proto;
pi->handle = handle;
pi->ph.dev = handle->dev;
pi->ph.xops = &xfer_ops;
pi->ph.hops = &helpers_ops;
pi->ph.set_priv = scmi_set_protocol_priv;
pi->ph.get_priv = scmi_get_protocol_priv;
refcount_set(&pi->users, 1);
/* proto->init is assured NON NULL by scmi_protocol_register */
ret = pi->proto->instance_init(&pi->ph);
if (ret)
goto clean;
ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1,
GFP_KERNEL);
if (ret != proto->id)
goto clean;
/*
* Warn but ignore events registration errors since we do not want
* to skip whole protocols if their notifications are messed up.
*/
if (pi->proto->events) {
ret = scmi_register_protocol_events(handle, pi->proto->id,
&pi->ph,
pi->proto->events);
if (ret)
dev_warn(handle->dev,
"Protocol:%X - Events Registration Failed - err:%d\n",
pi->proto->id, ret);
}
devres_close_group(handle->dev, pi->gid);
dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);
return pi;
clean:
/* Take care to put the protocol module's owner before releasing all */
scmi_protocol_put(proto->id);
devres_release_group(handle->dev, gid);
out:
return ERR_PTR(ret);
}
/**
* scmi_get_protocol_instance - Protocol initialization helper.
* @handle: A reference to the SCMI platform instance.
* @protocol_id: The protocol being requested.
*
* In case the required protocol has never been requested before for this
* instance, allocate and initialize all the needed structures while handling
* resource allocation with a dedicated per-protocol devres subgroup.
*
* Return: A reference to an initialized protocol instance or error on failure:
* in particular returns -EPROBE_DEFER when the desired protocol could
* NOT be found.
*/
static struct scmi_protocol_instance * __must_check
scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
{
struct scmi_protocol_instance *pi;
struct scmi_info *info = handle_to_scmi_info(handle);
mutex_lock(&info->protocols_mtx);
pi = idr_find(&info->protocols, protocol_id);
if (pi) {
refcount_inc(&pi->users);
} else {
const struct scmi_protocol *proto;
/* Fails if protocol not registered on bus */
proto = scmi_protocol_get(protocol_id);
if (proto)
pi = scmi_alloc_init_protocol_instance(info, proto);
else
pi = ERR_PTR(-EPROBE_DEFER);
}
mutex_unlock(&info->protocols_mtx);
return pi;
}
/**
* scmi_protocol_acquire - Protocol acquire
* @handle: A reference to the SCMI platform instance.
* @protocol_id: The protocol being requested.
*
* Register a new user for the requested protocol on the specified SCMI
* platform instance, possibly triggering its initialization on first user.
*
* Return: 0 if protocol was acquired successfully.
*/
int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id)
{
return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
}
/**
* scmi_protocol_release - Protocol de-initialization helper.
* @handle: A reference to the SCMI platform instance.
* @protocol_id: The protocol being requested.
*
 * Remove one user of the specified protocol and trigger de-initialization
 * and resource de-allocation once the last user has gone.
*/
void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id)
{
struct scmi_info *info = handle_to_scmi_info(handle);
struct scmi_protocol_instance *pi;
mutex_lock(&info->protocols_mtx);
pi = idr_find(&info->protocols, protocol_id);
if (WARN_ON(!pi))
goto out;
if (refcount_dec_and_test(&pi->users)) {
void *gid = pi->gid;
if (pi->proto->events)
scmi_deregister_protocol_events(handle, protocol_id);
if (pi->proto->instance_deinit)
pi->proto->instance_deinit(&pi->ph);
idr_remove(&info->protocols, protocol_id);
scmi_protocol_put(protocol_id);
devres_release_group(handle->dev, gid);
dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
protocol_id);
}
out:
mutex_unlock(&info->protocols_mtx);
}
void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
u8 *prot_imp)
{
const struct scmi_protocol_instance *pi = ph_to_pi(ph);
struct scmi_info *info = handle_to_scmi_info(pi->handle);
info->protocols_imp = prot_imp;
}
static bool
scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
{
int i;
struct scmi_info *info = handle_to_scmi_info(handle);
struct scmi_revision_info *rev = handle->version;
if (!info->protocols_imp)
return false;
for (i = 0; i < rev->num_protocols; i++)
if (info->protocols_imp[i] == prot_id)
return true;
return false;
}
struct scmi_protocol_devres {
const struct scmi_handle *handle;
u8 protocol_id;
};
static void scmi_devm_release_protocol(struct device *dev, void *res)
{
struct scmi_protocol_devres *dres = res;
scmi_protocol_release(dres->handle, dres->protocol_id);
}
static struct scmi_protocol_instance __must_check *
scmi_devres_protocol_instance_get(struct scmi_device *sdev, u8 protocol_id)
{
struct scmi_protocol_instance *pi;
struct scmi_protocol_devres *dres;
dres = devres_alloc(scmi_devm_release_protocol,
sizeof(*dres), GFP_KERNEL);
if (!dres)
return ERR_PTR(-ENOMEM);
pi = scmi_get_protocol_instance(sdev->handle, protocol_id);
if (IS_ERR(pi)) {
devres_free(dres);
return pi;
}
dres->handle = sdev->handle;
dres->protocol_id = protocol_id;
devres_add(&sdev->dev, dres);
return pi;
}
/**
* scmi_devm_protocol_get - Devres managed get protocol operations and handle
* @sdev: A reference to an scmi_device whose embedded struct device is to
* be used for devres accounting.
* @protocol_id: The protocol being requested.
* @ph: A pointer reference used to pass back the associated protocol handle.
*
 * Get hold of a protocol, accounting for its usage and possibly triggering its
 * initialization, and return the protocol specific operations and the related
 * protocol handle, which will be used as the first argument in most of the
 * protocol operations methods.
* Being a devres based managed method, protocol hold will be automatically
* released, and possibly de-initialized on last user, once the SCMI driver
* owning the scmi_device is unbound from it.
*
* Return: A reference to the requested protocol operations or error.
* Must be checked for errors by caller.
*/
static const void __must_check *
scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id,
struct scmi_protocol_handle **ph)
{
struct scmi_protocol_instance *pi;
if (!ph)
return ERR_PTR(-EINVAL);
pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
if (IS_ERR(pi))
return pi;
*ph = &pi->ph;
return pi->proto->ops;
}
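/*
 * Illustrative sketch only (not part of this driver): an SCMI driver bound
 * to an scmi_device usually takes hold of a protocol from its probe
 * callback through the handle; the clock protocol is used here purely as an
 * example and scmi_foo_probe is a hypothetical name.
 *
 *	static int scmi_foo_probe(struct scmi_device *sdev)
 *	{
 *		const struct scmi_clk_proto_ops *clk_ops;
 *		struct scmi_protocol_handle *ph;
 *
 *		clk_ops = sdev->handle->devm_protocol_get(sdev,
 *							  SCMI_PROTOCOL_CLOCK,
 *							  &ph);
 *		if (IS_ERR(clk_ops))
 *			return PTR_ERR(clk_ops);
 *
 *		return 0;
 *	}
 *
 * From there on clk_ops is used with ph as first argument; the protocol
 * hold is dropped automatically when the driver is unbound.
 */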
/**
* scmi_devm_protocol_acquire - Devres managed helper to get hold of a protocol
* @sdev: A reference to an scmi_device whose embedded struct device is to
* be used for devres accounting.
* @protocol_id: The protocol being requested.
*
* Get hold of a protocol accounting for its usage, possibly triggering its
* initialization but without getting access to its protocol specific operations
* and handle.
*
* Being a devres based managed method, protocol hold will be automatically
* released, and possibly de-initialized on last user, once the SCMI driver
* owning the scmi_device is unbound from it.
*
* Return: 0 on SUCCESS
*/
static int __must_check scmi_devm_protocol_acquire(struct scmi_device *sdev,
u8 protocol_id)
{
struct scmi_protocol_instance *pi;
pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
if (IS_ERR(pi))
return PTR_ERR(pi);
return 0;
}
static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
{
struct scmi_protocol_devres *dres = res;
if (WARN_ON(!dres || !data))
return 0;
return dres->protocol_id == *((u8 *)data);
}
/**
* scmi_devm_protocol_put - Devres managed put protocol operations and handle
* @sdev: A reference to an scmi_device whose embedded struct device is to
* be used for devres accounting.
* @protocol_id: The protocol being requested.
*
 * Explicitly release a protocol hold previously obtained by calling the above
 * @scmi_devm_protocol_get.
*/
static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
{
int ret;
ret = devres_release(&sdev->dev, scmi_devm_release_protocol,
scmi_devm_protocol_match, &protocol_id);
WARN_ON(ret);
}
/**
* scmi_is_transport_atomic - Method to check if underlying transport for an
* SCMI instance is configured as atomic.
*
* @handle: A reference to the SCMI platform instance.
 * @atomic_threshold: An optional return value for the currently configured
 * system wide threshold for atomic operations.
*
* Return: True if transport is configured as atomic
*/
static bool scmi_is_transport_atomic(const struct scmi_handle *handle,
unsigned int *atomic_threshold)
{
bool ret;
struct scmi_info *info = handle_to_scmi_info(handle);
ret = info->desc->atomic_enabled &&
is_transport_polling_capable(info->desc);
if (ret && atomic_threshold)
*atomic_threshold = info->atomic_threshold;
return ret;
}
/**
* scmi_handle_get() - Get the SCMI handle for a device
*
* @dev: pointer to device for which we want SCMI handle
*
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by the caller of the SCMI protocol library.
 * scmi_handle_put() must be balanced with a successful scmi_handle_get().
*
* Return: pointer to handle if successful, NULL on error
*/
static struct scmi_handle *scmi_handle_get(struct device *dev)
{
struct list_head *p;
struct scmi_info *info;
struct scmi_handle *handle = NULL;
mutex_lock(&scmi_list_mutex);
list_for_each(p, &scmi_list) {
info = list_entry(p, struct scmi_info, node);
if (dev->parent == info->dev) {
info->users++;
handle = &info->handle;
break;
}
}
mutex_unlock(&scmi_list_mutex);
return handle;
}
/**
* scmi_handle_put() - Release the handle acquired by scmi_handle_get
*
* @handle: handle acquired by scmi_handle_get
*
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by the caller of the SCMI protocol library.
 * scmi_handle_put() must be balanced with a successful scmi_handle_get().
*
 * Return: 0 if successfully released,
 * -EINVAL if a NULL handle was passed.
*/
static int scmi_handle_put(const struct scmi_handle *handle)
{
struct scmi_info *info;
if (!handle)
return -EINVAL;
info = handle_to_scmi_info(handle);
mutex_lock(&scmi_list_mutex);
if (!WARN_ON(!info->users))
info->users--;
mutex_unlock(&scmi_list_mutex);
return 0;
}
static void scmi_device_link_add(struct device *consumer,
struct device *supplier)
{
struct device_link *link;
link = device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER);
WARN_ON(!link);
}
static void scmi_set_handle(struct scmi_device *scmi_dev)
{
scmi_dev->handle = scmi_handle_get(&scmi_dev->dev);
if (scmi_dev->handle)
scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev);
}
static int __scmi_xfer_info_init(struct scmi_info *sinfo,
struct scmi_xfers_info *info)
{
int i;
struct scmi_xfer *xfer;
struct device *dev = sinfo->dev;
const struct scmi_desc *desc = sinfo->desc;
/* Pre-allocated messages, no more than what hdr.seq can support */
if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) {
dev_err(dev,
"Invalid maximum messages %d, not in range [1 - %lu]\n",
info->max_msg, MSG_TOKEN_MAX);
return -EINVAL;
}
hash_init(info->pending_xfers);
/* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */
info->xfer_alloc_table = devm_bitmap_zalloc(dev, MSG_TOKEN_MAX,
GFP_KERNEL);
if (!info->xfer_alloc_table)
return -ENOMEM;
/*
* Preallocate a number of xfers equal to max inflight messages,
* pre-initialize the buffer pointer to pre-allocated buffers and
* attach all of them to the free list
*/
INIT_HLIST_HEAD(&info->free_xfers);
for (i = 0; i < info->max_msg; i++) {
xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL);
if (!xfer)
return -ENOMEM;
xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
GFP_KERNEL);
if (!xfer->rx.buf)
return -ENOMEM;
xfer->tx.buf = xfer->rx.buf;
init_completion(&xfer->done);
spin_lock_init(&xfer->lock);
/* Add initialized xfer to the free list */
hlist_add_head(&xfer->node, &info->free_xfers);
}
spin_lock_init(&info->xfer_lock);
return 0;
}
static int scmi_channels_max_msg_configure(struct scmi_info *sinfo)
{
const struct scmi_desc *desc = sinfo->desc;
if (!desc->ops->get_max_msg) {
sinfo->tx_minfo.max_msg = desc->max_msg;
sinfo->rx_minfo.max_msg = desc->max_msg;
} else {
struct scmi_chan_info *base_cinfo;
base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE);
if (!base_cinfo)
return -EINVAL;
sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo);
/* RX channel is optional so can be skipped */
base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE);
if (base_cinfo)
sinfo->rx_minfo.max_msg =
desc->ops->get_max_msg(base_cinfo);
}
return 0;
}
static int scmi_xfer_info_init(struct scmi_info *sinfo)
{
int ret;
ret = scmi_channels_max_msg_configure(sinfo);
if (ret)
return ret;
ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
if (!ret && !idr_is_empty(&sinfo->rx_idr))
ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
return ret;
}
static int scmi_chan_setup(struct scmi_info *info, struct device_node *of_node,
int prot_id, bool tx)
{
int ret, idx;
char name[32];
struct scmi_chan_info *cinfo;
struct idr *idr;
struct scmi_device *tdev = NULL;
/* Transmit channel is first entry i.e. index 0 */
idx = tx ? 0 : 1;
idr = tx ? &info->tx_idr : &info->rx_idr;
if (!info->desc->ops->chan_available(of_node, idx)) {
cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
return -EINVAL;
goto idr_alloc;
}
cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
if (!cinfo)
return -ENOMEM;
cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms;
/* Create a unique name for this transport device */
snprintf(name, 32, "__scmi_transport_device_%s_%02X",
idx ? "rx" : "tx", prot_id);
/* Create a uniquely named, dedicated transport device for this chan */
tdev = scmi_device_create(of_node, info->dev, prot_id, name);
if (!tdev) {
dev_err(info->dev,
"failed to create transport device (%s)\n", name);
devm_kfree(info->dev, cinfo);
return -EINVAL;
}
of_node_get(of_node);
cinfo->id = prot_id;
cinfo->dev = &tdev->dev;
ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
if (ret) {
of_node_put(of_node);
scmi_device_destroy(info->dev, prot_id, name);
devm_kfree(info->dev, cinfo);
return ret;
}
if (tx && is_polling_required(cinfo, info->desc)) {
if (is_transport_polling_capable(info->desc))
dev_info(&tdev->dev,
"Enabled polling mode TX channel - prot_id:%d\n",
prot_id);
else
dev_warn(&tdev->dev,
"Polling mode NOT supported by transport.\n");
}
idr_alloc:
ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
if (ret != prot_id) {
dev_err(info->dev,
"unable to allocate SCMI idr slot err %d\n", ret);
/* Destroy channel and device only if created by this call. */
if (tdev) {
of_node_put(of_node);
scmi_device_destroy(info->dev, prot_id, name);
devm_kfree(info->dev, cinfo);
}
return ret;
}
cinfo->handle = &info->handle;
return 0;
}
static inline int
scmi_txrx_setup(struct scmi_info *info, struct device_node *of_node,
int prot_id)
{
int ret = scmi_chan_setup(info, of_node, prot_id, true);
if (!ret) {
/* Rx is optional, report only memory errors */
ret = scmi_chan_setup(info, of_node, prot_id, false);
if (ret && ret != -ENOMEM)
ret = 0;
}
return ret;
}
/**
* scmi_channels_setup - Helper to initialize all required channels
*
* @info: The SCMI instance descriptor.
*
 * Initialize all the channels described in the DT against the underlying
 * configured transport, using custom defined dedicated devices instead of
 * borrowing devices from the SCMI drivers; this way channels are initialized
 * upfront during core SCMI stack probing and are no longer coupled with the
 * SCMI devices used by SCMI drivers.
*
* Note that, even though a pair of TX/RX channels is associated to each
* protocol defined in the DT, a distinct freshly initialized channel is
* created only if the DT node for the protocol at hand describes a dedicated
* channel: in all the other cases the common BASE protocol channel is reused.
*
* Return: 0 on Success
*/
static int scmi_channels_setup(struct scmi_info *info)
{
int ret;
struct device_node *child, *top_np = info->dev->of_node;
/* Initialize a common generic channel at first */
ret = scmi_txrx_setup(info, top_np, SCMI_PROTOCOL_BASE);
if (ret)
return ret;
for_each_available_child_of_node(top_np, child) {
u32 prot_id;
if (of_property_read_u32(child, "reg", &prot_id))
continue;
if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
dev_err(info->dev,
"Out of range protocol %d\n", prot_id);
ret = scmi_txrx_setup(info, child, prot_id);
if (ret) {
of_node_put(child);
return ret;
}
}
return 0;
}
static int scmi_chan_destroy(int id, void *p, void *idr)
{
struct scmi_chan_info *cinfo = p;
if (cinfo->dev) {
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
struct scmi_device *sdev = to_scmi_dev(cinfo->dev);
of_node_put(cinfo->dev->of_node);
scmi_device_destroy(info->dev, id, sdev->name);
cinfo->dev = NULL;
}
idr_remove(idr, id);
return 0;
}
static void scmi_cleanup_channels(struct scmi_info *info, struct idr *idr)
{
/* At first free all channels at the transport layer ... */
idr_for_each(idr, info->desc->ops->chan_free, idr);
/* ...then destroy all underlying devices */
idr_for_each(idr, scmi_chan_destroy, idr);
idr_destroy(idr);
}
static void scmi_cleanup_txrx_channels(struct scmi_info *info)
{
scmi_cleanup_channels(info, &info->tx_idr);
scmi_cleanup_channels(info, &info->rx_idr);
}
static int scmi_bus_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
struct scmi_info *info = bus_nb_to_scmi_info(nb);
struct scmi_device *sdev = to_scmi_dev(data);
/* Skip transport devices and devices of different SCMI instances */
if (!strncmp(sdev->name, "__scmi_transport_device", 23) ||
sdev->dev.parent != info->dev)
return NOTIFY_DONE;
switch (action) {
case BUS_NOTIFY_BIND_DRIVER:
/* setup handle now as the transport is ready */
scmi_set_handle(sdev);
break;
case BUS_NOTIFY_UNBOUND_DRIVER:
scmi_handle_put(sdev->handle);
sdev->handle = NULL;
break;
default:
return NOTIFY_DONE;
}
dev_dbg(info->dev, "Device %s (%s) is now %s\n", dev_name(&sdev->dev),
sdev->name, action == BUS_NOTIFY_BIND_DRIVER ?
"about to be BOUND." : "UNBOUND.");
return NOTIFY_OK;
}
static int scmi_device_request_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
struct device_node *np;
struct scmi_device_id *id_table = data;
struct scmi_info *info = req_nb_to_scmi_info(nb);
np = idr_find(&info->active_protocols, id_table->protocol_id);
if (!np)
return NOTIFY_DONE;
dev_dbg(info->dev, "%sRequested device (%s) for protocol 0x%x\n",
action == SCMI_BUS_NOTIFY_DEVICE_REQUEST ? "" : "UN-",
id_table->name, id_table->protocol_id);
switch (action) {
case SCMI_BUS_NOTIFY_DEVICE_REQUEST:
scmi_create_protocol_devices(np, info, id_table->protocol_id,
id_table->name);
break;
case SCMI_BUS_NOTIFY_DEVICE_UNREQUEST:
scmi_destroy_protocol_devices(info, id_table->protocol_id,
id_table->name);
break;
default:
return NOTIFY_DONE;
}
return NOTIFY_OK;
}
static void scmi_debugfs_common_cleanup(void *d)
{
struct scmi_debug_info *dbg = d;
if (!dbg)
return;
debugfs_remove_recursive(dbg->top_dentry);
kfree(dbg->name);
kfree(dbg->type);
}
static struct scmi_debug_info *scmi_debugfs_common_setup(struct scmi_info *info)
{
char top_dir[16];
struct dentry *trans, *top_dentry;
struct scmi_debug_info *dbg;
const char *c_ptr = NULL;
dbg = devm_kzalloc(info->dev, sizeof(*dbg), GFP_KERNEL);
if (!dbg)
return NULL;
dbg->name = kstrdup(of_node_full_name(info->dev->of_node), GFP_KERNEL);
if (!dbg->name) {
devm_kfree(info->dev, dbg);
return NULL;
}
of_property_read_string(info->dev->of_node, "compatible", &c_ptr);
dbg->type = kstrdup(c_ptr, GFP_KERNEL);
if (!dbg->type) {
kfree(dbg->name);
devm_kfree(info->dev, dbg);
return NULL;
}
snprintf(top_dir, 16, "%d", info->id);
top_dentry = debugfs_create_dir(top_dir, scmi_top_dentry);
trans = debugfs_create_dir("transport", top_dentry);
dbg->is_atomic = info->desc->atomic_enabled &&
is_transport_polling_capable(info->desc);
debugfs_create_str("instance_name", 0400, top_dentry,
(char **)&dbg->name);
debugfs_create_u32("atomic_threshold_us", 0400, top_dentry,
&info->atomic_threshold);
debugfs_create_str("type", 0400, trans, (char **)&dbg->type);
debugfs_create_bool("is_atomic", 0400, trans, &dbg->is_atomic);
debugfs_create_u32("max_rx_timeout_ms", 0400, trans,
(u32 *)&info->desc->max_rx_timeout_ms);
debugfs_create_u32("max_msg_size", 0400, trans,
(u32 *)&info->desc->max_msg_size);
debugfs_create_u32("tx_max_msg", 0400, trans,
(u32 *)&info->tx_minfo.max_msg);
debugfs_create_u32("rx_max_msg", 0400, trans,
(u32 *)&info->rx_minfo.max_msg);
dbg->top_dentry = top_dentry;
if (devm_add_action_or_reset(info->dev,
scmi_debugfs_common_cleanup, dbg)) {
scmi_debugfs_common_cleanup(dbg);
return NULL;
}
return dbg;
}
static int scmi_debugfs_raw_mode_setup(struct scmi_info *info)
{
int id, num_chans = 0, ret = 0;
struct scmi_chan_info *cinfo;
u8 channels[SCMI_MAX_CHANNELS] = {};
DECLARE_BITMAP(protos, SCMI_MAX_CHANNELS) = {};
if (!info->dbg)
return -EINVAL;
/* Enumerate all channels to collect their ids */
idr_for_each_entry(&info->tx_idr, cinfo, id) {
/*
* Cannot happen, but be defensive.
* Zero as num_chans is ok, warn and carry on.
*/
if (num_chans >= SCMI_MAX_CHANNELS || !cinfo) {
dev_warn(info->dev,
"SCMI RAW - Error enumerating channels\n");
break;
}
if (!test_bit(cinfo->id, protos)) {
channels[num_chans++] = cinfo->id;
set_bit(cinfo->id, protos);
}
}
info->raw = scmi_raw_mode_init(&info->handle, info->dbg->top_dentry,
info->id, channels, num_chans,
info->desc, info->tx_minfo.max_msg);
if (IS_ERR(info->raw)) {
dev_err(info->dev, "Failed to initialize SCMI RAW Mode !\n");
ret = PTR_ERR(info->raw);
info->raw = NULL;
}
return ret;
}
static int scmi_probe(struct platform_device *pdev)
{
int ret;
struct scmi_handle *handle;
const struct scmi_desc *desc;
struct scmi_info *info;
bool coex = IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX);
struct device *dev = &pdev->dev;
struct device_node *child, *np = dev->of_node;
desc = of_device_get_match_data(dev);
if (!desc)
return -EINVAL;
info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->id = ida_alloc_min(&scmi_id, 0, GFP_KERNEL);
if (info->id < 0)
return info->id;
info->dev = dev;
info->desc = desc;
info->bus_nb.notifier_call = scmi_bus_notifier;
info->dev_req_nb.notifier_call = scmi_device_request_notifier;
INIT_LIST_HEAD(&info->node);
idr_init(&info->protocols);
mutex_init(&info->protocols_mtx);
idr_init(&info->active_protocols);
mutex_init(&info->devreq_mtx);
platform_set_drvdata(pdev, info);
idr_init(&info->tx_idr);
idr_init(&info->rx_idr);
handle = &info->handle;
handle->dev = info->dev;
handle->version = &info->version;
handle->devm_protocol_acquire = scmi_devm_protocol_acquire;
handle->devm_protocol_get = scmi_devm_protocol_get;
handle->devm_protocol_put = scmi_devm_protocol_put;
/* System wide atomic threshold for atomic ops, if any */
if (!of_property_read_u32(np, "atomic-threshold-us",
&info->atomic_threshold))
dev_info(dev,
"SCMI System wide atomic threshold set to %d us\n",
info->atomic_threshold);
handle->is_transport_atomic = scmi_is_transport_atomic;
if (desc->ops->link_supplier) {
ret = desc->ops->link_supplier(dev);
if (ret)
goto clear_ida;
}
/* Setup all channels described in the DT at first */
ret = scmi_channels_setup(info);
if (ret)
goto clear_ida;
ret = bus_register_notifier(&scmi_bus_type, &info->bus_nb);
if (ret)
goto clear_txrx_setup;
ret = blocking_notifier_chain_register(&scmi_requested_devices_nh,
&info->dev_req_nb);
if (ret)
goto clear_bus_notifier;
ret = scmi_xfer_info_init(info);
if (ret)
goto clear_dev_req_notifier;
if (scmi_top_dentry) {
info->dbg = scmi_debugfs_common_setup(info);
if (!info->dbg)
dev_warn(dev, "Failed to setup SCMI debugfs.\n");
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
ret = scmi_debugfs_raw_mode_setup(info);
if (!coex) {
if (ret)
goto clear_dev_req_notifier;
/* Bail out anyway when coex disabled. */
return 0;
}
/* Coex enabled, carry on in any case. */
dev_info(dev, "SCMI RAW Mode COEX enabled !\n");
}
}
if (scmi_notification_init(handle))
dev_err(dev, "SCMI Notifications NOT available.\n");
if (info->desc->atomic_enabled &&
!is_transport_polling_capable(info->desc))
dev_err(dev,
"Transport is not polling capable. Atomic mode not supported.\n");
/*
* Trigger SCMI Base protocol initialization.
* It's mandatory and won't be ever released/deinit until the
* SCMI stack is shutdown/unloaded as a whole.
*/
ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
if (ret) {
dev_err(dev, "unable to communicate with SCMI\n");
if (coex)
return 0;
goto notification_exit;
}
mutex_lock(&scmi_list_mutex);
list_add_tail(&info->node, &scmi_list);
mutex_unlock(&scmi_list_mutex);
for_each_available_child_of_node(np, child) {
u32 prot_id;
if (of_property_read_u32(child, "reg", &prot_id))
continue;
if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
dev_err(dev, "Out of range protocol %d\n", prot_id);
if (!scmi_is_protocol_implemented(handle, prot_id)) {
dev_err(dev, "SCMI protocol %d not implemented\n",
prot_id);
continue;
}
/*
* Save this valid DT protocol descriptor amongst
 * @active_protocols for this SCMI instance.
*/
ret = idr_alloc(&info->active_protocols, child,
prot_id, prot_id + 1, GFP_KERNEL);
if (ret != prot_id) {
dev_err(dev, "SCMI protocol %d already activated. Skip\n",
prot_id);
continue;
}
of_node_get(child);
scmi_create_protocol_devices(child, info, prot_id, NULL);
}
return 0;
notification_exit:
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
scmi_raw_mode_cleanup(info->raw);
scmi_notification_exit(&info->handle);
clear_dev_req_notifier:
blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
&info->dev_req_nb);
clear_bus_notifier:
bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
clear_txrx_setup:
scmi_cleanup_txrx_channels(info);
clear_ida:
ida_free(&scmi_id, info->id);
return ret;
}
static int scmi_remove(struct platform_device *pdev)
{
int id;
struct scmi_info *info = platform_get_drvdata(pdev);
struct device_node *child;
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
scmi_raw_mode_cleanup(info->raw);
mutex_lock(&scmi_list_mutex);
if (info->users)
dev_warn(&pdev->dev,
"Still active SCMI users will be forcibly unbound.\n");
list_del(&info->node);
mutex_unlock(&scmi_list_mutex);
scmi_notification_exit(&info->handle);
mutex_lock(&info->protocols_mtx);
idr_destroy(&info->protocols);
mutex_unlock(&info->protocols_mtx);
idr_for_each_entry(&info->active_protocols, child, id)
of_node_put(child);
idr_destroy(&info->active_protocols);
blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
&info->dev_req_nb);
bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
/* Safe to free channels since no more users */
scmi_cleanup_txrx_channels(info);
ida_free(&scmi_id, info->id);
return 0;
}
static ssize_t protocol_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct scmi_info *info = dev_get_drvdata(dev);
return sprintf(buf, "%u.%u\n", info->version.major_ver,
info->version.minor_ver);
}
static DEVICE_ATTR_RO(protocol_version);
static ssize_t firmware_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct scmi_info *info = dev_get_drvdata(dev);
return sprintf(buf, "0x%x\n", info->version.impl_ver);
}
static DEVICE_ATTR_RO(firmware_version);
static ssize_t vendor_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct scmi_info *info = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", info->version.vendor_id);
}
static DEVICE_ATTR_RO(vendor_id);
static ssize_t sub_vendor_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct scmi_info *info = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", info->version.sub_vendor_id);
}
static DEVICE_ATTR_RO(sub_vendor_id);
static struct attribute *versions_attrs[] = {
&dev_attr_firmware_version.attr,
&dev_attr_protocol_version.attr,
&dev_attr_vendor_id.attr,
&dev_attr_sub_vendor_id.attr,
NULL,
};
ATTRIBUTE_GROUPS(versions);
/* Each compatible listed below must have a descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
#endif
#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE
{ .compatible = "linaro,scmi-optee", .data = &scmi_optee_desc },
#endif
#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC
{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
{ .compatible = "arm,scmi-smc-param", .data = &scmi_smc_desc},
#endif
#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO
{ .compatible = "arm,scmi-virtio", .data = &scmi_virtio_desc},
#endif
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, scmi_of_match);
static struct platform_driver scmi_driver = {
.driver = {
.name = "arm-scmi",
.suppress_bind_attrs = true,
.of_match_table = scmi_of_match,
.dev_groups = versions_groups,
},
.probe = scmi_probe,
.remove = scmi_remove,
};
/**
* __scmi_transports_setup - Common helper to call transport-specific
* .init/.exit code if provided.
*
* @init: A flag to distinguish between init and exit.
*
* Note that, if provided, we invoke .init/.exit functions for all the
* transports currently compiled in.
*
* Return: 0 on Success.
*/
static inline int __scmi_transports_setup(bool init)
{
int ret = 0;
const struct of_device_id *trans;
for (trans = scmi_of_match; trans->data; trans++) {
const struct scmi_desc *tdesc = trans->data;
if ((init && !tdesc->transport_init) ||
(!init && !tdesc->transport_exit))
continue;
if (init)
ret = tdesc->transport_init();
else
tdesc->transport_exit();
if (ret) {
pr_err("SCMI transport %s FAILED initialization!\n",
trans->compatible);
break;
}
}
return ret;
}
static int __init scmi_transports_init(void)
{
return __scmi_transports_setup(true);
}
static void __exit scmi_transports_exit(void)
{
__scmi_transports_setup(false);
}
static struct dentry *scmi_debugfs_init(void)
{
struct dentry *d;
d = debugfs_create_dir("scmi", NULL);
if (IS_ERR(d)) {
pr_err("Could NOT create SCMI top dentry.\n");
return NULL;
}
return d;
}
static int __init scmi_driver_init(void)
{
int ret;
/* Bail out if no SCMI transport was configured */
if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
return -EINVAL;
/* Initialize any compiled-in transport which provided an init/exit */
ret = scmi_transports_init();
if (ret)
return ret;
if (IS_ENABLED(CONFIG_ARM_SCMI_NEED_DEBUGFS))
scmi_top_dentry = scmi_debugfs_init();
scmi_base_register();
scmi_clock_register();
scmi_perf_register();
scmi_power_register();
scmi_reset_register();
scmi_sensors_register();
scmi_voltage_register();
scmi_system_register();
scmi_powercap_register();
return platform_driver_register(&scmi_driver);
}
module_init(scmi_driver_init);
static void __exit scmi_driver_exit(void)
{
scmi_base_unregister();
scmi_clock_unregister();
scmi_perf_unregister();
scmi_power_unregister();
scmi_reset_unregister();
scmi_sensors_unregister();
scmi_voltage_unregister();
scmi_system_unregister();
scmi_powercap_unregister();
scmi_transports_exit();
platform_driver_unregister(&scmi_driver);
debugfs_remove_recursive(scmi_top_dentry);
}
module_exit(scmi_driver_exit);
MODULE_ALIAS("platform:arm-scmi");
MODULE_AUTHOR("Sudeep Holla <[email protected]>");
MODULE_DESCRIPTION("ARM SCMI protocol driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/firmware/arm_scmi/driver.c |
// SPDX-License-Identifier: GPL-2.0
/*
* System Control and Management Interface (SCMI) Message Mailbox Transport
* driver.
*
* Copyright (C) 2019 ARM Ltd.
*/
#include <linux/err.h>
#include <linux/device.h>
#include <linux/mailbox_client.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include "common.h"
/**
* struct scmi_mailbox - Structure representing a SCMI mailbox transport
*
* @cl: Mailbox Client
* @chan: Transmit/Receive mailbox uni/bi-directional channel
* @chan_receiver: Optional Receiver mailbox unidirectional channel
* @cinfo: SCMI channel info
* @shmem: Transmit/Receive shared memory area
*/
struct scmi_mailbox {
struct mbox_client cl;
struct mbox_chan *chan;
struct mbox_chan *chan_receiver;
struct scmi_chan_info *cinfo;
struct scmi_shared_mem __iomem *shmem;
};
#define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl)
static void tx_prepare(struct mbox_client *cl, void *m)
{
struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);
shmem_tx_prepare(smbox->shmem, m, smbox->cinfo);
}
static void rx_callback(struct mbox_client *cl, void *m)
{
struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);
scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem), NULL);
}
static bool mailbox_chan_available(struct device_node *of_node, int idx)
{
int num_mb;
/*
 * Just check if bidirectional channels are involved, and check the
* index accordingly; proper full validation will be made later
* in mailbox_chan_setup().
*/
num_mb = of_count_phandle_with_args(of_node, "mboxes", "#mbox-cells");
if (num_mb == 3 && idx == 1)
idx = 2;
return !of_parse_phandle_with_args(of_node, "mboxes",
"#mbox-cells", idx, NULL);
}
/**
* mailbox_chan_validate - Validate transport configuration and map channels
*
* @cdev: Reference to the underlying transport device carrying the
* of_node descriptor to analyze.
* @a2p_rx_chan: A reference to an optional unidirectional channel to use
* for replies on the a2p channel. Set as zero if not present.
* @p2a_chan: A reference to the optional p2a channel.
* Set as zero if not present.
*
* At first, validate the transport configuration as described in terms of
 * 'mboxes' and 'shmem', then determine which mailbox channel indexes are
 * appropriate to be used in the current configuration.
*
* Return: 0 on Success or error
*/
static int mailbox_chan_validate(struct device *cdev,
int *a2p_rx_chan, int *p2a_chan)
{
int num_mb, num_sh, ret = 0;
struct device_node *np = cdev->of_node;
num_mb = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
num_sh = of_count_phandle_with_args(np, "shmem", NULL);
dev_dbg(cdev, "Found %d mboxes and %d shmems !\n", num_mb, num_sh);
/* Bail out if mboxes and shmem descriptors are inconsistent */
if (num_mb <= 0 || num_sh <= 0 || num_sh > 2 || num_mb > 3 ||
(num_mb == 1 && num_sh != 1) || (num_mb == 3 && num_sh != 2)) {
dev_warn(cdev,
"Invalid channel descriptor for '%s' - mbs:%d shm:%d\n",
of_node_full_name(np), num_mb, num_sh);
return -EINVAL;
}
/* Bail out if provided shmem descriptors do not refer to distinct areas */
if (num_sh > 1) {
struct device_node *np_tx, *np_rx;
np_tx = of_parse_phandle(np, "shmem", 0);
np_rx = of_parse_phandle(np, "shmem", 1);
if (!np_tx || !np_rx || np_tx == np_rx) {
dev_warn(cdev, "Invalid shmem descriptor for '%s'\n",
of_node_full_name(np));
ret = -EINVAL;
}
of_node_put(np_tx);
of_node_put(np_rx);
}
/* Calculate channels IDs to use depending on mboxes/shmem layout */
if (!ret) {
switch (num_mb) {
case 1:
*a2p_rx_chan = 0;
*p2a_chan = 0;
break;
case 2:
if (num_sh == 2) {
*a2p_rx_chan = 0;
*p2a_chan = 1;
} else {
*a2p_rx_chan = 1;
*p2a_chan = 0;
}
break;
case 3:
*a2p_rx_chan = 1;
*p2a_chan = 2;
break;
}
}
return ret;
}
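/*
 * Illustrative DT sketches only (phandle labels and mailbox specifier cells
 * are hypothetical and controller dependent): the layouts accepted above
 * correspond to descriptions like the following.
 *
 *   1 mbox / 1 shmem  - single bidirectional a2p channel:
 *	mboxes = <&mhu 0>;
 *	shmem = <&cpu_scp_mem0>;
 *
 *   2 mboxes / 1 shmem - a2p channel plus dedicated a2p reply doorbell:
 *	mboxes = <&mhu 0>, <&mhu 1>;
 *	shmem = <&cpu_scp_mem0>;
 *
 *   2 mboxes / 2 shmem - distinct a2p and p2a channels:
 *	mboxes = <&mhu 0>, <&mhu 1>;
 *	shmem = <&cpu_scp_mem0>, <&cpu_scp_mem1>;
 *
 *   3 mboxes / 2 shmem - a2p, a2p reply doorbell and p2a channels:
 *	mboxes = <&mhu 0>, <&mhu 1>, <&mhu 2>;
 *	shmem = <&cpu_scp_mem0>, <&cpu_scp_mem1>;
 */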
static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
bool tx)
{
const char *desc = tx ? "Tx" : "Rx";
struct device *cdev = cinfo->dev;
struct scmi_mailbox *smbox;
struct device_node *shmem;
int ret, a2p_rx_chan, p2a_chan, idx = tx ? 0 : 1;
struct mbox_client *cl;
resource_size_t size;
struct resource res;
ret = mailbox_chan_validate(cdev, &a2p_rx_chan, &p2a_chan);
if (ret)
return ret;
if (!tx && !p2a_chan)
return -ENODEV;
smbox = devm_kzalloc(dev, sizeof(*smbox), GFP_KERNEL);
if (!smbox)
return -ENOMEM;
shmem = of_parse_phandle(cdev->of_node, "shmem", idx);
if (!of_device_is_compatible(shmem, "arm,scmi-shmem")) {
of_node_put(shmem);
return -ENXIO;
}
ret = of_address_to_resource(shmem, 0, &res);
of_node_put(shmem);
if (ret) {
dev_err(cdev, "failed to get SCMI %s shared memory\n", desc);
return ret;
}
size = resource_size(&res);
smbox->shmem = devm_ioremap(dev, res.start, size);
if (!smbox->shmem) {
dev_err(dev, "failed to ioremap SCMI %s shared memory\n", desc);
return -EADDRNOTAVAIL;
}
cl = &smbox->cl;
cl->dev = cdev;
cl->tx_prepare = tx ? tx_prepare : NULL;
cl->rx_callback = rx_callback;
cl->tx_block = false;
cl->knows_txdone = tx;
smbox->chan = mbox_request_channel(cl, tx ? 0 : p2a_chan);
if (IS_ERR(smbox->chan)) {
ret = PTR_ERR(smbox->chan);
if (ret != -EPROBE_DEFER)
dev_err(cdev,
"failed to request SCMI %s mailbox\n", desc);
return ret;
}
/* Additional unidirectional channel for TX if needed */
if (tx && a2p_rx_chan) {
smbox->chan_receiver = mbox_request_channel(cl, a2p_rx_chan);
if (IS_ERR(smbox->chan_receiver)) {
ret = PTR_ERR(smbox->chan_receiver);
if (ret != -EPROBE_DEFER)
dev_err(cdev, "failed to request SCMI Tx Receiver mailbox\n");
return ret;
}
}
cinfo->transport_info = smbox;
smbox->cinfo = cinfo;
return 0;
}
static int mailbox_chan_free(int id, void *p, void *data)
{
struct scmi_chan_info *cinfo = p;
struct scmi_mailbox *smbox = cinfo->transport_info;
if (smbox && !IS_ERR(smbox->chan)) {
mbox_free_channel(smbox->chan);
mbox_free_channel(smbox->chan_receiver);
cinfo->transport_info = NULL;
smbox->chan = NULL;
smbox->chan_receiver = NULL;
smbox->cinfo = NULL;
}
return 0;
}
static int mailbox_send_message(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer)
{
struct scmi_mailbox *smbox = cinfo->transport_info;
int ret;
ret = mbox_send_message(smbox->chan, xfer);
/* mbox_send_message returns a non-negative value on success, so reset it */
if (ret > 0)
ret = 0;
return ret;
}
static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret,
struct scmi_xfer *__unused)
{
struct scmi_mailbox *smbox = cinfo->transport_info;
/*
* NOTE: we might prefer not to need the mailbox ticker to manage the
* transfer queueing since the protocol layer queues things by itself.
* Unfortunately, we have to kick the mailbox framework after we have
* received our message.
*/
mbox_client_txdone(smbox->chan, ret);
}
static void mailbox_fetch_response(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer)
{
struct scmi_mailbox *smbox = cinfo->transport_info;
shmem_fetch_response(smbox->shmem, xfer);
}
static void mailbox_fetch_notification(struct scmi_chan_info *cinfo,
size_t max_len, struct scmi_xfer *xfer)
{
struct scmi_mailbox *smbox = cinfo->transport_info;
shmem_fetch_notification(smbox->shmem, max_len, xfer);
}
static void mailbox_clear_channel(struct scmi_chan_info *cinfo)
{
struct scmi_mailbox *smbox = cinfo->transport_info;
shmem_clear_channel(smbox->shmem);
}
static bool
mailbox_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
{
struct scmi_mailbox *smbox = cinfo->transport_info;
return shmem_poll_done(smbox->shmem, xfer);
}
static const struct scmi_transport_ops scmi_mailbox_ops = {
.chan_available = mailbox_chan_available,
.chan_setup = mailbox_chan_setup,
.chan_free = mailbox_chan_free,
.send_message = mailbox_send_message,
.mark_txdone = mailbox_mark_txdone,
.fetch_response = mailbox_fetch_response,
.fetch_notification = mailbox_fetch_notification,
.clear_channel = mailbox_clear_channel,
.poll_done = mailbox_poll_done,
};
const struct scmi_desc scmi_mailbox_desc = {
.ops = &scmi_mailbox_ops,
.max_rx_timeout_ms = 30, /* We may increase this if required */
.max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */
.max_msg_size = 128,
};
| linux-master | drivers/firmware/arm_scmi/mailbox.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2015 ARM Limited
*/
#define pr_fmt(fmt) "psci: " fmt
#include <linux/acpi.h>
#include <linux/arm-smccc.h>
#include <linux/cpuidle.h>
#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/printk.h>
#include <linux/psci.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <uapi/linux/psci.h>
#include <asm/cpuidle.h>
#include <asm/cputype.h>
#include <asm/hypervisor.h>
#include <asm/system_misc.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
/*
* While a 64-bit OS can make calls with SMC32 calling conventions, for some
* calls it is necessary to use SMC64 to pass or return 64-bit values.
* For such calls PSCI_FN_NATIVE(version, name) will choose the appropriate
* (native-width) function ID.
*/
#ifdef CONFIG_64BIT
#define PSCI_FN_NATIVE(version, name) PSCI_##version##_FN64_##name
#else
#define PSCI_FN_NATIVE(version, name) PSCI_##version##_FN_##name
#endif
/*
* The CPU any Trusted OS is resident on. The trusted OS may reject CPU_OFF
* calls to its resident CPU, so we must avoid issuing those. We never migrate
* a Trusted OS even if it claims to be capable of migration -- doing so will
* require cooperation with a Trusted OS driver.
*/
static int resident_cpu = -1;
struct psci_operations psci_ops;
static enum arm_smccc_conduit psci_conduit = SMCCC_CONDUIT_NONE;
bool psci_tos_resident_on(int cpu)
{
return cpu == resident_cpu;
}
typedef unsigned long (psci_fn)(unsigned long, unsigned long,
unsigned long, unsigned long);
static psci_fn *invoke_psci_fn;
static struct psci_0_1_function_ids psci_0_1_function_ids;
struct psci_0_1_function_ids get_psci_0_1_function_ids(void)
{
return psci_0_1_function_ids;
}
#define PSCI_0_2_POWER_STATE_MASK \
(PSCI_0_2_POWER_STATE_ID_MASK | \
PSCI_0_2_POWER_STATE_TYPE_MASK | \
PSCI_0_2_POWER_STATE_AFFL_MASK)
#define PSCI_1_0_EXT_POWER_STATE_MASK \
(PSCI_1_0_EXT_POWER_STATE_ID_MASK | \
PSCI_1_0_EXT_POWER_STATE_TYPE_MASK)
static u32 psci_cpu_suspend_feature;
static bool psci_system_reset2_supported;
static inline bool psci_has_ext_power_state(void)
{
return psci_cpu_suspend_feature &
PSCI_1_0_FEATURES_CPU_SUSPEND_PF_MASK;
}
bool psci_has_osi_support(void)
{
return psci_cpu_suspend_feature & PSCI_1_0_OS_INITIATED;
}
static inline bool psci_power_state_loses_context(u32 state)
{
const u32 mask = psci_has_ext_power_state() ?
PSCI_1_0_EXT_POWER_STATE_TYPE_MASK :
PSCI_0_2_POWER_STATE_TYPE_MASK;
return state & mask;
}
bool psci_power_state_is_valid(u32 state)
{
const u32 valid_mask = psci_has_ext_power_state() ?
PSCI_1_0_EXT_POWER_STATE_MASK :
PSCI_0_2_POWER_STATE_MASK;
return !(state & ~valid_mask);
}
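/*
 * Illustrative sketch only, assuming the original (non-extended) StateID
 * format: a powerdown-type state sets the type field, so both helpers above
 * agree it is valid and that it loses context.
 *
 *	u32 state = PSCI_0_2_POWER_STATE_TYPE_MASK;
 *
 *	if (psci_power_state_is_valid(state) &&
 *	    psci_power_state_loses_context(state))
 *		pr_debug("state 0x%x needs full context save/restore\n", state);
 */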
static __always_inline unsigned long
__invoke_psci_fn_hvc(unsigned long function_id,
unsigned long arg0, unsigned long arg1,
unsigned long arg2)
{
struct arm_smccc_res res;
arm_smccc_hvc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res);
return res.a0;
}
static __always_inline unsigned long
__invoke_psci_fn_smc(unsigned long function_id,
unsigned long arg0, unsigned long arg1,
unsigned long arg2)
{
struct arm_smccc_res res;
arm_smccc_smc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res);
return res.a0;
}
static __always_inline int psci_to_linux_errno(int errno)
{
switch (errno) {
case PSCI_RET_SUCCESS:
return 0;
case PSCI_RET_NOT_SUPPORTED:
return -EOPNOTSUPP;
case PSCI_RET_INVALID_PARAMS:
case PSCI_RET_INVALID_ADDRESS:
return -EINVAL;
case PSCI_RET_DENIED:
return -EPERM;
}
return -EINVAL;
}
static u32 psci_0_1_get_version(void)
{
return PSCI_VERSION(0, 1);
}
static u32 psci_0_2_get_version(void)
{
return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
}
int psci_set_osi_mode(bool enable)
{
unsigned long suspend_mode;
int err;
suspend_mode = enable ? PSCI_1_0_SUSPEND_MODE_OSI :
PSCI_1_0_SUSPEND_MODE_PC;
err = invoke_psci_fn(PSCI_1_0_FN_SET_SUSPEND_MODE, suspend_mode, 0, 0);
if (err < 0)
pr_info(FW_BUG "failed to set %s mode: %d\n",
enable ? "OSI" : "PC", err);
return psci_to_linux_errno(err);
}
static __always_inline int
__psci_cpu_suspend(u32 fn, u32 state, unsigned long entry_point)
{
int err;
err = invoke_psci_fn(fn, state, entry_point, 0);
return psci_to_linux_errno(err);
}
static __always_inline int
psci_0_1_cpu_suspend(u32 state, unsigned long entry_point)
{
return __psci_cpu_suspend(psci_0_1_function_ids.cpu_suspend,
state, entry_point);
}
static __always_inline int
psci_0_2_cpu_suspend(u32 state, unsigned long entry_point)
{
return __psci_cpu_suspend(PSCI_FN_NATIVE(0_2, CPU_SUSPEND),
state, entry_point);
}
static int __psci_cpu_off(u32 fn, u32 state)
{
int err;
err = invoke_psci_fn(fn, state, 0, 0);
return psci_to_linux_errno(err);
}
static int psci_0_1_cpu_off(u32 state)
{
return __psci_cpu_off(psci_0_1_function_ids.cpu_off, state);
}
static int psci_0_2_cpu_off(u32 state)
{
return __psci_cpu_off(PSCI_0_2_FN_CPU_OFF, state);
}
static int __psci_cpu_on(u32 fn, unsigned long cpuid, unsigned long entry_point)
{
int err;
err = invoke_psci_fn(fn, cpuid, entry_point, 0);
return psci_to_linux_errno(err);
}
static int psci_0_1_cpu_on(unsigned long cpuid, unsigned long entry_point)
{
return __psci_cpu_on(psci_0_1_function_ids.cpu_on, cpuid, entry_point);
}
static int psci_0_2_cpu_on(unsigned long cpuid, unsigned long entry_point)
{
return __psci_cpu_on(PSCI_FN_NATIVE(0_2, CPU_ON), cpuid, entry_point);
}
static int __psci_migrate(u32 fn, unsigned long cpuid)
{
int err;
err = invoke_psci_fn(fn, cpuid, 0, 0);
return psci_to_linux_errno(err);
}
static int psci_0_1_migrate(unsigned long cpuid)
{
return __psci_migrate(psci_0_1_function_ids.migrate, cpuid);
}
static int psci_0_2_migrate(unsigned long cpuid)
{
return __psci_migrate(PSCI_FN_NATIVE(0_2, MIGRATE), cpuid);
}
static int psci_affinity_info(unsigned long target_affinity,
unsigned long lowest_affinity_level)
{
return invoke_psci_fn(PSCI_FN_NATIVE(0_2, AFFINITY_INFO),
target_affinity, lowest_affinity_level, 0);
}
static int psci_migrate_info_type(void)
{
return invoke_psci_fn(PSCI_0_2_FN_MIGRATE_INFO_TYPE, 0, 0, 0);
}
static unsigned long psci_migrate_info_up_cpu(void)
{
return invoke_psci_fn(PSCI_FN_NATIVE(0_2, MIGRATE_INFO_UP_CPU),
0, 0, 0);
}
static void set_conduit(enum arm_smccc_conduit conduit)
{
switch (conduit) {
case SMCCC_CONDUIT_HVC:
invoke_psci_fn = __invoke_psci_fn_hvc;
break;
case SMCCC_CONDUIT_SMC:
invoke_psci_fn = __invoke_psci_fn_smc;
break;
default:
WARN(1, "Unexpected PSCI conduit %d\n", conduit);
}
psci_conduit = conduit;
}
static int get_set_conduit_method(const struct device_node *np)
{
const char *method;
pr_info("probing for conduit method from DT.\n");
if (of_property_read_string(np, "method", &method)) {
pr_warn("missing \"method\" property\n");
return -ENXIO;
}
if (!strcmp("hvc", method)) {
set_conduit(SMCCC_CONDUIT_HVC);
} else if (!strcmp("smc", method)) {
set_conduit(SMCCC_CONDUIT_SMC);
} else {
pr_warn("invalid \"method\" property: %s\n", method);
return -EINVAL;
}
return 0;
}
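/*
 * Illustrative sketch only: the conduit probed above comes from the
 * standard PSCI DT node, e.g.:
 *
 *	psci {
 *		compatible = "arm,psci-0.2";
 *		method = "smc";
 *	};
 */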
static int psci_sys_reset(struct notifier_block *nb, unsigned long action,
void *data)
{
if ((reboot_mode == REBOOT_WARM || reboot_mode == REBOOT_SOFT) &&
psci_system_reset2_supported) {
/*
* reset_type[31] = 0 (architectural)
* reset_type[30:0] = 0 (SYSTEM_WARM_RESET)
* cookie = 0 (ignored by the implementation)
*/
invoke_psci_fn(PSCI_FN_NATIVE(1_1, SYSTEM_RESET2), 0, 0, 0);
} else {
invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
}
return NOTIFY_DONE;
}
static struct notifier_block psci_sys_reset_nb = {
.notifier_call = psci_sys_reset,
.priority = 129,
};
static void psci_sys_poweroff(void)
{
invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
}
static int psci_features(u32 psci_func_id)
{
return invoke_psci_fn(PSCI_1_0_FN_PSCI_FEATURES,
psci_func_id, 0, 0);
}
#ifdef CONFIG_DEBUG_FS
#define PSCI_ID(ver, _name) \
{ .fn = PSCI_##ver##_FN_##_name, .name = #_name, }
#define PSCI_ID_NATIVE(ver, _name) \
{ .fn = PSCI_FN_NATIVE(ver, _name), .name = #_name, }
/* A table of all optional functions */
static const struct {
u32 fn;
const char *name;
} psci_fn_ids[] = {
PSCI_ID_NATIVE(0_2, MIGRATE),
PSCI_ID(0_2, MIGRATE_INFO_TYPE),
PSCI_ID_NATIVE(0_2, MIGRATE_INFO_UP_CPU),
PSCI_ID(1_0, CPU_FREEZE),
PSCI_ID_NATIVE(1_0, CPU_DEFAULT_SUSPEND),
PSCI_ID_NATIVE(1_0, NODE_HW_STATE),
PSCI_ID_NATIVE(1_0, SYSTEM_SUSPEND),
PSCI_ID(1_0, SET_SUSPEND_MODE),
PSCI_ID_NATIVE(1_0, STAT_RESIDENCY),
PSCI_ID_NATIVE(1_0, STAT_COUNT),
PSCI_ID_NATIVE(1_1, SYSTEM_RESET2),
PSCI_ID(1_1, MEM_PROTECT),
PSCI_ID_NATIVE(1_1, MEM_PROTECT_CHECK_RANGE),
};
static int psci_debugfs_read(struct seq_file *s, void *data)
{
int feature, type, i;
u32 ver;
ver = psci_ops.get_version();
seq_printf(s, "PSCIv%d.%d\n",
PSCI_VERSION_MAJOR(ver),
PSCI_VERSION_MINOR(ver));
/* PSCI_FEATURES is available only starting from 1.0 */
if (PSCI_VERSION_MAJOR(ver) < 1)
return 0;
feature = psci_features(ARM_SMCCC_VERSION_FUNC_ID);
if (feature != PSCI_RET_NOT_SUPPORTED) {
ver = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0);
seq_printf(s, "SMC Calling Convention v%d.%d\n",
PSCI_VERSION_MAJOR(ver),
PSCI_VERSION_MINOR(ver));
} else {
seq_puts(s, "SMC Calling Convention v1.0 is assumed\n");
}
feature = psci_features(PSCI_FN_NATIVE(0_2, CPU_SUSPEND));
if (feature < 0) {
seq_printf(s, "PSCI_FEATURES(CPU_SUSPEND) error (%d)\n", feature);
} else {
seq_printf(s, "OSI is %ssupported\n",
(feature & BIT(0)) ? "" : "not ");
seq_printf(s, "%s StateID format is used\n",
(feature & BIT(1)) ? "Extended" : "Original");
}
type = psci_ops.migrate_info_type();
if (type == PSCI_0_2_TOS_UP_MIGRATE ||
type == PSCI_0_2_TOS_UP_NO_MIGRATE) {
unsigned long cpuid;
seq_printf(s, "Trusted OS %smigrate capable\n",
type == PSCI_0_2_TOS_UP_NO_MIGRATE ? "not " : "");
cpuid = psci_migrate_info_up_cpu();
seq_printf(s, "Trusted OS resident on physical CPU 0x%lx (#%d)\n",
cpuid, resident_cpu);
} else if (type == PSCI_0_2_TOS_MP) {
seq_puts(s, "Trusted OS migration not required\n");
} else {
if (type != PSCI_RET_NOT_SUPPORTED)
seq_printf(s, "MIGRATE_INFO_TYPE returned unknown type (%d)\n", type);
}
for (i = 0; i < ARRAY_SIZE(psci_fn_ids); i++) {
feature = psci_features(psci_fn_ids[i].fn);
if (feature == PSCI_RET_NOT_SUPPORTED)
continue;
if (feature < 0)
seq_printf(s, "PSCI_FEATURES(%s) error (%d)\n",
psci_fn_ids[i].name, feature);
else
seq_printf(s, "%s is supported\n", psci_fn_ids[i].name);
}
return 0;
}
static int psci_debugfs_open(struct inode *inode, struct file *f)
{
return single_open(f, psci_debugfs_read, NULL);
}
static const struct file_operations psci_debugfs_ops = {
.owner = THIS_MODULE,
.open = psci_debugfs_open,
.release = single_release,
.read = seq_read,
.llseek = seq_lseek
};
static int __init psci_debugfs_init(void)
{
if (!invoke_psci_fn || !psci_ops.get_version)
return 0;
return PTR_ERR_OR_ZERO(debugfs_create_file("psci", 0444, NULL, NULL,
&psci_debugfs_ops));
}
late_initcall(psci_debugfs_init)
#endif
#ifdef CONFIG_CPU_IDLE
static noinstr int psci_suspend_finisher(unsigned long state)
{
u32 power_state = state;
phys_addr_t pa_cpu_resume;
pa_cpu_resume = __pa_symbol_nodebug((unsigned long)cpu_resume);
return psci_ops.cpu_suspend(power_state, pa_cpu_resume);
}
int psci_cpu_suspend_enter(u32 state)
{
int ret;
if (!psci_power_state_loses_context(state)) {
struct arm_cpuidle_irq_context context;
ct_cpuidle_enter();
arm_cpuidle_save_irq_context(&context);
ret = psci_ops.cpu_suspend(state, 0);
arm_cpuidle_restore_irq_context(&context);
ct_cpuidle_exit();
} else {
/*
* ARM64 cpu_suspend() wants to do ct_cpuidle_*() itself.
*/
if (!IS_ENABLED(CONFIG_ARM64))
ct_cpuidle_enter();
ret = cpu_suspend(state, psci_suspend_finisher);
if (!IS_ENABLED(CONFIG_ARM64))
ct_cpuidle_exit();
}
return ret;
}
#endif
static int psci_system_suspend(unsigned long unused)
{
phys_addr_t pa_cpu_resume = __pa_symbol(cpu_resume);
return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
pa_cpu_resume, 0, 0);
}
static int psci_system_suspend_enter(suspend_state_t state)
{
return cpu_suspend(0, psci_system_suspend);
}
static const struct platform_suspend_ops psci_suspend_ops = {
.valid = suspend_valid_only_mem,
.enter = psci_system_suspend_enter,
};
static void __init psci_init_system_reset2(void)
{
int ret;
ret = psci_features(PSCI_FN_NATIVE(1_1, SYSTEM_RESET2));
if (ret != PSCI_RET_NOT_SUPPORTED)
psci_system_reset2_supported = true;
}
static void __init psci_init_system_suspend(void)
{
int ret;
if (!IS_ENABLED(CONFIG_SUSPEND))
return;
ret = psci_features(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND));
if (ret != PSCI_RET_NOT_SUPPORTED)
suspend_set_ops(&psci_suspend_ops);
}
static void __init psci_init_cpu_suspend(void)
{
int feature = psci_features(PSCI_FN_NATIVE(0_2, CPU_SUSPEND));
if (feature != PSCI_RET_NOT_SUPPORTED)
psci_cpu_suspend_feature = feature;
}
/*
* Detect the presence of a resident Trusted OS which may cause CPU_OFF to
* return DENIED (which would be fatal).
*/
static void __init psci_init_migrate(void)
{
unsigned long cpuid;
int type, cpu = -1;
type = psci_ops.migrate_info_type();
if (type == PSCI_0_2_TOS_MP) {
pr_info("Trusted OS migration not required\n");
return;
}
if (type == PSCI_RET_NOT_SUPPORTED) {
pr_info("MIGRATE_INFO_TYPE not supported.\n");
return;
}
if (type != PSCI_0_2_TOS_UP_MIGRATE &&
type != PSCI_0_2_TOS_UP_NO_MIGRATE) {
pr_err("MIGRATE_INFO_TYPE returned unknown type (%d)\n", type);
return;
}
cpuid = psci_migrate_info_up_cpu();
if (cpuid & ~MPIDR_HWID_BITMASK) {
pr_warn("MIGRATE_INFO_UP_CPU reported invalid physical ID (0x%lx)\n",
cpuid);
return;
}
cpu = get_logical_index(cpuid);
resident_cpu = cpu >= 0 ? cpu : -1;
pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
}
static void __init psci_init_smccc(void)
{
u32 ver = ARM_SMCCC_VERSION_1_0;
int feature;
feature = psci_features(ARM_SMCCC_VERSION_FUNC_ID);
if (feature != PSCI_RET_NOT_SUPPORTED) {
u32 ret;
ret = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0);
if (ret >= ARM_SMCCC_VERSION_1_1) {
arm_smccc_version_init(ret, psci_conduit);
ver = ret;
}
}
/*
* Conveniently, the SMCCC and PSCI versions are encoded the
* same way. No, this isn't accidental.
*/
pr_info("SMC Calling Convention v%d.%d\n",
PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver));
}
static void __init psci_0_2_set_functions(void)
{
pr_info("Using standard PSCI v0.2 function IDs\n");
psci_ops = (struct psci_operations){
.get_version = psci_0_2_get_version,
.cpu_suspend = psci_0_2_cpu_suspend,
.cpu_off = psci_0_2_cpu_off,
.cpu_on = psci_0_2_cpu_on,
.migrate = psci_0_2_migrate,
.affinity_info = psci_affinity_info,
.migrate_info_type = psci_migrate_info_type,
};
register_restart_handler(&psci_sys_reset_nb);
pm_power_off = psci_sys_poweroff;
}
/*
* Probe function for PSCI firmware versions >= 0.2
*/
static int __init psci_probe(void)
{
u32 ver = psci_0_2_get_version();
pr_info("PSCIv%d.%d detected in firmware.\n",
PSCI_VERSION_MAJOR(ver),
PSCI_VERSION_MINOR(ver));
if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2) {
pr_err("Conflicting PSCI version detected.\n");
return -EINVAL;
}
psci_0_2_set_functions();
psci_init_migrate();
if (PSCI_VERSION_MAJOR(ver) >= 1) {
psci_init_smccc();
psci_init_cpu_suspend();
psci_init_system_suspend();
psci_init_system_reset2();
kvm_init_hyp_services();
}
return 0;
}
typedef int (*psci_initcall_t)(const struct device_node *);
/*
* PSCI init function for PSCI versions >=0.2
*
 * Probe based on the PSCI_VERSION function
*/
static int __init psci_0_2_init(const struct device_node *np)
{
int err;
err = get_set_conduit_method(np);
if (err)
return err;
/*
* Starting with v0.2, the PSCI specification introduced a call
* (PSCI_VERSION) that allows probing the firmware version, so
* that PSCI function IDs and version specific initialization
 * can be carried out according to the specific version reported
 * by firmware.
*/
return psci_probe();
}
/*
 * For PSCI < v0.2, the PSCI function IDs are read from the DT.
*/
static int __init psci_0_1_init(const struct device_node *np)
{
u32 id;
int err;
err = get_set_conduit_method(np);
if (err)
return err;
pr_info("Using PSCI v0.1 Function IDs from DT\n");
psci_ops.get_version = psci_0_1_get_version;
if (!of_property_read_u32(np, "cpu_suspend", &id)) {
psci_0_1_function_ids.cpu_suspend = id;
psci_ops.cpu_suspend = psci_0_1_cpu_suspend;
}
if (!of_property_read_u32(np, "cpu_off", &id)) {
psci_0_1_function_ids.cpu_off = id;
psci_ops.cpu_off = psci_0_1_cpu_off;
}
if (!of_property_read_u32(np, "cpu_on", &id)) {
psci_0_1_function_ids.cpu_on = id;
psci_ops.cpu_on = psci_0_1_cpu_on;
}
if (!of_property_read_u32(np, "migrate", &id)) {
psci_0_1_function_ids.migrate = id;
psci_ops.migrate = psci_0_1_migrate;
}
return 0;
}
static int __init psci_1_0_init(const struct device_node *np)
{
int err;
err = psci_0_2_init(np);
if (err)
return err;
if (psci_has_osi_support()) {
pr_info("OSI mode supported.\n");
/* Default to PC mode. */
psci_set_osi_mode(false);
}
return 0;
}
static const struct of_device_id psci_of_match[] __initconst = {
{ .compatible = "arm,psci", .data = psci_0_1_init},
{ .compatible = "arm,psci-0.2", .data = psci_0_2_init},
{ .compatible = "arm,psci-1.0", .data = psci_1_0_init},
{},
};
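/*
 * Illustrative example: a minimal devicetree node that would match the
 * "arm,psci-0.2" entry above and select the SMC conduit. The node below
 * is a hypothetical sketch for illustration only:
 *
 *	psci {
 *		compatible = "arm,psci-0.2";
 *		method = "smc";
 *	};
 */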
int __init psci_dt_init(void)
{
struct device_node *np;
const struct of_device_id *matched_np;
psci_initcall_t init_fn;
int ret;
np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
if (!np || !of_device_is_available(np))
return -ENODEV;
init_fn = (psci_initcall_t)matched_np->data;
ret = init_fn(np);
of_node_put(np);
return ret;
}
#ifdef CONFIG_ACPI
/*
 * We use PSCI 0.2+ when ACPI is deployed on ARM64, as explicitly
 * stated in the SBBR.
*/
int __init psci_acpi_init(void)
{
if (!acpi_psci_present()) {
pr_info("is not implemented in ACPI.\n");
return -EOPNOTSUPP;
}
pr_info("probing for conduit method from ACPI.\n");
if (acpi_psci_use_hvc())
set_conduit(SMCCC_CONDUIT_HVC);
else
set_conduit(SMCCC_CONDUIT_SMC);
return psci_probe();
}
#endif
| linux-master | drivers/firmware/psci/psci.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2016 ARM Limited
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/psci.h>
#include <linux/slab.h>
#include <linux/tick.h>
#include <linux/topology.h>
#include <asm/cpuidle.h>
#include <uapi/linux/psci.h>
#define NUM_SUSPEND_CYCLE (10)
static unsigned int nb_available_cpus;
static int tos_resident_cpu = -1;
static atomic_t nb_active_threads;
static struct completion suspend_threads_started =
COMPLETION_INITIALIZER(suspend_threads_started);
static struct completion suspend_threads_done =
COMPLETION_INITIALIZER(suspend_threads_done);
/*
* We assume that PSCI operations are used if they are available. This is not
* necessarily true on arm64, since the decision is based on the
* "enable-method" property of each CPU in the DT, but given that there is no
* arch-specific way to check this, we assume that the DT is sensible.
*/
static int psci_ops_check(void)
{
int migrate_type = -1;
int cpu;
if (!(psci_ops.cpu_off && psci_ops.cpu_on && psci_ops.cpu_suspend)) {
pr_warn("Missing PSCI operations, aborting tests\n");
return -EOPNOTSUPP;
}
if (psci_ops.migrate_info_type)
migrate_type = psci_ops.migrate_info_type();
if (migrate_type == PSCI_0_2_TOS_UP_MIGRATE ||
migrate_type == PSCI_0_2_TOS_UP_NO_MIGRATE) {
/* There is a UP Trusted OS, find on which core it resides. */
for_each_online_cpu(cpu)
if (psci_tos_resident_on(cpu)) {
tos_resident_cpu = cpu;
break;
}
if (tos_resident_cpu == -1)
pr_warn("UP Trusted OS resides on no online CPU\n");
}
return 0;
}
/*
* offlined_cpus is a temporary array but passing it as an argument avoids
* multiple allocations.
*/
static unsigned int down_and_up_cpus(const struct cpumask *cpus,
struct cpumask *offlined_cpus)
{
int cpu;
int err = 0;
cpumask_clear(offlined_cpus);
/* Try to power down all CPUs in the mask. */
for_each_cpu(cpu, cpus) {
int ret = remove_cpu(cpu);
/*
* cpu_down() checks the number of online CPUs before the TOS
* resident CPU.
*/
if (cpumask_weight(offlined_cpus) + 1 == nb_available_cpus) {
if (ret != -EBUSY) {
pr_err("Unexpected return code %d while trying "
"to power down last online CPU %d\n",
ret, cpu);
++err;
}
} else if (cpu == tos_resident_cpu) {
if (ret != -EPERM) {
pr_err("Unexpected return code %d while trying "
"to power down TOS resident CPU %d\n",
ret, cpu);
++err;
}
} else if (ret != 0) {
pr_err("Error occurred (%d) while trying "
"to power down CPU %d\n", ret, cpu);
++err;
}
if (ret == 0)
cpumask_set_cpu(cpu, offlined_cpus);
}
/* Try to power up all the CPUs that have been offlined. */
for_each_cpu(cpu, offlined_cpus) {
int ret = add_cpu(cpu);
if (ret != 0) {
pr_err("Error occurred (%d) while trying "
"to power up CPU %d\n", ret, cpu);
++err;
} else {
cpumask_clear_cpu(cpu, offlined_cpus);
}
}
/*
* Something went bad at some point and some CPUs could not be turned
* back on.
*/
WARN_ON(!cpumask_empty(offlined_cpus) ||
num_online_cpus() != nb_available_cpus);
return err;
}
static void free_cpu_groups(int num, cpumask_var_t **pcpu_groups)
{
int i;
cpumask_var_t *cpu_groups = *pcpu_groups;
for (i = 0; i < num; ++i)
free_cpumask_var(cpu_groups[i]);
kfree(cpu_groups);
}
static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
{
int num_groups = 0;
cpumask_var_t tmp, *cpu_groups;
if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
return -ENOMEM;
cpu_groups = kcalloc(nb_available_cpus, sizeof(*cpu_groups),
GFP_KERNEL);
if (!cpu_groups) {
free_cpumask_var(tmp);
return -ENOMEM;
}
cpumask_copy(tmp, cpu_online_mask);
while (!cpumask_empty(tmp)) {
const struct cpumask *cpu_group =
topology_core_cpumask(cpumask_any(tmp));
if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) {
free_cpumask_var(tmp);
free_cpu_groups(num_groups, &cpu_groups);
return -ENOMEM;
}
cpumask_copy(cpu_groups[num_groups++], cpu_group);
cpumask_andnot(tmp, tmp, cpu_group);
}
free_cpumask_var(tmp);
*pcpu_groups = cpu_groups;
return num_groups;
}
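/*
 * For illustration: on a hypothetical system with two clusters of four
 * CPUs each, where topology_core_cpumask() groups CPUs by cluster,
 * alloc_init_cpu_groups() would return num_groups == 2 with
 * cpu_groups[0] spanning CPUs 0-3 and cpu_groups[1] spanning CPUs 4-7.
 */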
static int hotplug_tests(void)
{
int i, nb_cpu_group, err = -ENOMEM;
cpumask_var_t offlined_cpus, *cpu_groups;
char *page_buf;
if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
return err;
nb_cpu_group = alloc_init_cpu_groups(&cpu_groups);
if (nb_cpu_group < 0)
goto out_free_cpus;
page_buf = (char *)__get_free_page(GFP_KERNEL);
if (!page_buf)
goto out_free_cpu_groups;
/*
 * Of course the last CPU cannot be powered down, and cpu_down() should
 * refuse to do so.
*/
pr_info("Trying to turn off and on again all CPUs\n");
err = down_and_up_cpus(cpu_online_mask, offlined_cpus);
/*
* Take down CPUs by cpu group this time. When the last CPU is turned
* off, the cpu group itself should shut down.
*/
for (i = 0; i < nb_cpu_group; ++i) {
ssize_t len = cpumap_print_to_pagebuf(true, page_buf,
cpu_groups[i]);
/* Remove trailing newline. */
page_buf[len - 1] = '\0';
pr_info("Trying to turn off and on again group %d (CPUs %s)\n",
i, page_buf);
err += down_and_up_cpus(cpu_groups[i], offlined_cpus);
}
free_page((unsigned long)page_buf);
out_free_cpu_groups:
free_cpu_groups(nb_cpu_group, &cpu_groups);
out_free_cpus:
free_cpumask_var(offlined_cpus);
return err;
}
static void dummy_callback(struct timer_list *unused) {}
static int suspend_cpu(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct cpuidle_state *state = &drv->states[index];
bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP;
int ret;
arch_cpu_idle_enter();
if (broadcast) {
/*
* The local timer will be shut down, we need to enter tick
* broadcast.
*/
ret = tick_broadcast_enter();
if (ret) {
/*
* In the absence of hardware broadcast mechanism,
* this CPU might be used to broadcast wakeups, which
* may be why entering tick broadcast has failed.
* There is little the kernel can do to work around
* that, so enter WFI instead (idle state 0).
*/
cpu_do_idle();
ret = 0;
goto out_arch_exit;
}
}
ret = state->enter(dev, drv, index);
if (broadcast)
tick_broadcast_exit();
out_arch_exit:
arch_cpu_idle_exit();
return ret;
}
static int suspend_test_thread(void *arg)
{
int cpu = (long)arg;
int i, nb_suspend = 0, nb_shallow_sleep = 0, nb_err = 0;
struct cpuidle_device *dev;
struct cpuidle_driver *drv;
/* No need for an actual callback, we just want to wake up the CPU. */
struct timer_list wakeup_timer;
/* Wait for the main thread to give the start signal. */
wait_for_completion(&suspend_threads_started);
/* Set maximum priority to preempt all other threads on this CPU. */
sched_set_fifo(current);
dev = this_cpu_read(cpuidle_devices);
drv = cpuidle_get_cpu_driver(dev);
pr_info("CPU %d entering suspend cycles, states 1 through %d\n",
cpu, drv->state_count - 1);
timer_setup_on_stack(&wakeup_timer, dummy_callback, 0);
for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) {
int index;
/*
* Test all possible states, except 0 (which is usually WFI and
* doesn't use PSCI).
*/
for (index = 1; index < drv->state_count; ++index) {
int ret;
struct cpuidle_state *state = &drv->states[index];
/*
* Set the timer to wake this CPU up in some time (which
* should be largely sufficient for entering suspend).
* If the local tick is disabled when entering suspend,
* suspend_cpu() takes care of switching to a broadcast
* tick, so the timer will still wake us up.
*/
mod_timer(&wakeup_timer, jiffies +
usecs_to_jiffies(state->target_residency));
/* IRQs must be disabled during suspend operations. */
local_irq_disable();
ret = suspend_cpu(dev, drv, index);
/*
* We have woken up. Re-enable IRQs to handle any
* pending interrupt, do not wait until the end of the
* loop.
*/
local_irq_enable();
if (ret == index) {
++nb_suspend;
} else if (ret >= 0) {
/* We did not enter the expected state. */
++nb_shallow_sleep;
} else {
pr_err("Failed to suspend CPU %d: error %d "
"(requested state %d, cycle %d)\n",
cpu, ret, index, i);
++nb_err;
}
}
}
/*
 * Disable the timer to make sure it cannot fire after the test loop
 * has finished.
*/
del_timer(&wakeup_timer);
destroy_timer_on_stack(&wakeup_timer);
if (atomic_dec_return_relaxed(&nb_active_threads) == 0)
complete(&suspend_threads_done);
for (;;) {
/* Needs to be set first to avoid missing a wakeup. */
set_current_state(TASK_INTERRUPTIBLE);
if (kthread_should_park())
break;
schedule();
}
pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n",
cpu, nb_suspend, nb_shallow_sleep, nb_err);
kthread_parkme();
return nb_err;
}
static int suspend_tests(void)
{
int i, cpu, err = 0;
struct task_struct **threads;
int nb_threads = 0;
threads = kmalloc_array(nb_available_cpus, sizeof(*threads),
GFP_KERNEL);
if (!threads)
return -ENOMEM;
/*
* Stop cpuidle to prevent the idle tasks from entering a deep sleep
* mode, as it might interfere with the suspend threads on other CPUs.
* This does not prevent the suspend threads from using cpuidle (only
* the idle tasks check this status). Take the idle lock so that
* the cpuidle driver and device look-up can be carried out safely.
*/
cpuidle_pause_and_lock();
for_each_online_cpu(cpu) {
struct task_struct *thread;
/* Check that cpuidle is available on that CPU. */
struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
if (!dev || !drv) {
pr_warn("cpuidle not available on CPU %d, ignoring\n",
cpu);
continue;
}
thread = kthread_create_on_cpu(suspend_test_thread,
(void *)(long)cpu, cpu,
"psci_suspend_test");
if (IS_ERR(thread))
pr_err("Failed to create kthread on CPU %d\n", cpu);
else
threads[nb_threads++] = thread;
}
if (nb_threads < 1) {
err = -ENODEV;
goto out;
}
atomic_set(&nb_active_threads, nb_threads);
/*
* Wake up the suspend threads. To avoid the main thread being preempted
* before all the threads have been unparked, the suspend threads will
* wait for the completion of suspend_threads_started.
*/
for (i = 0; i < nb_threads; ++i)
wake_up_process(threads[i]);
complete_all(&suspend_threads_started);
wait_for_completion(&suspend_threads_done);
/* Stop and destroy all threads, get return status. */
for (i = 0; i < nb_threads; ++i) {
err += kthread_park(threads[i]);
err += kthread_stop(threads[i]);
}
out:
cpuidle_resume_and_unlock();
kfree(threads);
return err;
}
static int __init psci_checker(void)
{
int ret;
/*
 * Since we're in an initcall, we assume that all the CPUs that can
 * be onlined have been onlined.
*
* The tests assume that hotplug is enabled but nobody else is using it,
* otherwise the results will be unpredictable. However, since there
* is no userspace yet in initcalls, that should be fine, as long as
* no torture test is running at the same time (see Kconfig).
*/
nb_available_cpus = num_online_cpus();
/* Check PSCI operations are set up and working. */
ret = psci_ops_check();
if (ret)
return ret;
pr_info("PSCI checker started using %u CPUs\n", nb_available_cpus);
pr_info("Starting hotplug tests\n");
ret = hotplug_tests();
if (ret == 0)
pr_info("Hotplug tests passed OK\n");
else if (ret > 0)
pr_err("%d error(s) encountered in hotplug tests\n", ret);
else {
pr_err("Out of memory\n");
return ret;
}
pr_info("Starting suspend tests (%d cycles per state)\n",
NUM_SUSPEND_CYCLE);
ret = suspend_tests();
if (ret == 0)
pr_info("Suspend tests passed OK\n");
else if (ret > 0)
pr_err("%d error(s) encountered in suspend tests\n", ret);
else {
switch (ret) {
case -ENOMEM:
pr_err("Out of memory\n");
break;
case -ENODEV:
pr_warn("Could not start suspend tests on any CPU\n");
break;
}
}
pr_info("PSCI checker completed\n");
return ret < 0 ? ret : 0;
}
late_initcall(psci_checker);
| linux-master | drivers/firmware/psci/psci_checker.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* cs_dsp.c -- Cirrus Logic DSP firmware support
*
* Based on sound/soc/codecs/wm_adsp.c
*
* Copyright 2012 Wolfson Microelectronics plc
* Copyright (C) 2015-2021 Cirrus Logic, Inc. and
* Cirrus Logic International Semiconductor Ltd.
*/
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/firmware/cirrus/cs_dsp.h>
#include <linux/firmware/cirrus/wmfw.h>
#define cs_dsp_err(_dsp, fmt, ...) \
dev_err(_dsp->dev, "%s: " fmt, _dsp->name, ##__VA_ARGS__)
#define cs_dsp_warn(_dsp, fmt, ...) \
dev_warn(_dsp->dev, "%s: " fmt, _dsp->name, ##__VA_ARGS__)
#define cs_dsp_info(_dsp, fmt, ...) \
dev_info(_dsp->dev, "%s: " fmt, _dsp->name, ##__VA_ARGS__)
#define cs_dsp_dbg(_dsp, fmt, ...) \
dev_dbg(_dsp->dev, "%s: " fmt, _dsp->name, ##__VA_ARGS__)
#define ADSP1_CONTROL_1 0x00
#define ADSP1_CONTROL_2 0x02
#define ADSP1_CONTROL_3 0x03
#define ADSP1_CONTROL_4 0x04
#define ADSP1_CONTROL_5 0x06
#define ADSP1_CONTROL_6 0x07
#define ADSP1_CONTROL_7 0x08
#define ADSP1_CONTROL_8 0x09
#define ADSP1_CONTROL_9 0x0A
#define ADSP1_CONTROL_10 0x0B
#define ADSP1_CONTROL_11 0x0C
#define ADSP1_CONTROL_12 0x0D
#define ADSP1_CONTROL_13 0x0F
#define ADSP1_CONTROL_14 0x10
#define ADSP1_CONTROL_15 0x11
#define ADSP1_CONTROL_16 0x12
#define ADSP1_CONTROL_17 0x13
#define ADSP1_CONTROL_18 0x14
#define ADSP1_CONTROL_19 0x16
#define ADSP1_CONTROL_20 0x17
#define ADSP1_CONTROL_21 0x18
#define ADSP1_CONTROL_22 0x1A
#define ADSP1_CONTROL_23 0x1B
#define ADSP1_CONTROL_24 0x1C
#define ADSP1_CONTROL_25 0x1E
#define ADSP1_CONTROL_26 0x20
#define ADSP1_CONTROL_27 0x21
#define ADSP1_CONTROL_28 0x22
#define ADSP1_CONTROL_29 0x23
#define ADSP1_CONTROL_30 0x24
#define ADSP1_CONTROL_31 0x26
/*
* ADSP1 Control 19
*/
#define ADSP1_WDMA_BUFFER_LENGTH_MASK 0x00FF /* DSP1_WDMA_BUFFER_LENGTH - [7:0] */
#define ADSP1_WDMA_BUFFER_LENGTH_SHIFT 0 /* DSP1_WDMA_BUFFER_LENGTH - [7:0] */
#define ADSP1_WDMA_BUFFER_LENGTH_WIDTH 8 /* DSP1_WDMA_BUFFER_LENGTH - [7:0] */
/*
* ADSP1 Control 30
*/
#define ADSP1_DBG_CLK_ENA 0x0008 /* DSP1_DBG_CLK_ENA */
#define ADSP1_DBG_CLK_ENA_MASK 0x0008 /* DSP1_DBG_CLK_ENA */
#define ADSP1_DBG_CLK_ENA_SHIFT 3 /* DSP1_DBG_CLK_ENA */
#define ADSP1_DBG_CLK_ENA_WIDTH 1 /* DSP1_DBG_CLK_ENA */
#define ADSP1_SYS_ENA 0x0004 /* DSP1_SYS_ENA */
#define ADSP1_SYS_ENA_MASK 0x0004 /* DSP1_SYS_ENA */
#define ADSP1_SYS_ENA_SHIFT 2 /* DSP1_SYS_ENA */
#define ADSP1_SYS_ENA_WIDTH 1 /* DSP1_SYS_ENA */
#define ADSP1_CORE_ENA 0x0002 /* DSP1_CORE_ENA */
#define ADSP1_CORE_ENA_MASK 0x0002 /* DSP1_CORE_ENA */
#define ADSP1_CORE_ENA_SHIFT 1 /* DSP1_CORE_ENA */
#define ADSP1_CORE_ENA_WIDTH 1 /* DSP1_CORE_ENA */
#define ADSP1_START 0x0001 /* DSP1_START */
#define ADSP1_START_MASK 0x0001 /* DSP1_START */
#define ADSP1_START_SHIFT 0 /* DSP1_START */
#define ADSP1_START_WIDTH 1 /* DSP1_START */
/*
* ADSP1 Control 31
*/
#define ADSP1_CLK_SEL_MASK 0x0007 /* CLK_SEL_ENA */
#define ADSP1_CLK_SEL_SHIFT 0 /* CLK_SEL_ENA */
#define ADSP1_CLK_SEL_WIDTH 3 /* CLK_SEL_ENA */
#define ADSP2_CONTROL 0x0
#define ADSP2_CLOCKING 0x1
#define ADSP2V2_CLOCKING 0x2
#define ADSP2_STATUS1 0x4
#define ADSP2_WDMA_CONFIG_1 0x30
#define ADSP2_WDMA_CONFIG_2 0x31
#define ADSP2V2_WDMA_CONFIG_2 0x32
#define ADSP2_RDMA_CONFIG_1 0x34
#define ADSP2_SCRATCH0 0x40
#define ADSP2_SCRATCH1 0x41
#define ADSP2_SCRATCH2 0x42
#define ADSP2_SCRATCH3 0x43
#define ADSP2V2_SCRATCH0_1 0x40
#define ADSP2V2_SCRATCH2_3 0x42
/*
* ADSP2 Control
*/
#define ADSP2_MEM_ENA 0x0010 /* DSP1_MEM_ENA */
#define ADSP2_MEM_ENA_MASK 0x0010 /* DSP1_MEM_ENA */
#define ADSP2_MEM_ENA_SHIFT 4 /* DSP1_MEM_ENA */
#define ADSP2_MEM_ENA_WIDTH 1 /* DSP1_MEM_ENA */
#define ADSP2_SYS_ENA 0x0004 /* DSP1_SYS_ENA */
#define ADSP2_SYS_ENA_MASK 0x0004 /* DSP1_SYS_ENA */
#define ADSP2_SYS_ENA_SHIFT 2 /* DSP1_SYS_ENA */
#define ADSP2_SYS_ENA_WIDTH 1 /* DSP1_SYS_ENA */
#define ADSP2_CORE_ENA 0x0002 /* DSP1_CORE_ENA */
#define ADSP2_CORE_ENA_MASK 0x0002 /* DSP1_CORE_ENA */
#define ADSP2_CORE_ENA_SHIFT 1 /* DSP1_CORE_ENA */
#define ADSP2_CORE_ENA_WIDTH 1 /* DSP1_CORE_ENA */
#define ADSP2_START 0x0001 /* DSP1_START */
#define ADSP2_START_MASK 0x0001 /* DSP1_START */
#define ADSP2_START_SHIFT 0 /* DSP1_START */
#define ADSP2_START_WIDTH 1 /* DSP1_START */
/*
* ADSP2 clocking
*/
#define ADSP2_CLK_SEL_MASK 0x0007 /* CLK_SEL_ENA */
#define ADSP2_CLK_SEL_SHIFT 0 /* CLK_SEL_ENA */
#define ADSP2_CLK_SEL_WIDTH 3 /* CLK_SEL_ENA */
/*
* ADSP2V2 clocking
*/
#define ADSP2V2_CLK_SEL_MASK 0x70000 /* CLK_SEL_ENA */
#define ADSP2V2_CLK_SEL_SHIFT 16 /* CLK_SEL_ENA */
#define ADSP2V2_CLK_SEL_WIDTH 3 /* CLK_SEL_ENA */
#define ADSP2V2_RATE_MASK 0x7800 /* DSP_RATE */
#define ADSP2V2_RATE_SHIFT 11 /* DSP_RATE */
#define ADSP2V2_RATE_WIDTH 4 /* DSP_RATE */
/*
* ADSP2 Status 1
*/
#define ADSP2_RAM_RDY 0x0001
#define ADSP2_RAM_RDY_MASK 0x0001
#define ADSP2_RAM_RDY_SHIFT 0
#define ADSP2_RAM_RDY_WIDTH 1
/*
* ADSP2 Lock support
*/
#define ADSP2_LOCK_CODE_0 0x5555
#define ADSP2_LOCK_CODE_1 0xAAAA
#define ADSP2_WATCHDOG 0x0A
#define ADSP2_BUS_ERR_ADDR 0x52
#define ADSP2_REGION_LOCK_STATUS 0x64
#define ADSP2_LOCK_REGION_1_LOCK_REGION_0 0x66
#define ADSP2_LOCK_REGION_3_LOCK_REGION_2 0x68
#define ADSP2_LOCK_REGION_5_LOCK_REGION_4 0x6A
#define ADSP2_LOCK_REGION_7_LOCK_REGION_6 0x6C
#define ADSP2_LOCK_REGION_9_LOCK_REGION_8 0x6E
#define ADSP2_LOCK_REGION_CTRL 0x7A
#define ADSP2_PMEM_ERR_ADDR_XMEM_ERR_ADDR 0x7C
#define ADSP2_REGION_LOCK_ERR_MASK 0x8000
#define ADSP2_ADDR_ERR_MASK 0x4000
#define ADSP2_WDT_TIMEOUT_STS_MASK 0x2000
#define ADSP2_CTRL_ERR_PAUSE_ENA 0x0002
#define ADSP2_CTRL_ERR_EINT 0x0001
#define ADSP2_BUS_ERR_ADDR_MASK 0x00FFFFFF
#define ADSP2_XMEM_ERR_ADDR_MASK 0x0000FFFF
#define ADSP2_PMEM_ERR_ADDR_MASK 0x7FFF0000
#define ADSP2_PMEM_ERR_ADDR_SHIFT 16
#define ADSP2_WDT_ENA_MASK 0xFFFFFFFD
#define ADSP2_LOCK_REGION_SHIFT 16
/*
* Event control messages
*/
#define CS_DSP_FW_EVENT_SHUTDOWN 0x000001
/*
* HALO system info
*/
#define HALO_AHBM_WINDOW_DEBUG_0 0x02040
#define HALO_AHBM_WINDOW_DEBUG_1 0x02044
/*
* HALO core
*/
#define HALO_SCRATCH1 0x005c0
#define HALO_SCRATCH2 0x005c8
#define HALO_SCRATCH3 0x005d0
#define HALO_SCRATCH4 0x005d8
#define HALO_CCM_CORE_CONTROL 0x41000
#define HALO_CORE_SOFT_RESET 0x00010
#define HALO_WDT_CONTROL 0x47000
/*
* HALO MPU banks
*/
#define HALO_MPU_XMEM_ACCESS_0 0x43000
#define HALO_MPU_YMEM_ACCESS_0 0x43004
#define HALO_MPU_WINDOW_ACCESS_0 0x43008
#define HALO_MPU_XREG_ACCESS_0 0x4300C
#define HALO_MPU_YREG_ACCESS_0 0x43014
#define HALO_MPU_XMEM_ACCESS_1 0x43018
#define HALO_MPU_YMEM_ACCESS_1 0x4301C
#define HALO_MPU_WINDOW_ACCESS_1 0x43020
#define HALO_MPU_XREG_ACCESS_1 0x43024
#define HALO_MPU_YREG_ACCESS_1 0x4302C
#define HALO_MPU_XMEM_ACCESS_2 0x43030
#define HALO_MPU_YMEM_ACCESS_2 0x43034
#define HALO_MPU_WINDOW_ACCESS_2 0x43038
#define HALO_MPU_XREG_ACCESS_2 0x4303C
#define HALO_MPU_YREG_ACCESS_2 0x43044
#define HALO_MPU_XMEM_ACCESS_3 0x43048
#define HALO_MPU_YMEM_ACCESS_3 0x4304C
#define HALO_MPU_WINDOW_ACCESS_3 0x43050
#define HALO_MPU_XREG_ACCESS_3 0x43054
#define HALO_MPU_YREG_ACCESS_3 0x4305C
#define HALO_MPU_XM_VIO_ADDR 0x43100
#define HALO_MPU_XM_VIO_STATUS 0x43104
#define HALO_MPU_YM_VIO_ADDR 0x43108
#define HALO_MPU_YM_VIO_STATUS 0x4310C
#define HALO_MPU_PM_VIO_ADDR 0x43110
#define HALO_MPU_PM_VIO_STATUS 0x43114
#define HALO_MPU_LOCK_CONFIG 0x43140
/*
* HALO_AHBM_WINDOW_DEBUG_1
*/
#define HALO_AHBM_CORE_ERR_ADDR_MASK 0x0fffff00
#define HALO_AHBM_CORE_ERR_ADDR_SHIFT 8
#define HALO_AHBM_FLAGS_ERR_MASK 0x000000ff
/*
* HALO_CCM_CORE_CONTROL
*/
#define HALO_CORE_RESET 0x00000200
#define HALO_CORE_EN 0x00000001
/*
* HALO_CORE_SOFT_RESET
*/
#define HALO_CORE_SOFT_RESET_MASK 0x00000001
/*
* HALO_WDT_CONTROL
*/
#define HALO_WDT_EN_MASK 0x00000001
/*
* HALO_MPU_?M_VIO_STATUS
*/
#define HALO_MPU_VIO_STS_MASK 0x007e0000
#define HALO_MPU_VIO_STS_SHIFT 17
#define HALO_MPU_VIO_ERR_WR_MASK 0x00008000
#define HALO_MPU_VIO_ERR_SRC_MASK 0x00007fff
#define HALO_MPU_VIO_ERR_SRC_SHIFT 0
struct cs_dsp_ops {
bool (*validate_version)(struct cs_dsp *dsp, unsigned int version);
unsigned int (*parse_sizes)(struct cs_dsp *dsp,
const char * const file,
unsigned int pos,
const struct firmware *firmware);
int (*setup_algs)(struct cs_dsp *dsp);
unsigned int (*region_to_reg)(struct cs_dsp_region const *mem,
unsigned int offset);
void (*show_fw_status)(struct cs_dsp *dsp);
void (*stop_watchdog)(struct cs_dsp *dsp);
int (*enable_memory)(struct cs_dsp *dsp);
void (*disable_memory)(struct cs_dsp *dsp);
int (*lock_memory)(struct cs_dsp *dsp, unsigned int lock_regions);
int (*enable_core)(struct cs_dsp *dsp);
void (*disable_core)(struct cs_dsp *dsp);
int (*start_core)(struct cs_dsp *dsp);
void (*stop_core)(struct cs_dsp *dsp);
};
static const struct cs_dsp_ops cs_dsp_adsp1_ops;
static const struct cs_dsp_ops cs_dsp_adsp2_ops[];
static const struct cs_dsp_ops cs_dsp_halo_ops;
static const struct cs_dsp_ops cs_dsp_halo_ao_ops;
struct cs_dsp_buf {
struct list_head list;
void *buf;
};
static struct cs_dsp_buf *cs_dsp_buf_alloc(const void *src, size_t len,
struct list_head *list)
{
struct cs_dsp_buf *buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (buf == NULL)
return NULL;
buf->buf = vmalloc(len);
if (!buf->buf) {
kfree(buf);
return NULL;
}
memcpy(buf->buf, src, len);
if (list)
list_add_tail(&buf->list, list);
return buf;
}
static void cs_dsp_buf_free(struct list_head *list)
{
while (!list_empty(list)) {
struct cs_dsp_buf *buf = list_first_entry(list,
struct cs_dsp_buf,
list);
list_del(&buf->list);
vfree(buf->buf);
kfree(buf);
}
}
/**
* cs_dsp_mem_region_name() - Return a name string for a memory type
* @type: the memory type to match
*
* Return: A const string identifying the memory region.
*/
const char *cs_dsp_mem_region_name(unsigned int type)
{
switch (type) {
case WMFW_ADSP1_PM:
return "PM";
case WMFW_HALO_PM_PACKED:
return "PM_PACKED";
case WMFW_ADSP1_DM:
return "DM";
case WMFW_ADSP2_XM:
return "XM";
case WMFW_HALO_XM_PACKED:
return "XM_PACKED";
case WMFW_ADSP2_YM:
return "YM";
case WMFW_HALO_YM_PACKED:
return "YM_PACKED";
case WMFW_ADSP1_ZM:
return "ZM";
default:
return NULL;
}
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_mem_region_name, FW_CS_DSP);
#ifdef CONFIG_DEBUG_FS
static void cs_dsp_debugfs_save_wmfwname(struct cs_dsp *dsp, const char *s)
{
char *tmp = kasprintf(GFP_KERNEL, "%s\n", s);
kfree(dsp->wmfw_file_name);
dsp->wmfw_file_name = tmp;
}
static void cs_dsp_debugfs_save_binname(struct cs_dsp *dsp, const char *s)
{
char *tmp = kasprintf(GFP_KERNEL, "%s\n", s);
kfree(dsp->bin_file_name);
dsp->bin_file_name = tmp;
}
static void cs_dsp_debugfs_clear(struct cs_dsp *dsp)
{
kfree(dsp->wmfw_file_name);
kfree(dsp->bin_file_name);
dsp->wmfw_file_name = NULL;
dsp->bin_file_name = NULL;
}
static ssize_t cs_dsp_debugfs_wmfw_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct cs_dsp *dsp = file->private_data;
ssize_t ret;
mutex_lock(&dsp->pwr_lock);
if (!dsp->wmfw_file_name || !dsp->booted)
ret = 0;
else
ret = simple_read_from_buffer(user_buf, count, ppos,
dsp->wmfw_file_name,
strlen(dsp->wmfw_file_name));
mutex_unlock(&dsp->pwr_lock);
return ret;
}
static ssize_t cs_dsp_debugfs_bin_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct cs_dsp *dsp = file->private_data;
ssize_t ret;
mutex_lock(&dsp->pwr_lock);
if (!dsp->bin_file_name || !dsp->booted)
ret = 0;
else
ret = simple_read_from_buffer(user_buf, count, ppos,
dsp->bin_file_name,
strlen(dsp->bin_file_name));
mutex_unlock(&dsp->pwr_lock);
return ret;
}
static const struct {
const char *name;
const struct file_operations fops;
} cs_dsp_debugfs_fops[] = {
{
.name = "wmfw_file_name",
.fops = {
.open = simple_open,
.read = cs_dsp_debugfs_wmfw_read,
},
},
{
.name = "bin_file_name",
.fops = {
.open = simple_open,
.read = cs_dsp_debugfs_bin_read,
},
},
};
static int cs_dsp_coeff_base_reg(struct cs_dsp_coeff_ctl *ctl, unsigned int *reg,
unsigned int off);
static int cs_dsp_debugfs_read_controls_show(struct seq_file *s, void *ignored)
{
struct cs_dsp *dsp = s->private;
struct cs_dsp_coeff_ctl *ctl;
unsigned int reg;
list_for_each_entry(ctl, &dsp->ctl_list, list) {
cs_dsp_coeff_base_reg(ctl, ®, 0);
seq_printf(s, "%22.*s: %#8zx %s:%08x %#8x %s %#8x %#4x %c%c%c%c %s %s\n",
ctl->subname_len, ctl->subname, ctl->len,
cs_dsp_mem_region_name(ctl->alg_region.type),
ctl->offset, reg, ctl->fw_name, ctl->alg_region.alg, ctl->type,
ctl->flags & WMFW_CTL_FLAG_VOLATILE ? 'V' : '-',
ctl->flags & WMFW_CTL_FLAG_SYS ? 'S' : '-',
ctl->flags & WMFW_CTL_FLAG_READABLE ? 'R' : '-',
ctl->flags & WMFW_CTL_FLAG_WRITEABLE ? 'W' : '-',
ctl->enabled ? "enabled" : "disabled",
ctl->set ? "dirty" : "clean");
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(cs_dsp_debugfs_read_controls);
/**
* cs_dsp_init_debugfs() - Create and populate DSP representation in debugfs
* @dsp: pointer to DSP structure
* @debugfs_root: pointer to debugfs directory in which to create this DSP
* representation
*/
void cs_dsp_init_debugfs(struct cs_dsp *dsp, struct dentry *debugfs_root)
{
struct dentry *root = NULL;
int i;
root = debugfs_create_dir(dsp->name, debugfs_root);
debugfs_create_bool("booted", 0444, root, &dsp->booted);
debugfs_create_bool("running", 0444, root, &dsp->running);
debugfs_create_x32("fw_id", 0444, root, &dsp->fw_id);
debugfs_create_x32("fw_version", 0444, root, &dsp->fw_id_version);
for (i = 0; i < ARRAY_SIZE(cs_dsp_debugfs_fops); ++i)
debugfs_create_file(cs_dsp_debugfs_fops[i].name, 0444, root,
dsp, &cs_dsp_debugfs_fops[i].fops);
debugfs_create_file("controls", 0444, root, dsp,
&cs_dsp_debugfs_read_controls_fops);
dsp->debugfs_root = root;
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_init_debugfs, FW_CS_DSP);
/**
* cs_dsp_cleanup_debugfs() - Removes DSP representation from debugfs
* @dsp: pointer to DSP structure
*/
void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp)
{
cs_dsp_debugfs_clear(dsp);
debugfs_remove_recursive(dsp->debugfs_root);
dsp->debugfs_root = NULL;
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_cleanup_debugfs, FW_CS_DSP);
#else
void cs_dsp_init_debugfs(struct cs_dsp *dsp, struct dentry *debugfs_root)
{
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_init_debugfs, FW_CS_DSP);
void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp)
{
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_cleanup_debugfs, FW_CS_DSP);
static inline void cs_dsp_debugfs_save_wmfwname(struct cs_dsp *dsp,
const char *s)
{
}
static inline void cs_dsp_debugfs_save_binname(struct cs_dsp *dsp,
const char *s)
{
}
static inline void cs_dsp_debugfs_clear(struct cs_dsp *dsp)
{
}
#endif
static const struct cs_dsp_region *cs_dsp_find_region(struct cs_dsp *dsp,
int type)
{
int i;
for (i = 0; i < dsp->num_mems; i++)
if (dsp->mem[i].type == type)
return &dsp->mem[i];
return NULL;
}
static unsigned int cs_dsp_region_to_reg(struct cs_dsp_region const *mem,
unsigned int offset)
{
switch (mem->type) {
case WMFW_ADSP1_PM:
return mem->base + (offset * 3);
case WMFW_ADSP1_DM:
case WMFW_ADSP2_XM:
case WMFW_ADSP2_YM:
case WMFW_ADSP1_ZM:
return mem->base + (offset * 2);
default:
WARN(1, "Unknown memory region type");
return offset;
}
}
static unsigned int cs_dsp_halo_region_to_reg(struct cs_dsp_region const *mem,
unsigned int offset)
{
switch (mem->type) {
case WMFW_ADSP2_XM:
case WMFW_ADSP2_YM:
return mem->base + (offset * 4);
case WMFW_HALO_XM_PACKED:
case WMFW_HALO_YM_PACKED:
return (mem->base + (offset * 3)) & ~0x3;
case WMFW_HALO_PM_PACKED:
return mem->base + (offset * 5);
default:
WARN(1, "Unknown memory region type");
return offset;
}
}
static void cs_dsp_read_fw_status(struct cs_dsp *dsp,
int noffs, unsigned int *offs)
{
unsigned int i;
int ret;
for (i = 0; i < noffs; ++i) {
ret = regmap_read(dsp->regmap, dsp->base + offs[i], &offs[i]);
if (ret) {
cs_dsp_err(dsp, "Failed to read SCRATCH%u: %d\n", i, ret);
return;
}
}
}
static void cs_dsp_adsp2_show_fw_status(struct cs_dsp *dsp)
{
unsigned int offs[] = {
ADSP2_SCRATCH0, ADSP2_SCRATCH1, ADSP2_SCRATCH2, ADSP2_SCRATCH3,
};
cs_dsp_read_fw_status(dsp, ARRAY_SIZE(offs), offs);
cs_dsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n",
offs[0], offs[1], offs[2], offs[3]);
}
static void cs_dsp_adsp2v2_show_fw_status(struct cs_dsp *dsp)
{
unsigned int offs[] = { ADSP2V2_SCRATCH0_1, ADSP2V2_SCRATCH2_3 };
cs_dsp_read_fw_status(dsp, ARRAY_SIZE(offs), offs);
cs_dsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n",
offs[0] & 0xFFFF, offs[0] >> 16,
offs[1] & 0xFFFF, offs[1] >> 16);
}
static void cs_dsp_halo_show_fw_status(struct cs_dsp *dsp)
{
unsigned int offs[] = {
HALO_SCRATCH1, HALO_SCRATCH2, HALO_SCRATCH3, HALO_SCRATCH4,
};
cs_dsp_read_fw_status(dsp, ARRAY_SIZE(offs), offs);
cs_dsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n",
offs[0], offs[1], offs[2], offs[3]);
}
static int cs_dsp_coeff_base_reg(struct cs_dsp_coeff_ctl *ctl, unsigned int *reg,
unsigned int off)
{
const struct cs_dsp_alg_region *alg_region = &ctl->alg_region;
struct cs_dsp *dsp = ctl->dsp;
const struct cs_dsp_region *mem;
mem = cs_dsp_find_region(dsp, alg_region->type);
if (!mem) {
cs_dsp_err(dsp, "No base for region %x\n",
alg_region->type);
return -EINVAL;
}
*reg = dsp->ops->region_to_reg(mem, ctl->alg_region.base + ctl->offset + off);
return 0;
}
/**
* cs_dsp_coeff_write_acked_control() - Sends event_id to the acked control
* @ctl: pointer to acked coefficient control
* @event_id: the value to write to the given acked control
*
* Once the value has been written to the control the function shall block
* until the running firmware acknowledges the write or timeout is exceeded.
*
* Must be called with pwr_lock held.
*
* Return: Zero for success, a negative number on error.
*/
int cs_dsp_coeff_write_acked_control(struct cs_dsp_coeff_ctl *ctl, unsigned int event_id)
{
struct cs_dsp *dsp = ctl->dsp;
__be32 val = cpu_to_be32(event_id);
unsigned int reg;
int i, ret;
lockdep_assert_held(&dsp->pwr_lock);
if (!dsp->running)
return -EPERM;
ret = cs_dsp_coeff_base_reg(ctl, ®, 0);
if (ret)
return ret;
cs_dsp_dbg(dsp, "Sending 0x%x to acked control alg 0x%x %s:0x%x\n",
event_id, ctl->alg_region.alg,
cs_dsp_mem_region_name(ctl->alg_region.type), ctl->offset);
ret = regmap_raw_write(dsp->regmap, reg, &val, sizeof(val));
if (ret) {
cs_dsp_err(dsp, "Failed to write %x: %d\n", reg, ret);
return ret;
}
/*
 * Poll for the ack. We initially poll at ~1ms intervals for firmwares
 * that respond quickly, then fall back to ~10ms polls. A firmware is
 * unlikely to ack instantly, so we do the first 1ms delay before
 * reading the control to avoid a pointless bus transaction.
*/
for (i = 0; i < CS_DSP_ACKED_CTL_TIMEOUT_MS;) {
switch (i) {
case 0 ... CS_DSP_ACKED_CTL_N_QUICKPOLLS - 1:
usleep_range(1000, 2000);
i++;
break;
default:
usleep_range(10000, 20000);
i += 10;
break;
}
ret = regmap_raw_read(dsp->regmap, reg, &val, sizeof(val));
if (ret) {
cs_dsp_err(dsp, "Failed to read %x: %d\n", reg, ret);
return ret;
}
if (val == 0) {
cs_dsp_dbg(dsp, "Acked control ACKED at poll %u\n", i);
return 0;
}
}
cs_dsp_warn(dsp, "Acked control @0x%x alg:0x%x %s:0x%x timed out\n",
reg, ctl->alg_region.alg,
cs_dsp_mem_region_name(ctl->alg_region.type),
ctl->offset);
return -ETIMEDOUT;
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_write_acked_control, FW_CS_DSP);
static int cs_dsp_coeff_write_ctrl_raw(struct cs_dsp_coeff_ctl *ctl,
unsigned int off, const void *buf, size_t len)
{
struct cs_dsp *dsp = ctl->dsp;
void *scratch;
int ret;
unsigned int reg;
ret = cs_dsp_coeff_base_reg(ctl, ®, off);
if (ret)
return ret;
scratch = kmemdup(buf, len, GFP_KERNEL | GFP_DMA);
if (!scratch)
return -ENOMEM;
ret = regmap_raw_write(dsp->regmap, reg, scratch,
len);
if (ret) {
cs_dsp_err(dsp, "Failed to write %zu bytes to %x: %d\n",
len, reg, ret);
kfree(scratch);
return ret;
}
cs_dsp_dbg(dsp, "Wrote %zu bytes to %x\n", len, reg);
kfree(scratch);
return 0;
}
/**
* cs_dsp_coeff_write_ctrl() - Writes the given buffer to the given coefficient control
* @ctl: pointer to coefficient control
* @off: word offset at which data should be written
* @buf: the buffer to write to the given control
* @len: the length of the buffer in bytes
*
* Must be called with pwr_lock held.
*
* Return: < 0 on error, 1 when the control value changed and 0 when it has not.
*/
int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl,
unsigned int off, const void *buf, size_t len)
{
int ret = 0;
if (!ctl)
return -ENOENT;
lockdep_assert_held(&ctl->dsp->pwr_lock);
if (len + off * sizeof(u32) > ctl->len)
return -EINVAL;
if (ctl->flags & WMFW_CTL_FLAG_VOLATILE) {
ret = -EPERM;
} else if (buf != ctl->cache) {
if (memcmp(ctl->cache + off * sizeof(u32), buf, len))
memcpy(ctl->cache + off * sizeof(u32), buf, len);
else
return 0;
}
ctl->set = 1;
if (ctl->enabled && ctl->dsp->running)
ret = cs_dsp_coeff_write_ctrl_raw(ctl, off, buf, len);
if (ret < 0)
return ret;
return 1;
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_write_ctrl, FW_CS_DSP);
static int cs_dsp_coeff_read_ctrl_raw(struct cs_dsp_coeff_ctl *ctl,
unsigned int off, void *buf, size_t len)
{
struct cs_dsp *dsp = ctl->dsp;
void *scratch;
int ret;
unsigned int reg;
ret = cs_dsp_coeff_base_reg(ctl, ®, off);
if (ret)
return ret;
scratch = kmalloc(len, GFP_KERNEL | GFP_DMA);
if (!scratch)
return -ENOMEM;
ret = regmap_raw_read(dsp->regmap, reg, scratch, len);
if (ret) {
cs_dsp_err(dsp, "Failed to read %zu bytes from %x: %d\n",
len, reg, ret);
kfree(scratch);
return ret;
}
cs_dsp_dbg(dsp, "Read %zu bytes from %x\n", len, reg);
memcpy(buf, scratch, len);
kfree(scratch);
return 0;
}
/**
* cs_dsp_coeff_read_ctrl() - Reads the given coefficient control into the given buffer
* @ctl: pointer to coefficient control
* @off: word offset at which data should be read
* @buf: the buffer to store to the given control
* @len: the length of the buffer in bytes
*
* Must be called with pwr_lock held.
*
* Return: Zero for success, a negative number on error.
*/
int cs_dsp_coeff_read_ctrl(struct cs_dsp_coeff_ctl *ctl,
unsigned int off, void *buf, size_t len)
{
int ret = 0;
if (!ctl)
return -ENOENT;
lockdep_assert_held(&ctl->dsp->pwr_lock);
if (len + off * sizeof(u32) > ctl->len)
return -EINVAL;
if (ctl->flags & WMFW_CTL_FLAG_VOLATILE) {
if (ctl->enabled && ctl->dsp->running)
return cs_dsp_coeff_read_ctrl_raw(ctl, off, buf, len);
else
return -EPERM;
} else {
if (!ctl->flags && ctl->enabled && ctl->dsp->running)
ret = cs_dsp_coeff_read_ctrl_raw(ctl, 0, ctl->cache, ctl->len);
if (buf != ctl->cache)
memcpy(buf, ctl->cache + off * sizeof(u32), len);
}
return ret;
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_read_ctrl, FW_CS_DSP);
static int cs_dsp_coeff_init_control_caches(struct cs_dsp *dsp)
{
struct cs_dsp_coeff_ctl *ctl;
int ret;
list_for_each_entry(ctl, &dsp->ctl_list, list) {
if (!ctl->enabled || ctl->set)
continue;
if (ctl->flags & WMFW_CTL_FLAG_VOLATILE)
continue;
/*
* For readable controls populate the cache from the DSP memory.
* For non-readable controls the cache was zero-filled when
* created so we don't need to do anything.
*/
if (!ctl->flags || (ctl->flags & WMFW_CTL_FLAG_READABLE)) {
ret = cs_dsp_coeff_read_ctrl_raw(ctl, 0, ctl->cache, ctl->len);
if (ret < 0)
return ret;
}
}
return 0;
}
static int cs_dsp_coeff_sync_controls(struct cs_dsp *dsp)
{
struct cs_dsp_coeff_ctl *ctl;
int ret;
list_for_each_entry(ctl, &dsp->ctl_list, list) {
if (!ctl->enabled)
continue;
if (ctl->set && !(ctl->flags & WMFW_CTL_FLAG_VOLATILE)) {
ret = cs_dsp_coeff_write_ctrl_raw(ctl, 0, ctl->cache,
ctl->len);
if (ret < 0)
return ret;
}
}
return 0;
}
static void cs_dsp_signal_event_controls(struct cs_dsp *dsp,
unsigned int event)
{
struct cs_dsp_coeff_ctl *ctl;
int ret;
list_for_each_entry(ctl, &dsp->ctl_list, list) {
if (ctl->type != WMFW_CTL_TYPE_HOSTEVENT)
continue;
if (!ctl->enabled)
continue;
ret = cs_dsp_coeff_write_acked_control(ctl, event);
if (ret)
cs_dsp_warn(dsp,
"Failed to send 0x%x event to alg 0x%x (%d)\n",
event, ctl->alg_region.alg, ret);
}
}
static void cs_dsp_free_ctl_blk(struct cs_dsp_coeff_ctl *ctl)
{
kfree(ctl->cache);
kfree(ctl->subname);
kfree(ctl);
}
static int cs_dsp_create_control(struct cs_dsp *dsp,
const struct cs_dsp_alg_region *alg_region,
unsigned int offset, unsigned int len,
const char *subname, unsigned int subname_len,
unsigned int flags, unsigned int type)
{
struct cs_dsp_coeff_ctl *ctl;
int ret;
list_for_each_entry(ctl, &dsp->ctl_list, list) {
if (ctl->fw_name == dsp->fw_name &&
ctl->alg_region.alg == alg_region->alg &&
ctl->alg_region.type == alg_region->type) {
if ((!subname && !ctl->subname) ||
(subname && (ctl->subname_len == subname_len) &&
!strncmp(ctl->subname, subname, ctl->subname_len))) {
if (!ctl->enabled)
ctl->enabled = 1;
return 0;
}
}
}
ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
if (!ctl)
return -ENOMEM;
ctl->fw_name = dsp->fw_name;
ctl->alg_region = *alg_region;
if (subname && dsp->fw_ver >= 2) {
ctl->subname_len = subname_len;
ctl->subname = kasprintf(GFP_KERNEL, "%.*s", subname_len, subname);
if (!ctl->subname) {
ret = -ENOMEM;
goto err_ctl;
}
}
ctl->enabled = 1;
ctl->set = 0;
ctl->dsp = dsp;
ctl->flags = flags;
ctl->type = type;
ctl->offset = offset;
ctl->len = len;
ctl->cache = kzalloc(ctl->len, GFP_KERNEL);
if (!ctl->cache) {
ret = -ENOMEM;
goto err_ctl_subname;
}
list_add(&ctl->list, &dsp->ctl_list);
if (dsp->client_ops->control_add) {
ret = dsp->client_ops->control_add(ctl);
if (ret)
goto err_list_del;
}
return 0;
err_list_del:
list_del(&ctl->list);
kfree(ctl->cache);
err_ctl_subname:
kfree(ctl->subname);
err_ctl:
kfree(ctl);
return ret;
}
struct cs_dsp_coeff_parsed_alg {
int id;
const u8 *name;
int name_len;
int ncoeff;
};
struct cs_dsp_coeff_parsed_coeff {
int offset;
int mem_type;
const u8 *name;
int name_len;
unsigned int ctl_type;
int flags;
int len;
};
static int cs_dsp_coeff_parse_string(int bytes, const u8 **pos, const u8 **str)
{
int length;
switch (bytes) {
case 1:
length = **pos;
break;
case 2:
length = le16_to_cpu(*((__le16 *)*pos));
break;
default:
return 0;
}
if (str)
*str = *pos + bytes;
*pos += ((length + bytes) + 3) & ~0x03;
return length;
}
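/*
 * Worked example: with a 1-byte length prefix and a 5-character string,
 * cs_dsp_coeff_parse_string() returns length == 5 and advances *pos by
 * ((5 + 1) + 3) & ~0x03 == 8 bytes, i.e. the length byte plus the string,
 * padded up to the next 4-byte boundary.
 */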
static int cs_dsp_coeff_parse_int(int bytes, const u8 **pos)
{
int val = 0;
switch (bytes) {
case 2:
val = le16_to_cpu(*((__le16 *)*pos));
break;
case 4:
val = le32_to_cpu(*((__le32 *)*pos));
break;
default:
break;
}
*pos += bytes;
return val;
}
static inline void cs_dsp_coeff_parse_alg(struct cs_dsp *dsp, const u8 **data,
struct cs_dsp_coeff_parsed_alg *blk)
{
const struct wmfw_adsp_alg_data *raw;
switch (dsp->fw_ver) {
case 0:
case 1:
raw = (const struct wmfw_adsp_alg_data *)*data;
*data = raw->data;
blk->id = le32_to_cpu(raw->id);
blk->name = raw->name;
blk->name_len = strlen(raw->name);
blk->ncoeff = le32_to_cpu(raw->ncoeff);
break;
default:
blk->id = cs_dsp_coeff_parse_int(sizeof(raw->id), data);
blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), data,
&blk->name);
cs_dsp_coeff_parse_string(sizeof(u16), data, NULL);
blk->ncoeff = cs_dsp_coeff_parse_int(sizeof(raw->ncoeff), data);
break;
}
cs_dsp_dbg(dsp, "Algorithm ID: %#x\n", blk->id);
cs_dsp_dbg(dsp, "Algorithm name: %.*s\n", blk->name_len, blk->name);
cs_dsp_dbg(dsp, "# of coefficient descriptors: %#x\n", blk->ncoeff);
}
static inline void cs_dsp_coeff_parse_coeff(struct cs_dsp *dsp, const u8 **data,
struct cs_dsp_coeff_parsed_coeff *blk)
{
const struct wmfw_adsp_coeff_data *raw;
const u8 *tmp;
int length;
switch (dsp->fw_ver) {
case 0:
case 1:
raw = (const struct wmfw_adsp_coeff_data *)*data;
*data = *data + sizeof(raw->hdr) + le32_to_cpu(raw->hdr.size);
blk->offset = le16_to_cpu(raw->hdr.offset);
blk->mem_type = le16_to_cpu(raw->hdr.type);
blk->name = raw->name;
blk->name_len = strlen(raw->name);
blk->ctl_type = le16_to_cpu(raw->ctl_type);
blk->flags = le16_to_cpu(raw->flags);
blk->len = le32_to_cpu(raw->len);
break;
default:
tmp = *data;
blk->offset = cs_dsp_coeff_parse_int(sizeof(raw->hdr.offset), &tmp);
blk->mem_type = cs_dsp_coeff_parse_int(sizeof(raw->hdr.type), &tmp);
length = cs_dsp_coeff_parse_int(sizeof(raw->hdr.size), &tmp);
blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), &tmp,
&blk->name);
cs_dsp_coeff_parse_string(sizeof(u8), &tmp, NULL);
cs_dsp_coeff_parse_string(sizeof(u16), &tmp, NULL);
blk->ctl_type = cs_dsp_coeff_parse_int(sizeof(raw->ctl_type), &tmp);
blk->flags = cs_dsp_coeff_parse_int(sizeof(raw->flags), &tmp);
blk->len = cs_dsp_coeff_parse_int(sizeof(raw->len), &tmp);
*data = *data + sizeof(raw->hdr) + length;
break;
}
cs_dsp_dbg(dsp, "\tCoefficient type: %#x\n", blk->mem_type);
cs_dsp_dbg(dsp, "\tCoefficient offset: %#x\n", blk->offset);
cs_dsp_dbg(dsp, "\tCoefficient name: %.*s\n", blk->name_len, blk->name);
cs_dsp_dbg(dsp, "\tCoefficient flags: %#x\n", blk->flags);
cs_dsp_dbg(dsp, "\tALSA control type: %#x\n", blk->ctl_type);
cs_dsp_dbg(dsp, "\tALSA control len: %#x\n", blk->len);
}
static int cs_dsp_check_coeff_flags(struct cs_dsp *dsp,
const struct cs_dsp_coeff_parsed_coeff *coeff_blk,
unsigned int f_required,
unsigned int f_illegal)
{
if ((coeff_blk->flags & f_illegal) ||
((coeff_blk->flags & f_required) != f_required)) {
cs_dsp_err(dsp, "Illegal flags 0x%x for control type 0x%x\n",
coeff_blk->flags, coeff_blk->ctl_type);
return -EINVAL;
}
return 0;
}
static int cs_dsp_parse_coeff(struct cs_dsp *dsp,
const struct wmfw_region *region)
{
struct cs_dsp_alg_region alg_region = {};
struct cs_dsp_coeff_parsed_alg alg_blk;
struct cs_dsp_coeff_parsed_coeff coeff_blk;
const u8 *data = region->data;
int i, ret;
cs_dsp_coeff_parse_alg(dsp, &data, &alg_blk);
for (i = 0; i < alg_blk.ncoeff; i++) {
cs_dsp_coeff_parse_coeff(dsp, &data, &coeff_blk);
switch (coeff_blk.ctl_type) {
case WMFW_CTL_TYPE_BYTES:
break;
case WMFW_CTL_TYPE_ACKED:
if (coeff_blk.flags & WMFW_CTL_FLAG_SYS)
continue; /* ignore */
ret = cs_dsp_check_coeff_flags(dsp, &coeff_blk,
WMFW_CTL_FLAG_VOLATILE |
WMFW_CTL_FLAG_WRITEABLE |
WMFW_CTL_FLAG_READABLE,
0);
if (ret)
return -EINVAL;
break;
case WMFW_CTL_TYPE_HOSTEVENT:
case WMFW_CTL_TYPE_FWEVENT:
ret = cs_dsp_check_coeff_flags(dsp, &coeff_blk,
WMFW_CTL_FLAG_SYS |
WMFW_CTL_FLAG_VOLATILE |
WMFW_CTL_FLAG_WRITEABLE |
WMFW_CTL_FLAG_READABLE,
0);
if (ret)
return -EINVAL;
break;
case WMFW_CTL_TYPE_HOST_BUFFER:
ret = cs_dsp_check_coeff_flags(dsp, &coeff_blk,
WMFW_CTL_FLAG_SYS |
WMFW_CTL_FLAG_VOLATILE |
WMFW_CTL_FLAG_READABLE,
0);
if (ret)
return -EINVAL;
break;
default:
cs_dsp_err(dsp, "Unknown control type: %d\n",
coeff_blk.ctl_type);
return -EINVAL;
}
alg_region.type = coeff_blk.mem_type;
alg_region.alg = alg_blk.id;
ret = cs_dsp_create_control(dsp, &alg_region,
coeff_blk.offset,
coeff_blk.len,
coeff_blk.name,
coeff_blk.name_len,
coeff_blk.flags,
coeff_blk.ctl_type);
if (ret < 0)
cs_dsp_err(dsp, "Failed to create control: %.*s, %d\n",
coeff_blk.name_len, coeff_blk.name, ret);
}
return 0;
}
static unsigned int cs_dsp_adsp1_parse_sizes(struct cs_dsp *dsp,
const char * const file,
unsigned int pos,
const struct firmware *firmware)
{
const struct wmfw_adsp1_sizes *adsp1_sizes;
adsp1_sizes = (void *)&firmware->data[pos];
cs_dsp_dbg(dsp, "%s: %d DM, %d PM, %d ZM\n", file,
le32_to_cpu(adsp1_sizes->dm), le32_to_cpu(adsp1_sizes->pm),
le32_to_cpu(adsp1_sizes->zm));
return pos + sizeof(*adsp1_sizes);
}
static unsigned int cs_dsp_adsp2_parse_sizes(struct cs_dsp *dsp,
const char * const file,
unsigned int pos,
const struct firmware *firmware)
{
const struct wmfw_adsp2_sizes *adsp2_sizes;
adsp2_sizes = (void *)&firmware->data[pos];
cs_dsp_dbg(dsp, "%s: %d XM, %d YM %d PM, %d ZM\n", file,
le32_to_cpu(adsp2_sizes->xm), le32_to_cpu(adsp2_sizes->ym),
le32_to_cpu(adsp2_sizes->pm), le32_to_cpu(adsp2_sizes->zm));
return pos + sizeof(*adsp2_sizes);
}
static bool cs_dsp_validate_version(struct cs_dsp *dsp, unsigned int version)
{
switch (version) {
case 0:
cs_dsp_warn(dsp, "Deprecated file format %d\n", version);
return true;
case 1:
case 2:
return true;
default:
return false;
}
}
static bool cs_dsp_halo_validate_version(struct cs_dsp *dsp, unsigned int version)
{
switch (version) {
case 3:
return true;
default:
return false;
}
}
static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
const char *file)
{
LIST_HEAD(buf_list);
struct regmap *regmap = dsp->regmap;
unsigned int pos = 0;
const struct wmfw_header *header;
const struct wmfw_adsp1_sizes *adsp1_sizes;
const struct wmfw_footer *footer;
const struct wmfw_region *region;
const struct cs_dsp_region *mem;
const char *region_name;
char *text = NULL;
struct cs_dsp_buf *buf;
unsigned int reg;
int regions = 0;
int ret, offset, type;
if (!firmware)
return 0;
ret = -EINVAL;
pos = sizeof(*header) + sizeof(*adsp1_sizes) + sizeof(*footer);
if (pos >= firmware->size) {
cs_dsp_err(dsp, "%s: file too short, %zu bytes\n",
file, firmware->size);
goto out_fw;
}
header = (void *)&firmware->data[0];
if (memcmp(&header->magic[0], "WMFW", 4) != 0) {
cs_dsp_err(dsp, "%s: invalid magic\n", file);
goto out_fw;
}
if (!dsp->ops->validate_version(dsp, header->ver)) {
cs_dsp_err(dsp, "%s: unknown file format %d\n",
file, header->ver);
goto out_fw;
}
cs_dsp_info(dsp, "Firmware version: %d\n", header->ver);
dsp->fw_ver = header->ver;
if (header->core != dsp->type) {
cs_dsp_err(dsp, "%s: invalid core %d != %d\n",
file, header->core, dsp->type);
goto out_fw;
}
pos = sizeof(*header);
pos = dsp->ops->parse_sizes(dsp, file, pos, firmware);
footer = (void *)&firmware->data[pos];
pos += sizeof(*footer);
if (le32_to_cpu(header->len) != pos) {
cs_dsp_err(dsp, "%s: unexpected header length %d\n",
file, le32_to_cpu(header->len));
goto out_fw;
}
cs_dsp_dbg(dsp, "%s: timestamp %llu\n", file,
le64_to_cpu(footer->timestamp));
while (pos < firmware->size &&
sizeof(*region) < firmware->size - pos) {
region = (void *)&(firmware->data[pos]);
region_name = "Unknown";
reg = 0;
text = NULL;
offset = le32_to_cpu(region->offset) & 0xffffff;
type = be32_to_cpu(region->type) & 0xff;
switch (type) {
case WMFW_NAME_TEXT:
region_name = "Firmware name";
text = kzalloc(le32_to_cpu(region->len) + 1,
GFP_KERNEL);
break;
case WMFW_ALGORITHM_DATA:
region_name = "Algorithm";
ret = cs_dsp_parse_coeff(dsp, region);
if (ret != 0)
goto out_fw;
break;
case WMFW_INFO_TEXT:
region_name = "Information";
text = kzalloc(le32_to_cpu(region->len) + 1,
GFP_KERNEL);
break;
case WMFW_ABSOLUTE:
region_name = "Absolute";
reg = offset;
break;
case WMFW_ADSP1_PM:
case WMFW_ADSP1_DM:
case WMFW_ADSP2_XM:
case WMFW_ADSP2_YM:
case WMFW_ADSP1_ZM:
case WMFW_HALO_PM_PACKED:
case WMFW_HALO_XM_PACKED:
case WMFW_HALO_YM_PACKED:
mem = cs_dsp_find_region(dsp, type);
if (!mem) {
cs_dsp_err(dsp, "No region of type: %x\n", type);
ret = -EINVAL;
goto out_fw;
}
region_name = cs_dsp_mem_region_name(type);
reg = dsp->ops->region_to_reg(mem, offset);
break;
default:
cs_dsp_warn(dsp,
"%s.%d: Unknown region type %x at %d(%x)\n",
file, regions, type, pos, pos);
break;
}
cs_dsp_dbg(dsp, "%s.%d: %d bytes at %d in %s\n", file,
regions, le32_to_cpu(region->len), offset,
region_name);
if (le32_to_cpu(region->len) >
firmware->size - pos - sizeof(*region)) {
cs_dsp_err(dsp,
"%s.%d: %s region len %d bytes exceeds file length %zu\n",
file, regions, region_name,
le32_to_cpu(region->len), firmware->size);
ret = -EINVAL;
goto out_fw;
}
if (text) {
memcpy(text, region->data, le32_to_cpu(region->len));
cs_dsp_info(dsp, "%s: %s\n", file, text);
kfree(text);
text = NULL;
}
if (reg) {
buf = cs_dsp_buf_alloc(region->data,
le32_to_cpu(region->len),
&buf_list);
if (!buf) {
cs_dsp_err(dsp, "Out of memory\n");
ret = -ENOMEM;
goto out_fw;
}
ret = regmap_raw_write_async(regmap, reg, buf->buf,
le32_to_cpu(region->len));
if (ret != 0) {
cs_dsp_err(dsp,
"%s.%d: Failed to write %d bytes at %d in %s: %d\n",
file, regions,
le32_to_cpu(region->len), offset,
region_name, ret);
goto out_fw;
}
}
pos += le32_to_cpu(region->len) + sizeof(*region);
regions++;
}
ret = regmap_async_complete(regmap);
if (ret != 0) {
cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret);
goto out_fw;
}
if (pos > firmware->size)
cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n",
file, regions, pos - firmware->size);
cs_dsp_debugfs_save_wmfwname(dsp, file);
out_fw:
regmap_async_complete(regmap);
cs_dsp_buf_free(&buf_list);
kfree(text);
return ret;
}
/**
* cs_dsp_get_ctl() - Finds a matching coefficient control
* @dsp: pointer to DSP structure
* @name: pointer to string to match with a control's subname
* @type: the algorithm type to match
* @alg: the algorithm id to match
*
* Find cs_dsp_coeff_ctl with input name as its subname
*
* Return: pointer to the control on success, NULL if not found
*/
struct cs_dsp_coeff_ctl *cs_dsp_get_ctl(struct cs_dsp *dsp, const char *name, int type,
unsigned int alg)
{
struct cs_dsp_coeff_ctl *pos, *rslt = NULL;
lockdep_assert_held(&dsp->pwr_lock);
list_for_each_entry(pos, &dsp->ctl_list, list) {
if (!pos->subname)
continue;
if (strncmp(pos->subname, name, pos->subname_len) == 0 &&
pos->fw_name == dsp->fw_name &&
pos->alg_region.alg == alg &&
pos->alg_region.type == type) {
rslt = pos;
break;
}
}
return rslt;
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_get_ctl, FW_CS_DSP);
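/*
 * Illustrative sketch of a hypothetical caller: look up a control by name
 * and update it. The control name, memory type and algorithm ID below are
 * made-up placeholders; pwr_lock must be held around cs_dsp_get_ctl() and
 * cs_dsp_coeff_write_ctrl().
 */
static int __maybe_unused cs_dsp_example_update_ctl(struct cs_dsp *dsp)
{
	static const u8 buf[4] = { 0, 0, 0, 1 };	/* one 32-bit word */
	struct cs_dsp_coeff_ctl *ctl;
	int ret;

	mutex_lock(&dsp->pwr_lock);

	ctl = cs_dsp_get_ctl(dsp, "EXAMPLE_CTL", WMFW_ADSP2_YM, 0x123456);
	if (!ctl)
		ret = -ENOENT;
	else
		ret = cs_dsp_coeff_write_ctrl(ctl, 0, buf, sizeof(buf));

	mutex_unlock(&dsp->pwr_lock);

	return ret < 0 ? ret : 0;
}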
static void cs_dsp_ctl_fixup_base(struct cs_dsp *dsp,
const struct cs_dsp_alg_region *alg_region)
{
struct cs_dsp_coeff_ctl *ctl;
list_for_each_entry(ctl, &dsp->ctl_list, list) {
if (ctl->fw_name == dsp->fw_name &&
alg_region->alg == ctl->alg_region.alg &&
alg_region->type == ctl->alg_region.type) {
ctl->alg_region.base = alg_region->base;
}
}
}
static void *cs_dsp_read_algs(struct cs_dsp *dsp, size_t n_algs,
const struct cs_dsp_region *mem,
unsigned int pos, unsigned int len)
{
void *alg;
unsigned int reg;
int ret;
__be32 val;
if (n_algs == 0) {
cs_dsp_err(dsp, "No algorithms\n");
return ERR_PTR(-EINVAL);
}
if (n_algs > 1024) {
cs_dsp_err(dsp, "Algorithm count %zx excessive\n", n_algs);
return ERR_PTR(-EINVAL);
}
/* Read the terminator first to validate the length */
reg = dsp->ops->region_to_reg(mem, pos + len);
ret = regmap_raw_read(dsp->regmap, reg, &val, sizeof(val));
if (ret != 0) {
cs_dsp_err(dsp, "Failed to read algorithm list end: %d\n",
ret);
return ERR_PTR(ret);
}
if (be32_to_cpu(val) != 0xbedead)
cs_dsp_warn(dsp, "Algorithm list end %x 0x%x != 0xbedead\n",
reg, be32_to_cpu(val));
/* Convert length from DSP words to bytes */
len *= sizeof(u32);
alg = kzalloc(len, GFP_KERNEL | GFP_DMA);
if (!alg)
return ERR_PTR(-ENOMEM);
reg = dsp->ops->region_to_reg(mem, pos);
ret = regmap_raw_read(dsp->regmap, reg, alg, len);
if (ret != 0) {
cs_dsp_err(dsp, "Failed to read algorithm list: %d\n", ret);
kfree(alg);
return ERR_PTR(ret);
}
return alg;
}
/**
* cs_dsp_find_alg_region() - Finds a matching algorithm region
* @dsp: pointer to DSP structure
* @type: the algorithm type to match
* @id: the algorithm id to match
*
* Return: Pointer to matching algorithm region, or NULL if not found.
*/
struct cs_dsp_alg_region *cs_dsp_find_alg_region(struct cs_dsp *dsp,
int type, unsigned int id)
{
struct cs_dsp_alg_region *alg_region;
lockdep_assert_held(&dsp->pwr_lock);
list_for_each_entry(alg_region, &dsp->alg_regions, list) {
if (id == alg_region->alg && type == alg_region->type)
return alg_region;
}
return NULL;
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_find_alg_region, FW_CS_DSP);
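/*
 * Illustrative sketch, not part of the original driver: resolving the base
 * offset of an algorithm's YM region under the power lock. The algorithm id
 * 0x123456 is a placeholder.
 */
static __maybe_unused unsigned int cs_dsp_example_alg_base(struct cs_dsp *dsp)
{
	struct cs_dsp_alg_region *alg_region;
	unsigned int base = 0;

	mutex_lock(&dsp->pwr_lock);
	alg_region = cs_dsp_find_alg_region(dsp, WMFW_ADSP2_YM, 0x123456);
	if (alg_region)
		base = alg_region->base;
	mutex_unlock(&dsp->pwr_lock);

	return base;
}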
static struct cs_dsp_alg_region *cs_dsp_create_region(struct cs_dsp *dsp,
int type, __be32 id,
__be32 ver, __be32 base)
{
struct cs_dsp_alg_region *alg_region;
alg_region = kzalloc(sizeof(*alg_region), GFP_KERNEL);
if (!alg_region)
return ERR_PTR(-ENOMEM);
alg_region->type = type;
alg_region->alg = be32_to_cpu(id);
alg_region->ver = be32_to_cpu(ver);
alg_region->base = be32_to_cpu(base);
list_add_tail(&alg_region->list, &dsp->alg_regions);
if (dsp->fw_ver > 0)
cs_dsp_ctl_fixup_base(dsp, alg_region);
return alg_region;
}
static void cs_dsp_free_alg_regions(struct cs_dsp *dsp)
{
struct cs_dsp_alg_region *alg_region;
while (!list_empty(&dsp->alg_regions)) {
alg_region = list_first_entry(&dsp->alg_regions,
struct cs_dsp_alg_region,
list);
list_del(&alg_region->list);
kfree(alg_region);
}
}
static void cs_dsp_parse_wmfw_id_header(struct cs_dsp *dsp,
struct wmfw_id_hdr *fw, int nalgs)
{
dsp->fw_id = be32_to_cpu(fw->id);
dsp->fw_id_version = be32_to_cpu(fw->ver);
cs_dsp_info(dsp, "Firmware: %x v%d.%d.%d, %d algorithms\n",
dsp->fw_id, (dsp->fw_id_version & 0xff0000) >> 16,
(dsp->fw_id_version & 0xff00) >> 8, dsp->fw_id_version & 0xff,
nalgs);
}
static void cs_dsp_parse_wmfw_v3_id_header(struct cs_dsp *dsp,
struct wmfw_v3_id_hdr *fw, int nalgs)
{
dsp->fw_id = be32_to_cpu(fw->id);
dsp->fw_id_version = be32_to_cpu(fw->ver);
dsp->fw_vendor_id = be32_to_cpu(fw->vendor_id);
cs_dsp_info(dsp, "Firmware: %x vendor: 0x%x v%d.%d.%d, %d algorithms\n",
dsp->fw_id, dsp->fw_vendor_id,
(dsp->fw_id_version & 0xff0000) >> 16,
(dsp->fw_id_version & 0xff00) >> 8, dsp->fw_id_version & 0xff,
nalgs);
}
static int cs_dsp_create_regions(struct cs_dsp *dsp, __be32 id, __be32 ver,
int nregions, const int *type, __be32 *base)
{
struct cs_dsp_alg_region *alg_region;
int i;
for (i = 0; i < nregions; i++) {
alg_region = cs_dsp_create_region(dsp, type[i], id, ver, base[i]);
if (IS_ERR(alg_region))
return PTR_ERR(alg_region);
}
return 0;
}
static int cs_dsp_adsp1_setup_algs(struct cs_dsp *dsp)
{
struct wmfw_adsp1_id_hdr adsp1_id;
struct wmfw_adsp1_alg_hdr *adsp1_alg;
struct cs_dsp_alg_region *alg_region;
const struct cs_dsp_region *mem;
unsigned int pos, len;
size_t n_algs;
int i, ret;
mem = cs_dsp_find_region(dsp, WMFW_ADSP1_DM);
if (WARN_ON(!mem))
return -EINVAL;
ret = regmap_raw_read(dsp->regmap, mem->base, &adsp1_id,
sizeof(adsp1_id));
if (ret != 0) {
cs_dsp_err(dsp, "Failed to read algorithm info: %d\n",
ret);
return ret;
}
n_algs = be32_to_cpu(adsp1_id.n_algs);
cs_dsp_parse_wmfw_id_header(dsp, &adsp1_id.fw, n_algs);
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_ZM,
adsp1_id.fw.id, adsp1_id.fw.ver,
adsp1_id.zm);
if (IS_ERR(alg_region))
return PTR_ERR(alg_region);
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_DM,
adsp1_id.fw.id, adsp1_id.fw.ver,
adsp1_id.dm);
if (IS_ERR(alg_region))
return PTR_ERR(alg_region);
/* Calculate offset and length in DSP words */
pos = sizeof(adsp1_id) / sizeof(u32);
len = (sizeof(*adsp1_alg) * n_algs) / sizeof(u32);
adsp1_alg = cs_dsp_read_algs(dsp, n_algs, mem, pos, len);
if (IS_ERR(adsp1_alg))
return PTR_ERR(adsp1_alg);
for (i = 0; i < n_algs; i++) {
cs_dsp_info(dsp, "%d: ID %x v%d.%d.%d DM@%x ZM@%x\n",
i, be32_to_cpu(adsp1_alg[i].alg.id),
(be32_to_cpu(adsp1_alg[i].alg.ver) & 0xff0000) >> 16,
(be32_to_cpu(adsp1_alg[i].alg.ver) & 0xff00) >> 8,
be32_to_cpu(adsp1_alg[i].alg.ver) & 0xff,
be32_to_cpu(adsp1_alg[i].dm),
be32_to_cpu(adsp1_alg[i].zm));
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_DM,
adsp1_alg[i].alg.id,
adsp1_alg[i].alg.ver,
adsp1_alg[i].dm);
if (IS_ERR(alg_region)) {
ret = PTR_ERR(alg_region);
goto out;
}
if (dsp->fw_ver == 0) {
if (i + 1 < n_algs) {
len = be32_to_cpu(adsp1_alg[i + 1].dm);
len -= be32_to_cpu(adsp1_alg[i].dm);
len *= 4;
cs_dsp_create_control(dsp, alg_region, 0,
len, NULL, 0, 0,
WMFW_CTL_TYPE_BYTES);
} else {
cs_dsp_warn(dsp, "Missing length info for region DM with ID %x\n",
be32_to_cpu(adsp1_alg[i].alg.id));
}
}
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_ZM,
adsp1_alg[i].alg.id,
adsp1_alg[i].alg.ver,
adsp1_alg[i].zm);
if (IS_ERR(alg_region)) {
ret = PTR_ERR(alg_region);
goto out;
}
if (dsp->fw_ver == 0) {
if (i + 1 < n_algs) {
len = be32_to_cpu(adsp1_alg[i + 1].zm);
len -= be32_to_cpu(adsp1_alg[i].zm);
len *= 4;
cs_dsp_create_control(dsp, alg_region, 0,
len, NULL, 0, 0,
WMFW_CTL_TYPE_BYTES);
} else {
cs_dsp_warn(dsp, "Missing length info for region ZM with ID %x\n",
be32_to_cpu(adsp1_alg[i].alg.id));
}
}
}
out:
kfree(adsp1_alg);
return ret;
}
static int cs_dsp_adsp2_setup_algs(struct cs_dsp *dsp)
{
struct wmfw_adsp2_id_hdr adsp2_id;
struct wmfw_adsp2_alg_hdr *adsp2_alg;
struct cs_dsp_alg_region *alg_region;
const struct cs_dsp_region *mem;
unsigned int pos, len;
size_t n_algs;
int i, ret;
mem = cs_dsp_find_region(dsp, WMFW_ADSP2_XM);
if (WARN_ON(!mem))
return -EINVAL;
ret = regmap_raw_read(dsp->regmap, mem->base, &adsp2_id,
sizeof(adsp2_id));
if (ret != 0) {
cs_dsp_err(dsp, "Failed to read algorithm info: %d\n",
ret);
return ret;
}
n_algs = be32_to_cpu(adsp2_id.n_algs);
cs_dsp_parse_wmfw_id_header(dsp, &adsp2_id.fw, n_algs);
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_XM,
adsp2_id.fw.id, adsp2_id.fw.ver,
adsp2_id.xm);
if (IS_ERR(alg_region))
return PTR_ERR(alg_region);
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_YM,
adsp2_id.fw.id, adsp2_id.fw.ver,
adsp2_id.ym);
if (IS_ERR(alg_region))
return PTR_ERR(alg_region);
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_ZM,
adsp2_id.fw.id, adsp2_id.fw.ver,
adsp2_id.zm);
if (IS_ERR(alg_region))
return PTR_ERR(alg_region);
/* Calculate offset and length in DSP words */
pos = sizeof(adsp2_id) / sizeof(u32);
len = (sizeof(*adsp2_alg) * n_algs) / sizeof(u32);
adsp2_alg = cs_dsp_read_algs(dsp, n_algs, mem, pos, len);
if (IS_ERR(adsp2_alg))
return PTR_ERR(adsp2_alg);
for (i = 0; i < n_algs; i++) {
cs_dsp_dbg(dsp,
"%d: ID %x v%d.%d.%d XM@%x YM@%x ZM@%x\n",
i, be32_to_cpu(adsp2_alg[i].alg.id),
(be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff0000) >> 16,
(be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff00) >> 8,
be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff,
be32_to_cpu(adsp2_alg[i].xm),
be32_to_cpu(adsp2_alg[i].ym),
be32_to_cpu(adsp2_alg[i].zm));
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_XM,
adsp2_alg[i].alg.id,
adsp2_alg[i].alg.ver,
adsp2_alg[i].xm);
if (IS_ERR(alg_region)) {
ret = PTR_ERR(alg_region);
goto out;
}
if (dsp->fw_ver == 0) {
if (i + 1 < n_algs) {
len = be32_to_cpu(adsp2_alg[i + 1].xm);
len -= be32_to_cpu(adsp2_alg[i].xm);
len *= 4;
cs_dsp_create_control(dsp, alg_region, 0,
len, NULL, 0, 0,
WMFW_CTL_TYPE_BYTES);
} else {
cs_dsp_warn(dsp, "Missing length info for region XM with ID %x\n",
be32_to_cpu(adsp2_alg[i].alg.id));
}
}
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_YM,
adsp2_alg[i].alg.id,
adsp2_alg[i].alg.ver,
adsp2_alg[i].ym);
if (IS_ERR(alg_region)) {
ret = PTR_ERR(alg_region);
goto out;
}
if (dsp->fw_ver == 0) {
if (i + 1 < n_algs) {
len = be32_to_cpu(adsp2_alg[i + 1].ym);
len -= be32_to_cpu(adsp2_alg[i].ym);
len *= 4;
cs_dsp_create_control(dsp, alg_region, 0,
len, NULL, 0, 0,
WMFW_CTL_TYPE_BYTES);
} else {
cs_dsp_warn(dsp, "Missing length info for region YM with ID %x\n",
be32_to_cpu(adsp2_alg[i].alg.id));
}
}
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_ZM,
adsp2_alg[i].alg.id,
adsp2_alg[i].alg.ver,
adsp2_alg[i].zm);
if (IS_ERR(alg_region)) {
ret = PTR_ERR(alg_region);
goto out;
}
if (dsp->fw_ver == 0) {
if (i + 1 < n_algs) {
len = be32_to_cpu(adsp2_alg[i + 1].zm);
len -= be32_to_cpu(adsp2_alg[i].zm);
len *= 4;
cs_dsp_create_control(dsp, alg_region, 0,
len, NULL, 0, 0,
WMFW_CTL_TYPE_BYTES);
} else {
cs_dsp_warn(dsp, "Missing length info for region ZM with ID %x\n",
be32_to_cpu(adsp2_alg[i].alg.id));
}
}
}
out:
kfree(adsp2_alg);
return ret;
}
static int cs_dsp_halo_create_regions(struct cs_dsp *dsp, __be32 id, __be32 ver,
__be32 xm_base, __be32 ym_base)
{
static const int types[] = {
WMFW_ADSP2_XM, WMFW_HALO_XM_PACKED,
WMFW_ADSP2_YM, WMFW_HALO_YM_PACKED
};
__be32 bases[] = { xm_base, xm_base, ym_base, ym_base };
return cs_dsp_create_regions(dsp, id, ver, ARRAY_SIZE(types), types, bases);
}
static int cs_dsp_halo_setup_algs(struct cs_dsp *dsp)
{
struct wmfw_halo_id_hdr halo_id;
struct wmfw_halo_alg_hdr *halo_alg;
const struct cs_dsp_region *mem;
unsigned int pos, len;
size_t n_algs;
int i, ret;
mem = cs_dsp_find_region(dsp, WMFW_ADSP2_XM);
if (WARN_ON(!mem))
return -EINVAL;
ret = regmap_raw_read(dsp->regmap, mem->base, &halo_id,
sizeof(halo_id));
if (ret != 0) {
cs_dsp_err(dsp, "Failed to read algorithm info: %d\n",
ret);
return ret;
}
n_algs = be32_to_cpu(halo_id.n_algs);
cs_dsp_parse_wmfw_v3_id_header(dsp, &halo_id.fw, n_algs);
ret = cs_dsp_halo_create_regions(dsp, halo_id.fw.id, halo_id.fw.ver,
halo_id.xm_base, halo_id.ym_base);
if (ret)
return ret;
/* Calculate offset and length in DSP words */
pos = sizeof(halo_id) / sizeof(u32);
len = (sizeof(*halo_alg) * n_algs) / sizeof(u32);
halo_alg = cs_dsp_read_algs(dsp, n_algs, mem, pos, len);
if (IS_ERR(halo_alg))
return PTR_ERR(halo_alg);
for (i = 0; i < n_algs; i++) {
cs_dsp_dbg(dsp,
"%d: ID %x v%d.%d.%d XM@%x YM@%x\n",
i, be32_to_cpu(halo_alg[i].alg.id),
(be32_to_cpu(halo_alg[i].alg.ver) & 0xff0000) >> 16,
(be32_to_cpu(halo_alg[i].alg.ver) & 0xff00) >> 8,
be32_to_cpu(halo_alg[i].alg.ver) & 0xff,
be32_to_cpu(halo_alg[i].xm_base),
be32_to_cpu(halo_alg[i].ym_base));
ret = cs_dsp_halo_create_regions(dsp, halo_alg[i].alg.id,
halo_alg[i].alg.ver,
halo_alg[i].xm_base,
halo_alg[i].ym_base);
if (ret)
goto out;
}
out:
kfree(halo_alg);
return ret;
}
static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware,
const char *file)
{
LIST_HEAD(buf_list);
struct regmap *regmap = dsp->regmap;
struct wmfw_coeff_hdr *hdr;
struct wmfw_coeff_item *blk;
const struct cs_dsp_region *mem;
struct cs_dsp_alg_region *alg_region;
const char *region_name;
int ret, pos, blocks, type, offset, reg, version;
char *text = NULL;
struct cs_dsp_buf *buf;
if (!firmware)
return 0;
ret = -EINVAL;
if (sizeof(*hdr) >= firmware->size) {
cs_dsp_err(dsp, "%s: coefficient file too short, %zu bytes\n",
file, firmware->size);
goto out_fw;
}
hdr = (void *)&firmware->data[0];
if (memcmp(hdr->magic, "WMDR", 4) != 0) {
cs_dsp_err(dsp, "%s: invalid coefficient magic\n", file);
goto out_fw;
}
switch (be32_to_cpu(hdr->rev) & 0xff) {
case 1:
case 2:
break;
default:
cs_dsp_err(dsp, "%s: Unsupported coefficient file format %d\n",
file, be32_to_cpu(hdr->rev) & 0xff);
ret = -EINVAL;
goto out_fw;
}
cs_dsp_info(dsp, "%s: v%d.%d.%d\n", file,
(le32_to_cpu(hdr->ver) >> 16) & 0xff,
(le32_to_cpu(hdr->ver) >> 8) & 0xff,
le32_to_cpu(hdr->ver) & 0xff);
pos = le32_to_cpu(hdr->len);
blocks = 0;
while (pos < firmware->size &&
sizeof(*blk) < firmware->size - pos) {
blk = (void *)(&firmware->data[pos]);
type = le16_to_cpu(blk->type);
offset = le16_to_cpu(blk->offset);
version = le32_to_cpu(blk->ver) >> 8;
cs_dsp_dbg(dsp, "%s.%d: %x v%d.%d.%d\n",
file, blocks, le32_to_cpu(blk->id),
(le32_to_cpu(blk->ver) >> 16) & 0xff,
(le32_to_cpu(blk->ver) >> 8) & 0xff,
le32_to_cpu(blk->ver) & 0xff);
cs_dsp_dbg(dsp, "%s.%d: %d bytes at 0x%x in %x\n",
file, blocks, le32_to_cpu(blk->len), offset, type);
reg = 0;
region_name = "Unknown";
switch (type) {
case (WMFW_NAME_TEXT << 8):
text = kzalloc(le32_to_cpu(blk->len) + 1, GFP_KERNEL);
break;
case (WMFW_INFO_TEXT << 8):
case (WMFW_METADATA << 8):
break;
case (WMFW_ABSOLUTE << 8):
/*
* Old files may use this for global
* coefficients.
*/
if (le32_to_cpu(blk->id) == dsp->fw_id &&
offset == 0) {
region_name = "global coefficients";
mem = cs_dsp_find_region(dsp, type);
if (!mem) {
cs_dsp_err(dsp, "No ZM\n");
break;
}
reg = dsp->ops->region_to_reg(mem, 0);
} else {
region_name = "register";
reg = offset;
}
break;
case WMFW_ADSP1_DM:
case WMFW_ADSP1_ZM:
case WMFW_ADSP2_XM:
case WMFW_ADSP2_YM:
case WMFW_HALO_XM_PACKED:
case WMFW_HALO_YM_PACKED:
case WMFW_HALO_PM_PACKED:
cs_dsp_dbg(dsp, "%s.%d: %d bytes in %x for %x\n",
file, blocks, le32_to_cpu(blk->len),
type, le32_to_cpu(blk->id));
region_name = cs_dsp_mem_region_name(type);
mem = cs_dsp_find_region(dsp, type);
if (!mem) {
cs_dsp_err(dsp, "No base for region %x\n", type);
break;
}
alg_region = cs_dsp_find_alg_region(dsp, type,
le32_to_cpu(blk->id));
if (alg_region) {
if (version != alg_region->ver)
cs_dsp_warn(dsp,
"Algorithm coefficient version %d.%d.%d but expected %d.%d.%d\n",
(version >> 16) & 0xFF,
(version >> 8) & 0xFF,
version & 0xFF,
(alg_region->ver >> 16) & 0xFF,
(alg_region->ver >> 8) & 0xFF,
alg_region->ver & 0xFF);
reg = alg_region->base;
reg = dsp->ops->region_to_reg(mem, reg);
reg += offset;
} else {
cs_dsp_err(dsp, "No %s for algorithm %x\n",
region_name, le32_to_cpu(blk->id));
}
break;
default:
cs_dsp_err(dsp, "%s.%d: Unknown region type %x at %d\n",
file, blocks, type, pos);
break;
}
if (text) {
memcpy(text, blk->data, le32_to_cpu(blk->len));
cs_dsp_info(dsp, "%s: %s\n", dsp->fw_name, text);
kfree(text);
text = NULL;
}
if (reg) {
if (le32_to_cpu(blk->len) >
firmware->size - pos - sizeof(*blk)) {
cs_dsp_err(dsp,
"%s.%d: %s region len %d bytes exceeds file length %zu\n",
file, blocks, region_name,
le32_to_cpu(blk->len),
firmware->size);
ret = -EINVAL;
goto out_fw;
}
buf = cs_dsp_buf_alloc(blk->data,
le32_to_cpu(blk->len),
&buf_list);
if (!buf) {
cs_dsp_err(dsp, "Out of memory\n");
ret = -ENOMEM;
goto out_fw;
}
cs_dsp_dbg(dsp, "%s.%d: Writing %d bytes at %x\n",
file, blocks, le32_to_cpu(blk->len),
reg);
ret = regmap_raw_write_async(regmap, reg, buf->buf,
le32_to_cpu(blk->len));
if (ret != 0) {
cs_dsp_err(dsp,
"%s.%d: Failed to write to %x in %s: %d\n",
file, blocks, reg, region_name, ret);
}
}
pos += (le32_to_cpu(blk->len) + sizeof(*blk) + 3) & ~0x03;
blocks++;
}
ret = regmap_async_complete(regmap);
if (ret != 0)
cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret);
if (pos > firmware->size)
cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n",
file, blocks, pos - firmware->size);
cs_dsp_debugfs_save_binname(dsp, file);
out_fw:
regmap_async_complete(regmap);
cs_dsp_buf_free(&buf_list);
kfree(text);
return ret;
}
static int cs_dsp_create_name(struct cs_dsp *dsp)
{
if (!dsp->name) {
dsp->name = devm_kasprintf(dsp->dev, GFP_KERNEL, "DSP%d",
dsp->num);
if (!dsp->name)
return -ENOMEM;
}
return 0;
}
static int cs_dsp_common_init(struct cs_dsp *dsp)
{
int ret;
ret = cs_dsp_create_name(dsp);
if (ret)
return ret;
INIT_LIST_HEAD(&dsp->alg_regions);
INIT_LIST_HEAD(&dsp->ctl_list);
mutex_init(&dsp->pwr_lock);
return 0;
}
/**
 * cs_dsp_adsp1_init() - Initialise a cs_dsp structure representing an ADSP1 device
* @dsp: pointer to DSP structure
*
* Return: Zero for success, a negative number on error.
*/
int cs_dsp_adsp1_init(struct cs_dsp *dsp)
{
dsp->ops = &cs_dsp_adsp1_ops;
return cs_dsp_common_init(dsp);
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp1_init, FW_CS_DSP);
/**
* cs_dsp_adsp1_power_up() - Load and start the named firmware
* @dsp: pointer to DSP structure
* @wmfw_firmware: the firmware to be sent
* @wmfw_filename: file name of firmware to be sent
* @coeff_firmware: the coefficient data to be sent
 * @coeff_filename: file name of the coefficient data to be sent
* @fw_name: the user-friendly firmware name
*
* Return: Zero for success, a negative number on error.
*/
int cs_dsp_adsp1_power_up(struct cs_dsp *dsp,
const struct firmware *wmfw_firmware, char *wmfw_filename,
const struct firmware *coeff_firmware, char *coeff_filename,
const char *fw_name)
{
unsigned int val;
int ret;
mutex_lock(&dsp->pwr_lock);
dsp->fw_name = fw_name;
regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_30,
ADSP1_SYS_ENA, ADSP1_SYS_ENA);
/*
* For simplicity set the DSP clock rate to be the
* SYSCLK rate rather than making it configurable.
*/
if (dsp->sysclk_reg) {
ret = regmap_read(dsp->regmap, dsp->sysclk_reg, &val);
if (ret != 0) {
cs_dsp_err(dsp, "Failed to read SYSCLK state: %d\n", ret);
goto err_mutex;
}
val = (val & dsp->sysclk_mask) >> dsp->sysclk_shift;
ret = regmap_update_bits(dsp->regmap,
dsp->base + ADSP1_CONTROL_31,
ADSP1_CLK_SEL_MASK, val);
if (ret != 0) {
cs_dsp_err(dsp, "Failed to set clock rate: %d\n", ret);
goto err_mutex;
}
}
ret = cs_dsp_load(dsp, wmfw_firmware, wmfw_filename);
if (ret != 0)
goto err_ena;
ret = cs_dsp_adsp1_setup_algs(dsp);
if (ret != 0)
goto err_ena;
ret = cs_dsp_load_coeff(dsp, coeff_firmware, coeff_filename);
if (ret != 0)
goto err_ena;
/* Initialize caches for enabled and unset controls */
ret = cs_dsp_coeff_init_control_caches(dsp);
if (ret != 0)
goto err_ena;
/* Sync set controls */
ret = cs_dsp_coeff_sync_controls(dsp);
if (ret != 0)
goto err_ena;
dsp->booted = true;
/* Start the core running */
regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_30,
ADSP1_CORE_ENA | ADSP1_START,
ADSP1_CORE_ENA | ADSP1_START);
dsp->running = true;
mutex_unlock(&dsp->pwr_lock);
return 0;
err_ena:
regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_30,
ADSP1_SYS_ENA, 0);
err_mutex:
mutex_unlock(&dsp->pwr_lock);
return ret;
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp1_power_up, FW_CS_DSP);
/**
* cs_dsp_adsp1_power_down() - Halts the DSP
* @dsp: pointer to DSP structure
*/
void cs_dsp_adsp1_power_down(struct cs_dsp *dsp)
{
struct cs_dsp_coeff_ctl *ctl;
mutex_lock(&dsp->pwr_lock);
dsp->running = false;
dsp->booted = false;
/* Halt the core */
regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_30,
ADSP1_CORE_ENA | ADSP1_START, 0);
regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_19,
ADSP1_WDMA_BUFFER_LENGTH_MASK, 0);
regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_30,
ADSP1_SYS_ENA, 0);
list_for_each_entry(ctl, &dsp->ctl_list, list)
ctl->enabled = 0;
cs_dsp_free_alg_regions(dsp);
mutex_unlock(&dsp->pwr_lock);
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp1_power_down, FW_CS_DSP);
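/*
 * Illustrative sketch, not part of the original driver: a minimal ADSP1 boot
 * sequence as a client driver might implement it. The firmware file name
 * "example-dsp1.wmfw" and the friendly name "example-fw" are placeholders,
 * and it assumes request_firmware()/release_firmware() from
 * <linux/firmware.h> are available in this translation unit.
 */
static int __maybe_unused cs_dsp_example_adsp1_boot(struct cs_dsp *dsp)
{
	const struct firmware *wmfw;
	int ret;

	ret = request_firmware(&wmfw, "example-dsp1.wmfw", dsp->dev);
	if (ret)
		return ret;

	/* No coefficient file in this example, so pass NULL for both */
	ret = cs_dsp_adsp1_power_up(dsp, wmfw, "example-dsp1.wmfw",
				    NULL, NULL, "example-fw");
	release_firmware(wmfw);

	return ret;
}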
static int cs_dsp_adsp2v2_enable_core(struct cs_dsp *dsp)
{
unsigned int val;
int ret, count;
/* Wait for the RAM to start, should be near instantaneous */
for (count = 0; count < 10; ++count) {
ret = regmap_read(dsp->regmap, dsp->base + ADSP2_STATUS1, &val);
if (ret != 0)
return ret;
if (val & ADSP2_RAM_RDY)
break;
usleep_range(250, 500);
}
if (!(val & ADSP2_RAM_RDY)) {
cs_dsp_err(dsp, "Failed to start DSP RAM\n");
return -EBUSY;
}
cs_dsp_dbg(dsp, "RAM ready after %d polls\n", count);
return 0;
}
static int cs_dsp_adsp2_enable_core(struct cs_dsp *dsp)
{
int ret;
ret = regmap_update_bits_async(dsp->regmap, dsp->base + ADSP2_CONTROL,
ADSP2_SYS_ENA, ADSP2_SYS_ENA);
if (ret != 0)
return ret;
return cs_dsp_adsp2v2_enable_core(dsp);
}
static int cs_dsp_adsp2_lock(struct cs_dsp *dsp, unsigned int lock_regions)
{
struct regmap *regmap = dsp->regmap;
unsigned int code0, code1, lock_reg;
if (!(lock_regions & CS_ADSP2_REGION_ALL))
return 0;
lock_regions &= CS_ADSP2_REGION_ALL;
lock_reg = dsp->base + ADSP2_LOCK_REGION_1_LOCK_REGION_0;
while (lock_regions) {
code0 = code1 = 0;
if (lock_regions & BIT(0)) {
code0 = ADSP2_LOCK_CODE_0;
code1 = ADSP2_LOCK_CODE_1;
}
if (lock_regions & BIT(1)) {
code0 |= ADSP2_LOCK_CODE_0 << ADSP2_LOCK_REGION_SHIFT;
code1 |= ADSP2_LOCK_CODE_1 << ADSP2_LOCK_REGION_SHIFT;
}
regmap_write(regmap, lock_reg, code0);
regmap_write(regmap, lock_reg, code1);
lock_regions >>= 2;
lock_reg += 2;
}
return 0;
}
static int cs_dsp_adsp2_enable_memory(struct cs_dsp *dsp)
{
return regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
ADSP2_MEM_ENA, ADSP2_MEM_ENA);
}
static void cs_dsp_adsp2_disable_memory(struct cs_dsp *dsp)
{
regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
ADSP2_MEM_ENA, 0);
}
static void cs_dsp_adsp2_disable_core(struct cs_dsp *dsp)
{
regmap_write(dsp->regmap, dsp->base + ADSP2_RDMA_CONFIG_1, 0);
regmap_write(dsp->regmap, dsp->base + ADSP2_WDMA_CONFIG_1, 0);
regmap_write(dsp->regmap, dsp->base + ADSP2_WDMA_CONFIG_2, 0);
regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
ADSP2_SYS_ENA, 0);
}
static void cs_dsp_adsp2v2_disable_core(struct cs_dsp *dsp)
{
regmap_write(dsp->regmap, dsp->base + ADSP2_RDMA_CONFIG_1, 0);
regmap_write(dsp->regmap, dsp->base + ADSP2_WDMA_CONFIG_1, 0);
regmap_write(dsp->regmap, dsp->base + ADSP2V2_WDMA_CONFIG_2, 0);
}
static int cs_dsp_halo_configure_mpu(struct cs_dsp *dsp, unsigned int lock_regions)
{
struct reg_sequence config[] = {
{ dsp->base + HALO_MPU_LOCK_CONFIG, 0x5555 },
{ dsp->base + HALO_MPU_LOCK_CONFIG, 0xAAAA },
{ dsp->base + HALO_MPU_XMEM_ACCESS_0, 0xFFFFFFFF },
{ dsp->base + HALO_MPU_YMEM_ACCESS_0, 0xFFFFFFFF },
{ dsp->base + HALO_MPU_WINDOW_ACCESS_0, lock_regions },
{ dsp->base + HALO_MPU_XREG_ACCESS_0, lock_regions },
{ dsp->base + HALO_MPU_YREG_ACCESS_0, lock_regions },
{ dsp->base + HALO_MPU_XMEM_ACCESS_1, 0xFFFFFFFF },
{ dsp->base + HALO_MPU_YMEM_ACCESS_1, 0xFFFFFFFF },
{ dsp->base + HALO_MPU_WINDOW_ACCESS_1, lock_regions },
{ dsp->base + HALO_MPU_XREG_ACCESS_1, lock_regions },
{ dsp->base + HALO_MPU_YREG_ACCESS_1, lock_regions },
{ dsp->base + HALO_MPU_XMEM_ACCESS_2, 0xFFFFFFFF },
{ dsp->base + HALO_MPU_YMEM_ACCESS_2, 0xFFFFFFFF },
{ dsp->base + HALO_MPU_WINDOW_ACCESS_2, lock_regions },
{ dsp->base + HALO_MPU_XREG_ACCESS_2, lock_regions },
{ dsp->base + HALO_MPU_YREG_ACCESS_2, lock_regions },
{ dsp->base + HALO_MPU_XMEM_ACCESS_3, 0xFFFFFFFF },
{ dsp->base + HALO_MPU_YMEM_ACCESS_3, 0xFFFFFFFF },
{ dsp->base + HALO_MPU_WINDOW_ACCESS_3, lock_regions },
{ dsp->base + HALO_MPU_XREG_ACCESS_3, lock_regions },
{ dsp->base + HALO_MPU_YREG_ACCESS_3, lock_regions },
{ dsp->base + HALO_MPU_LOCK_CONFIG, 0 },
};
return regmap_multi_reg_write(dsp->regmap, config, ARRAY_SIZE(config));
}
/**
* cs_dsp_set_dspclk() - Applies the given frequency to the given cs_dsp
* @dsp: pointer to DSP structure
* @freq: clock rate to set
*
* This is only for use on ADSP2 cores.
*
* Return: Zero for success, a negative number on error.
*/
int cs_dsp_set_dspclk(struct cs_dsp *dsp, unsigned int freq)
{
int ret;
ret = regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CLOCKING,
ADSP2_CLK_SEL_MASK,
freq << ADSP2_CLK_SEL_SHIFT);
if (ret)
cs_dsp_err(dsp, "Failed to set clock rate: %d\n", ret);
return ret;
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_set_dspclk, FW_CS_DSP);
static void cs_dsp_stop_watchdog(struct cs_dsp *dsp)
{
regmap_update_bits(dsp->regmap, dsp->base + ADSP2_WATCHDOG,
ADSP2_WDT_ENA_MASK, 0);
}
static void cs_dsp_halo_stop_watchdog(struct cs_dsp *dsp)
{
regmap_update_bits(dsp->regmap, dsp->base + HALO_WDT_CONTROL,
HALO_WDT_EN_MASK, 0);
}
/**
* cs_dsp_power_up() - Downloads firmware to the DSP
* @dsp: pointer to DSP structure
* @wmfw_firmware: the firmware to be sent
* @wmfw_filename: file name of firmware to be sent
* @coeff_firmware: the coefficient data to be sent
 * @coeff_filename: file name of the coefficient data to be sent
* @fw_name: the user-friendly firmware name
*
 * This function is used on ADSP2 and Halo DSP cores; it powers up the DSP core
 * and downloads the firmware but does not start the firmware running. The
 * cs_dsp booted flag will be set once completed, and if the core has a low-power
 * memory retention mode it will be put into this state after the firmware is
 * downloaded.
*
* Return: Zero for success, a negative number on error.
*/
int cs_dsp_power_up(struct cs_dsp *dsp,
const struct firmware *wmfw_firmware, char *wmfw_filename,
const struct firmware *coeff_firmware, char *coeff_filename,
const char *fw_name)
{
int ret;
mutex_lock(&dsp->pwr_lock);
dsp->fw_name = fw_name;
if (dsp->ops->enable_memory) {
ret = dsp->ops->enable_memory(dsp);
if (ret != 0)
goto err_mutex;
}
if (dsp->ops->enable_core) {
ret = dsp->ops->enable_core(dsp);
if (ret != 0)
goto err_mem;
}
ret = cs_dsp_load(dsp, wmfw_firmware, wmfw_filename);
if (ret != 0)
goto err_ena;
ret = dsp->ops->setup_algs(dsp);
if (ret != 0)
goto err_ena;
ret = cs_dsp_load_coeff(dsp, coeff_firmware, coeff_filename);
if (ret != 0)
goto err_ena;
/* Initialize caches for enabled and unset controls */
ret = cs_dsp_coeff_init_control_caches(dsp);
if (ret != 0)
goto err_ena;
if (dsp->ops->disable_core)
dsp->ops->disable_core(dsp);
dsp->booted = true;
mutex_unlock(&dsp->pwr_lock);
return 0;
err_ena:
if (dsp->ops->disable_core)
dsp->ops->disable_core(dsp);
err_mem:
if (dsp->ops->disable_memory)
dsp->ops->disable_memory(dsp);
err_mutex:
mutex_unlock(&dsp->pwr_lock);
return ret;
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_power_up, FW_CS_DSP);
/**
* cs_dsp_power_down() - Powers-down the DSP
* @dsp: pointer to DSP structure
*
* cs_dsp_stop() must have been called before this function. The core will be
* fully powered down and so the memory will not be retained.
*/
void cs_dsp_power_down(struct cs_dsp *dsp)
{
struct cs_dsp_coeff_ctl *ctl;
mutex_lock(&dsp->pwr_lock);
cs_dsp_debugfs_clear(dsp);
dsp->fw_id = 0;
dsp->fw_id_version = 0;
dsp->booted = false;
if (dsp->ops->disable_memory)
dsp->ops->disable_memory(dsp);
list_for_each_entry(ctl, &dsp->ctl_list, list)
ctl->enabled = 0;
cs_dsp_free_alg_regions(dsp);
mutex_unlock(&dsp->pwr_lock);
cs_dsp_dbg(dsp, "Shutdown complete\n");
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_power_down, FW_CS_DSP);
static int cs_dsp_adsp2_start_core(struct cs_dsp *dsp)
{
return regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
ADSP2_CORE_ENA | ADSP2_START,
ADSP2_CORE_ENA | ADSP2_START);
}
static void cs_dsp_adsp2_stop_core(struct cs_dsp *dsp)
{
regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
ADSP2_CORE_ENA | ADSP2_START, 0);
}
/**
* cs_dsp_run() - Starts the firmware running
* @dsp: pointer to DSP structure
*
* cs_dsp_power_up() must have previously been called successfully.
*
* Return: Zero for success, a negative number on error.
*/
int cs_dsp_run(struct cs_dsp *dsp)
{
int ret;
mutex_lock(&dsp->pwr_lock);
if (!dsp->booted) {
ret = -EIO;
goto err;
}
if (dsp->ops->enable_core) {
ret = dsp->ops->enable_core(dsp);
if (ret != 0)
goto err;
}
if (dsp->client_ops->pre_run) {
ret = dsp->client_ops->pre_run(dsp);
if (ret)
goto err;
}
/* Sync set controls */
ret = cs_dsp_coeff_sync_controls(dsp);
if (ret != 0)
goto err;
if (dsp->ops->lock_memory) {
ret = dsp->ops->lock_memory(dsp, dsp->lock_regions);
if (ret != 0) {
cs_dsp_err(dsp, "Error configuring MPU: %d\n", ret);
goto err;
}
}
if (dsp->ops->start_core) {
ret = dsp->ops->start_core(dsp);
if (ret != 0)
goto err;
}
dsp->running = true;
if (dsp->client_ops->post_run) {
ret = dsp->client_ops->post_run(dsp);
if (ret)
goto err;
}
mutex_unlock(&dsp->pwr_lock);
return 0;
err:
if (dsp->ops->stop_core)
dsp->ops->stop_core(dsp);
if (dsp->ops->disable_core)
dsp->ops->disable_core(dsp);
mutex_unlock(&dsp->pwr_lock);
return ret;
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_run, FW_CS_DSP);
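/*
 * Illustrative sketch, not part of the original driver: the typical
 * ADSP2/Halo lifecycle a client follows. The firmware and coefficient blobs
 * and their file names are supplied by the caller; "example-fw" is a
 * placeholder friendly name.
 */
static int __maybe_unused cs_dsp_example_start(struct cs_dsp *dsp,
					       const struct firmware *wmfw,
					       char *wmfw_name,
					       const struct firmware *coeff,
					       char *coeff_name)
{
	int ret;

	ret = cs_dsp_power_up(dsp, wmfw, wmfw_name, coeff, coeff_name,
			      "example-fw");
	if (ret)
		return ret;

	ret = cs_dsp_run(dsp);
	if (ret)
		cs_dsp_power_down(dsp);

	return ret;
}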
/**
* cs_dsp_stop() - Stops the firmware
* @dsp: pointer to DSP structure
*
* Memory will not be disabled so firmware will remain loaded.
*/
void cs_dsp_stop(struct cs_dsp *dsp)
{
/* Tell the firmware to cleanup */
cs_dsp_signal_event_controls(dsp, CS_DSP_FW_EVENT_SHUTDOWN);
if (dsp->ops->stop_watchdog)
dsp->ops->stop_watchdog(dsp);
/* Log firmware state; it can be useful for analysis */
if (dsp->ops->show_fw_status)
dsp->ops->show_fw_status(dsp);
mutex_lock(&dsp->pwr_lock);
if (dsp->client_ops->pre_stop)
dsp->client_ops->pre_stop(dsp);
dsp->running = false;
if (dsp->ops->stop_core)
dsp->ops->stop_core(dsp);
if (dsp->ops->disable_core)
dsp->ops->disable_core(dsp);
if (dsp->client_ops->post_stop)
dsp->client_ops->post_stop(dsp);
mutex_unlock(&dsp->pwr_lock);
cs_dsp_dbg(dsp, "Execution stopped\n");
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_stop, FW_CS_DSP);
static int cs_dsp_halo_start_core(struct cs_dsp *dsp)
{
int ret;
ret = regmap_update_bits(dsp->regmap, dsp->base + HALO_CCM_CORE_CONTROL,
HALO_CORE_RESET | HALO_CORE_EN,
HALO_CORE_RESET | HALO_CORE_EN);
if (ret)
return ret;
return regmap_update_bits(dsp->regmap, dsp->base + HALO_CCM_CORE_CONTROL,
HALO_CORE_RESET, 0);
}
static void cs_dsp_halo_stop_core(struct cs_dsp *dsp)
{
regmap_update_bits(dsp->regmap, dsp->base + HALO_CCM_CORE_CONTROL,
HALO_CORE_EN, 0);
/* reset halo core with CORE_SOFT_RESET */
regmap_update_bits(dsp->regmap, dsp->base + HALO_CORE_SOFT_RESET,
HALO_CORE_SOFT_RESET_MASK, 1);
}
/**
 * cs_dsp_adsp2_init() - Initialise a cs_dsp structure representing an ADSP2 core
* @dsp: pointer to DSP structure
*
* Return: Zero for success, a negative number on error.
*/
int cs_dsp_adsp2_init(struct cs_dsp *dsp)
{
int ret;
switch (dsp->rev) {
case 0:
/*
* Disable the DSP memory by default when in reset for a small
* power saving.
*/
ret = regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
ADSP2_MEM_ENA, 0);
if (ret) {
cs_dsp_err(dsp,
"Failed to clear memory retention: %d\n", ret);
return ret;
}
dsp->ops = &cs_dsp_adsp2_ops[0];
break;
case 1:
dsp->ops = &cs_dsp_adsp2_ops[1];
break;
default:
dsp->ops = &cs_dsp_adsp2_ops[2];
break;
}
return cs_dsp_common_init(dsp);
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp2_init, FW_CS_DSP);
/**
* cs_dsp_halo_init() - Initialise a cs_dsp structure representing a HALO Core DSP
* @dsp: pointer to DSP structure
*
* Return: Zero for success, a negative number on error.
*/
int cs_dsp_halo_init(struct cs_dsp *dsp)
{
if (dsp->no_core_startstop)
dsp->ops = &cs_dsp_halo_ao_ops;
else
dsp->ops = &cs_dsp_halo_ops;
return cs_dsp_common_init(dsp);
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_halo_init, FW_CS_DSP);
/**
* cs_dsp_remove() - Clean a cs_dsp before deletion
* @dsp: pointer to DSP structure
*/
void cs_dsp_remove(struct cs_dsp *dsp)
{
struct cs_dsp_coeff_ctl *ctl;
while (!list_empty(&dsp->ctl_list)) {
ctl = list_first_entry(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
if (dsp->client_ops->control_remove)
dsp->client_ops->control_remove(ctl);
list_del(&ctl->list);
cs_dsp_free_ctl_blk(ctl);
}
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_remove, FW_CS_DSP);
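/*
 * Illustrative sketch, not part of the original driver: the teardown order a
 * client typically follows when unbinding, mirroring the power-up sequence in
 * reverse (stop the firmware, power the core down, then free the controls).
 */
static void __maybe_unused cs_dsp_example_teardown(struct cs_dsp *dsp)
{
	cs_dsp_stop(dsp);
	cs_dsp_power_down(dsp);
	cs_dsp_remove(dsp);
}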
/**
* cs_dsp_read_raw_data_block() - Reads a block of data from DSP memory
* @dsp: pointer to DSP structure
* @mem_type: the type of DSP memory containing the data to be read
* @mem_addr: the address of the data within the memory region
 * @num_words: the number of DSP words to read
* @data: a buffer to store the fetched data
*
 * If this is used to read unpacked 24-bit memory, each 24-bit DSP word will
 * occupy 32 bits in data (the most significant byte will be 0). This padding
 * can be removed using cs_dsp_remove_padding().
*
* Return: Zero for success, a negative number on error.
*/
int cs_dsp_read_raw_data_block(struct cs_dsp *dsp, int mem_type, unsigned int mem_addr,
unsigned int num_words, __be32 *data)
{
struct cs_dsp_region const *mem = cs_dsp_find_region(dsp, mem_type);
unsigned int reg;
int ret;
lockdep_assert_held(&dsp->pwr_lock);
if (!mem)
return -EINVAL;
reg = dsp->ops->region_to_reg(mem, mem_addr);
ret = regmap_raw_read(dsp->regmap, reg, data,
sizeof(*data) * num_words);
if (ret < 0)
return ret;
return 0;
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_read_raw_data_block, FW_CS_DSP);
/**
* cs_dsp_read_data_word() - Reads a word from DSP memory
* @dsp: pointer to DSP structure
* @mem_type: the type of DSP memory containing the data to be read
* @mem_addr: the address of the data within the memory region
* @data: a buffer to store the fetched data
*
* Return: Zero for success, a negative number on error.
*/
int cs_dsp_read_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_addr, u32 *data)
{
__be32 raw;
int ret;
ret = cs_dsp_read_raw_data_block(dsp, mem_type, mem_addr, 1, &raw);
if (ret < 0)
return ret;
*data = be32_to_cpu(raw) & 0x00ffffffu;
return 0;
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_read_data_word, FW_CS_DSP);
/**
* cs_dsp_write_data_word() - Writes a word to DSP memory
* @dsp: pointer to DSP structure
* @mem_type: the type of DSP memory containing the data to be written
* @mem_addr: the address of the data within the memory region
* @data: the data to be written
*
* Return: Zero for success, a negative number on error.
*/
int cs_dsp_write_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_addr, u32 data)
{
struct cs_dsp_region const *mem = cs_dsp_find_region(dsp, mem_type);
__be32 val = cpu_to_be32(data & 0x00ffffffu);
unsigned int reg;
lockdep_assert_held(&dsp->pwr_lock);
if (!mem)
return -EINVAL;
reg = dsp->ops->region_to_reg(mem, mem_addr);
return regmap_raw_write(dsp->regmap, reg, &val, sizeof(val));
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_write_data_word, FW_CS_DSP);
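/*
 * Illustrative sketch, not part of the original driver: a 24-bit
 * read-modify-write of a single XM word while holding the power lock, as the
 * lockdep assertions above require. The address 0x180000 and the flag bit
 * are placeholders.
 */
static int __maybe_unused cs_dsp_example_set_flag(struct cs_dsp *dsp)
{
	u32 val;
	int ret;

	mutex_lock(&dsp->pwr_lock);
	ret = cs_dsp_read_data_word(dsp, WMFW_ADSP2_XM, 0x180000, &val);
	if (!ret)
		ret = cs_dsp_write_data_word(dsp, WMFW_ADSP2_XM, 0x180000,
					     val | BIT(0));
	mutex_unlock(&dsp->pwr_lock);

	return ret;
}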
/**
* cs_dsp_remove_padding() - Convert unpacked words to packed bytes
* @buf: buffer containing DSP words read from DSP memory
* @nwords: number of words to convert
*
* DSP words from the register map have pad bytes and the data bytes
* are in swapped order. This swaps to the native endian order and
* strips the pad bytes.
*/
void cs_dsp_remove_padding(u32 *buf, int nwords)
{
const __be32 *pack_in = (__be32 *)buf;
u8 *pack_out = (u8 *)buf;
int i;
for (i = 0; i < nwords; i++) {
u32 word = be32_to_cpu(*pack_in++);
*pack_out++ = (u8)word;
*pack_out++ = (u8)(word >> 8);
*pack_out++ = (u8)(word >> 16);
}
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_remove_padding, FW_CS_DSP);
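/*
 * Illustrative sketch, not part of the original driver: reading a block of
 * unpacked 24-bit words and stripping the pad bytes in place. The region
 * type and address are placeholders; buf must hold at least num_words
 * 32-bit words and the caller must hold pwr_lock.
 */
static int __maybe_unused cs_dsp_example_read_block(struct cs_dsp *dsp,
						    u32 *buf,
						    unsigned int num_words)
{
	int ret;

	lockdep_assert_held(&dsp->pwr_lock);

	ret = cs_dsp_read_raw_data_block(dsp, WMFW_ADSP2_YM, 0x40,
					 num_words, (__be32 *)buf);
	if (ret)
		return ret;

	/* Each 24-bit word now occupies three packed bytes at the start of buf */
	cs_dsp_remove_padding(buf, num_words);

	return 0;
}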
/**
* cs_dsp_adsp2_bus_error() - Handle a DSP bus error interrupt
* @dsp: pointer to DSP structure
*
* The firmware and DSP state will be logged for future analysis.
*/
void cs_dsp_adsp2_bus_error(struct cs_dsp *dsp)
{
unsigned int val;
struct regmap *regmap = dsp->regmap;
int ret = 0;
mutex_lock(&dsp->pwr_lock);
ret = regmap_read(regmap, dsp->base + ADSP2_LOCK_REGION_CTRL, &val);
if (ret) {
cs_dsp_err(dsp,
"Failed to read Region Lock Ctrl register: %d\n", ret);
goto error;
}
if (val & ADSP2_WDT_TIMEOUT_STS_MASK) {
cs_dsp_err(dsp, "watchdog timeout error\n");
dsp->ops->stop_watchdog(dsp);
if (dsp->client_ops->watchdog_expired)
dsp->client_ops->watchdog_expired(dsp);
}
if (val & (ADSP2_ADDR_ERR_MASK | ADSP2_REGION_LOCK_ERR_MASK)) {
if (val & ADSP2_ADDR_ERR_MASK)
cs_dsp_err(dsp, "bus error: address error\n");
else
cs_dsp_err(dsp, "bus error: region lock error\n");
ret = regmap_read(regmap, dsp->base + ADSP2_BUS_ERR_ADDR, &val);
if (ret) {
cs_dsp_err(dsp,
"Failed to read Bus Err Addr register: %d\n",
ret);
goto error;
}
cs_dsp_err(dsp, "bus error address = 0x%x\n",
val & ADSP2_BUS_ERR_ADDR_MASK);
ret = regmap_read(regmap,
dsp->base + ADSP2_PMEM_ERR_ADDR_XMEM_ERR_ADDR,
&val);
if (ret) {
cs_dsp_err(dsp,
"Failed to read Pmem Xmem Err Addr register: %d\n",
ret);
goto error;
}
cs_dsp_err(dsp, "xmem error address = 0x%x\n",
val & ADSP2_XMEM_ERR_ADDR_MASK);
cs_dsp_err(dsp, "pmem error address = 0x%x\n",
(val & ADSP2_PMEM_ERR_ADDR_MASK) >>
ADSP2_PMEM_ERR_ADDR_SHIFT);
}
regmap_update_bits(regmap, dsp->base + ADSP2_LOCK_REGION_CTRL,
ADSP2_CTRL_ERR_EINT, ADSP2_CTRL_ERR_EINT);
error:
mutex_unlock(&dsp->pwr_lock);
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_adsp2_bus_error, FW_CS_DSP);
/**
* cs_dsp_halo_bus_error() - Handle a DSP bus error interrupt
* @dsp: pointer to DSP structure
*
* The firmware and DSP state will be logged for future analysis.
*/
void cs_dsp_halo_bus_error(struct cs_dsp *dsp)
{
struct regmap *regmap = dsp->regmap;
unsigned int fault[6];
struct reg_sequence clear[] = {
{ dsp->base + HALO_MPU_XM_VIO_STATUS, 0x0 },
{ dsp->base + HALO_MPU_YM_VIO_STATUS, 0x0 },
{ dsp->base + HALO_MPU_PM_VIO_STATUS, 0x0 },
};
int ret;
mutex_lock(&dsp->pwr_lock);
ret = regmap_read(regmap, dsp->base_sysinfo + HALO_AHBM_WINDOW_DEBUG_1,
fault);
if (ret) {
cs_dsp_warn(dsp, "Failed to read AHB DEBUG_1: %d\n", ret);
goto exit_unlock;
}
cs_dsp_warn(dsp, "AHB: STATUS: 0x%x ADDR: 0x%x\n",
*fault & HALO_AHBM_FLAGS_ERR_MASK,
(*fault & HALO_AHBM_CORE_ERR_ADDR_MASK) >>
HALO_AHBM_CORE_ERR_ADDR_SHIFT);
ret = regmap_read(regmap, dsp->base_sysinfo + HALO_AHBM_WINDOW_DEBUG_0,
fault);
if (ret) {
cs_dsp_warn(dsp, "Failed to read AHB DEBUG_0: %d\n", ret);
goto exit_unlock;
}
cs_dsp_warn(dsp, "AHB: SYS_ADDR: 0x%x\n", *fault);
ret = regmap_bulk_read(regmap, dsp->base + HALO_MPU_XM_VIO_ADDR,
fault, ARRAY_SIZE(fault));
if (ret) {
cs_dsp_warn(dsp, "Failed to read MPU fault info: %d\n", ret);
goto exit_unlock;
}
cs_dsp_warn(dsp, "XM: STATUS:0x%x ADDR:0x%x\n", fault[1], fault[0]);
cs_dsp_warn(dsp, "YM: STATUS:0x%x ADDR:0x%x\n", fault[3], fault[2]);
cs_dsp_warn(dsp, "PM: STATUS:0x%x ADDR:0x%x\n", fault[5], fault[4]);
ret = regmap_multi_reg_write(dsp->regmap, clear, ARRAY_SIZE(clear));
if (ret)
cs_dsp_warn(dsp, "Failed to clear MPU status: %d\n", ret);
exit_unlock:
mutex_unlock(&dsp->pwr_lock);
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_halo_bus_error, FW_CS_DSP);
/**
* cs_dsp_halo_wdt_expire() - Handle DSP watchdog expiry
* @dsp: pointer to DSP structure
*
* This is logged for future analysis.
*/
void cs_dsp_halo_wdt_expire(struct cs_dsp *dsp)
{
mutex_lock(&dsp->pwr_lock);
cs_dsp_warn(dsp, "WDT Expiry Fault\n");
dsp->ops->stop_watchdog(dsp);
if (dsp->client_ops->watchdog_expired)
dsp->client_ops->watchdog_expired(dsp);
mutex_unlock(&dsp->pwr_lock);
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_halo_wdt_expire, FW_CS_DSP);
static const struct cs_dsp_ops cs_dsp_adsp1_ops = {
.validate_version = cs_dsp_validate_version,
.parse_sizes = cs_dsp_adsp1_parse_sizes,
.region_to_reg = cs_dsp_region_to_reg,
};
static const struct cs_dsp_ops cs_dsp_adsp2_ops[] = {
{
.parse_sizes = cs_dsp_adsp2_parse_sizes,
.validate_version = cs_dsp_validate_version,
.setup_algs = cs_dsp_adsp2_setup_algs,
.region_to_reg = cs_dsp_region_to_reg,
.show_fw_status = cs_dsp_adsp2_show_fw_status,
.enable_memory = cs_dsp_adsp2_enable_memory,
.disable_memory = cs_dsp_adsp2_disable_memory,
.enable_core = cs_dsp_adsp2_enable_core,
.disable_core = cs_dsp_adsp2_disable_core,
.start_core = cs_dsp_adsp2_start_core,
.stop_core = cs_dsp_adsp2_stop_core,
},
{
.parse_sizes = cs_dsp_adsp2_parse_sizes,
.validate_version = cs_dsp_validate_version,
.setup_algs = cs_dsp_adsp2_setup_algs,
.region_to_reg = cs_dsp_region_to_reg,
.show_fw_status = cs_dsp_adsp2v2_show_fw_status,
.enable_memory = cs_dsp_adsp2_enable_memory,
.disable_memory = cs_dsp_adsp2_disable_memory,
.lock_memory = cs_dsp_adsp2_lock,
.enable_core = cs_dsp_adsp2v2_enable_core,
.disable_core = cs_dsp_adsp2v2_disable_core,
.start_core = cs_dsp_adsp2_start_core,
.stop_core = cs_dsp_adsp2_stop_core,
},
{
.parse_sizes = cs_dsp_adsp2_parse_sizes,
.validate_version = cs_dsp_validate_version,
.setup_algs = cs_dsp_adsp2_setup_algs,
.region_to_reg = cs_dsp_region_to_reg,
.show_fw_status = cs_dsp_adsp2v2_show_fw_status,
.stop_watchdog = cs_dsp_stop_watchdog,
.enable_memory = cs_dsp_adsp2_enable_memory,
.disable_memory = cs_dsp_adsp2_disable_memory,
.lock_memory = cs_dsp_adsp2_lock,
.enable_core = cs_dsp_adsp2v2_enable_core,
.disable_core = cs_dsp_adsp2v2_disable_core,
.start_core = cs_dsp_adsp2_start_core,
.stop_core = cs_dsp_adsp2_stop_core,
},
};
static const struct cs_dsp_ops cs_dsp_halo_ops = {
.parse_sizes = cs_dsp_adsp2_parse_sizes,
.validate_version = cs_dsp_halo_validate_version,
.setup_algs = cs_dsp_halo_setup_algs,
.region_to_reg = cs_dsp_halo_region_to_reg,
.show_fw_status = cs_dsp_halo_show_fw_status,
.stop_watchdog = cs_dsp_halo_stop_watchdog,
.lock_memory = cs_dsp_halo_configure_mpu,
.start_core = cs_dsp_halo_start_core,
.stop_core = cs_dsp_halo_stop_core,
};
static const struct cs_dsp_ops cs_dsp_halo_ao_ops = {
.parse_sizes = cs_dsp_adsp2_parse_sizes,
.validate_version = cs_dsp_halo_validate_version,
.setup_algs = cs_dsp_halo_setup_algs,
.region_to_reg = cs_dsp_halo_region_to_reg,
.show_fw_status = cs_dsp_halo_show_fw_status,
};
/**
* cs_dsp_chunk_write() - Format data to a DSP memory chunk
* @ch: Pointer to the chunk structure
* @nbits: Number of bits to write
* @val: Value to write
*
 * This function sequentially writes values into the format required for DSP
 * memory; it handles both inserting the padding bytes and converting to
 * big endian. Note that data is only committed to the chunk when a whole DSP
 * word's worth of data is available.
*
* Return: Zero for success, a negative number on error.
*/
int cs_dsp_chunk_write(struct cs_dsp_chunk *ch, int nbits, u32 val)
{
int nwrite, i;
nwrite = min(CS_DSP_DATA_WORD_BITS - ch->cachebits, nbits);
ch->cache <<= nwrite;
ch->cache |= val >> (nbits - nwrite);
ch->cachebits += nwrite;
nbits -= nwrite;
if (ch->cachebits == CS_DSP_DATA_WORD_BITS) {
if (cs_dsp_chunk_end(ch))
return -ENOSPC;
ch->cache &= 0xFFFFFF;
for (i = 0; i < sizeof(ch->cache); i++, ch->cache <<= BITS_PER_BYTE)
*ch->data++ = (ch->cache & 0xFF000000) >> CS_DSP_DATA_WORD_BITS;
ch->bytes += sizeof(ch->cache);
ch->cachebits = 0;
}
if (nbits)
return cs_dsp_chunk_write(ch, nbits, val);
return 0;
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_chunk_write, FW_CS_DSP);
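/*
 * Illustrative sketch, not part of the original driver: packing three values
 * into a scratch buffer in DSP memory format. It assumes the cs_dsp_chunk()
 * initialiser from cs_dsp.h; the values and field widths are placeholders.
 * cs_dsp_chunk_flush() (below) pads and commits the final partial word.
 */
static int __maybe_unused cs_dsp_example_pack(void *scratch, int scratch_len)
{
	struct cs_dsp_chunk ch = cs_dsp_chunk(scratch, scratch_len);
	int ret;

	ret = cs_dsp_chunk_write(&ch, 24, 0x123456);
	if (!ret)
		ret = cs_dsp_chunk_write(&ch, 24, 0xabcdef);
	if (!ret)
		ret = cs_dsp_chunk_write(&ch, 16, 0xbeef);
	if (!ret)
		ret = cs_dsp_chunk_flush(&ch);

	return ret;
}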
/**
* cs_dsp_chunk_flush() - Pad remaining data with zero and commit to chunk
* @ch: Pointer to the chunk structure
*
 * As cs_dsp_chunk_write() only writes data when a whole DSP word is ready to
 * be written out, it is possible that some data will remain in the cache. This
 * function pads that data with zeros up to a whole DSP word and writes it out.
*
* Return: Zero for success, a negative number on error.
*/
int cs_dsp_chunk_flush(struct cs_dsp_chunk *ch)
{
if (!ch->cachebits)
return 0;
return cs_dsp_chunk_write(ch, CS_DSP_DATA_WORD_BITS - ch->cachebits, 0);
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_chunk_flush, FW_CS_DSP);
/**
* cs_dsp_chunk_read() - Parse data from a DSP memory chunk
* @ch: Pointer to the chunk structure
* @nbits: Number of bits to read
*
 * This function sequentially reads values from a DSP memory formatted buffer;
 * it handles both removing the padding bytes and converting from big endian.
*
* Return: A negative number is returned on error, otherwise the read value.
*/
int cs_dsp_chunk_read(struct cs_dsp_chunk *ch, int nbits)
{
int nread, i;
u32 result;
if (!ch->cachebits) {
if (cs_dsp_chunk_end(ch))
return -ENOSPC;
ch->cache = 0;
ch->cachebits = CS_DSP_DATA_WORD_BITS;
for (i = 0; i < sizeof(ch->cache); i++, ch->cache <<= BITS_PER_BYTE)
ch->cache |= *ch->data++;
ch->bytes += sizeof(ch->cache);
}
nread = min(ch->cachebits, nbits);
nbits -= nread;
result = ch->cache >> ((sizeof(ch->cache) * BITS_PER_BYTE) - nread);
ch->cache <<= nread;
ch->cachebits -= nread;
if (nbits)
result = (result << nbits) | cs_dsp_chunk_read(ch, nbits);
return result;
}
EXPORT_SYMBOL_NS_GPL(cs_dsp_chunk_read, FW_CS_DSP);
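/*
 * Illustrative sketch, not part of the original driver: unpacking the values
 * written by the packing example above from a buffer already in DSP memory
 * format. It assumes the cs_dsp_chunk() initialiser from cs_dsp.h; field
 * widths must match those used when writing, and error checking is omitted.
 */
static void __maybe_unused cs_dsp_example_unpack(void *packed, int packed_len,
						 u32 *a, u32 *b, u32 *c)
{
	struct cs_dsp_chunk ch = cs_dsp_chunk(packed, packed_len);

	*a = cs_dsp_chunk_read(&ch, 24);
	*b = cs_dsp_chunk_read(&ch, 24);
	*c = cs_dsp_chunk_read(&ch, 16);
}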
MODULE_DESCRIPTION("Cirrus Logic DSP Support");
MODULE_AUTHOR("Simon Trimmer <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/firmware/cirrus/cs_dsp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* dev-path-parser.c - EFI Device Path parser
* Copyright (C) 2016 Lukas Wunner <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2) as
* published by the Free Software Foundation.
*/
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/pci.h>
static long __init parse_acpi_path(const struct efi_dev_path *node,
struct device *parent, struct device **child)
{
struct acpi_device *adev;
struct device *phys_dev;
char hid[ACPI_ID_LEN];
u64 uid;
int ret;
if (node->header.length != 12)
return -EINVAL;
sprintf(hid, "%c%c%c%04X",
'A' + ((node->acpi.hid >> 10) & 0x1f) - 1,
'A' + ((node->acpi.hid >> 5) & 0x1f) - 1,
'A' + ((node->acpi.hid >> 0) & 0x1f) - 1,
node->acpi.hid >> 16);
for_each_acpi_dev_match(adev, hid, NULL, -1) {
ret = acpi_dev_uid_to_integer(adev, &uid);
if (ret == 0 && node->acpi.uid == uid)
break;
if (ret == -ENODATA && node->acpi.uid == 0)
break;
}
if (!adev)
return -ENODEV;
phys_dev = acpi_get_first_physical_node(adev);
if (phys_dev) {
*child = get_device(phys_dev);
acpi_dev_put(adev);
} else
*child = &adev->dev;
return 0;
}
static int __init match_pci_dev(struct device *dev, void *data)
{
unsigned int devfn = *(unsigned int *)data;
return dev_is_pci(dev) && to_pci_dev(dev)->devfn == devfn;
}
static long __init parse_pci_path(const struct efi_dev_path *node,
struct device *parent, struct device **child)
{
unsigned int devfn;
if (node->header.length != 6)
return -EINVAL;
if (!parent)
return -EINVAL;
devfn = PCI_DEVFN(node->pci.dev, node->pci.fn);
*child = device_find_child(parent, &devfn, match_pci_dev);
if (!*child)
return -ENODEV;
return 0;
}
/*
* Insert parsers for further node types here.
*
* Each parser takes a pointer to the @node and to the @parent (will be NULL
* for the first device path node). If a device corresponding to @node was
* found below @parent, its reference count should be incremented and the
* device returned in @child.
*
* The return value should be 0 on success or a negative int on failure.
 * The special return values 0x01 (EFI_DEV_END_INSTANCE) and 0xFF
 * (EFI_DEV_END_ENTIRE) signal the end of the device path; only
 * parse_end_path() is supposed to return these.
*
* Be sure to validate the node length and contents before commencing the
* search for a device.
*/
static long __init parse_end_path(const struct efi_dev_path *node,
struct device *parent, struct device **child)
{
if (node->header.length != 4)
return -EINVAL;
if (node->header.sub_type != EFI_DEV_END_INSTANCE &&
node->header.sub_type != EFI_DEV_END_ENTIRE)
return -EINVAL;
if (!parent)
return -ENODEV;
*child = get_device(parent);
return node->header.sub_type;
}
/**
* efi_get_device_by_path - find device by EFI Device Path
* @node: EFI Device Path
* @len: maximum length of EFI Device Path in bytes
*
* Parse a series of EFI Device Path nodes at @node and find the corresponding
* device. If the device was found, its reference count is incremented and a
* pointer to it is returned. The caller needs to drop the reference with
* put_device() after use. The @node pointer is updated to point to the
* location immediately after the "End of Hardware Device Path" node.
*
* If another Device Path instance follows, @len is decremented by the number
* of bytes consumed. Otherwise @len is set to %0.
*
* If a Device Path node is malformed or its corresponding device is not found,
* @node is updated to point to this offending node and an ERR_PTR is returned.
*
* If @len is initially %0, the function returns %NULL. Thus, to iterate over
* all instances in a path, the following idiom may be used:
*
* while (!IS_ERR_OR_NULL(dev = efi_get_device_by_path(&node, &len))) {
* // do something with dev
* put_device(dev);
* }
* if (IS_ERR(dev))
* // report error
*
* Devices can only be found if they're already instantiated. Most buses
* instantiate devices in the "subsys" initcall level, hence the earliest
* initcall level in which this function should be called is "fs".
*
* Returns the device on success or
* %ERR_PTR(-ENODEV) if no device was found,
* %ERR_PTR(-EINVAL) if a node is malformed or exceeds @len,
* %ERR_PTR(-ENOTSUPP) if support for a node type is not yet implemented.
*/
struct device * __init efi_get_device_by_path(const struct efi_dev_path **node,
size_t *len)
{
struct device *parent = NULL, *child;
long ret = 0;
if (!*len)
return NULL;
while (!ret) {
if (*len < 4 || *len < (*node)->header.length)
ret = -EINVAL;
else if ((*node)->header.type == EFI_DEV_ACPI &&
(*node)->header.sub_type == EFI_DEV_BASIC_ACPI)
ret = parse_acpi_path(*node, parent, &child);
else if ((*node)->header.type == EFI_DEV_HW &&
(*node)->header.sub_type == EFI_DEV_PCI)
ret = parse_pci_path(*node, parent, &child);
else if (((*node)->header.type == EFI_DEV_END_PATH ||
(*node)->header.type == EFI_DEV_END_PATH2))
ret = parse_end_path(*node, parent, &child);
else
ret = -ENOTSUPP;
put_device(parent);
if (ret < 0)
return ERR_PTR(ret);
parent = child;
*node = (void *)*node + (*node)->header.length;
*len -= (*node)->header.length;
}
if (ret == EFI_DEV_END_ENTIRE)
*len = 0;
return child;
}
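/*
 * Illustrative sketch, not part of the original file: walking every instance
 * of an EFI Device Path and logging the device each one resolves to. The
 * node/len pair would typically come from an EFI variable payload.
 */
static int __init __maybe_unused
efi_example_walk_dev_path(const struct efi_dev_path *node, size_t len)
{
	struct device *dev;

	while (!IS_ERR_OR_NULL(dev = efi_get_device_by_path(&node, &len))) {
		pr_info("device path resolves to %s\n", dev_name(dev));
		put_device(dev);
	}

	return PTR_ERR_OR_ZERO(dev);
}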
| linux-master | drivers/firmware/efi/dev-path-parser.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* runtime-wrappers.c - Runtime Services function call wrappers
*
* Implementation summary:
* -----------------------
* 1. When user/kernel thread requests to execute efi_runtime_service(),
* enqueue work to efi_rts_wq.
* 2. Caller thread waits for completion until the work is finished
* because it's dependent on the return status and execution of
* efi_runtime_service().
* For instance, get_variable() and get_next_variable().
*
* Copyright (C) 2014 Linaro Ltd. <[email protected]>
*
* Split off from arch/x86/platform/efi/efi.c
*
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <[email protected]>
* Copyright (C) 1999-2002 Hewlett-Packard Co.
* Copyright (C) 2005-2008 Intel Co.
* Copyright (C) 2013 SuSE Labs
*/
#define pr_fmt(fmt) "efi: " fmt
#include <linux/bug.h>
#include <linux/efi.h>
#include <linux/irqflags.h>
#include <linux/mutex.h>
#include <linux/semaphore.h>
#include <linux/stringify.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <asm/efi.h>
/*
* Wrap around the new efi_call_virt_generic() macros so that the
* code doesn't get too cluttered:
*/
#define efi_call_virt(f, args...) \
arch_efi_call_virt(efi.runtime, f, args)
union efi_rts_args {
struct {
efi_time_t *time;
efi_time_cap_t *capabilities;
} GET_TIME;
struct {
efi_time_t *time;
} SET_TIME;
struct {
efi_bool_t *enabled;
efi_bool_t *pending;
efi_time_t *time;
} GET_WAKEUP_TIME;
struct {
efi_bool_t enable;
efi_time_t *time;
} SET_WAKEUP_TIME;
struct {
efi_char16_t *name;
efi_guid_t *vendor;
u32 *attr;
unsigned long *data_size;
void *data;
} GET_VARIABLE;
struct {
unsigned long *name_size;
efi_char16_t *name;
efi_guid_t *vendor;
} GET_NEXT_VARIABLE;
struct {
efi_char16_t *name;
efi_guid_t *vendor;
u32 attr;
unsigned long data_size;
void *data;
} SET_VARIABLE;
struct {
u32 attr;
u64 *storage_space;
u64 *remaining_space;
u64 *max_variable_size;
} QUERY_VARIABLE_INFO;
struct {
u32 *high_count;
} GET_NEXT_HIGH_MONO_COUNT;
struct {
efi_capsule_header_t **capsules;
unsigned long count;
unsigned long sg_list;
} UPDATE_CAPSULE;
struct {
efi_capsule_header_t **capsules;
unsigned long count;
u64 *max_size;
int *reset_type;
} QUERY_CAPSULE_CAPS;
struct {
efi_status_t (__efiapi *acpi_prm_handler)(u64, void *);
u64 param_buffer_addr;
void *context;
} ACPI_PRM_HANDLER;
};
struct efi_runtime_work efi_rts_work;
/*
* efi_queue_work: Queue EFI runtime service call and wait for completion
* @_rts: EFI runtime service function identifier
* @_args: Arguments to pass to the EFI runtime service
*
 * Accesses to efi_runtime_services() are serialized by a binary
 * semaphore (efi_runtime_lock) and the caller waits until the work is
 * finished, hence _only_ one work item is queued at a time and the caller
 * thread waits for completion.
*/
#define efi_queue_work(_rts, _args...) \
__efi_queue_work(EFI_ ## _rts, \
&(union efi_rts_args){ ._rts = { _args }})
#ifndef arch_efi_save_flags
#define arch_efi_save_flags(state_flags) local_save_flags(state_flags)
#define arch_efi_restore_flags(state_flags) local_irq_restore(state_flags)
#endif
unsigned long efi_call_virt_save_flags(void)
{
unsigned long flags;
arch_efi_save_flags(flags);
return flags;
}
void efi_call_virt_check_flags(unsigned long flags, const void *caller)
{
unsigned long cur_flags, mismatch;
cur_flags = efi_call_virt_save_flags();
mismatch = flags ^ cur_flags;
if (!WARN_ON_ONCE(mismatch & ARCH_EFI_IRQ_FLAGS_MASK))
return;
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_NOW_UNRELIABLE);
pr_err_ratelimited(FW_BUG "IRQ flags corrupted (0x%08lx=>0x%08lx) by EFI call from %pS\n",
flags, cur_flags, caller ?: __builtin_return_address(0));
arch_efi_restore_flags(flags);
}
/*
* According to section 7.1 of the UEFI spec, Runtime Services are not fully
* reentrant, and there are particular combinations of calls that need to be
* serialized. (source: UEFI Specification v2.4A)
*
* Table 31. Rules for Reentry Into Runtime Services
* +------------------------------------+-------------------------------+
* | If previous call is busy in | Forbidden to call |
* +------------------------------------+-------------------------------+
* | Any | SetVirtualAddressMap() |
* +------------------------------------+-------------------------------+
* | ConvertPointer() | ConvertPointer() |
* +------------------------------------+-------------------------------+
* | SetVariable() | ResetSystem() |
* | UpdateCapsule() | |
* | SetTime() | |
* | SetWakeupTime() | |
* | GetNextHighMonotonicCount() | |
* +------------------------------------+-------------------------------+
* | GetVariable() | GetVariable() |
* | GetNextVariableName() | GetNextVariableName() |
* | SetVariable() | SetVariable() |
* | QueryVariableInfo() | QueryVariableInfo() |
* | UpdateCapsule() | UpdateCapsule() |
* | QueryCapsuleCapabilities() | QueryCapsuleCapabilities() |
* | GetNextHighMonotonicCount() | GetNextHighMonotonicCount() |
* +------------------------------------+-------------------------------+
* | GetTime() | GetTime() |
* | SetTime() | SetTime() |
* | GetWakeupTime() | GetWakeupTime() |
* | SetWakeupTime() | SetWakeupTime() |
* +------------------------------------+-------------------------------+
*
* Due to the fact that the EFI pstore may write to the variable store in
* interrupt context, we need to use a lock for at least the groups that
* contain SetVariable() and QueryVariableInfo(). That leaves little else, as
* none of the remaining functions are actually ever called at runtime.
* So let's just use a single lock to serialize all Runtime Services calls.
*/
static DEFINE_SEMAPHORE(efi_runtime_lock, 1);
/*
* Expose the EFI runtime lock to the UV platform
*/
#ifdef CONFIG_X86_UV
extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock);
#endif
/*
* Calls the appropriate efi_runtime_service() with the appropriate
* arguments.
*/
static void efi_call_rts(struct work_struct *work)
{
const union efi_rts_args *args = efi_rts_work.args;
efi_status_t status = EFI_NOT_FOUND;
unsigned long flags;
arch_efi_call_virt_setup();
flags = efi_call_virt_save_flags();
switch (efi_rts_work.efi_rts_id) {
case EFI_GET_TIME:
status = efi_call_virt(get_time,
args->GET_TIME.time,
args->GET_TIME.capabilities);
break;
case EFI_SET_TIME:
status = efi_call_virt(set_time,
args->SET_TIME.time);
break;
case EFI_GET_WAKEUP_TIME:
status = efi_call_virt(get_wakeup_time,
args->GET_WAKEUP_TIME.enabled,
args->GET_WAKEUP_TIME.pending,
args->GET_WAKEUP_TIME.time);
break;
case EFI_SET_WAKEUP_TIME:
status = efi_call_virt(set_wakeup_time,
args->SET_WAKEUP_TIME.enable,
args->SET_WAKEUP_TIME.time);
break;
case EFI_GET_VARIABLE:
status = efi_call_virt(get_variable,
args->GET_VARIABLE.name,
args->GET_VARIABLE.vendor,
args->GET_VARIABLE.attr,
args->GET_VARIABLE.data_size,
args->GET_VARIABLE.data);
break;
case EFI_GET_NEXT_VARIABLE:
status = efi_call_virt(get_next_variable,
args->GET_NEXT_VARIABLE.name_size,
args->GET_NEXT_VARIABLE.name,
args->GET_NEXT_VARIABLE.vendor);
break;
case EFI_SET_VARIABLE:
status = efi_call_virt(set_variable,
args->SET_VARIABLE.name,
args->SET_VARIABLE.vendor,
args->SET_VARIABLE.attr,
args->SET_VARIABLE.data_size,
args->SET_VARIABLE.data);
break;
case EFI_QUERY_VARIABLE_INFO:
status = efi_call_virt(query_variable_info,
args->QUERY_VARIABLE_INFO.attr,
args->QUERY_VARIABLE_INFO.storage_space,
args->QUERY_VARIABLE_INFO.remaining_space,
args->QUERY_VARIABLE_INFO.max_variable_size);
break;
case EFI_GET_NEXT_HIGH_MONO_COUNT:
status = efi_call_virt(get_next_high_mono_count,
args->GET_NEXT_HIGH_MONO_COUNT.high_count);
break;
case EFI_UPDATE_CAPSULE:
status = efi_call_virt(update_capsule,
args->UPDATE_CAPSULE.capsules,
args->UPDATE_CAPSULE.count,
args->UPDATE_CAPSULE.sg_list);
break;
case EFI_QUERY_CAPSULE_CAPS:
status = efi_call_virt(query_capsule_caps,
args->QUERY_CAPSULE_CAPS.capsules,
args->QUERY_CAPSULE_CAPS.count,
args->QUERY_CAPSULE_CAPS.max_size,
args->QUERY_CAPSULE_CAPS.reset_type);
break;
case EFI_ACPI_PRM_HANDLER:
#ifdef CONFIG_ACPI_PRMT
status = arch_efi_call_virt(args, ACPI_PRM_HANDLER.acpi_prm_handler,
args->ACPI_PRM_HANDLER.param_buffer_addr,
args->ACPI_PRM_HANDLER.context);
break;
#endif
default:
/*
* Ideally, we should never reach here because a caller of this
* function should have put the right efi_runtime_service()
* function identifier into efi_rts_work->efi_rts_id
*/
pr_err("Requested executing invalid EFI Runtime Service.\n");
}
efi_call_virt_check_flags(flags, efi_rts_work.caller);
arch_efi_call_virt_teardown();
efi_rts_work.status = status;
complete(&efi_rts_work.efi_rts_comp);
}
static efi_status_t __efi_queue_work(enum efi_rts_ids id,
union efi_rts_args *args)
{
efi_rts_work.efi_rts_id = id;
efi_rts_work.args = args;
efi_rts_work.caller = __builtin_return_address(0);
efi_rts_work.status = EFI_ABORTED;
if (!efi_enabled(EFI_RUNTIME_SERVICES)) {
pr_warn_once("EFI Runtime Services are disabled!\n");
efi_rts_work.status = EFI_DEVICE_ERROR;
goto exit;
}
init_completion(&efi_rts_work.efi_rts_comp);
INIT_WORK(&efi_rts_work.work, efi_call_rts);
/*
 * queue_work() returns 0 if the work was already on a queue;
* _ideally_ this should never happen.
*/
if (queue_work(efi_rts_wq, &efi_rts_work.work))
wait_for_completion(&efi_rts_work.efi_rts_comp);
else
pr_err("Failed to queue work to efi_rts_wq.\n");
WARN_ON_ONCE(efi_rts_work.status == EFI_ABORTED);
exit:
efi_rts_work.efi_rts_id = EFI_NONE;
return efi_rts_work.status;
}
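/*
 * Illustrative sketch, kept out of the build: the efi_queue_work() helper
 * used by the wrappers below (presumably defined earlier in this file)
 * conceptually packs its trailing arguments into the matching member of
 * union efi_rts_args and hands them to __efi_queue_work(). One plausible
 * shape, shown here only for orientation; the macro name is hypothetical.
 */
#if 0
#define example_efi_queue_work(_rts, _args...)				\
	__efi_queue_work(EFI_ ## _rts,					\
			 &(union efi_rts_args){ ._rts = { _args } })
#endif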
static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
efi_status_t status;
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
status = efi_queue_work(GET_TIME, tm, tc);
up(&efi_runtime_lock);
return status;
}
static efi_status_t virt_efi_set_time(efi_time_t *tm)
{
efi_status_t status;
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
status = efi_queue_work(SET_TIME, tm);
up(&efi_runtime_lock);
return status;
}
static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled,
efi_bool_t *pending,
efi_time_t *tm)
{
efi_status_t status;
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
status = efi_queue_work(GET_WAKEUP_TIME, enabled, pending, tm);
up(&efi_runtime_lock);
return status;
}
static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
{
efi_status_t status;
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
status = efi_queue_work(SET_WAKEUP_TIME, enabled, tm);
up(&efi_runtime_lock);
return status;
}
static efi_status_t virt_efi_get_variable(efi_char16_t *name,
efi_guid_t *vendor,
u32 *attr,
unsigned long *data_size,
void *data)
{
efi_status_t status;
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
status = efi_queue_work(GET_VARIABLE, name, vendor, attr, data_size,
data);
up(&efi_runtime_lock);
return status;
}
static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
efi_char16_t *name,
efi_guid_t *vendor)
{
efi_status_t status;
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
status = efi_queue_work(GET_NEXT_VARIABLE, name_size, name, vendor);
up(&efi_runtime_lock);
return status;
}
static efi_status_t virt_efi_set_variable(efi_char16_t *name,
efi_guid_t *vendor,
u32 attr,
unsigned long data_size,
void *data)
{
efi_status_t status;
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
status = efi_queue_work(SET_VARIABLE, name, vendor, attr, data_size,
data);
up(&efi_runtime_lock);
return status;
}
static efi_status_t
virt_efi_set_variable_nb(efi_char16_t *name, efi_guid_t *vendor, u32 attr,
unsigned long data_size, void *data)
{
efi_status_t status;
if (down_trylock(&efi_runtime_lock))
return EFI_NOT_READY;
status = efi_call_virt_pointer(efi.runtime, set_variable, name, vendor,
attr, data_size, data);
up(&efi_runtime_lock);
return status;
}
static efi_status_t virt_efi_query_variable_info(u32 attr,
u64 *storage_space,
u64 *remaining_space,
u64 *max_variable_size)
{
efi_status_t status;
if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
return EFI_UNSUPPORTED;
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
status = efi_queue_work(QUERY_VARIABLE_INFO, attr, storage_space,
remaining_space, max_variable_size);
up(&efi_runtime_lock);
return status;
}
static efi_status_t
virt_efi_query_variable_info_nb(u32 attr, u64 *storage_space,
u64 *remaining_space, u64 *max_variable_size)
{
efi_status_t status;
if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
return EFI_UNSUPPORTED;
if (down_trylock(&efi_runtime_lock))
return EFI_NOT_READY;
status = efi_call_virt_pointer(efi.runtime, query_variable_info, attr,
storage_space, remaining_space,
max_variable_size);
up(&efi_runtime_lock);
return status;
}
static efi_status_t virt_efi_get_next_high_mono_count(u32 *count)
{
efi_status_t status;
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
status = efi_queue_work(GET_NEXT_HIGH_MONO_COUNT, count);
up(&efi_runtime_lock);
return status;
}
static void virt_efi_reset_system(int reset_type,
efi_status_t status,
unsigned long data_size,
efi_char16_t *data)
{
if (down_trylock(&efi_runtime_lock)) {
pr_warn("failed to invoke the reset_system() runtime service:\n"
"could not get exclusive access to the firmware\n");
return;
}
arch_efi_call_virt_setup();
efi_rts_work.efi_rts_id = EFI_RESET_SYSTEM;
arch_efi_call_virt(efi.runtime, reset_system, reset_type, status,
data_size, data);
arch_efi_call_virt_teardown();
up(&efi_runtime_lock);
}
static efi_status_t virt_efi_update_capsule(efi_capsule_header_t **capsules,
unsigned long count,
unsigned long sg_list)
{
efi_status_t status;
if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
return EFI_UNSUPPORTED;
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
status = efi_queue_work(UPDATE_CAPSULE, capsules, count, sg_list);
up(&efi_runtime_lock);
return status;
}
static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules,
unsigned long count,
u64 *max_size,
int *reset_type)
{
efi_status_t status;
if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
return EFI_UNSUPPORTED;
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
status = efi_queue_work(QUERY_CAPSULE_CAPS, capsules, count,
max_size, reset_type);
up(&efi_runtime_lock);
return status;
}
void __init efi_native_runtime_setup(void)
{
efi.get_time = virt_efi_get_time;
efi.set_time = virt_efi_set_time;
efi.get_wakeup_time = virt_efi_get_wakeup_time;
efi.set_wakeup_time = virt_efi_set_wakeup_time;
efi.get_variable = virt_efi_get_variable;
efi.get_next_variable = virt_efi_get_next_variable;
efi.set_variable = virt_efi_set_variable;
efi.set_variable_nonblocking = virt_efi_set_variable_nb;
efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count;
efi.reset_system = virt_efi_reset_system;
efi.query_variable_info = virt_efi_query_variable_info;
efi.query_variable_info_nonblocking = virt_efi_query_variable_info_nb;
efi.update_capsule = virt_efi_update_capsule;
efi.query_capsule_caps = virt_efi_query_capsule_caps;
}
#ifdef CONFIG_ACPI_PRMT
efi_status_t
efi_call_acpi_prm_handler(efi_status_t (__efiapi *handler_addr)(u64, void *),
u64 param_buffer_addr, void *context)
{
efi_status_t status;
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
status = efi_queue_work(ACPI_PRM_HANDLER, handler_addr,
param_buffer_addr, context);
up(&efi_runtime_lock);
return status;
}
#endif
| linux-master | drivers/firmware/efi/runtime-wrappers.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Common EFI memory map functions.
*/
#define pr_fmt(fmt) "efi: " fmt
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <asm/early_ioremap.h>
#include <asm/efi.h>
#ifndef __efi_memmap_free
#define __efi_memmap_free(phys, size, flags) do { } while (0)
#endif
/**
* __efi_memmap_init - Common code for mapping the EFI memory map
* @data: EFI memory map data
*
* This function takes care of figuring out which function to use to
* map the EFI memory map in efi.memmap based on how far into the boot
* we are.
*
 * During bootup, EFI_MEMMAP_LATE in data->flags should be clear, since we
 * only have access to the early_memremap*() functions as the vmalloc
 * space isn't set up yet. Once the kernel is fully booted we can switch
 * to the more robust memremap*() API.
*
* Returns zero on success, a negative error code on failure.
*/
int __init __efi_memmap_init(struct efi_memory_map_data *data)
{
struct efi_memory_map map;
phys_addr_t phys_map;
phys_map = data->phys_map;
if (data->flags & EFI_MEMMAP_LATE)
map.map = memremap(phys_map, data->size, MEMREMAP_WB);
else
map.map = early_memremap(phys_map, data->size);
if (!map.map) {
pr_err("Could not map the memory map!\n");
return -ENOMEM;
}
if (efi.memmap.flags & (EFI_MEMMAP_MEMBLOCK | EFI_MEMMAP_SLAB))
__efi_memmap_free(efi.memmap.phys_map,
efi.memmap.desc_size * efi.memmap.nr_map,
efi.memmap.flags);
map.phys_map = data->phys_map;
map.nr_map = data->size / data->desc_size;
map.map_end = map.map + data->size;
map.desc_version = data->desc_version;
map.desc_size = data->desc_size;
map.flags = data->flags;
set_bit(EFI_MEMMAP, &efi.flags);
efi.memmap = map;
return 0;
}
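/*
 * Illustrative sketch, kept out of the build: consumers of efi.memmap must
 * advance by efi.memmap.desc_size rather than sizeof(efi_memory_desc_t),
 * since firmware may use a larger descriptor stride than the structure the
 * kernel knows about (cf. the for_each_efi_memory_desc() helper). A
 * minimal walker under that assumption:
 */
#if 0
static void example_walk_memmap(void)
{
	efi_memory_desc_t *md;

	for (md = efi.memmap.map;
	     (void *)md + efi.memmap.desc_size <= efi.memmap.map_end;
	     md = (void *)md + efi.memmap.desc_size)
		pr_info("type %u: %llu pages\n", md->type, md->num_pages);
}
#endif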
/**
* efi_memmap_init_early - Map the EFI memory map data structure
* @data: EFI memory map data
*
* Use early_memremap() to map the passed in EFI memory map and assign
* it to efi.memmap.
*/
int __init efi_memmap_init_early(struct efi_memory_map_data *data)
{
/* Cannot go backwards */
WARN_ON(efi.memmap.flags & EFI_MEMMAP_LATE);
data->flags = 0;
return __efi_memmap_init(data);
}
void __init efi_memmap_unmap(void)
{
if (!efi_enabled(EFI_MEMMAP))
return;
if (!(efi.memmap.flags & EFI_MEMMAP_LATE)) {
unsigned long size;
size = efi.memmap.desc_size * efi.memmap.nr_map;
early_memunmap(efi.memmap.map, size);
} else {
memunmap(efi.memmap.map);
}
efi.memmap.map = NULL;
clear_bit(EFI_MEMMAP, &efi.flags);
}
/**
* efi_memmap_init_late - Map efi.memmap with memremap()
 * @addr:	Physical address of the new EFI memory map
* @size: Size in bytes of the new EFI memory map
*
 * Set up a mapping of the EFI memory map using memremap(). This
* function should only be called once the vmalloc space has been
 * set up and is therefore not suitable for calling during early EFI
 * initialisation, e.g. in efi_init(). Additionally, it expects
* efi_memmap_init_early() to have already been called.
*
 * The reason there are two EFI memmap initialisation routines
 * (efi_memmap_init_early() and this late version) is that the
* early EFI memmap should be explicitly unmapped once EFI
* initialisation is complete as the fixmap space used to map the EFI
* memmap (via early_memremap()) is a scarce resource.
*
* This late mapping is intended to persist for the duration of
* runtime so that things like efi_mem_desc_lookup() and
* efi_mem_attributes() always work.
*
* Returns zero on success, a negative error code on failure.
*/
int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size)
{
struct efi_memory_map_data data = {
.phys_map = addr,
.size = size,
.flags = EFI_MEMMAP_LATE,
};
/* Did we forget to unmap the early EFI memmap? */
WARN_ON(efi.memmap.map);
/* Were we already called? */
WARN_ON(efi.memmap.flags & EFI_MEMMAP_LATE);
/*
* It makes no sense to allow callers to register different
* values for the following fields. Copy them out of the
* existing early EFI memmap.
*/
data.desc_version = efi.memmap.desc_version;
data.desc_size = efi.memmap.desc_size;
return __efi_memmap_init(&data);
}
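/*
 * Illustrative sketch, kept out of the build: one plausible call sequence
 * for switching from the early to the late mapping, consistent with the
 * WARN_ON() checks above (the early mapping is dropped first, then the
 * permanent one is established).
 */
#if 0
static int example_switch_to_late_memmap(phys_addr_t phys, unsigned long size)
{
	efi_memmap_unmap();
	return efi_memmap_init_late(phys, size);
}
#endif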
| linux-master | drivers/firmware/efi/memmap.c |
// SPDX-License-Identifier: GPL-2.0
/*
* mokvar-table.c
*
* Copyright (c) 2020 Red Hat
* Author: Lenny Szubowicz <[email protected]>
*
* This module contains the kernel support for the Linux EFI Machine
* Owner Key (MOK) variable configuration table, which is identified by
* the LINUX_EFI_MOK_VARIABLE_TABLE_GUID.
*
* This EFI configuration table provides a more robust alternative to
* EFI volatile variables by which an EFI boot loader can pass the
* contents of the Machine Owner Key (MOK) certificate stores to the
* kernel during boot. If both the EFI MOK config table and corresponding
* EFI MOK variables are present, the table should be considered as
* more authoritative.
*
* This module includes code that validates and maps the EFI MOK table,
 * if its presence was detected very early in boot.
*
* Kernel interface routines are provided to walk through all the
* entries in the MOK config table or to search for a specific named
* entry.
*
* The contents of the individual named MOK config table entries are
* made available to user space via read-only sysfs binary files under:
*
* /sys/firmware/efi/mok-variables/
*
*/
#define pr_fmt(fmt) "mokvar: " fmt
#include <linux/capability.h>
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/early_ioremap.h>
/*
* The LINUX_EFI_MOK_VARIABLE_TABLE_GUID config table is a packed
* sequence of struct efi_mokvar_table_entry, one for each named
* MOK variable. The sequence is terminated by an entry with a
* completely NULL name and 0 data size.
*
* efi_mokvar_table_size is set to the computed size of the
* MOK config table by efi_mokvar_table_init(). This will be
 * non-zero if and only if the table is present and has been
* validated by efi_mokvar_table_init().
*/
static size_t efi_mokvar_table_size;
/*
* efi_mokvar_table_va is the kernel virtual address at which the
* EFI MOK config table has been mapped by efi_mokvar_sysfs_init().
*/
static struct efi_mokvar_table_entry *efi_mokvar_table_va;
/*
* Each /sys/firmware/efi/mok-variables/ sysfs file is represented by
* an instance of struct efi_mokvar_sysfs_attr on efi_mokvar_sysfs_list.
* bin_attr.private points to the associated EFI MOK config table entry.
*
* This list is created during boot and then remains unchanged.
* So no synchronization is currently required to walk the list.
*/
struct efi_mokvar_sysfs_attr {
struct bin_attribute bin_attr;
struct list_head node;
};
static LIST_HEAD(efi_mokvar_sysfs_list);
static struct kobject *mokvar_kobj;
/*
* efi_mokvar_table_init() - Early boot validation of EFI MOK config table
*
* If present, validate and compute the size of the EFI MOK variable
* configuration table. This table may be provided by an EFI boot loader
* as an alternative to ordinary EFI variables, due to platform-dependent
* limitations. The memory occupied by this table is marked as reserved.
*
* This routine must be called before efi_free_boot_services() in order
* to guarantee that it can mark the table as reserved.
*
* Implicit inputs:
* efi.mokvar_table: Physical address of EFI MOK variable config table
* or special value that indicates no such table.
*
* Implicit outputs:
* efi_mokvar_table_size: Computed size of EFI MOK variable config table.
* The table is considered present and valid if this
* is non-zero.
*/
void __init efi_mokvar_table_init(void)
{
efi_memory_desc_t md;
void *va = NULL;
unsigned long cur_offset = 0;
unsigned long offset_limit;
unsigned long map_size = 0;
unsigned long map_size_needed = 0;
unsigned long size;
struct efi_mokvar_table_entry *mokvar_entry;
int err;
if (!efi_enabled(EFI_MEMMAP))
return;
if (efi.mokvar_table == EFI_INVALID_TABLE_ADDR)
return;
/*
* The EFI MOK config table must fit within a single EFI memory
* descriptor range.
*/
err = efi_mem_desc_lookup(efi.mokvar_table, &md);
if (err) {
pr_warn("EFI MOKvar config table is not within the EFI memory map\n");
return;
}
offset_limit = efi_mem_desc_end(&md) - efi.mokvar_table;
/*
* Validate the MOK config table. Since there is no table header
* from which we could get the total size of the MOK config table,
* we compute the total size as we validate each variably sized
* entry, remapping as necessary.
*/
err = -EINVAL;
while (cur_offset + sizeof(*mokvar_entry) <= offset_limit) {
mokvar_entry = va + cur_offset;
map_size_needed = cur_offset + sizeof(*mokvar_entry);
if (map_size_needed > map_size) {
if (va)
early_memunmap(va, map_size);
/*
* Map a little more than the fixed size entry
* header, anticipating some data. It's safe to
 * do so as long as we stay within the current memory
* descriptor.
*/
map_size = min(map_size_needed + 2*EFI_PAGE_SIZE,
offset_limit);
va = early_memremap(efi.mokvar_table, map_size);
if (!va) {
pr_err("Failed to map EFI MOKvar config table pa=0x%lx, size=%lu.\n",
efi.mokvar_table, map_size);
return;
}
mokvar_entry = va + cur_offset;
}
/* Check for last sentinel entry */
if (mokvar_entry->name[0] == '\0') {
if (mokvar_entry->data_size != 0)
break;
err = 0;
break;
}
/* Sanity check that the name is null terminated */
size = strnlen(mokvar_entry->name,
sizeof(mokvar_entry->name));
if (size >= sizeof(mokvar_entry->name))
break;
/* Advance to the next entry */
cur_offset = map_size_needed + mokvar_entry->data_size;
}
if (va)
early_memunmap(va, map_size);
if (err) {
pr_err("EFI MOKvar config table is not valid\n");
return;
}
if (md.type == EFI_BOOT_SERVICES_DATA)
efi_mem_reserve(efi.mokvar_table, map_size_needed);
efi_mokvar_table_size = map_size_needed;
}
/*
* efi_mokvar_entry_next() - Get next entry in the EFI MOK config table
*
* mokvar_entry: Pointer to current EFI MOK config table entry
* or null. Null indicates get first entry.
* Passed by reference. This is updated to the
* same value as the return value.
*
* Returns: Pointer to next EFI MOK config table entry
* or null, if there are no more entries.
* Same value is returned in the mokvar_entry
* parameter.
*
* This routine depends on the EFI MOK config table being entirely
 * mapped, with its starting virtual address in efi_mokvar_table_va.
*/
struct efi_mokvar_table_entry *efi_mokvar_entry_next(
struct efi_mokvar_table_entry **mokvar_entry)
{
struct efi_mokvar_table_entry *mokvar_cur;
struct efi_mokvar_table_entry *mokvar_next;
size_t size_cur;
mokvar_cur = *mokvar_entry;
*mokvar_entry = NULL;
if (efi_mokvar_table_va == NULL)
return NULL;
if (mokvar_cur == NULL) {
mokvar_next = efi_mokvar_table_va;
} else {
if (mokvar_cur->name[0] == '\0')
return NULL;
size_cur = sizeof(*mokvar_cur) + mokvar_cur->data_size;
mokvar_next = (void *)mokvar_cur + size_cur;
}
if (mokvar_next->name[0] == '\0')
return NULL;
*mokvar_entry = mokvar_next;
return mokvar_next;
}
/*
* efi_mokvar_entry_find() - Find EFI MOK config entry by name
*
* name: Name of the entry to look for.
*
* Returns: Pointer to EFI MOK config table entry if found;
* null otherwise.
*
* This routine depends on the EFI MOK config table being entirely
 * mapped, with its starting virtual address in efi_mokvar_table_va.
*/
struct efi_mokvar_table_entry *efi_mokvar_entry_find(const char *name)
{
struct efi_mokvar_table_entry *mokvar_entry = NULL;
while (efi_mokvar_entry_next(&mokvar_entry)) {
if (!strncmp(name, mokvar_entry->name,
sizeof(mokvar_entry->name)))
return mokvar_entry;
}
return NULL;
}
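/*
 * Illustrative sketch, kept out of the build: a consumer (for example a
 * platform keyring loader) might look up a named entry and consume its
 * data. The entry name used here is only an example.
 */
#if 0
static void example_mokvar_lookup(void)
{
	struct efi_mokvar_table_entry *entry;

	entry = efi_mokvar_entry_find("MokListRT");
	if (entry)
		pr_info("MokListRT: %llu bytes of data\n", entry->data_size);
}
#endif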
/*
* efi_mokvar_sysfs_read() - sysfs binary file read routine
*
* Returns: Count of bytes read.
*
* Copy EFI MOK config table entry data for this mokvar sysfs binary file
* to the supplied buffer, starting at the specified offset into mokvar table
* entry data, for the specified count bytes. The copy is limited by the
* amount of data in this mokvar config table entry.
*/
static ssize_t efi_mokvar_sysfs_read(struct file *file, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct efi_mokvar_table_entry *mokvar_entry = bin_attr->private;
if (!capable(CAP_SYS_ADMIN))
return 0;
if (off >= mokvar_entry->data_size)
return 0;
if (count > mokvar_entry->data_size - off)
count = mokvar_entry->data_size - off;
memcpy(buf, mokvar_entry->data + off, count);
return count;
}
/*
* efi_mokvar_sysfs_init() - Map EFI MOK config table and create sysfs
*
* Map the EFI MOK variable config table for run-time use by the kernel
* and create the sysfs entries in /sys/firmware/efi/mok-variables/
*
* This routine just returns if a valid EFI MOK variable config table
* was not found earlier during boot.
*
* This routine must be called during a "middle" initcall phase, i.e.
* after efi_mokvar_table_init() but before UEFI certs are loaded
* during late init.
*
* Implicit inputs:
* efi.mokvar_table: Physical address of EFI MOK variable config table
* or special value that indicates no such table.
*
* efi_mokvar_table_size: Computed size of EFI MOK variable config table.
* The table is considered present and valid if this
* is non-zero.
*
* Implicit outputs:
* efi_mokvar_table_va: Start virtual address of the EFI MOK config table.
*/
static int __init efi_mokvar_sysfs_init(void)
{
void *config_va;
struct efi_mokvar_table_entry *mokvar_entry = NULL;
struct efi_mokvar_sysfs_attr *mokvar_sysfs = NULL;
int err = 0;
if (efi_mokvar_table_size == 0)
return -ENOENT;
config_va = memremap(efi.mokvar_table, efi_mokvar_table_size,
MEMREMAP_WB);
if (!config_va) {
pr_err("Failed to map EFI MOKvar config table\n");
return -ENOMEM;
}
efi_mokvar_table_va = config_va;
mokvar_kobj = kobject_create_and_add("mok-variables", efi_kobj);
if (!mokvar_kobj) {
pr_err("Failed to create EFI mok-variables sysfs entry\n");
return -ENOMEM;
}
while (efi_mokvar_entry_next(&mokvar_entry)) {
mokvar_sysfs = kzalloc(sizeof(*mokvar_sysfs), GFP_KERNEL);
if (!mokvar_sysfs) {
err = -ENOMEM;
break;
}
sysfs_bin_attr_init(&mokvar_sysfs->bin_attr);
mokvar_sysfs->bin_attr.private = mokvar_entry;
mokvar_sysfs->bin_attr.attr.name = mokvar_entry->name;
mokvar_sysfs->bin_attr.attr.mode = 0400;
mokvar_sysfs->bin_attr.size = mokvar_entry->data_size;
mokvar_sysfs->bin_attr.read = efi_mokvar_sysfs_read;
err = sysfs_create_bin_file(mokvar_kobj,
&mokvar_sysfs->bin_attr);
if (err)
break;
list_add_tail(&mokvar_sysfs->node, &efi_mokvar_sysfs_list);
}
if (err) {
pr_err("Failed to create some EFI mok-variables sysfs entries\n");
kfree(mokvar_sysfs);
}
return err;
}
fs_initcall(efi_mokvar_sysfs_init);
| linux-master | drivers/firmware/efi/mokvar-table.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018, Advanced Micro Devices, Inc.
#include <linux/cper.h>
#include <linux/acpi.h>
/*
* We don't need a "CPER_IA" prefix since these are all locally defined.
* This will save us a lot of line space.
*/
#define VALID_LAPIC_ID BIT_ULL(0)
#define VALID_CPUID_INFO BIT_ULL(1)
#define VALID_PROC_ERR_INFO_NUM(bits) (((bits) & GENMASK_ULL(7, 2)) >> 2)
#define VALID_PROC_CXT_INFO_NUM(bits) (((bits) & GENMASK_ULL(13, 8)) >> 8)
#define INFO_ERR_STRUCT_TYPE_CACHE \
GUID_INIT(0xA55701F5, 0xE3EF, 0x43DE, 0xAC, 0x72, 0x24, 0x9B, \
0x57, 0x3F, 0xAD, 0x2C)
#define INFO_ERR_STRUCT_TYPE_TLB \
GUID_INIT(0xFC06B535, 0x5E1F, 0x4562, 0x9F, 0x25, 0x0A, 0x3B, \
0x9A, 0xDB, 0x63, 0xC3)
#define INFO_ERR_STRUCT_TYPE_BUS \
GUID_INIT(0x1CF3F8B3, 0xC5B1, 0x49a2, 0xAA, 0x59, 0x5E, 0xEF, \
0x92, 0xFF, 0xA6, 0x3C)
#define INFO_ERR_STRUCT_TYPE_MS \
GUID_INIT(0x48AB7F57, 0xDC34, 0x4f6c, 0xA7, 0xD3, 0xB0, 0xB5, \
0xB0, 0xA7, 0x43, 0x14)
#define INFO_VALID_CHECK_INFO BIT_ULL(0)
#define INFO_VALID_TARGET_ID BIT_ULL(1)
#define INFO_VALID_REQUESTOR_ID BIT_ULL(2)
#define INFO_VALID_RESPONDER_ID BIT_ULL(3)
#define INFO_VALID_IP BIT_ULL(4)
#define CHECK_VALID_TRANS_TYPE BIT_ULL(0)
#define CHECK_VALID_OPERATION BIT_ULL(1)
#define CHECK_VALID_LEVEL BIT_ULL(2)
#define CHECK_VALID_PCC BIT_ULL(3)
#define CHECK_VALID_UNCORRECTED BIT_ULL(4)
#define CHECK_VALID_PRECISE_IP BIT_ULL(5)
#define CHECK_VALID_RESTARTABLE_IP BIT_ULL(6)
#define CHECK_VALID_OVERFLOW BIT_ULL(7)
#define CHECK_VALID_BUS_PART_TYPE BIT_ULL(8)
#define CHECK_VALID_BUS_TIME_OUT BIT_ULL(9)
#define CHECK_VALID_BUS_ADDR_SPACE BIT_ULL(10)
#define CHECK_VALID_BITS(check) (((check) & GENMASK_ULL(15, 0)))
#define CHECK_TRANS_TYPE(check) (((check) & GENMASK_ULL(17, 16)) >> 16)
#define CHECK_OPERATION(check) (((check) & GENMASK_ULL(21, 18)) >> 18)
#define CHECK_LEVEL(check) (((check) & GENMASK_ULL(24, 22)) >> 22)
#define CHECK_PCC BIT_ULL(25)
#define CHECK_UNCORRECTED BIT_ULL(26)
#define CHECK_PRECISE_IP BIT_ULL(27)
#define CHECK_RESTARTABLE_IP BIT_ULL(28)
#define CHECK_OVERFLOW BIT_ULL(29)
#define CHECK_BUS_PART_TYPE(check) (((check) & GENMASK_ULL(31, 30)) >> 30)
#define CHECK_BUS_TIME_OUT BIT_ULL(32)
#define CHECK_BUS_ADDR_SPACE(check) (((check) & GENMASK_ULL(34, 33)) >> 33)
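/*
 * Worked example, kept out of the build: with bits 17:16 of check_info set
 * to 0b01, CHECK_TRANS_TYPE() extracts 1, which indexes "Data Access" in
 * ia_check_trans_type_strs[] below.
 */
#if 0
static void example_check_decode(void)
{
	u64 check = 1ULL << 16;

	WARN_ON(CHECK_TRANS_TYPE(check) != 1);
}
#endif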
#define CHECK_VALID_MS_ERR_TYPE BIT_ULL(0)
#define CHECK_VALID_MS_PCC BIT_ULL(1)
#define CHECK_VALID_MS_UNCORRECTED BIT_ULL(2)
#define CHECK_VALID_MS_PRECISE_IP BIT_ULL(3)
#define CHECK_VALID_MS_RESTARTABLE_IP BIT_ULL(4)
#define CHECK_VALID_MS_OVERFLOW BIT_ULL(5)
#define CHECK_MS_ERR_TYPE(check) (((check) & GENMASK_ULL(18, 16)) >> 16)
#define CHECK_MS_PCC BIT_ULL(19)
#define CHECK_MS_UNCORRECTED BIT_ULL(20)
#define CHECK_MS_PRECISE_IP BIT_ULL(21)
#define CHECK_MS_RESTARTABLE_IP BIT_ULL(22)
#define CHECK_MS_OVERFLOW BIT_ULL(23)
#define CTX_TYPE_MSR 1
#define CTX_TYPE_MMREG 7
enum err_types {
ERR_TYPE_CACHE = 0,
ERR_TYPE_TLB,
ERR_TYPE_BUS,
ERR_TYPE_MS,
N_ERR_TYPES
};
static enum err_types cper_get_err_type(const guid_t *err_type)
{
if (guid_equal(err_type, &INFO_ERR_STRUCT_TYPE_CACHE))
return ERR_TYPE_CACHE;
else if (guid_equal(err_type, &INFO_ERR_STRUCT_TYPE_TLB))
return ERR_TYPE_TLB;
else if (guid_equal(err_type, &INFO_ERR_STRUCT_TYPE_BUS))
return ERR_TYPE_BUS;
else if (guid_equal(err_type, &INFO_ERR_STRUCT_TYPE_MS))
return ERR_TYPE_MS;
else
return N_ERR_TYPES;
}
static const char * const ia_check_trans_type_strs[] = {
"Instruction",
"Data Access",
"Generic",
};
static const char * const ia_check_op_strs[] = {
"generic error",
"generic read",
"generic write",
"data read",
"data write",
"instruction fetch",
"prefetch",
"eviction",
"snoop",
};
static const char * const ia_check_bus_part_type_strs[] = {
"Local Processor originated request",
"Local Processor responded to request",
"Local Processor observed",
"Generic",
};
static const char * const ia_check_bus_addr_space_strs[] = {
"Memory Access",
"Reserved",
"I/O",
"Other Transaction",
};
static const char * const ia_check_ms_error_type_strs[] = {
"No Error",
"Unclassified",
"Microcode ROM Parity Error",
"External Error",
"FRC Error",
"Internal Unclassified",
};
static const char * const ia_reg_ctx_strs[] = {
"Unclassified Data",
"MSR Registers (Machine Check and other MSRs)",
"32-bit Mode Execution Context",
"64-bit Mode Execution Context",
"FXSAVE Context",
"32-bit Mode Debug Registers (DR0-DR7)",
"64-bit Mode Debug Registers (DR0-DR7)",
"Memory Mapped Registers",
};
static inline void print_bool(char *str, const char *pfx, u64 check, u64 bit)
{
printk("%s%s: %s\n", pfx, str, (check & bit) ? "true" : "false");
}
static void print_err_info_ms(const char *pfx, u16 validation_bits, u64 check)
{
if (validation_bits & CHECK_VALID_MS_ERR_TYPE) {
u8 err_type = CHECK_MS_ERR_TYPE(check);
printk("%sError Type: %u, %s\n", pfx, err_type,
err_type < ARRAY_SIZE(ia_check_ms_error_type_strs) ?
ia_check_ms_error_type_strs[err_type] : "unknown");
}
if (validation_bits & CHECK_VALID_MS_PCC)
print_bool("Processor Context Corrupt", pfx, check, CHECK_MS_PCC);
if (validation_bits & CHECK_VALID_MS_UNCORRECTED)
print_bool("Uncorrected", pfx, check, CHECK_MS_UNCORRECTED);
if (validation_bits & CHECK_VALID_MS_PRECISE_IP)
print_bool("Precise IP", pfx, check, CHECK_MS_PRECISE_IP);
if (validation_bits & CHECK_VALID_MS_RESTARTABLE_IP)
print_bool("Restartable IP", pfx, check, CHECK_MS_RESTARTABLE_IP);
if (validation_bits & CHECK_VALID_MS_OVERFLOW)
print_bool("Overflow", pfx, check, CHECK_MS_OVERFLOW);
}
static void print_err_info(const char *pfx, u8 err_type, u64 check)
{
u16 validation_bits = CHECK_VALID_BITS(check);
/*
* The MS Check structure varies a lot from the others, so use a
* separate function for decoding.
*/
if (err_type == ERR_TYPE_MS)
return print_err_info_ms(pfx, validation_bits, check);
if (validation_bits & CHECK_VALID_TRANS_TYPE) {
u8 trans_type = CHECK_TRANS_TYPE(check);
printk("%sTransaction Type: %u, %s\n", pfx, trans_type,
trans_type < ARRAY_SIZE(ia_check_trans_type_strs) ?
ia_check_trans_type_strs[trans_type] : "unknown");
}
if (validation_bits & CHECK_VALID_OPERATION) {
u8 op = CHECK_OPERATION(check);
/*
* CACHE has more operation types than TLB or BUS, though the
 * names and the order are the same.
*/
u8 max_ops = (err_type == ERR_TYPE_CACHE) ? 9 : 7;
printk("%sOperation: %u, %s\n", pfx, op,
op < max_ops ? ia_check_op_strs[op] : "unknown");
}
if (validation_bits & CHECK_VALID_LEVEL)
printk("%sLevel: %llu\n", pfx, CHECK_LEVEL(check));
if (validation_bits & CHECK_VALID_PCC)
print_bool("Processor Context Corrupt", pfx, check, CHECK_PCC);
if (validation_bits & CHECK_VALID_UNCORRECTED)
print_bool("Uncorrected", pfx, check, CHECK_UNCORRECTED);
if (validation_bits & CHECK_VALID_PRECISE_IP)
print_bool("Precise IP", pfx, check, CHECK_PRECISE_IP);
if (validation_bits & CHECK_VALID_RESTARTABLE_IP)
print_bool("Restartable IP", pfx, check, CHECK_RESTARTABLE_IP);
if (validation_bits & CHECK_VALID_OVERFLOW)
print_bool("Overflow", pfx, check, CHECK_OVERFLOW);
if (err_type != ERR_TYPE_BUS)
return;
if (validation_bits & CHECK_VALID_BUS_PART_TYPE) {
u8 part_type = CHECK_BUS_PART_TYPE(check);
printk("%sParticipation Type: %u, %s\n", pfx, part_type,
part_type < ARRAY_SIZE(ia_check_bus_part_type_strs) ?
ia_check_bus_part_type_strs[part_type] : "unknown");
}
if (validation_bits & CHECK_VALID_BUS_TIME_OUT)
print_bool("Time Out", pfx, check, CHECK_BUS_TIME_OUT);
if (validation_bits & CHECK_VALID_BUS_ADDR_SPACE) {
u8 addr_space = CHECK_BUS_ADDR_SPACE(check);
printk("%sAddress Space: %u, %s\n", pfx, addr_space,
addr_space < ARRAY_SIZE(ia_check_bus_addr_space_strs) ?
ia_check_bus_addr_space_strs[addr_space] : "unknown");
}
}
void cper_print_proc_ia(const char *pfx, const struct cper_sec_proc_ia *proc)
{
int i;
struct cper_ia_err_info *err_info;
struct cper_ia_proc_ctx *ctx_info;
char newpfx[64], infopfx[64];
u8 err_type;
if (proc->validation_bits & VALID_LAPIC_ID)
printk("%sLocal APIC_ID: 0x%llx\n", pfx, proc->lapic_id);
if (proc->validation_bits & VALID_CPUID_INFO) {
printk("%sCPUID Info:\n", pfx);
print_hex_dump(pfx, "", DUMP_PREFIX_OFFSET, 16, 4, proc->cpuid,
sizeof(proc->cpuid), 0);
}
snprintf(newpfx, sizeof(newpfx), "%s ", pfx);
err_info = (struct cper_ia_err_info *)(proc + 1);
for (i = 0; i < VALID_PROC_ERR_INFO_NUM(proc->validation_bits); i++) {
printk("%sError Information Structure %d:\n", pfx, i);
err_type = cper_get_err_type(&err_info->err_type);
printk("%sError Structure Type: %s\n", newpfx,
err_type < ARRAY_SIZE(cper_proc_error_type_strs) ?
cper_proc_error_type_strs[err_type] : "unknown");
if (err_type >= N_ERR_TYPES) {
printk("%sError Structure Type: %pUl\n", newpfx,
&err_info->err_type);
}
if (err_info->validation_bits & INFO_VALID_CHECK_INFO) {
printk("%sCheck Information: 0x%016llx\n", newpfx,
err_info->check_info);
if (err_type < N_ERR_TYPES) {
snprintf(infopfx, sizeof(infopfx), "%s ",
newpfx);
print_err_info(infopfx, err_type,
err_info->check_info);
}
}
if (err_info->validation_bits & INFO_VALID_TARGET_ID) {
printk("%sTarget Identifier: 0x%016llx\n",
newpfx, err_info->target_id);
}
if (err_info->validation_bits & INFO_VALID_REQUESTOR_ID) {
printk("%sRequestor Identifier: 0x%016llx\n",
newpfx, err_info->requestor_id);
}
if (err_info->validation_bits & INFO_VALID_RESPONDER_ID) {
printk("%sResponder Identifier: 0x%016llx\n",
newpfx, err_info->responder_id);
}
if (err_info->validation_bits & INFO_VALID_IP) {
printk("%sInstruction Pointer: 0x%016llx\n",
newpfx, err_info->ip);
}
err_info++;
}
ctx_info = (struct cper_ia_proc_ctx *)err_info;
for (i = 0; i < VALID_PROC_CXT_INFO_NUM(proc->validation_bits); i++) {
int size = sizeof(*ctx_info) + ctx_info->reg_arr_size;
int groupsize = 4;
printk("%sContext Information Structure %d:\n", pfx, i);
printk("%sRegister Context Type: %s\n", newpfx,
ctx_info->reg_ctx_type < ARRAY_SIZE(ia_reg_ctx_strs) ?
ia_reg_ctx_strs[ctx_info->reg_ctx_type] : "unknown");
printk("%sRegister Array Size: 0x%04x\n", newpfx,
ctx_info->reg_arr_size);
if (ctx_info->reg_ctx_type == CTX_TYPE_MSR) {
groupsize = 8; /* MSRs are 8 bytes wide. */
printk("%sMSR Address: 0x%08x\n", newpfx,
ctx_info->msr_addr);
}
if (ctx_info->reg_ctx_type == CTX_TYPE_MMREG) {
printk("%sMM Register Address: 0x%016llx\n", newpfx,
ctx_info->mm_reg_addr);
}
if (ctx_info->reg_ctx_type != CTX_TYPE_MSR ||
arch_apei_report_x86_error(ctx_info, proc->lapic_id)) {
printk("%sRegister Array:\n", newpfx);
print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16,
groupsize, (ctx_info + 1),
ctx_info->reg_arr_size, 0);
}
ctx_info = (struct cper_ia_proc_ctx *)((long)ctx_info + size);
}
}
| linux-master | drivers/firmware/efi/cper-x86.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Google, Inc.
* Thiebaud Weksteen <[email protected]>
*/
#define TPM_MEMREMAP(start, size) early_memremap(start, size)
#define TPM_MEMUNMAP(start, size) early_memunmap(start, size)
#include <asm/early_ioremap.h>
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/tpm_eventlog.h>
int efi_tpm_final_log_size;
EXPORT_SYMBOL(efi_tpm_final_log_size);
static int __init tpm2_calc_event_log_size(void *data, int count, void *size_info)
{
struct tcg_pcr_event2_head *header;
int event_size, size = 0;
while (count > 0) {
header = data + size;
event_size = __calc_tpm2_event_size(header, size_info, true);
if (event_size == 0)
return -1;
size += event_size;
count--;
}
return size;
}
/*
* Reserve the memory associated with the TPM Event Log configuration table.
*/
int __init efi_tpm_eventlog_init(void)
{
struct linux_efi_tpm_eventlog *log_tbl;
struct efi_tcg2_final_events_table *final_tbl;
int tbl_size;
int ret = 0;
if (efi.tpm_log == EFI_INVALID_TABLE_ADDR) {
/*
* We can't calculate the size of the final events without the
* first entry in the TPM log, so bail here.
*/
return 0;
}
log_tbl = early_memremap(efi.tpm_log, sizeof(*log_tbl));
if (!log_tbl) {
pr_err("Failed to map TPM Event Log table @ 0x%lx\n",
efi.tpm_log);
efi.tpm_log = EFI_INVALID_TABLE_ADDR;
return -ENOMEM;
}
tbl_size = sizeof(*log_tbl) + log_tbl->size;
memblock_reserve(efi.tpm_log, tbl_size);
if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR) {
pr_info("TPM Final Events table not present\n");
goto out;
} else if (log_tbl->version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) {
pr_warn(FW_BUG "TPM Final Events table invalid\n");
goto out;
}
final_tbl = early_memremap(efi.tpm_final_log, sizeof(*final_tbl));
if (!final_tbl) {
pr_err("Failed to map TPM Final Event Log table @ 0x%lx\n",
efi.tpm_final_log);
efi.tpm_final_log = EFI_INVALID_TABLE_ADDR;
ret = -ENOMEM;
goto out;
}
tbl_size = 0;
if (final_tbl->nr_events != 0) {
void *events = (void *)efi.tpm_final_log
+ sizeof(final_tbl->version)
+ sizeof(final_tbl->nr_events);
tbl_size = tpm2_calc_event_log_size(events,
final_tbl->nr_events,
log_tbl->log);
}
if (tbl_size < 0) {
pr_err(FW_BUG "Failed to parse event in TPM Final Events Log\n");
ret = -EINVAL;
goto out_calc;
}
memblock_reserve(efi.tpm_final_log,
tbl_size + sizeof(*final_tbl));
efi_tpm_final_log_size = tbl_size;
out_calc:
early_memunmap(final_tbl, sizeof(*final_tbl));
out:
early_memunmap(log_tbl, sizeof(*log_tbl));
return ret;
}
| linux-master | drivers/firmware/efi/tpm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* UEFI Common Platform Error Record (CPER) support
*
* Copyright (C) 2017, The Linux Foundation. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/cper.h>
#include <linux/dmi.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/printk.h>
#include <linux/bcd.h>
#include <acpi/ghes.h>
#include <ras/ras_event.h>
static const char * const arm_reg_ctx_strs[] = {
"AArch32 general purpose registers",
"AArch32 EL1 context registers",
"AArch32 EL2 context registers",
"AArch32 secure context registers",
"AArch64 general purpose registers",
"AArch64 EL1 context registers",
"AArch64 EL2 context registers",
"AArch64 EL3 context registers",
"Misc. system register structure",
};
static const char * const arm_err_trans_type_strs[] = {
"Instruction",
"Data Access",
"Generic",
};
static const char * const arm_bus_err_op_strs[] = {
"Generic error (type cannot be determined)",
"Generic read (type of instruction or data request cannot be determined)",
"Generic write (type of instruction of data request cannot be determined)",
"Data read",
"Data write",
"Instruction fetch",
"Prefetch",
};
static const char * const arm_cache_err_op_strs[] = {
"Generic error (type cannot be determined)",
"Generic read (type of instruction or data request cannot be determined)",
"Generic write (type of instruction of data request cannot be determined)",
"Data read",
"Data write",
"Instruction fetch",
"Prefetch",
"Eviction",
"Snooping (processor initiated a cache snoop that resulted in an error)",
"Snooped (processor raised a cache error caused by another processor or device snooping its cache)",
"Management",
};
static const char * const arm_tlb_err_op_strs[] = {
"Generic error (type cannot be determined)",
"Generic read (type of instruction or data request cannot be determined)",
"Generic write (type of instruction of data request cannot be determined)",
"Data read",
"Data write",
"Instruction fetch",
"Prefetch",
"Local management operation (processor initiated a TLB management operation that resulted in an error)",
"External management operation (processor raised a TLB error caused by another processor or device broadcasting TLB operations)",
};
static const char * const arm_bus_err_part_type_strs[] = {
"Local processor originated request",
"Local processor responded to request",
"Local processor observed",
"Generic",
};
static const char * const arm_bus_err_addr_space_strs[] = {
"External Memory Access",
"Internal Memory Access",
"Unknown",
"Device Memory Access",
};
static void cper_print_arm_err_info(const char *pfx, u32 type,
u64 error_info)
{
u8 trans_type, op_type, level, participation_type, address_space;
u16 mem_attributes;
bool proc_context_corrupt, corrected, precise_pc, restartable_pc;
bool time_out, access_mode;
/* If the type is unknown, bail. */
if (type > CPER_ARM_MAX_TYPE)
return;
/*
* Vendor type errors have error information values that are vendor
* specific.
*/
if (type == CPER_ARM_VENDOR_ERROR)
return;
if (error_info & CPER_ARM_ERR_VALID_TRANSACTION_TYPE) {
trans_type = ((error_info >> CPER_ARM_ERR_TRANSACTION_SHIFT)
& CPER_ARM_ERR_TRANSACTION_MASK);
if (trans_type < ARRAY_SIZE(arm_err_trans_type_strs)) {
printk("%stransaction type: %s\n", pfx,
arm_err_trans_type_strs[trans_type]);
}
}
if (error_info & CPER_ARM_ERR_VALID_OPERATION_TYPE) {
op_type = ((error_info >> CPER_ARM_ERR_OPERATION_SHIFT)
& CPER_ARM_ERR_OPERATION_MASK);
switch (type) {
case CPER_ARM_CACHE_ERROR:
if (op_type < ARRAY_SIZE(arm_cache_err_op_strs)) {
printk("%soperation type: %s\n", pfx,
arm_cache_err_op_strs[op_type]);
}
break;
case CPER_ARM_TLB_ERROR:
if (op_type < ARRAY_SIZE(arm_tlb_err_op_strs)) {
printk("%soperation type: %s\n", pfx,
arm_tlb_err_op_strs[op_type]);
}
break;
case CPER_ARM_BUS_ERROR:
if (op_type < ARRAY_SIZE(arm_bus_err_op_strs)) {
printk("%soperation type: %s\n", pfx,
arm_bus_err_op_strs[op_type]);
}
break;
}
}
if (error_info & CPER_ARM_ERR_VALID_LEVEL) {
level = ((error_info >> CPER_ARM_ERR_LEVEL_SHIFT)
& CPER_ARM_ERR_LEVEL_MASK);
switch (type) {
case CPER_ARM_CACHE_ERROR:
printk("%scache level: %d\n", pfx, level);
break;
case CPER_ARM_TLB_ERROR:
printk("%sTLB level: %d\n", pfx, level);
break;
case CPER_ARM_BUS_ERROR:
printk("%saffinity level at which the bus error occurred: %d\n",
pfx, level);
break;
}
}
if (error_info & CPER_ARM_ERR_VALID_PROC_CONTEXT_CORRUPT) {
proc_context_corrupt = ((error_info >> CPER_ARM_ERR_PC_CORRUPT_SHIFT)
& CPER_ARM_ERR_PC_CORRUPT_MASK);
if (proc_context_corrupt)
printk("%sprocessor context corrupted\n", pfx);
else
printk("%sprocessor context not corrupted\n", pfx);
}
if (error_info & CPER_ARM_ERR_VALID_CORRECTED) {
corrected = ((error_info >> CPER_ARM_ERR_CORRECTED_SHIFT)
& CPER_ARM_ERR_CORRECTED_MASK);
if (corrected)
printk("%sthe error has been corrected\n", pfx);
else
printk("%sthe error has not been corrected\n", pfx);
}
if (error_info & CPER_ARM_ERR_VALID_PRECISE_PC) {
precise_pc = ((error_info >> CPER_ARM_ERR_PRECISE_PC_SHIFT)
& CPER_ARM_ERR_PRECISE_PC_MASK);
if (precise_pc)
printk("%sPC is precise\n", pfx);
else
printk("%sPC is imprecise\n", pfx);
}
if (error_info & CPER_ARM_ERR_VALID_RESTARTABLE_PC) {
restartable_pc = ((error_info >> CPER_ARM_ERR_RESTARTABLE_PC_SHIFT)
& CPER_ARM_ERR_RESTARTABLE_PC_MASK);
if (restartable_pc)
printk("%sProgram execution can be restarted reliably at the PC associated with the error.\n", pfx);
}
/* The rest of the fields are specific to bus errors */
if (type != CPER_ARM_BUS_ERROR)
return;
if (error_info & CPER_ARM_ERR_VALID_PARTICIPATION_TYPE) {
participation_type = ((error_info >> CPER_ARM_ERR_PARTICIPATION_TYPE_SHIFT)
& CPER_ARM_ERR_PARTICIPATION_TYPE_MASK);
if (participation_type < ARRAY_SIZE(arm_bus_err_part_type_strs)) {
printk("%sparticipation type: %s\n", pfx,
arm_bus_err_part_type_strs[participation_type]);
}
}
if (error_info & CPER_ARM_ERR_VALID_TIME_OUT) {
time_out = ((error_info >> CPER_ARM_ERR_TIME_OUT_SHIFT)
& CPER_ARM_ERR_TIME_OUT_MASK);
if (time_out)
printk("%srequest timed out\n", pfx);
}
if (error_info & CPER_ARM_ERR_VALID_ADDRESS_SPACE) {
address_space = ((error_info >> CPER_ARM_ERR_ADDRESS_SPACE_SHIFT)
& CPER_ARM_ERR_ADDRESS_SPACE_MASK);
if (address_space < ARRAY_SIZE(arm_bus_err_addr_space_strs)) {
printk("%saddress space: %s\n", pfx,
arm_bus_err_addr_space_strs[address_space]);
}
}
if (error_info & CPER_ARM_ERR_VALID_MEM_ATTRIBUTES) {
mem_attributes = ((error_info >> CPER_ARM_ERR_MEM_ATTRIBUTES_SHIFT)
& CPER_ARM_ERR_MEM_ATTRIBUTES_MASK);
printk("%smemory access attributes:0x%x\n", pfx, mem_attributes);
}
if (error_info & CPER_ARM_ERR_VALID_ACCESS_MODE) {
access_mode = ((error_info >> CPER_ARM_ERR_ACCESS_MODE_SHIFT)
& CPER_ARM_ERR_ACCESS_MODE_MASK);
if (access_mode)
printk("%saccess mode: normal\n", pfx);
else
printk("%saccess mode: secure\n", pfx);
}
}
void cper_print_proc_arm(const char *pfx,
const struct cper_sec_proc_arm *proc)
{
int i, len, max_ctx_type;
struct cper_arm_err_info *err_info;
struct cper_arm_ctx_info *ctx_info;
char newpfx[64], infopfx[64];
printk("%sMIDR: 0x%016llx\n", pfx, proc->midr);
len = proc->section_length - (sizeof(*proc) +
proc->err_info_num * (sizeof(*err_info)));
if (len < 0) {
printk("%ssection length: %d\n", pfx, proc->section_length);
printk("%ssection length is too small\n", pfx);
printk("%sfirmware-generated error record is incorrect\n", pfx);
printk("%sERR_INFO_NUM is %d\n", pfx, proc->err_info_num);
return;
}
if (proc->validation_bits & CPER_ARM_VALID_MPIDR)
printk("%sMultiprocessor Affinity Register (MPIDR): 0x%016llx\n",
pfx, proc->mpidr);
if (proc->validation_bits & CPER_ARM_VALID_AFFINITY_LEVEL)
printk("%serror affinity level: %d\n", pfx,
proc->affinity_level);
if (proc->validation_bits & CPER_ARM_VALID_RUNNING_STATE) {
printk("%srunning state: 0x%x\n", pfx, proc->running_state);
printk("%sPower State Coordination Interface state: %d\n",
pfx, proc->psci_state);
}
snprintf(newpfx, sizeof(newpfx), "%s ", pfx);
err_info = (struct cper_arm_err_info *)(proc + 1);
for (i = 0; i < proc->err_info_num; i++) {
printk("%sError info structure %d:\n", pfx, i);
printk("%snum errors: %d\n", pfx, err_info->multiple_error + 1);
if (err_info->validation_bits & CPER_ARM_INFO_VALID_FLAGS) {
if (err_info->flags & CPER_ARM_INFO_FLAGS_FIRST)
printk("%sfirst error captured\n", newpfx);
if (err_info->flags & CPER_ARM_INFO_FLAGS_LAST)
printk("%slast error captured\n", newpfx);
if (err_info->flags & CPER_ARM_INFO_FLAGS_PROPAGATED)
printk("%spropagated error captured\n",
newpfx);
if (err_info->flags & CPER_ARM_INFO_FLAGS_OVERFLOW)
printk("%soverflow occurred, error info is incomplete\n",
newpfx);
}
printk("%serror_type: %d, %s\n", newpfx, err_info->type,
err_info->type < ARRAY_SIZE(cper_proc_error_type_strs) ?
cper_proc_error_type_strs[err_info->type] : "unknown");
if (err_info->validation_bits & CPER_ARM_INFO_VALID_ERR_INFO) {
printk("%serror_info: 0x%016llx\n", newpfx,
err_info->error_info);
snprintf(infopfx, sizeof(infopfx), "%s ", newpfx);
cper_print_arm_err_info(infopfx, err_info->type,
err_info->error_info);
}
if (err_info->validation_bits & CPER_ARM_INFO_VALID_VIRT_ADDR)
printk("%svirtual fault address: 0x%016llx\n",
newpfx, err_info->virt_fault_addr);
if (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR)
printk("%sphysical fault address: 0x%016llx\n",
newpfx, err_info->physical_fault_addr);
err_info += 1;
}
ctx_info = (struct cper_arm_ctx_info *)err_info;
max_ctx_type = ARRAY_SIZE(arm_reg_ctx_strs) - 1;
for (i = 0; i < proc->context_info_num; i++) {
int size = sizeof(*ctx_info) + ctx_info->size;
printk("%sContext info structure %d:\n", pfx, i);
if (len < size) {
printk("%ssection length is too small\n", newpfx);
printk("%sfirmware-generated error record is incorrect\n", pfx);
return;
}
if (ctx_info->type > max_ctx_type) {
printk("%sInvalid context type: %d (max: %d)\n",
newpfx, ctx_info->type, max_ctx_type);
return;
}
printk("%sregister context type: %s\n", newpfx,
arm_reg_ctx_strs[ctx_info->type]);
print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4,
(ctx_info + 1), ctx_info->size, 0);
len -= size;
ctx_info = (struct cper_arm_ctx_info *)((long)ctx_info + size);
}
if (len > 0) {
printk("%sVendor specific error info has %u bytes:\n", pfx,
len);
print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4, ctx_info,
len, true);
}
}
| linux-master | drivers/firmware/efi/cper-arm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* apple-properties.c - EFI device properties on Macs
* Copyright (C) 2016 Lukas Wunner <[email protected]>
*
* Properties are stored either as:
* u8 arrays which can be retrieved with device_property_read_u8_array() or
* booleans which can be queried with device_property_present().
*/
#define pr_fmt(fmt) "apple-properties: " fmt
#include <linux/memblock.h>
#include <linux/efi.h>
#include <linux/io.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/ucs2_string.h>
#include <asm/setup.h>
static bool dump_properties __initdata;
static int __init dump_properties_enable(char *arg)
{
dump_properties = true;
return 1;
}
__setup("dump_apple_properties", dump_properties_enable);
struct dev_header {
u32 len;
u32 prop_count;
struct efi_dev_path path[];
/*
* followed by key/value pairs, each key and value preceded by u32 len,
* len includes itself, value may be empty (in which case its len is 4)
*/
};
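/*
 * Worked example, kept out of the build: each key is stored as a u32
 * length followed by a UTF-16 string with no terminator, and the length
 * includes itself. For a hypothetical 15-character key such as
 * "backlight-level" the stored key_len is therefore 4 + 15 * 2 = 34, and
 * an empty value is encoded as val_len == 4.
 */
#if 0
static u32 example_key_len(const efi_char16_t *key, size_t nchars)
{
	return sizeof(u32) + nchars * sizeof(efi_char16_t);
}
#endif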
struct properties_header {
u32 len;
u32 version;
u32 dev_count;
struct dev_header dev_header[];
};
static void __init unmarshal_key_value_pairs(struct dev_header *dev_header,
struct device *dev, const void *ptr,
struct property_entry entry[])
{
int i;
for (i = 0; i < dev_header->prop_count; i++) {
int remaining = dev_header->len - (ptr - (void *)dev_header);
u32 key_len, val_len, entry_len;
const u8 *entry_data;
char *key;
if (sizeof(key_len) > remaining)
break;
key_len = *(typeof(key_len) *)ptr;
if (key_len + sizeof(val_len) > remaining ||
key_len < sizeof(key_len) + sizeof(efi_char16_t) ||
*(efi_char16_t *)(ptr + sizeof(key_len)) == 0) {
dev_err(dev, "invalid property name len at %#zx\n",
ptr - (void *)dev_header);
break;
}
val_len = *(typeof(val_len) *)(ptr + key_len);
if (key_len + val_len > remaining ||
val_len < sizeof(val_len)) {
dev_err(dev, "invalid property val len at %#zx\n",
ptr - (void *)dev_header + key_len);
break;
}
/* 4 bytes to accommodate UTF-8 code points + null byte */
key = kzalloc((key_len - sizeof(key_len)) * 4 + 1, GFP_KERNEL);
if (!key) {
dev_err(dev, "cannot allocate property name\n");
break;
}
ucs2_as_utf8(key, ptr + sizeof(key_len),
key_len - sizeof(key_len));
entry_data = ptr + key_len + sizeof(val_len);
entry_len = val_len - sizeof(val_len);
if (entry_len)
entry[i] = PROPERTY_ENTRY_U8_ARRAY_LEN(key, entry_data,
entry_len);
else
entry[i] = PROPERTY_ENTRY_BOOL(key);
if (dump_properties) {
dev_info(dev, "property: %s\n", key);
print_hex_dump(KERN_INFO, pr_fmt(), DUMP_PREFIX_OFFSET,
16, 1, entry_data, entry_len, true);
}
ptr += key_len + val_len;
}
if (i != dev_header->prop_count) {
dev_err(dev, "got %d device properties, expected %u\n", i,
dev_header->prop_count);
print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET,
16, 1, dev_header, dev_header->len, true);
return;
}
dev_info(dev, "assigning %d device properties\n", i);
}
static int __init unmarshal_devices(struct properties_header *properties)
{
size_t offset = offsetof(struct properties_header, dev_header[0]);
while (offset + sizeof(struct dev_header) < properties->len) {
struct dev_header *dev_header = (void *)properties + offset;
struct property_entry *entry = NULL;
const struct efi_dev_path *ptr;
struct device *dev;
size_t len;
int ret, i;
if (offset + dev_header->len > properties->len ||
dev_header->len <= sizeof(*dev_header)) {
pr_err("invalid len in dev_header at %#zx\n", offset);
return -EINVAL;
}
ptr = dev_header->path;
len = dev_header->len - sizeof(*dev_header);
dev = efi_get_device_by_path(&ptr, &len);
if (IS_ERR(dev)) {
pr_err("device path parse error %ld at %#zx:\n",
PTR_ERR(dev), (void *)ptr - (void *)dev_header);
print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET,
16, 1, dev_header, dev_header->len, true);
dev = NULL;
goto skip_device;
}
entry = kcalloc(dev_header->prop_count + 1, sizeof(*entry),
GFP_KERNEL);
if (!entry) {
dev_err(dev, "cannot allocate properties\n");
goto skip_device;
}
unmarshal_key_value_pairs(dev_header, dev, ptr, entry);
if (!entry[0].name)
goto skip_device;
ret = device_create_managed_software_node(dev, entry, NULL);
if (ret)
dev_err(dev, "error %d assigning properties\n", ret);
for (i = 0; entry[i].name; i++)
kfree(entry[i].name);
skip_device:
kfree(entry);
put_device(dev);
offset += dev_header->len;
}
return 0;
}
static int __init map_properties(void)
{
struct properties_header *properties;
struct setup_data *data;
u32 data_len;
u64 pa_data;
int ret;
if (!x86_apple_machine)
return 0;
pa_data = boot_params.hdr.setup_data;
while (pa_data) {
data = memremap(pa_data, sizeof(*data), MEMREMAP_WB);
if (!data) {
pr_err("cannot map setup_data header\n");
return -ENOMEM;
}
if (data->type != SETUP_APPLE_PROPERTIES) {
pa_data = data->next;
memunmap(data);
continue;
}
data_len = data->len;
memunmap(data);
data = memremap(pa_data, sizeof(*data) + data_len, MEMREMAP_WB);
if (!data) {
pr_err("cannot map setup_data payload\n");
return -ENOMEM;
}
properties = (struct properties_header *)data->data;
if (properties->version != 1) {
pr_err("unsupported version:\n");
print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET,
16, 1, properties, data_len, true);
ret = -ENOTSUPP;
} else if (properties->len != data_len) {
pr_err("length mismatch, expected %u\n", data_len);
print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET,
16, 1, properties, data_len, true);
ret = -EINVAL;
} else
ret = unmarshal_devices(properties);
/*
* Can only free the setup_data payload but not its header
* to avoid breaking the chain of ->next pointers.
*/
data->len = 0;
memunmap(data);
memblock_free_late(pa_data + sizeof(*data), data_len);
return ret;
}
return 0;
}
fs_initcall(map_properties);
| linux-master | drivers/firmware/efi/apple-properties.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <asm/unaccepted_memory.h>
/* Protects unaccepted memory bitmap */
static DEFINE_SPINLOCK(unaccepted_memory_lock);
/*
* accept_memory() -- Consult bitmap and accept the memory if needed.
*
* Only memory that is explicitly marked as unaccepted in the bitmap requires
* an action. All the remaining memory is implicitly accepted and doesn't need
* acceptance.
*
* No need to accept:
* - anything if the system has no unaccepted table;
* - memory that is below phys_base;
 *  - memory that is above the memory that is addressable by the bitmap.
*/
void accept_memory(phys_addr_t start, phys_addr_t end)
{
struct efi_unaccepted_memory *unaccepted;
unsigned long range_start, range_end;
unsigned long flags;
u64 unit_size;
unaccepted = efi_get_unaccepted_table();
if (!unaccepted)
return;
unit_size = unaccepted->unit_size;
/*
* Only care for the part of the range that is represented
* in the bitmap.
*/
if (start < unaccepted->phys_base)
start = unaccepted->phys_base;
if (end < unaccepted->phys_base)
return;
/* Translate to offsets from the beginning of the bitmap */
start -= unaccepted->phys_base;
end -= unaccepted->phys_base;
/*
* load_unaligned_zeropad() can lead to unwanted loads across page
* boundaries. The unwanted loads are typically harmless. But, they
* might be made to totally unrelated or even unmapped memory.
* load_unaligned_zeropad() relies on exception fixup (#PF, #GP and now
* #VE) to recover from these unwanted loads.
*
* But, this approach does not work for unaccepted memory. For TDX, a
* load from unaccepted memory will not lead to a recoverable exception
* within the guest. The guest will exit to the VMM where the only
* recourse is to terminate the guest.
*
* There are two parts to fix this issue and comprehensively avoid
* access to unaccepted memory. Together these ensure that an extra
* "guard" page is accepted in addition to the memory that needs to be
* used:
*
* 1. Implicitly extend the range_contains_unaccepted_memory(start, end)
* checks up to end+unit_size if 'end' is aligned on a unit_size
* boundary.
*
* 2. Implicitly extend accept_memory(start, end) to end+unit_size if
* 'end' is aligned on a unit_size boundary. (immediately following
* this comment)
*/
if (!(end % unit_size))
end += unit_size;
/* Make sure not to overrun the bitmap */
if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
end = unaccepted->size * unit_size * BITS_PER_BYTE;
range_start = start / unit_size;
spin_lock_irqsave(&unaccepted_memory_lock, flags);
for_each_set_bitrange_from(range_start, range_end, unaccepted->bitmap,
DIV_ROUND_UP(end, unit_size)) {
unsigned long phys_start, phys_end;
unsigned long len = range_end - range_start;
phys_start = range_start * unit_size + unaccepted->phys_base;
phys_end = range_end * unit_size + unaccepted->phys_base;
arch_accept_memory(phys_start, phys_end);
bitmap_clear(unaccepted->bitmap, range_start, len);
}
spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
}
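/*
 * Illustrative sketch, kept out of the build: the bitmap carries one bit
 * per unit_size chunk, so a physical address at or above phys_base maps to
 * bit (phys - phys_base) / unit_size, mirroring the arithmetic above.
 */
#if 0
static unsigned long example_bitmap_index(struct efi_unaccepted_memory *u,
					  phys_addr_t phys)
{
	return (phys - u->phys_base) / u->unit_size;
}
#endif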
bool range_contains_unaccepted_memory(phys_addr_t start, phys_addr_t end)
{
struct efi_unaccepted_memory *unaccepted;
unsigned long flags;
bool ret = false;
u64 unit_size;
unaccepted = efi_get_unaccepted_table();
if (!unaccepted)
return false;
unit_size = unaccepted->unit_size;
/*
* Only care for the part of the range that is represented
* in the bitmap.
*/
if (start < unaccepted->phys_base)
start = unaccepted->phys_base;
if (end < unaccepted->phys_base)
return false;
/* Translate to offsets from the beginning of the bitmap */
start -= unaccepted->phys_base;
end -= unaccepted->phys_base;
/*
* Also consider the unaccepted state of the *next* page. See fix #1 in
* the comment on load_unaligned_zeropad() in accept_memory().
*/
if (!(end % unit_size))
end += unit_size;
/* Make sure not to overrun the bitmap */
if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
end = unaccepted->size * unit_size * BITS_PER_BYTE;
spin_lock_irqsave(&unaccepted_memory_lock, flags);
while (start < end) {
if (test_bit(start / unit_size, unaccepted->bitmap)) {
ret = true;
break;
}
start += unit_size;
}
spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
return ret;
}
| linux-master | drivers/firmware/efi/unaccepted_memory.c |
// SPDX-License-Identifier: GPL-2.0
/*
* EFI capsule support.
*
* Copyright 2013 Intel Corporation; author Matt Fleming
*/
#define pr_fmt(fmt) "efi: " fmt
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/highmem.h>
#include <linux/efi.h>
#include <linux/vmalloc.h>
#include <asm/efi.h>
#include <asm/io.h>
typedef struct {
u64 length;
u64 data;
} efi_capsule_block_desc_t;
static bool capsule_pending;
static bool stop_capsules;
static int efi_reset_type = -1;
/*
* capsule_mutex serialises access to both capsule_pending and
* efi_reset_type and stop_capsules.
*/
static DEFINE_MUTEX(capsule_mutex);
/**
* efi_capsule_pending - has a capsule been passed to the firmware?
* @reset_type: store the type of EFI reset if capsule is pending
*
* To ensure that the registered capsule is processed correctly by the
* firmware we need to perform a specific type of reset. If a capsule is
* pending return the reset type in @reset_type.
*
* This function will race with callers of efi_capsule_update(), for
* example, calling this function while somebody else is in
 * efi_capsule_update() but hasn't reached efi_capsule_update_locked()
* will miss the updates to capsule_pending and efi_reset_type after
* efi_capsule_update_locked() completes.
*
* A non-racy use is from platform reboot code because we use
* system_state to ensure no capsules can be sent to the firmware once
* we're at SYSTEM_RESTART. See efi_capsule_update_locked().
*/
bool efi_capsule_pending(int *reset_type)
{
if (!capsule_pending)
return false;
if (reset_type)
*reset_type = efi_reset_type;
return true;
}
/*
* Whitelist of EFI capsule flags that we support.
*
* We do not handle EFI_CAPSULE_INITIATE_RESET because that would
* require us to prepare the kernel for reboot. Refuse to load any
* capsules with that flag and any other flags that we do not know how
* to handle.
*/
#define EFI_CAPSULE_SUPPORTED_FLAG_MASK \
(EFI_CAPSULE_PERSIST_ACROSS_RESET | EFI_CAPSULE_POPULATE_SYSTEM_TABLE)
/**
* efi_capsule_supported - does the firmware support the capsule?
* @guid: vendor guid of capsule
* @flags: capsule flags
* @size: size of capsule data
* @reset: the reset type required for this capsule
*
* Check whether a capsule with @flags is supported by the firmware
* and that @size doesn't exceed the maximum size for a capsule.
*
* No attempt is made to check @reset against the reset type required
* by any pending capsules because of the races involved.
*/
int efi_capsule_supported(efi_guid_t guid, u32 flags, size_t size, int *reset)
{
efi_capsule_header_t capsule;
efi_capsule_header_t *cap_list[] = { &capsule };
efi_status_t status;
u64 max_size;
if (flags & ~EFI_CAPSULE_SUPPORTED_FLAG_MASK)
return -EINVAL;
capsule.headersize = capsule.imagesize = sizeof(capsule);
memcpy(&capsule.guid, &guid, sizeof(efi_guid_t));
capsule.flags = flags;
status = efi.query_capsule_caps(cap_list, 1, &max_size, reset);
if (status != EFI_SUCCESS)
return efi_status_to_err(status);
if (size > max_size)
return -ENOSPC;
return 0;
}
EXPORT_SYMBOL_GPL(efi_capsule_supported);
/*
* Every scatter gather list (block descriptor) page must end with a
* continuation pointer. The last continuation pointer of the last
* page must be zero to mark the end of the chain.
*/
#define SGLIST_PER_PAGE ((PAGE_SIZE / sizeof(efi_capsule_block_desc_t)) - 1)
/*
* How many scatter gather list (block descriptor) pages do we need
* to map @count pages?
*/
static inline unsigned int sg_pages_num(unsigned int count)
{
return DIV_ROUND_UP(count, SGLIST_PER_PAGE);
}
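/*
 * Worked example (assuming 4 KiB pages; PAGE_SIZE is architecture
 * dependent): sizeof(efi_capsule_block_desc_t) is 16 bytes, so
 * SGLIST_PER_PAGE = 4096 / 16 - 1 = 255 block descriptors per page,
 * with the last slot reserved for the continuation pointer.  A capsule
 * spanning 1000 data pages therefore needs
 * sg_pages_num(1000) = DIV_ROUND_UP(1000, 255) = 4 descriptor pages.
 */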
/**
* efi_capsule_update_locked - pass a single capsule to the firmware
* @capsule: capsule to send to the firmware
* @sg_pages: array of scatter gather (block descriptor) pages
* @reset: the reset type required for @capsule
*
 * This function must be called with capsule_mutex held.  Check whether
 * efi_reset_type conflicts with @reset and, if the capsule is
 * successfully sent to the firmware, atomically set efi_reset_type and
 * capsule_pending.
*
* We also check to see if the system is about to restart, and if so,
* abort. This avoids races between efi_capsule_update() and
* efi_capsule_pending().
*/
static int
efi_capsule_update_locked(efi_capsule_header_t *capsule,
struct page **sg_pages, int reset)
{
efi_physical_addr_t sglist_phys;
efi_status_t status;
lockdep_assert_held(&capsule_mutex);
/*
* If someone has already registered a capsule that requires a
* different reset type, we're out of luck and must abort.
*/
if (efi_reset_type >= 0 && efi_reset_type != reset) {
pr_err("Conflicting capsule reset type %d (%d).\n",
reset, efi_reset_type);
return -EINVAL;
}
/*
* If the system is getting ready to restart it may have
* called efi_capsule_pending() to make decisions (such as
* whether to force an EFI reboot), and we're racing against
* that call. Abort in that case.
*/
if (unlikely(stop_capsules)) {
pr_warn("Capsule update raced with reboot, aborting.\n");
return -EINVAL;
}
sglist_phys = page_to_phys(sg_pages[0]);
status = efi.update_capsule(&capsule, 1, sglist_phys);
if (status == EFI_SUCCESS) {
capsule_pending = true;
efi_reset_type = reset;
}
return efi_status_to_err(status);
}
/**
* efi_capsule_update - send a capsule to the firmware
* @capsule: capsule to send to firmware
* @pages: an array of capsule data pages
*
* Build a scatter gather list with EFI capsule block descriptors to
* map the capsule described by @capsule with its data in @pages and
* send it to the firmware via the UpdateCapsule() runtime service.
*
* @capsule must be a virtual mapping of the complete capsule update in the
* kernel address space, as the capsule can be consumed immediately.
 * An efi_capsule_header_t that describes the entire contents of the
 * capsule must be at the start of the first data page.
*
* Even though this function will validate that the firmware supports
* the capsule guid, users will likely want to check that
* efi_capsule_supported() returns true before calling this function
* because it makes it easier to print helpful error messages.
*
* If the capsule is successfully submitted to the firmware, any
* subsequent calls to efi_capsule_pending() will return true. @pages
* must not be released or modified if this function returns
* successfully.
*
* Callers must be prepared for this function to fail, which can
* happen if we raced with system reboot or if there is already a
* pending capsule that has a reset type that conflicts with the one
* required by @capsule. Do NOT use efi_capsule_pending() to detect
* this conflict since that would be racy. Instead, submit the capsule
* to efi_capsule_update() and check the return value.
*
* Return 0 on success, a converted EFI status code on failure.
*/
int efi_capsule_update(efi_capsule_header_t *capsule, phys_addr_t *pages)
{
u32 imagesize = capsule->imagesize;
efi_guid_t guid = capsule->guid;
unsigned int count, sg_count;
u32 flags = capsule->flags;
struct page **sg_pages;
int rv, reset_type;
int i, j;
rv = efi_capsule_supported(guid, flags, imagesize, &reset_type);
if (rv)
return rv;
count = DIV_ROUND_UP(imagesize, PAGE_SIZE);
sg_count = sg_pages_num(count);
sg_pages = kcalloc(sg_count, sizeof(*sg_pages), GFP_KERNEL);
if (!sg_pages)
return -ENOMEM;
for (i = 0; i < sg_count; i++) {
sg_pages[i] = alloc_page(GFP_KERNEL);
if (!sg_pages[i]) {
rv = -ENOMEM;
goto out;
}
}
for (i = 0; i < sg_count; i++) {
efi_capsule_block_desc_t *sglist;
sglist = kmap_atomic(sg_pages[i]);
for (j = 0; j < SGLIST_PER_PAGE && count > 0; j++) {
u64 sz = min_t(u64, imagesize,
PAGE_SIZE - (u64)*pages % PAGE_SIZE);
sglist[j].length = sz;
sglist[j].data = *pages++;
imagesize -= sz;
count--;
}
/* Continuation pointer */
sglist[j].length = 0;
if (i + 1 == sg_count)
sglist[j].data = 0;
else
sglist[j].data = page_to_phys(sg_pages[i + 1]);
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
/*
* At runtime, the firmware has no way to find out where the
* sglist elements are mapped, if they are mapped in the first
* place. Therefore, on architectures that can only perform
* cache maintenance by virtual address, the firmware is unable
* to perform this maintenance, and so it is up to the OS to do
* it instead.
*/
efi_capsule_flush_cache_range(sglist, PAGE_SIZE);
#endif
kunmap_atomic(sglist);
}
mutex_lock(&capsule_mutex);
rv = efi_capsule_update_locked(capsule, sg_pages, reset_type);
mutex_unlock(&capsule_mutex);
out:
for (i = 0; rv && i < sg_count; i++) {
if (sg_pages[i])
__free_page(sg_pages[i]);
}
kfree(sg_pages);
return rv;
}
EXPORT_SYMBOL_GPL(efi_capsule_update);
static int capsule_reboot_notify(struct notifier_block *nb, unsigned long event, void *cmd)
{
mutex_lock(&capsule_mutex);
stop_capsules = true;
mutex_unlock(&capsule_mutex);
return NOTIFY_DONE;
}
static struct notifier_block capsule_reboot_nb = {
.notifier_call = capsule_reboot_notify,
};
static int __init capsule_reboot_register(void)
{
return register_reboot_notifier(&capsule_reboot_nb);
}
core_initcall(capsule_reboot_register);
| linux-master | drivers/firmware/efi/capsule.c |
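The kernel-doc above describes the expected calling pattern for this API: check efi_capsule_supported() first so that failures can be reported meaningfully, then hand the capsule header and the physical addresses of the data pages to efi_capsule_update(). The sketch below is a minimal, hypothetical caller; example_send_capsule(), its error messages and the way the header and page array are obtained are illustrative only and not taken from any in-tree driver.

static int example_send_capsule(efi_capsule_header_t *header,
				phys_addr_t *pages)
{
	int reset_type;
	int ret;

	/* Reject capsules the firmware cannot handle before submitting. */
	ret = efi_capsule_supported(header->guid, header->flags,
				    header->imagesize, &reset_type);
	if (ret) {
		pr_err("capsule not supported by firmware: %d\n", ret);
		return ret;
	}

	/*
	 * On success the data pages must stay allocated and unmodified
	 * until the firmware consumes the capsule across the reset.
	 */
	ret = efi_capsule_update(header, pages);
	if (ret)
		pr_err("capsule submission failed: %d\n", ret);

	return ret;
}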
// SPDX-License-Identifier: GPL-2.0
/*
* Export Runtime Configuration Interface Table Version 2 (RCI2)
* to sysfs
*
* Copyright (C) 2019 Dell Inc
* by Narendra K <[email protected]>
*
* System firmware advertises the address of the RCI2 Table via
* an EFI Configuration Table entry. This code retrieves the RCI2
* table from the address and exports it to sysfs as a binary
* attribute 'rci2' under /sys/firmware/efi/tables directory.
*/
#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/efi.h>
#include <linux/types.h>
#include <linux/io.h>
#define RCI_SIGNATURE "_RC_"
struct rci2_table_global_hdr {
u16 type;
u16 resvd0;
u16 hdr_len;
u8 rci2_sig[4];
u16 resvd1;
u32 resvd2;
u32 resvd3;
u8 major_rev;
u8 minor_rev;
u16 num_of_structs;
u32 rci2_len;
u16 rci2_chksum;
} __packed;
static u8 *rci2_base;
static u32 rci2_table_len;
unsigned long rci2_table_phys __ro_after_init = EFI_INVALID_TABLE_ADDR;
static ssize_t raw_table_read(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t pos, size_t count)
{
memcpy(buf, attr->private + pos, count);
return count;
}
static BIN_ATTR(rci2, S_IRUSR, raw_table_read, NULL, 0);
static u16 checksum(void)
{
u8 len_is_odd = rci2_table_len % 2;
u32 chksum_len = rci2_table_len;
u16 *base = (u16 *)rci2_base;
u8 buf[2] = {0};
u32 offset = 0;
u16 chksum = 0;
if (len_is_odd)
chksum_len -= 1;
while (offset < chksum_len) {
chksum += *base;
offset += 2;
base++;
}
if (len_is_odd) {
buf[0] = *(u8 *)base;
chksum += *(u16 *)(buf);
}
return chksum;
}
static int __init efi_rci2_sysfs_init(void)
{
struct kobject *tables_kobj;
int ret = -ENOMEM;
if (rci2_table_phys == EFI_INVALID_TABLE_ADDR)
return 0;
rci2_base = memremap(rci2_table_phys,
sizeof(struct rci2_table_global_hdr),
MEMREMAP_WB);
if (!rci2_base) {
pr_debug("RCI2 table init failed - could not map RCI2 table\n");
goto err;
}
if (strncmp(rci2_base +
offsetof(struct rci2_table_global_hdr, rci2_sig),
RCI_SIGNATURE, 4)) {
pr_debug("RCI2 table init failed - incorrect signature\n");
ret = -ENODEV;
goto err_unmap;
}
rci2_table_len = *(u32 *)(rci2_base +
offsetof(struct rci2_table_global_hdr,
rci2_len));
memunmap(rci2_base);
if (!rci2_table_len) {
pr_debug("RCI2 table init failed - incorrect table length\n");
goto err;
}
rci2_base = memremap(rci2_table_phys, rci2_table_len, MEMREMAP_WB);
if (!rci2_base) {
pr_debug("RCI2 table - could not map RCI2 table\n");
goto err;
}
if (checksum() != 0) {
pr_debug("RCI2 table - incorrect checksum\n");
ret = -ENODEV;
goto err_unmap;
}
tables_kobj = kobject_create_and_add("tables", efi_kobj);
if (!tables_kobj) {
pr_debug("RCI2 table - tables_kobj creation failed\n");
goto err_unmap;
}
bin_attr_rci2.size = rci2_table_len;
bin_attr_rci2.private = rci2_base;
ret = sysfs_create_bin_file(tables_kobj, &bin_attr_rci2);
if (ret != 0) {
pr_debug("RCI2 table - rci2 sysfs bin file creation failed\n");
kobject_del(tables_kobj);
kobject_put(tables_kobj);
goto err_unmap;
}
return 0;
err_unmap:
memunmap(rci2_base);
err:
pr_debug("RCI2 table - sysfs initialization failed\n");
return ret;
}
late_initcall(efi_rci2_sysfs_init);
| linux-master | drivers/firmware/efi/rci2-table.c |
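The checksum() helper above treats the whole RCI2 table, including the stored rci2_chksum field, as a sequence of 16-bit words whose sum must be zero, padding an odd trailing byte with a zero high byte. A standalone sketch of the same word-sum check over an arbitrary buffer is shown below; rci2_wordsum() is a hypothetical helper, not part of the driver, and it assumes the table is little-endian, as on the x86 systems that expose it.

static u16 rci2_wordsum(const u8 *buf, u32 len)
{
	u16 sum = 0;
	u32 i;

	/* Sum complete 16-bit little-endian words. */
	for (i = 0; i + 1 < len; i += 2)
		sum += buf[i] | (buf[i + 1] << 8);

	/* An odd trailing byte is treated as a word with a zero high byte. */
	if (len & 1)
		sum += buf[len - 1];

	/* A valid table sums to zero once rci2_chksum is included. */
	return sum;
}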
// SPDX-License-Identifier: GPL-2.0
/*
* Support for extracting embedded firmware for peripherals from EFI code,
*
* Copyright (c) 2018 Hans de Goede <[email protected]>
*/
#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/efi_embedded_fw.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <crypto/sha2.h>
/* Exported for use by lib/test_firmware.c only */
LIST_HEAD(efi_embedded_fw_list);
EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_list, TEST_FIRMWARE);
bool efi_embedded_fw_checked;
EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_checked, TEST_FIRMWARE);
static const struct dmi_system_id * const embedded_fw_table[] = {
#ifdef CONFIG_TOUCHSCREEN_DMI
touchscreen_dmi_table,
#endif
NULL
};
/*
 * Note the efi_check_for_embedded_firmwares() code currently makes the
 * following two assumptions. This may need to be revisited if embedded
 * firmware is found for which this does not hold:
* 1) The firmware is only found in EFI_BOOT_SERVICES_CODE memory segments
* 2) The firmware always starts at an offset which is a multiple of 8 bytes
*/
static int __init efi_check_md_for_embedded_firmware(
efi_memory_desc_t *md, const struct efi_embedded_fw_desc *desc)
{
struct efi_embedded_fw *fw;
u8 hash[32];
u64 i, size;
u8 *map;
size = md->num_pages << EFI_PAGE_SHIFT;
map = memremap(md->phys_addr, size, MEMREMAP_WB);
if (!map) {
pr_err("Error mapping EFI mem at %#llx\n", md->phys_addr);
return -ENOMEM;
}
for (i = 0; (i + desc->length) <= size; i += 8) {
if (memcmp(map + i, desc->prefix, EFI_EMBEDDED_FW_PREFIX_LEN))
continue;
sha256(map + i, desc->length, hash);
if (memcmp(hash, desc->sha256, 32) == 0)
break;
}
if ((i + desc->length) > size) {
memunmap(map);
return -ENOENT;
}
pr_info("Found EFI embedded fw '%s'\n", desc->name);
fw = kmalloc(sizeof(*fw), GFP_KERNEL);
if (!fw) {
memunmap(map);
return -ENOMEM;
}
fw->data = kmemdup(map + i, desc->length, GFP_KERNEL);
memunmap(map);
if (!fw->data) {
kfree(fw);
return -ENOMEM;
}
fw->name = desc->name;
fw->length = desc->length;
list_add(&fw->list, &efi_embedded_fw_list);
return 0;
}
void __init efi_check_for_embedded_firmwares(void)
{
const struct efi_embedded_fw_desc *fw_desc;
const struct dmi_system_id *dmi_id;
efi_memory_desc_t *md;
int i, r;
for (i = 0; embedded_fw_table[i]; i++) {
dmi_id = dmi_first_match(embedded_fw_table[i]);
if (!dmi_id)
continue;
fw_desc = dmi_id->driver_data;
/*
 * In some drivers the driver_data struct may contain other
 * driver-specific data after the fw_desc struct, and the fw_desc
 * struct itself may be empty; skip those entries.
*/
if (!fw_desc->name)
continue;
for_each_efi_memory_desc(md) {
if (md->type != EFI_BOOT_SERVICES_CODE)
continue;
r = efi_check_md_for_embedded_firmware(md, fw_desc);
if (r == 0)
break;
}
}
efi_embedded_fw_checked = true;
}
int efi_get_embedded_fw(const char *name, const u8 **data, size_t *size)
{
struct efi_embedded_fw *iter, *fw = NULL;
if (!efi_embedded_fw_checked) {
pr_warn("Warning %s called while we did not check for embedded fw\n",
__func__);
return -ENOENT;
}
list_for_each_entry(iter, &efi_embedded_fw_list, list) {
if (strcmp(name, iter->name) == 0) {
fw = iter;
break;
}
}
if (!fw)
return -ENOENT;
*data = fw->data;
*size = fw->length;
return 0;
}
EXPORT_SYMBOL_GPL(efi_get_embedded_fw);
| linux-master | drivers/firmware/efi/embedded-firmware.c |
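efi_get_embedded_fw() hands back a pointer into the kernel's own copy of the extracted firmware, so callers must not free or modify the returned data. The sketch below shows a minimal, hypothetical consumer; the firmware name is a placeholder, and in-tree users typically reach this code through the firmware loader's platform fallback rather than calling it directly.

static int example_load_embedded_fw(void)
{
	const u8 *data;
	size_t size;
	int ret;

	/* Only valid after efi_check_for_embedded_firmwares() has run. */
	ret = efi_get_embedded_fw("example-touchscreen-fw", &data, &size);
	if (ret) {
		pr_info("no embedded firmware found: %d\n", ret);
		return ret;
	}

	pr_info("embedded firmware found, %zu bytes\n", size);
	/* data points at the kernel's copy; do not kfree() or modify it. */
	return 0;
}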