python_code
stringlengths 0
1.8M
| repo_name
stringclasses 7
values | file_path
stringlengths 5
99
|
---|---|---|
// SPDX-License-Identifier: GPL-2.0-only
/*
* isst_tpmi.c: SST TPMI interface
*
* Copyright (c) 2023, Intel Corporation.
* All Rights Reserved.
*
*/
#include <linux/auxiliary_bus.h>
#include <linux/module.h>
#include <linux/intel_tpmi.h>
#include "isst_tpmi_core.h"
/*
 * Probe: bring up the shared SST-TPMI core, then register this auxiliary
 * device with it.  If registration fails, the core init done just above
 * is unwound via tpmi_sst_exit().
 */
static int intel_sst_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id)
{
	int ret;

	ret = tpmi_sst_init();
	if (ret)
		return ret;

	ret = tpmi_sst_dev_add(auxdev);
	if (ret)
		tpmi_sst_exit();

	return ret;
}

/* Remove: unregister the device, then drop the SST core (reverse of probe). */
static void intel_sst_remove(struct auxiliary_device *auxdev)
{
	tpmi_sst_dev_remove(auxdev);
	tpmi_sst_exit();
}

/* System-sleep suspend hook for the SST device. */
static int intel_sst_suspend(struct device *dev)
{
	tpmi_sst_dev_suspend(to_auxiliary_dev(dev));
	return 0;
}

/* System-sleep resume hook for the SST device. */
static int intel_sst_resume(struct device *dev)
{
	tpmi_sst_dev_resume(to_auxiliary_dev(dev));
	return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(intel_sst_pm, intel_sst_suspend, intel_sst_resume);
static const struct auxiliary_device_id intel_sst_id_table[] = {
{ .name = "intel_vsec.tpmi-sst" },
{}
};
MODULE_DEVICE_TABLE(auxiliary, intel_sst_id_table);
static struct auxiliary_driver intel_sst_aux_driver = {
.id_table = intel_sst_id_table,
.remove = intel_sst_remove,
.probe = intel_sst_probe,
.driver = {
.pm = pm_sleep_ptr(&intel_sst_pm),
},
};
module_auxiliary_driver(intel_sst_aux_driver);
MODULE_IMPORT_NS(INTEL_TPMI_SST);
MODULE_DESCRIPTION("Intel TPMI SST Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/x86/intel/speed_select_if/isst_tpmi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* hp_accel.c - Interface between LIS3LV02DL driver and HP ACPI BIOS
*
* Copyright (C) 2007-2008 Yan Burman
* Copyright (C) 2008 Eric Piel
* Copyright (C) 2008-2009 Pavel Machek
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/freezer.h>
#include <linux/uaccess.h>
#include <linux/leds.h>
#include <linux/atomic.h>
#include <linux/acpi.h>
#include <linux/i8042.h>
#include <linux/serio.h>
#include "../../../misc/lis3lv02d/lis3lv02d.h"
/* Delayed LEDs infrastructure ------------------------------------ */
/* Special LED class that can defer work */
struct delayed_led_classdev {
	struct led_classdev led_classdev;	/* standard LED class device */
	struct work_struct work;		/* defers the hardware update to process context */
	enum led_brightness new_brightness;	/* value the worker will apply */
	unsigned int led; /* For driver */
	/* actually writes the brightness to the hardware (may block) —
	 * presumably why the update is deferred; TODO confirm */
	void (*set_brightness)(struct delayed_led_classdev *data, enum led_brightness value);
};
/* Workqueue callback: apply the brightness value recorded earlier. */
static inline void delayed_set_status_worker(struct work_struct *work)
{
	struct delayed_led_classdev *ldev;

	ldev = container_of(work, struct delayed_led_classdev, work);
	ldev->set_brightness(ldev, ldev->new_brightness);
}

/* led_classdev brightness_set hook: record the requested value and punt
 * the actual hardware access to the workqueue. */
static inline void delayed_sysfs_set(struct led_classdev *led_cdev,
				     enum led_brightness brightness)
{
	struct delayed_led_classdev *ldev;

	ldev = container_of(led_cdev, struct delayed_led_classdev, led_classdev);
	ldev->new_brightness = brightness;
	schedule_work(&ldev->work);
}
/* HP-specific accelerometer driver ------------------------------------ */
/* e0 25, e0 26, e0 27, e0 28 are scan codes that the accelerometer with acpi id
* HPQ6000 sends through the keyboard bus */
#define ACCEL_1 0x25
#define ACCEL_2 0x26
#define ACCEL_3 0x27
#define ACCEL_4 0x28
/* For automatic insertion of the module */
static const struct acpi_device_id lis3lv02d_device_ids[] = {
{"HPQ0004", 0}, /* HP Mobile Data Protection System PNP */
{"HPQ6000", 0}, /* HP Mobile Data Protection System PNP */
{"HPQ6007", 0}, /* HP Mobile Data Protection System PNP */
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, lis3lv02d_device_ids);
/**
 * lis3lv02d_acpi_init - initialize the device for ACPI
 * @lis3: pointer to the device struct
 *
 * No bus-specific initialization is performed here; this is a no-op
 * hook required by the lis3lv02d core.
 *
 * Returns 0 on success.
 */
static int lis3lv02d_acpi_init(struct lis3lv02d *lis3)
{
	return 0;
}
/**
 * lis3lv02d_acpi_read - ACPI ALRD method: read a register
 * @lis3: pointer to the device struct
 * @reg: the register to read
 * @ret: result of the operation
 *
 * Evaluates the ACPI "ALRD" method with @reg as its argument and stores
 * the integer result in @ret.
 *
 * Returns 0 on success, -EINVAL if the ACPI evaluation fails.
 */
static int lis3lv02d_acpi_read(struct lis3lv02d *lis3, int reg, u8 *ret)
{
	struct acpi_device *adev = lis3->bus_priv;
	union acpi_object reg_arg = { ACPI_TYPE_INTEGER };
	struct acpi_object_list params = { 1, &reg_arg };
	unsigned long long value;
	acpi_status status;

	reg_arg.integer.value = reg;

	status = acpi_evaluate_integer(adev->handle, "ALRD", &params, &value);
	if (ACPI_FAILURE(status))
		return -EINVAL;

	*ret = value;
	return 0;
}
/**
 * lis3lv02d_acpi_write - ACPI ALWR method: write to a register
 * @lis3: pointer to the device struct
 * @reg: the register to write to
 * @val: the value to write
 *
 * Evaluates the ACPI "ALWR" method with (@reg, @val) as arguments.
 *
 * Returns 0 on success, -EINVAL if the ACPI evaluation fails.
 */
static int lis3lv02d_acpi_write(struct lis3lv02d *lis3, int reg, u8 val)
{
	struct acpi_device *dev = lis3->bus_priv;
	unsigned long long ret; /* Not used when writing */
	union acpi_object in_obj[2];
	struct acpi_object_list args = { 2, in_obj };
	acpi_status status;

	in_obj[0].type = ACPI_TYPE_INTEGER;
	in_obj[0].integer.value = reg;
	in_obj[1].type = ACPI_TYPE_INTEGER;
	in_obj[1].integer.value = val;

	/*
	 * Use ACPI_FAILURE() like the read path does; comparing against
	 * AE_OK would misclassify non-error status codes as failures.
	 */
	status = acpi_evaluate_integer(dev->handle, "ALWR", &args, &ret);
	if (ACPI_FAILURE(status))
		return -EINVAL;

	return 0;
}
/* DMI match callback: adopt the axis-conversion table attached to the
 * matched entry.  Returns 1 (non-zero) so dmi_check_system() reports
 * the match. */
static int lis3lv02d_dmi_matched(const struct dmi_system_id *dmi)
{
	lis3_dev.ac = *((union axis_conversion *)dmi->driver_data);
	pr_info("hardware type %s found\n", dmi->ident);
	return 1;
}
/* Represents, for each axis seen by userspace, the corresponding hw axis (+1).
* If the value is negative, the opposite of the hw value is used. */
#define DEFINE_CONV(name, x, y, z) \
static union axis_conversion lis3lv02d_axis_##name = \
{ .as_array = { x, y, z } }
DEFINE_CONV(normal, 1, 2, 3);
DEFINE_CONV(y_inverted, 1, -2, 3);
DEFINE_CONV(x_inverted, -1, 2, 3);
DEFINE_CONV(x_inverted_usd, -1, 2, -3);
DEFINE_CONV(z_inverted, 1, 2, -3);
DEFINE_CONV(xy_swap, 2, 1, 3);
DEFINE_CONV(xy_rotated_left, -2, 1, 3);
DEFINE_CONV(xy_rotated_left_usd, -2, 1, -3);
DEFINE_CONV(xy_swap_inverted, -2, -1, 3);
DEFINE_CONV(xy_rotated_right, 2, -1, 3);
DEFINE_CONV(xy_swap_yz_inverted, 2, -1, -3);
#define AXIS_DMI_MATCH(_ident, _name, _axis) { \
.ident = _ident, \
.callback = lis3lv02d_dmi_matched, \
.matches = { \
DMI_MATCH(DMI_PRODUCT_NAME, _name) \
}, \
.driver_data = &lis3lv02d_axis_##_axis \
}
#define AXIS_DMI_MATCH2(_ident, _class1, _name1, \
_class2, _name2, \
_axis) { \
.ident = _ident, \
.callback = lis3lv02d_dmi_matched, \
.matches = { \
DMI_MATCH(DMI_##_class1, _name1), \
DMI_MATCH(DMI_##_class2, _name2), \
}, \
.driver_data = &lis3lv02d_axis_##_axis \
}
static const struct dmi_system_id lis3lv02d_dmi_ids[] = {
/* product names are truncated to match all kinds of a same model */
AXIS_DMI_MATCH("NC64x0", "HP Compaq nc64", x_inverted),
AXIS_DMI_MATCH("NC84x0", "HP Compaq nc84", z_inverted),
AXIS_DMI_MATCH("NX9420", "HP Compaq nx9420", x_inverted),
AXIS_DMI_MATCH("NW9440", "HP Compaq nw9440", x_inverted),
AXIS_DMI_MATCH("NC2510", "HP Compaq 2510", y_inverted),
AXIS_DMI_MATCH("NC2710", "HP Compaq 2710", xy_swap),
AXIS_DMI_MATCH("NC8510", "HP Compaq 8510", xy_swap_inverted),
AXIS_DMI_MATCH("HP2133", "HP 2133", xy_rotated_left),
AXIS_DMI_MATCH("HP2140", "HP 2140", xy_swap_inverted),
AXIS_DMI_MATCH("NC653x", "HP Compaq 653", xy_rotated_left_usd),
AXIS_DMI_MATCH("NC6730b", "HP Compaq 6730b", xy_rotated_left_usd),
AXIS_DMI_MATCH("NC6730s", "HP Compaq 6730s", xy_swap),
AXIS_DMI_MATCH("NC651xx", "HP Compaq 651", xy_rotated_right),
AXIS_DMI_MATCH("NC6710x", "HP Compaq 6710", xy_swap_yz_inverted),
AXIS_DMI_MATCH("NC6715x", "HP Compaq 6715", y_inverted),
AXIS_DMI_MATCH("NC693xx", "HP EliteBook 693", xy_rotated_right),
AXIS_DMI_MATCH("NC693xx", "HP EliteBook 853", xy_swap),
AXIS_DMI_MATCH("NC854xx", "HP EliteBook 854", y_inverted),
AXIS_DMI_MATCH("NC273xx", "HP EliteBook 273", y_inverted),
/* Intel-based HP Pavilion dv5 */
AXIS_DMI_MATCH2("HPDV5_I",
PRODUCT_NAME, "HP Pavilion dv5",
BOARD_NAME, "3603",
x_inverted),
/* AMD-based HP Pavilion dv5 */
AXIS_DMI_MATCH2("HPDV5_A",
PRODUCT_NAME, "HP Pavilion dv5",
BOARD_NAME, "3600",
y_inverted),
AXIS_DMI_MATCH("DV7", "HP Pavilion dv7", x_inverted),
AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted),
AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted),
AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left),
AXIS_DMI_MATCH("HPB440G3", "HP ProBook 440 G3", x_inverted_usd),
AXIS_DMI_MATCH("HPB440G4", "HP ProBook 440 G4", x_inverted),
AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
AXIS_DMI_MATCH("HPB450G0", "HP ProBook 450 G0", x_inverted),
AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
AXIS_DMI_MATCH("HPB532x", "HP ProBook 532", y_inverted),
AXIS_DMI_MATCH("HPB655x", "HP ProBook 655", xy_swap_inverted),
AXIS_DMI_MATCH("Mini510x", "HP Mini 510", xy_rotated_left_usd),
AXIS_DMI_MATCH("HPB63xx", "HP ProBook 63", xy_swap),
AXIS_DMI_MATCH("HPB64xx", "HP ProBook 64", xy_swap),
AXIS_DMI_MATCH("HPB64xx", "HP EliteBook 84", xy_swap),
AXIS_DMI_MATCH("HPB65xx", "HP ProBook 65", x_inverted),
AXIS_DMI_MATCH("HPZBook15", "HP ZBook 15", x_inverted),
AXIS_DMI_MATCH("HPZBook17G5", "HP ZBook 17 G5", x_inverted),
AXIS_DMI_MATCH("HPZBook17", "HP ZBook 17", xy_swap_yz_inverted),
{ NULL, }
/* Laptop models without axis info (yet):
* "NC6910" "HP Compaq 6910"
* "NC2400" "HP Compaq nc2400"
* "NX74x0" "HP Compaq nx74"
* "NX6325" "HP Compaq nx6325"
* "NC4400" "HP Compaq nc4400"
*/
};
/* Switch the disk-protection LED via the ACPI "ALED" method.
 * Best effort: the evaluation status is ignored; a failure simply
 * leaves the LED state unchanged. */
static void hpled_set(struct delayed_led_classdev *led_cdev, enum led_brightness value)
{
	struct acpi_device *dev = lis3_dev.bus_priv;
	unsigned long long ret; /* Not used when writing */
	union acpi_object in_obj[1];
	struct acpi_object_list args = { 1, in_obj };

	in_obj[0].type = ACPI_TYPE_INTEGER;
	in_obj[0].integer.value = !!value;	/* LED is on/off only */

	acpi_evaluate_integer(dev->handle, "ALED", &args, &ret);
}
static struct delayed_led_classdev hpled_led = {
.led_classdev = {
.name = "hp::hddprotect",
.default_trigger = "none",
.brightness_set = delayed_sysfs_set,
.flags = LED_CORE_SUSPENDRESUME,
},
.set_brightness = hpled_set,
};
/*
 * i8042 filter that swallows the accelerometer scancodes the HPQ6000
 * sends over the keyboard bus (e0 25/26/27/28) so they do not reach the
 * input layer as bogus key presses.  Returns true when the byte was
 * consumed; a held 0xe0 prefix that turns out not to belong to an
 * accelerometer code is re-injected via serio_interrupt().
 */
static bool hp_accel_i8042_filter(unsigned char data, unsigned char str,
				struct serio *port)
{
	static bool extended;	/* true while a 0xe0 prefix is pending */

	/* AUX (pointing-device) traffic is not ours. */
	if (str & I8042_STR_AUXDATA)
		return false;

	if (data == 0xe0) {
		extended = true;
		return true;	/* hold the prefix until the next byte arrives */
	} else if (unlikely(extended)) {
		extended = false;

		switch (data) {
		case ACCEL_1:
		case ACCEL_2:
		case ACCEL_3:
		case ACCEL_4:
			return true;	/* accelerometer data: drop it */
		default:
			/* Not ours: replay the swallowed 0xe0 prefix. */
			serio_interrupt(port, 0xe0, 0);
			return false;
		}
	}

	return false;
}
/*
 * Bind the platform device: wire up the ACPI register accessors, pick
 * the axis mapping (custom, DMI-matched, or default), initialise the
 * lis3 core, install the keyboard filter (HPQ6000 only) and register
 * the disk-protection LED.
 */
static int lis3lv02d_probe(struct platform_device *device)
{
	int ret;

	lis3_dev.bus_priv = ACPI_COMPANION(&device->dev);
	lis3_dev.init = lis3lv02d_acpi_init;
	lis3_dev.read = lis3lv02d_acpi_read;
	lis3_dev.write = lis3lv02d_acpi_write;

	/* obtain IRQ number of our device from ACPI */
	ret = platform_get_irq_optional(device, 0);
	if (ret > 0)
		lis3_dev.irq = ret;	/* no IRQ is tolerated; core falls back — TODO confirm */

	/* If possible use a "standard" axes order */
	if (lis3_dev.ac.x && lis3_dev.ac.y && lis3_dev.ac.z) {
		pr_info("Using custom axes %d,%d,%d\n",
			lis3_dev.ac.x, lis3_dev.ac.y, lis3_dev.ac.z);
	} else if (dmi_check_system(lis3lv02d_dmi_ids) == 0) {
		/* no DMI entry matched: fall back to the identity mapping */
		pr_info("laptop model unknown, using default axes configuration\n");
		lis3_dev.ac = lis3lv02d_axis_normal;
	}

	/* call the core layer do its init */
	ret = lis3lv02d_init_device(&lis3_dev);
	if (ret)
		return ret;

	/* filter to remove HPQ6000 accelerometer data
	 * from keyboard bus stream */
	if (strstr(dev_name(&device->dev), "HPQ6000"))
		i8042_install_filter(hp_accel_i8042_filter);

	INIT_WORK(&hpled_led.work, delayed_set_status_worker);
	ret = led_classdev_register(NULL, &hpled_led.led_classdev);
	if (ret) {
		/* Unwind everything done above, mirroring lis3lv02d_remove() */
		i8042_remove_filter(hp_accel_i8042_filter);
		lis3lv02d_joystick_disable(&lis3_dev);
		lis3lv02d_poweroff(&lis3_dev);
		flush_work(&hpled_led.work);
		lis3lv02d_remove_fs(&lis3_dev);
		return ret;
	}

	return ret;
}
/* Teardown: mirror image of the resources acquired in lis3lv02d_probe(). */
static void lis3lv02d_remove(struct platform_device *device)
{
	i8042_remove_filter(hp_accel_i8042_filter);
	lis3lv02d_joystick_disable(&lis3_dev);
	lis3lv02d_poweroff(&lis3_dev);

	led_classdev_unregister(&hpled_led.led_classdev);
	flush_work(&hpled_led.work);	/* no LED work may run after unregister */
	lis3lv02d_remove_fs(&lis3_dev);
}

static int __maybe_unused lis3lv02d_suspend(struct device *dev)
{
	/* make sure the device is off when we suspend */
	lis3lv02d_poweroff(&lis3_dev);
	return 0;
}

static int __maybe_unused lis3lv02d_resume(struct device *dev)
{
	/* power the accelerometer back up after system sleep */
	lis3lv02d_poweron(&lis3_dev);
	return 0;
}
static SIMPLE_DEV_PM_OPS(hp_accel_pm, lis3lv02d_suspend, lis3lv02d_resume);
/* For the HP MDPS aka 3D Driveguard */
static struct platform_driver lis3lv02d_driver = {
.probe = lis3lv02d_probe,
.remove_new = lis3lv02d_remove,
.driver = {
.name = "hp_accel",
.pm = &hp_accel_pm,
.acpi_match_table = lis3lv02d_device_ids,
},
};
module_platform_driver(lis3lv02d_driver);
MODULE_DESCRIPTION("Glue between LIS3LV02Dx and HP ACPI BIOS and support for disk protection LED.");
MODULE_AUTHOR("Yan Burman, Eric Piel, Pavel Machek");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/x86/hp/hp_accel.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* HP Compaq TC1100 Tablet WMI Extras Driver
*
* Copyright (C) 2007 Carlos Corbacho <[email protected]>
* Copyright (C) 2004 Jamey Hicks <[email protected]>
* Copyright (C) 2001, 2002 Andy Grover <[email protected]>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/acpi.h>
#include <linux/platform_device.h>
#define GUID "C364AC71-36DB-495A-8494-B439D472A505"
#define TC1100_INSTANCE_WIRELESS 1
#define TC1100_INSTANCE_JOGDIAL 2
MODULE_AUTHOR("Jamey Hicks, Carlos Corbacho");
MODULE_DESCRIPTION("HP Compaq TC1100 Tablet WMI Extras");
MODULE_LICENSE("GPL");
MODULE_ALIAS("wmi:C364AC71-36DB-495A-8494-B439D472A505");
static struct platform_device *tc1100_device;
struct tc1100_data {
u32 wireless;
u32 jogdial;
};
#ifdef CONFIG_PM
static struct tc1100_data suspend_data;
#endif
/* --------------------------------------------------------------------------
Device Management
-------------------------------------------------------------------------- */
/*
 * get_state - read one WMI instance and decode it to a 0/1 user value
 * @out: receives the decoded state
 * @instance: TC1100_INSTANCE_WIRELESS or TC1100_INSTANCE_JOGDIAL
 *
 * Returns 0 on success, -EINVAL on a bad argument, -ENODEV when the
 * WMI query fails or the instance is unknown.
 */
static int get_state(u32 *out, u8 instance)
{
	u32 tmp;
	acpi_status status;
	struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;

	if (!out)
		return -EINVAL;

	if (instance > 2)
		return -ENODEV;

	status = wmi_query_block(GUID, instance, &result);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	obj = (union acpi_object *) result.pointer;
	if (obj && obj->type == ACPI_TYPE_INTEGER)
		tmp = obj->integer.value;
	else
		tmp = 0;	/* unexpected object type: treat as "off" */

	/*
	 * kfree(NULL) is a no-op, so free unconditionally; guarding on
	 * result.length could leak a non-NULL pointer returned with a
	 * zero length.
	 */
	kfree(result.pointer);

	switch (instance) {
	case TC1100_INSTANCE_WIRELESS:
		*out = (tmp == 3) ? 1 : 0;
		return 0;
	case TC1100_INSTANCE_JOGDIAL:
		*out = (tmp == 1) ? 0 : 1;
		return 0;
	default:
		return -ENODEV;
	}
}
/*
 * set_state - encode a 0/1 user value and write it to one WMI instance
 * @in: the boolean-ish value to apply
 * @instance: TC1100_INSTANCE_WIRELESS or TC1100_INSTANCE_JOGDIAL
 *
 * Returns 0 on success, -EINVAL on a bad argument, -ENODEV otherwise.
 */
static int set_state(u32 *in, u8 instance)
{
	struct acpi_buffer input;
	acpi_status status;
	u32 raw;

	if (!in)
		return -EINVAL;

	if (instance > 2)
		return -ENODEV;

	/* Translate the user-visible 0/1 into the firmware encoding. */
	switch (instance) {
	case TC1100_INSTANCE_WIRELESS:
		raw = *in ? 1 : 2;
		break;
	case TC1100_INSTANCE_JOGDIAL:
		raw = *in ? 0 : 1;
		break;
	default:
		return -ENODEV;
	}

	input.length = sizeof(u32);
	input.pointer = &raw;

	status = wmi_set_block(GUID, instance, &input);

	return ACPI_FAILURE(status) ? -ENODEV : 0;
}
/* --------------------------------------------------------------------------
FS Interface (/sys)
-------------------------------------------------------------------------- */
/*
 * Read/write bool sysfs macro.
 *
 * get_state()/set_state() return 0 or a negative errno — they are NOT
 * acpi_status values, so test them as plain ints rather than with the
 * ACPI_SUCCESS/ACPI_FAILURE macros.
 */
#define show_set_bool(value, instance) \
static ssize_t \
show_bool_##value(struct device *dev, struct device_attribute *attr, \
		  char *buf) \
{ \
	u32 result; \
	int err = get_state(&result, instance); \
	if (!err) \
		return sprintf(buf, "%d\n", result); \
	return sprintf(buf, "Read error\n"); \
} \
\
static ssize_t \
set_bool_##value(struct device *dev, struct device_attribute *attr, \
		 const char *buf, size_t count) \
{ \
	u32 tmp = simple_strtoul(buf, NULL, 10); \
	int err = set_state(&tmp, instance); \
	if (err) \
		return -EINVAL; \
	return count; \
} \
static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, \
		   show_bool_##value, set_bool_##value);
show_set_bool(wireless, TC1100_INSTANCE_WIRELESS);
show_set_bool(jogdial, TC1100_INSTANCE_JOGDIAL);
static struct attribute *tc1100_attributes[] = {
&dev_attr_wireless.attr,
&dev_attr_jogdial.attr,
NULL
};
static const struct attribute_group tc1100_attribute_group = {
.attrs = tc1100_attributes,
};
/* --------------------------------------------------------------------------
Driver Model
-------------------------------------------------------------------------- */
/* Create the wireless/jogdial sysfs attribute group on the device. */
static int __init tc1100_probe(struct platform_device *device)
{
	return sysfs_create_group(&device->dev.kobj, &tc1100_attribute_group);
}

/* Remove the sysfs attribute group created in tc1100_probe(). */
static void tc1100_remove(struct platform_device *device)
{
	sysfs_remove_group(&device->dev.kobj, &tc1100_attribute_group);
}
#ifdef CONFIG_PM
/* Capture both states so tc1100_resume() can restore them. */
static int tc1100_suspend(struct device *dev)
{
	int ret = get_state(&suspend_data.wireless, TC1100_INSTANCE_WIRELESS);

	if (ret)
		return ret;

	return get_state(&suspend_data.jogdial, TC1100_INSTANCE_JOGDIAL);
}

/* Re-apply the states captured by tc1100_suspend(). */
static int tc1100_resume(struct device *dev)
{
	int ret = set_state(&suspend_data.wireless, TC1100_INSTANCE_WIRELESS);

	if (ret)
		return ret;

	return set_state(&suspend_data.jogdial, TC1100_INSTANCE_JOGDIAL);
}
static const struct dev_pm_ops tc1100_pm_ops = {
.suspend = tc1100_suspend,
.resume = tc1100_resume,
.freeze = tc1100_suspend,
.restore = tc1100_resume,
};
#endif
static struct platform_driver tc1100_driver = {
.driver = {
.name = "tc1100-wmi",
#ifdef CONFIG_PM
.pm = &tc1100_pm_ops,
#endif
},
.remove_new = tc1100_remove,
};
/*
 * Module init: create and add the platform device first, then bind the
 * driver via platform_driver_probe() — the device must already exist
 * for the one-shot probe to find it.
 */
static int __init tc1100_init(void)
{
	int error;

	/* Bail out on machines without the TC1100 WMI interface. */
	if (!wmi_has_guid(GUID))
		return -ENODEV;

	tc1100_device = platform_device_alloc("tc1100-wmi", PLATFORM_DEVID_NONE);
	if (!tc1100_device)
		return -ENOMEM;

	error = platform_device_add(tc1100_device);
	if (error)
		goto err_device_put;

	error = platform_driver_probe(&tc1100_driver, tc1100_probe);
	if (error)
		goto err_device_del;

	pr_info("HP Compaq TC1100 Tablet WMI Extras loaded\n");
	return 0;

err_device_del:
	platform_device_del(tc1100_device);
err_device_put:
	platform_device_put(tc1100_device);
	return error;
}

/* Module exit: tear down the device and driver registered in tc1100_init(). */
static void __exit tc1100_exit(void)
{
	platform_device_unregister(tc1100_device);
	platform_driver_unregister(&tc1100_driver);
}
module_init(tc1100_init);
module_exit(tc1100_exit);
| linux-master | drivers/platform/x86/hp/tc1100-wmi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* HP WMI hotkeys
*
* Copyright (C) 2008 Red Hat <[email protected]>
* Copyright (C) 2010, 2011 Anssi Hannula <[email protected]>
*
* Portions based on wistron_btns.c:
* Copyright (C) 2005 Miloslav Trmac <[email protected]>
* Copyright (C) 2005 Bernhard Rosenkraenzer <[email protected]>
* Copyright (C) 2005 Dmitry Torokhov <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/platform_device.h>
#include <linux/platform_profile.h>
#include <linux/hwmon.h>
#include <linux/acpi.h>
#include <linux/rfkill.h>
#include <linux/string.h>
#include <linux/dmi.h>
MODULE_AUTHOR("Matthew Garrett <[email protected]>");
MODULE_DESCRIPTION("HP laptop WMI hotkeys driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("wmi:95F24279-4D7B-4334-9387-ACCDC67EF61C");
MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
#define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C"
#define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4"
#define HP_OMEN_EC_THERMAL_PROFILE_OFFSET 0x95
#define zero_if_sup(tmp) (zero_insize_support?0:sizeof(tmp)) // use when zero insize is required
/* DMI board names of devices that should use the omen specific path for
* thermal profiles.
* This was obtained by taking a look in the windows omen command center
* app and parsing a json file that they use to figure out what capabilities
* the device should have.
* A device is considered an omen if the DisplayName in that list contains
* "OMEN", and it can use the thermal profile stuff if the "Feature" array
* contains "PerformanceControl".
*/
static const char * const omen_thermal_profile_boards[] = {
"84DA", "84DB", "84DC", "8574", "8575", "860A", "87B5", "8572", "8573",
"8600", "8601", "8602", "8605", "8606", "8607", "8746", "8747", "8749",
"874A", "8603", "8604", "8748", "886B", "886C", "878A", "878B", "878C",
"88C8", "88CB", "8786", "8787", "8788", "88D1", "88D2", "88F4", "88FD",
"88F5", "88F6", "88F7", "88FE", "88FF", "8900", "8901", "8902", "8912",
"8917", "8918", "8949", "894A", "89EB"
};
/* DMI Board names of Omen laptops that are specifically set to be thermal
* profile version 0 by the Omen Command Center app, regardless of what
* the get system design information WMI call returns
*/
static const char *const omen_thermal_profile_force_v0_boards[] = {
"8607", "8746", "8747", "8749", "874A", "8748"
};
/* DMI Board names of Victus laptops */
static const char * const victus_thermal_profile_boards[] = {
"8A25"
};
enum hp_wmi_radio {
HPWMI_WIFI = 0x0,
HPWMI_BLUETOOTH = 0x1,
HPWMI_WWAN = 0x2,
HPWMI_GPS = 0x3,
};
enum hp_wmi_event_ids {
HPWMI_DOCK_EVENT = 0x01,
HPWMI_PARK_HDD = 0x02,
HPWMI_SMART_ADAPTER = 0x03,
HPWMI_BEZEL_BUTTON = 0x04,
HPWMI_WIRELESS = 0x05,
HPWMI_CPU_BATTERY_THROTTLE = 0x06,
HPWMI_LOCK_SWITCH = 0x07,
HPWMI_LID_SWITCH = 0x08,
HPWMI_SCREEN_ROTATION = 0x09,
HPWMI_COOLSENSE_SYSTEM_MOBILE = 0x0A,
HPWMI_COOLSENSE_SYSTEM_HOT = 0x0B,
HPWMI_PROXIMITY_SENSOR = 0x0C,
HPWMI_BACKLIT_KB_BRIGHTNESS = 0x0D,
HPWMI_PEAKSHIFT_PERIOD = 0x0F,
HPWMI_BATTERY_CHARGE_PERIOD = 0x10,
HPWMI_SANITIZATION_MODE = 0x17,
HPWMI_CAMERA_TOGGLE = 0x1A,
HPWMI_OMEN_KEY = 0x1D,
HPWMI_SMART_EXPERIENCE_APP = 0x21,
};
/*
* struct bios_args buffer is dynamically allocated. New WMI command types
* were introduced that exceeds 128-byte data size. Changes to handle
* the data size allocation scheme were kept in hp_wmi_perform_qurey function.
*/
struct bios_args {
u32 signature;
u32 command;
u32 commandtype;
u32 datasize;
u8 data[];
};
enum hp_wmi_commandtype {
HPWMI_DISPLAY_QUERY = 0x01,
HPWMI_HDDTEMP_QUERY = 0x02,
HPWMI_ALS_QUERY = 0x03,
HPWMI_HARDWARE_QUERY = 0x04,
HPWMI_WIRELESS_QUERY = 0x05,
HPWMI_BATTERY_QUERY = 0x07,
HPWMI_BIOS_QUERY = 0x09,
HPWMI_FEATURE_QUERY = 0x0b,
HPWMI_HOTKEY_QUERY = 0x0c,
HPWMI_FEATURE2_QUERY = 0x0d,
HPWMI_WIRELESS2_QUERY = 0x1b,
HPWMI_POSTCODEERROR_QUERY = 0x2a,
HPWMI_SYSTEM_DEVICE_MODE = 0x40,
HPWMI_THERMAL_PROFILE_QUERY = 0x4c,
};
enum hp_wmi_gm_commandtype {
HPWMI_FAN_SPEED_GET_QUERY = 0x11,
HPWMI_SET_PERFORMANCE_MODE = 0x1A,
HPWMI_FAN_SPEED_MAX_GET_QUERY = 0x26,
HPWMI_FAN_SPEED_MAX_SET_QUERY = 0x27,
HPWMI_GET_SYSTEM_DESIGN_DATA = 0x28,
};
enum hp_wmi_command {
HPWMI_READ = 0x01,
HPWMI_WRITE = 0x02,
HPWMI_ODM = 0x03,
HPWMI_GM = 0x20008,
};
enum hp_wmi_hardware_mask {
HPWMI_DOCK_MASK = 0x01,
HPWMI_TABLET_MASK = 0x04,
};
struct bios_return {
u32 sigpass;
u32 return_code;
};
enum hp_return_value {
HPWMI_RET_WRONG_SIGNATURE = 0x02,
HPWMI_RET_UNKNOWN_COMMAND = 0x03,
HPWMI_RET_UNKNOWN_CMDTYPE = 0x04,
HPWMI_RET_INVALID_PARAMETERS = 0x05,
};
enum hp_wireless2_bits {
HPWMI_POWER_STATE = 0x01,
HPWMI_POWER_SOFT = 0x02,
HPWMI_POWER_BIOS = 0x04,
HPWMI_POWER_HARD = 0x08,
HPWMI_POWER_FW_OR_HW = HPWMI_POWER_BIOS | HPWMI_POWER_HARD,
};
enum hp_thermal_profile_omen_v0 {
HP_OMEN_V0_THERMAL_PROFILE_DEFAULT = 0x00,
HP_OMEN_V0_THERMAL_PROFILE_PERFORMANCE = 0x01,
HP_OMEN_V0_THERMAL_PROFILE_COOL = 0x02,
};
enum hp_thermal_profile_omen_v1 {
HP_OMEN_V1_THERMAL_PROFILE_DEFAULT = 0x30,
HP_OMEN_V1_THERMAL_PROFILE_PERFORMANCE = 0x31,
HP_OMEN_V1_THERMAL_PROFILE_COOL = 0x50,
};
enum hp_thermal_profile_victus {
HP_VICTUS_THERMAL_PROFILE_DEFAULT = 0x00,
HP_VICTUS_THERMAL_PROFILE_PERFORMANCE = 0x01,
HP_VICTUS_THERMAL_PROFILE_QUIET = 0x03,
};
enum hp_thermal_profile {
HP_THERMAL_PROFILE_PERFORMANCE = 0x00,
HP_THERMAL_PROFILE_DEFAULT = 0x01,
HP_THERMAL_PROFILE_COOL = 0x02,
HP_THERMAL_PROFILE_QUIET = 0x03,
};
#define IS_HWBLOCKED(x) ((x & HPWMI_POWER_FW_OR_HW) != HPWMI_POWER_FW_OR_HW)
#define IS_SWBLOCKED(x) !(x & HPWMI_POWER_SOFT)
struct bios_rfkill2_device_state {
u8 radio_type;
u8 bus_type;
u16 vendor_id;
u16 product_id;
u16 subsys_vendor_id;
u16 subsys_product_id;
u8 rfkill_id;
u8 power;
u8 unknown[4];
};
/* 7 devices fit into the 128 byte buffer */
#define HPWMI_MAX_RFKILL2_DEVICES 7
struct bios_rfkill2_state {
u8 unknown[7];
u8 count;
u8 pad[8];
struct bios_rfkill2_device_state device[HPWMI_MAX_RFKILL2_DEVICES];
};
static const struct key_entry hp_wmi_keymap[] = {
{ KE_KEY, 0x02, { KEY_BRIGHTNESSUP } },
{ KE_KEY, 0x03, { KEY_BRIGHTNESSDOWN } },
{ KE_KEY, 0x270, { KEY_MICMUTE } },
{ KE_KEY, 0x20e6, { KEY_PROG1 } },
{ KE_KEY, 0x20e8, { KEY_MEDIA } },
{ KE_KEY, 0x2142, { KEY_MEDIA } },
{ KE_KEY, 0x213b, { KEY_INFO } },
{ KE_KEY, 0x2169, { KEY_ROTATE_DISPLAY } },
{ KE_KEY, 0x216a, { KEY_SETUP } },
{ KE_IGNORE, 0x21a4, }, /* Win Lock On */
{ KE_IGNORE, 0x121a4, }, /* Win Lock Off */
{ KE_KEY, 0x21a5, { KEY_PROG2 } }, /* HP Omen Key */
{ KE_KEY, 0x21a7, { KEY_FN_ESC } },
{ KE_KEY, 0x21a8, { KEY_PROG2 } }, /* HP Envy x360 programmable key */
{ KE_KEY, 0x21a9, { KEY_TOUCHPAD_OFF } },
{ KE_KEY, 0x121a9, { KEY_TOUCHPAD_ON } },
{ KE_KEY, 0x231b, { KEY_HELP } },
{ KE_END, 0 }
};
static struct input_dev *hp_wmi_input_dev;
static struct input_dev *camera_shutter_input_dev;
static struct platform_device *hp_wmi_platform_dev;
static struct platform_profile_handler platform_profile_handler;
static bool platform_profile_support;
static bool zero_insize_support;
static struct rfkill *wifi_rfkill;
static struct rfkill *bluetooth_rfkill;
static struct rfkill *wwan_rfkill;
struct rfkill2_device {
u8 id;
int num;
struct rfkill *rfkill;
};
static int rfkill2_count;
static struct rfkill2_device rfkill2[HPWMI_MAX_RFKILL2_DEVICES];
/*
* Chassis Types values were obtained from SMBIOS reference
* specification version 3.00. A complete list of system enclosures
* and chassis types is available on Table 17.
*/
static const char * const tablet_chassis_types[] = {
"30", /* Tablet*/
"31", /* Convertible */
"32" /* Detachable */
};
#define DEVICE_MODE_TABLET 0x06
/*
 * Map the expected output buffer size to the corresponding WMI method id.
 * Thresholds ascend: <=0 -> 1, 1..4 -> 2, 5..128 -> 3, 129..1024 -> 4,
 * 1025..4096 -> 5; anything larger is rejected with -EINVAL.
 */
static inline int encode_outsize_for_pvsz(int outsize)
{
	if (outsize > 4096)
		return -EINVAL;
	if (outsize <= 0)
		return 1;
	if (outsize <= 4)
		return 2;
	if (outsize <= 128)
		return 3;
	if (outsize <= 1024)
		return 4;
	return 5;
}
/*
 * hp_wmi_perform_query
 *
 * query:   The commandtype (enum hp_wmi_commandtype)
 * command: The command (enum hp_wmi_command)
 * buffer:  Buffer used as input and/or output
 * insize:  Size of input buffer
 * outsize: Size of output buffer
 *
 * returns zero on success
 *         an HP WMI query specific error code (which is positive)
 *         -EINVAL if the query was not successful at all
 *
 * Note: The buffersize must at least be the maximum of the input and output
 *       size. E.g. Battery info query is defined to have 1 byte input
 *       and 128 byte output. The caller would do:
 *       buffer = kzalloc(128, GFP_KERNEL);
 *       ret = hp_wmi_perform_query(HPWMI_BATTERY_QUERY, HPWMI_READ, buffer, 1, 128)
 */
static int hp_wmi_perform_query(int query, enum hp_wmi_command command,
				void *buffer, int insize, int outsize)
{
	struct acpi_buffer input, output = { ACPI_ALLOCATE_BUFFER, NULL };
	struct bios_return *bios_return;
	union acpi_object *obj = NULL;
	struct bios_args *args = NULL;
	int mid, actual_insize, actual_outsize;
	size_t bios_args_size;
	int ret;

	mid = encode_outsize_for_pvsz(outsize);
	if (WARN_ON(mid < 0))
		return mid;

	/* The interface expects at least a 128 byte input payload. */
	actual_insize = max(insize, 128);
	bios_args_size = struct_size(args, data, actual_insize);
	args = kmalloc(bios_args_size, GFP_KERNEL);
	if (!args)
		return -ENOMEM;

	input.length = bios_args_size;
	input.pointer = args;

	args->signature = 0x55434553;	/* bytes spell "SECU" in memory */
	args->command = command;
	args->commandtype = query;
	args->datasize = insize;
	memcpy(args->data, buffer, flex_array_size(args, data, insize));

	ret = wmi_evaluate_method(HPWMI_BIOS_GUID, 0, mid, &input, &output);
	if (ret)
		goto out_free;

	obj = output.pointer;
	if (!obj) {
		ret = -EINVAL;
		goto out_free;
	}

	if (obj->type != ACPI_TYPE_BUFFER) {
		/*
		 * Report the object type that was actually returned;
		 * 'ret' is always 0 on this path (wmi_evaluate_method
		 * succeeded), so printing it carried no information.
		 */
		pr_warn("query 0x%x returned an invalid object 0x%x\n", query, obj->type);
		ret = -EINVAL;
		goto out_free;
	}

	bios_return = (struct bios_return *)obj->buffer.pointer;
	ret = bios_return->return_code;

	if (ret) {
		if (ret != HPWMI_RET_UNKNOWN_COMMAND &&
		    ret != HPWMI_RET_UNKNOWN_CMDTYPE)
			pr_warn("query 0x%x returned error 0x%x\n", query, ret);
		goto out_free;
	}

	/* Ignore output data of zero size */
	if (!outsize)
		goto out_free;

	/* Copy what the firmware produced and zero-fill any remainder. */
	actual_outsize = min(outsize, (int)(obj->buffer.length - sizeof(*bios_return)));
	memcpy(buffer, obj->buffer.pointer + sizeof(*bios_return), actual_outsize);
	memset(buffer + actual_outsize, 0, outsize - actual_outsize);

out_free:
	kfree(obj);
	kfree(args);
	return ret;
}
/* Read one fan's speed via the gaming (GM) WMI interface.
 * Returns the 16-bit value assembled from bytes 2 (high) and 3 (low)
 * of the reply, or -EINVAL on failure. */
static int hp_wmi_get_fan_speed(int fan)
{
	u8 fsh, fsl;
	char fan_data[4] = { fan, 0, 0, 0 };	/* byte 0 selects the fan */

	int ret = hp_wmi_perform_query(HPWMI_FAN_SPEED_GET_QUERY, HPWMI_GM,
				       &fan_data, sizeof(char),
				       sizeof(fan_data));

	if (ret != 0)
		return -EINVAL;

	fsh = fan_data[2];	/* high byte */
	fsl = fan_data[3];	/* low byte */

	return (fsh << 8) | fsl;
}

/* Read-query helper for commands returning a single int.
 * Returns the value, or a negative errno (positive firmware error codes
 * are collapsed to -EINVAL). */
static int hp_wmi_read_int(int query)
{
	int val = 0, ret;

	ret = hp_wmi_perform_query(query, HPWMI_READ, &val,
				   zero_if_sup(val), sizeof(val));

	if (ret)
		return ret < 0 ? ret : -EINVAL;

	return val;
}
/* Docking-station presence: 1 docked, 0 not docked, negative errno on error. */
static int hp_wmi_get_dock_state(void)
{
	int state = hp_wmi_read_int(HPWMI_HARDWARE_QUERY);

	if (state < 0)
		return state;

	return !!(state & HPWMI_DOCK_MASK);
}

/* Tablet-mode query: only chassis types that can be tablets (SMBIOS
 * types 30/31/32) are asked; returns 1 in tablet mode, 0 otherwise,
 * -ENODEV when the chassis cannot be a tablet. */
static int hp_wmi_get_tablet_mode(void)
{
	char system_device_mode[4] = { 0 };
	const char *chassis_type;
	bool tablet_found;
	int ret;

	chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
	if (!chassis_type)
		return -ENODEV;

	tablet_found = match_string(tablet_chassis_types,
				    ARRAY_SIZE(tablet_chassis_types),
				    chassis_type) >= 0;
	if (!tablet_found)
		return -ENODEV;

	ret = hp_wmi_perform_query(HPWMI_SYSTEM_DEVICE_MODE, HPWMI_READ,
				   system_device_mode, zero_if_sup(system_device_mode),
				   sizeof(system_device_mode));
	if (ret < 0)
		return ret;

	return system_device_mode[0] == DEVICE_MODE_TABLET;
}
/* Select an Omen thermal profile via the GM performance-mode command.
 * Returns the mode that was written, or a negative errno. */
static int omen_thermal_profile_set(int mode)
{
	char buffer[2] = {0, mode};	/* byte 1 carries the requested mode */
	int ret;

	ret = hp_wmi_perform_query(HPWMI_SET_PERFORMANCE_MODE, HPWMI_GM,
				   &buffer, sizeof(buffer), 0);

	if (ret)
		return ret < 0 ? ret : -EINVAL;

	return mode;
}

/* True when the DMI board name appears in the Omen thermal-profile list. */
static bool is_omen_thermal_profile(void)
{
	const char *board_name = dmi_get_system_info(DMI_BOARD_NAME);

	if (!board_name)
		return false;

	return match_string(omen_thermal_profile_boards,
			    ARRAY_SIZE(omen_thermal_profile_boards),
			    board_name) >= 0;
}

/* Thermal-policy version: forced to 0 for boards on the v0 list,
 * otherwise byte 3 of the system design data reply. */
static int omen_get_thermal_policy_version(void)
{
	unsigned char buffer[8] = { 0 };
	int ret;

	const char *board_name = dmi_get_system_info(DMI_BOARD_NAME);

	if (board_name) {
		int matches = match_string(omen_thermal_profile_force_v0_boards,
					   ARRAY_SIZE(omen_thermal_profile_force_v0_boards),
					   board_name);
		if (matches >= 0)
			return 0;
	}

	ret = hp_wmi_perform_query(HPWMI_GET_SYSTEM_DESIGN_DATA, HPWMI_GM,
				   &buffer, sizeof(buffer), sizeof(buffer));

	if (ret)
		return ret < 0 ? ret : -EINVAL;

	return buffer[3];
}

/* Read the current thermal profile straight from the EC register. */
static int omen_thermal_profile_get(void)
{
	u8 data;

	int ret = ec_read(HP_OMEN_EC_THERMAL_PROFILE_OFFSET, &data);

	if (ret)
		return ret;

	return data;
}
static int hp_wmi_fan_speed_max_set(int enabled)
{
int ret;
ret = hp_wmi_perform_query(HPWMI_FAN_SPEED_MAX_SET_QUERY, HPWMI_GM,
&enabled, sizeof(enabled), 0);
if (ret)
return ret < 0 ? ret : -EINVAL;
return enabled;
}
static int hp_wmi_fan_speed_max_get(void)
{
int val = 0, ret;
ret = hp_wmi_perform_query(HPWMI_FAN_SPEED_MAX_GET_QUERY, HPWMI_GM,
&val, zero_if_sup(val), sizeof(val));
if (ret)
return ret < 0 ? ret : -EINVAL;
return val;
}
/*
 * hp_wmi_bios_2008_later - probe for a 2008-or-later HP BIOS
 *
 * Returns 1 when the feature query succeeds (2008+ BIOS), 0 when the BIOS
 * explicitly rejects the command type (older BIOS), -ENXIO on any other
 * failure.
 */
static int __init hp_wmi_bios_2008_later(void)
{
	int state = 0;
	int ret = hp_wmi_perform_query(HPWMI_FEATURE_QUERY, HPWMI_READ, &state,
				       zero_if_sup(state), sizeof(state));
	if (!ret)
		return 1;

	return (ret == HPWMI_RET_UNKNOWN_CMDTYPE) ? 0 : -ENXIO;
}
/*
 * hp_wmi_bios_2009_later - probe for a 2009-or-later HP BIOS
 *
 * Same convention as hp_wmi_bios_2008_later(): 1 when the FEATURE2 query
 * succeeds, 0 when the command type is unknown to the BIOS, -ENXIO
 * otherwise.  The 128-byte buffer is sized for the FEATURE2 response.
 */
static int __init hp_wmi_bios_2009_later(void)
{
	u8 state[128];
	int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, HPWMI_READ, &state,
				       zero_if_sup(state), sizeof(state));
	if (!ret)
		return 1;

	return (ret == HPWMI_RET_UNKNOWN_CMDTYPE) ? 0 : -ENXIO;
}
/*
 * hp_wmi_enable_hotkeys - ask the BIOS to deliver hotkey events
 *
 * Writes the magic value 0x6e to the BIOS query interface; presumably a
 * firmware-defined "enable hotkey reporting" token - TODO confirm against
 * HP's WMI documentation.  Returns 0/negative errno, or -EINVAL on a
 * positive BIOS status code.
 */
static int __init hp_wmi_enable_hotkeys(void)
{
	int value = 0x6e;
	int ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, HPWMI_WRITE, &value,
				       sizeof(value), 0);

	return ret <= 0 ? ret : -EINVAL;
}
/*
 * hp_wmi_set_block - rfkill set_block callback for the legacy wireless query
 * @data: the enum hp_wmi_radio value, smuggled through the void pointer
 * @blocked: requested rfkill soft-block state
 *
 * The write word selects the radio with bit (r + 8) and sets its desired
 * on/off state in bit r (on == !blocked).
 */
static int hp_wmi_set_block(void *data, bool blocked)
{
	enum hp_wmi_radio r = (long)data;
	int query = BIT(r + 8) | ((!blocked) << r);
	int ret;

	ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, HPWMI_WRITE,
				   &query, sizeof(query), 0);

	return ret <= 0 ? ret : -EINVAL;
}
/* rfkill operations for the legacy (pre-rfkill2) wireless interface */
static const struct rfkill_ops hp_wmi_rfkill_ops = {
	.set_block = hp_wmi_set_block,
};
/*
 * hp_wmi_get_sw_state - soft-block state of radio @r
 *
 * Reads the wireless status word and tests the per-radio bit at
 * 0x200 << (r * 8); a set bit presumably means "radio enabled", so the
 * soft-blocked state is its inverse - TODO confirm bit semantics.
 * NOTE(review): for r == HPWMI_GPS the shift would reach bit 33 of a
 * 32-bit int; current callers only pass WIFI/BLUETOOTH/WWAN.
 */
static bool hp_wmi_get_sw_state(enum hp_wmi_radio r)
{
	int mask = 0x200 << (r * 8);

	int wireless = hp_wmi_read_int(HPWMI_WIRELESS_QUERY);

	/* TBD: Pass error */
	WARN_ONCE(wireless < 0, "error executing HPWMI_WIRELESS_QUERY");

	return !(wireless & mask);
}
/*
 * hp_wmi_get_hw_state - hard-block state of radio @r
 *
 * Same layout as hp_wmi_get_sw_state() but tests the hardware-switch bit
 * at 0x800 << (r * 8).  A read error is only WARNed about and then treated
 * as "blocked" because the masked value comes out zero.
 */
static bool hp_wmi_get_hw_state(enum hp_wmi_radio r)
{
	int mask = 0x800 << (r * 8);

	int wireless = hp_wmi_read_int(HPWMI_WIRELESS_QUERY);

	/* TBD: Pass error */
	WARN_ONCE(wireless < 0, "error executing HPWMI_WIRELESS_QUERY");

	return !(wireless & mask);
}
/*
 * hp_wmi_rfkill2_set_block - rfkill set_block callback for the 0x1b interface
 * @data: rfkill2[] slot index, smuggled through the void pointer
 * @blocked: requested soft-block state
 *
 * Command buffer layout: {0x01, 0x00, device-index, power-on-flag}.
 */
static int hp_wmi_rfkill2_set_block(void *data, bool blocked)
{
	int rfkill_id = (int)(long)data;
	char buffer[4] = { 0x01, 0x00, rfkill_id, !blocked };
	int ret;

	ret = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_WRITE,
				   buffer, sizeof(buffer), 0);

	return ret <= 0 ? ret : -EINVAL;
}
/* rfkill operations for the newer rfkill2 (command 0x1b) interface */
static const struct rfkill_ops hp_wmi_rfkill2_ops = {
	.set_block = hp_wmi_rfkill2_set_block,
};
/*
 * hp_wmi_rfkill2_refresh - re-read BIOS radio state and push it into rfkill
 *
 * Re-queries the rfkill2 state block and updates every registered rfkill
 * device.  Entries whose index or id no longer match what was seen at
 * setup time are skipped with a warning, since the firmware view has
 * changed underneath us.
 */
static int hp_wmi_rfkill2_refresh(void)
{
	struct bios_rfkill2_state state;
	int err, i;

	err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state,
				   zero_if_sup(state), sizeof(state));
	if (err)
		return err;

	for (i = 0; i < rfkill2_count; i++) {
		int num = rfkill2[i].num;
		struct bios_rfkill2_device_state *devstate;

		devstate = &state.device[num];

		/* device list changed since hp_wmi_rfkill2_setup() */
		if (num >= state.count ||
		    devstate->rfkill_id != rfkill2[i].id) {
			pr_warn("power configuration of the wireless devices unexpectedly changed\n");
			continue;
		}

		rfkill_set_states(rfkill2[i].rfkill,
				  IS_SWBLOCKED(devstate->power),
				  IS_HWBLOCKED(devstate->power));
	}

	return 0;
}
/* sysfs: current display state; sysfs_emit() bounds output to PAGE_SIZE
 * as required for show() callbacks. */
static ssize_t display_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	int value = hp_wmi_read_int(HPWMI_DISPLAY_QUERY);

	if (value < 0)
		return value;
	return sysfs_emit(buf, "%d\n", value);
}
/* sysfs: hard-drive temperature as reported by the BIOS; uses sysfs_emit()
 * per sysfs show() conventions. */
static ssize_t hddtemp_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	int value = hp_wmi_read_int(HPWMI_HDDTEMP_QUERY);

	if (value < 0)
		return value;
	return sysfs_emit(buf, "%d\n", value);
}
/* sysfs: ambient light sensor state; uses sysfs_emit() per sysfs show()
 * conventions. */
static ssize_t als_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	int value = hp_wmi_read_int(HPWMI_ALS_QUERY);

	if (value < 0)
		return value;
	return sysfs_emit(buf, "%d\n", value);
}
/* sysfs: docking-station state; uses sysfs_emit() per sysfs show()
 * conventions. */
static ssize_t dock_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	int value = hp_wmi_get_dock_state();

	if (value < 0)
		return value;
	return sysfs_emit(buf, "%d\n", value);
}
/* sysfs: tablet-mode state; uses sysfs_emit() per sysfs show()
 * conventions. */
static ssize_t tablet_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	int value = hp_wmi_get_tablet_mode();

	if (value < 0)
		return value;
	return sysfs_emit(buf, "%d\n", value);
}
/* sysfs: POST error code of the previous failed boot (hex); uses
 * sysfs_emit() per sysfs show() conventions. */
static ssize_t postcode_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	/* Get the POST error code of previous boot failure. */
	int value = hp_wmi_read_int(HPWMI_POSTCODEERROR_QUERY);

	if (value < 0)
		return value;
	return sysfs_emit(buf, "0x%x\n", value);
}
/*
 * als_store - sysfs write handler for the ambient light sensor control
 *
 * Parses a decimal value and writes it via HPWMI_ALS_QUERY.  Returns
 * @count on success, a negative errno on parse/transport failure, or
 * -EINVAL on a positive BIOS status code.
 */
static ssize_t als_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	u32 tmp;
	int ret;

	ret = kstrtou32(buf, 10, &tmp);
	if (ret)
		return ret;

	ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, HPWMI_WRITE, &tmp,
				   sizeof(tmp), 0);
	if (ret)
		return ret < 0 ? ret : -EINVAL;

	return count;
}
/*
 * postcode_store - sysfs write handler that clears the stored POST code
 *
 * Only a boolean "true" write is accepted; the POST code can only be
 * cleared, never set.  Returns @count on success.
 */
static ssize_t postcode_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	u32 tmp = 1;
	bool clear;
	int ret;

	ret = kstrtobool(buf, &clear);
	if (ret)
		return ret;

	/* idiomatic boolean test instead of "clear == false" */
	if (!clear)
		return -EINVAL;

	/* Clear the POST error code. It is kept until cleared. */
	ret = hp_wmi_perform_query(HPWMI_POSTCODEERROR_QUERY, HPWMI_WRITE, &tmp,
				   sizeof(tmp), 0);
	if (ret)
		return ret < 0 ? ret : -EINVAL;

	return count;
}
/*
 * camera_shutter_input_setup - lazily create the camera shutter input device
 *
 * Registers an input device exposing SW_CAMERA_LENS_COVER.  Called on the
 * first HPWMI_CAMERA_TOGGLE event rather than at probe time.  On failure
 * the global pointer is reset to NULL so a later event can retry.
 */
static int camera_shutter_input_setup(void)
{
	int err;

	camera_shutter_input_dev = input_allocate_device();
	if (!camera_shutter_input_dev)
		return -ENOMEM;

	camera_shutter_input_dev->name = "HP WMI camera shutter";
	camera_shutter_input_dev->phys = "wmi/input1";
	camera_shutter_input_dev->id.bustype = BUS_HOST;

	__set_bit(EV_SW, camera_shutter_input_dev->evbit);
	__set_bit(SW_CAMERA_LENS_COVER, camera_shutter_input_dev->swbit);

	err = input_register_device(camera_shutter_input_dev);
	if (err)
		goto err_free_dev;

	return 0;

 err_free_dev:
	input_free_device(camera_shutter_input_dev);
	camera_shutter_input_dev = NULL;
	return err;
}
/* sysfs attributes exposed on the hp-wmi platform device */
static DEVICE_ATTR_RO(display);
static DEVICE_ATTR_RO(hddtemp);
static DEVICE_ATTR_RW(als);
static DEVICE_ATTR_RO(dock);
static DEVICE_ATTR_RO(tablet);
static DEVICE_ATTR_RW(postcode);

static struct attribute *hp_wmi_attrs[] = {
	&dev_attr_display.attr,
	&dev_attr_hddtemp.attr,
	&dev_attr_als.attr,
	&dev_attr_dock.attr,
	&dev_attr_tablet.attr,
	&dev_attr_postcode.attr,
	NULL,
};
/* generates hp_wmi_groups for platform_driver.driver.dev_groups */
ATTRIBUTE_GROUPS(hp_wmi);
/*
 * hp_wmi_notify - dispatch an HP WMI event
 * @value: event cookie passed by the WMI core
 * @context: unused
 *
 * Decodes the _WED buffer (8 or 16 bytes depending on ACPI version) into
 * an event id/data pair and forwards it to the input, rfkill or camera
 * shutter handling.  Events the driver knows about but does not act on
 * are consumed silently; unknown ids are logged.
 *
 * Fix: the HPWMI_OMEN_KEY path now bails out when the hotkey query fails
 * instead of passing a negative errno to sparse_keymap_report_event() as
 * a key code (mirrors the existing HPWMI_BEZEL_BUTTON handling).
 */
static void hp_wmi_notify(u32 value, void *context)
{
	struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
	u32 event_id, event_data;
	union acpi_object *obj;
	acpi_status status;
	u32 *location;
	int key_code;

	status = wmi_get_event_data(value, &response);
	if (status != AE_OK) {
		pr_info("bad event status 0x%x\n", status);
		return;
	}

	obj = (union acpi_object *)response.pointer;
	if (!obj)
		return;
	if (obj->type != ACPI_TYPE_BUFFER) {
		pr_info("Unknown response received %d\n", obj->type);
		kfree(obj);
		return;
	}

	/*
	 * Depending on ACPI version the concatenation of id and event data
	 * inside _WED function will result in a 8 or 16 byte buffer.
	 */
	location = (u32 *)obj->buffer.pointer;
	if (obj->buffer.length == 8) {
		event_id = *location;
		event_data = *(location + 1);
	} else if (obj->buffer.length == 16) {
		event_id = *location;
		event_data = *(location + 2);
	} else {
		pr_info("Unknown buffer length %d\n", obj->buffer.length);
		kfree(obj);
		return;
	}
	kfree(obj);

	switch (event_id) {
	case HPWMI_DOCK_EVENT:
		if (test_bit(SW_DOCK, hp_wmi_input_dev->swbit))
			input_report_switch(hp_wmi_input_dev, SW_DOCK,
					    hp_wmi_get_dock_state());
		if (test_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit))
			input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
					    hp_wmi_get_tablet_mode());
		input_sync(hp_wmi_input_dev);
		break;
	case HPWMI_PARK_HDD:
		break;
	case HPWMI_SMART_ADAPTER:
		break;
	case HPWMI_BEZEL_BUTTON:
		key_code = hp_wmi_read_int(HPWMI_HOTKEY_QUERY);
		if (key_code < 0)
			break;

		if (!sparse_keymap_report_event(hp_wmi_input_dev,
						key_code, 1, true))
			pr_info("Unknown key code - 0x%x\n", key_code);
		break;
	case HPWMI_OMEN_KEY:
		if (event_data) /* Only should be true for HP Omen */
			key_code = event_data;
		else
			key_code = hp_wmi_read_int(HPWMI_HOTKEY_QUERY);

		/* do not report a failed query result as a key code */
		if (key_code < 0)
			break;

		if (!sparse_keymap_report_event(hp_wmi_input_dev,
						key_code, 1, true))
			pr_info("Unknown key code - 0x%x\n", key_code);
		break;
	case HPWMI_WIRELESS:
		if (rfkill2_count) {
			hp_wmi_rfkill2_refresh();
			break;
		}

		if (wifi_rfkill)
			rfkill_set_states(wifi_rfkill,
					  hp_wmi_get_sw_state(HPWMI_WIFI),
					  hp_wmi_get_hw_state(HPWMI_WIFI));
		if (bluetooth_rfkill)
			rfkill_set_states(bluetooth_rfkill,
					  hp_wmi_get_sw_state(HPWMI_BLUETOOTH),
					  hp_wmi_get_hw_state(HPWMI_BLUETOOTH));
		if (wwan_rfkill)
			rfkill_set_states(wwan_rfkill,
					  hp_wmi_get_sw_state(HPWMI_WWAN),
					  hp_wmi_get_hw_state(HPWMI_WWAN));
		break;
	case HPWMI_CPU_BATTERY_THROTTLE:
		pr_info("Unimplemented CPU throttle because of 3 Cell battery event detected\n");
		break;
	case HPWMI_LOCK_SWITCH:
		break;
	case HPWMI_LID_SWITCH:
		break;
	case HPWMI_SCREEN_ROTATION:
		break;
	case HPWMI_COOLSENSE_SYSTEM_MOBILE:
		break;
	case HPWMI_COOLSENSE_SYSTEM_HOT:
		break;
	case HPWMI_PROXIMITY_SENSOR:
		break;
	case HPWMI_BACKLIT_KB_BRIGHTNESS:
		break;
	case HPWMI_PEAKSHIFT_PERIOD:
		break;
	case HPWMI_BATTERY_CHARGE_PERIOD:
		break;
	case HPWMI_SANITIZATION_MODE:
		break;
	case HPWMI_CAMERA_TOGGLE:
		/* input device is created lazily on first toggle event */
		if (!camera_shutter_input_dev) {
			if (camera_shutter_input_setup()) {
				pr_err("Failed to setup camera shutter input device\n");
				break;
			}
		}
		if (event_data == 0xff)
			input_report_switch(camera_shutter_input_dev, SW_CAMERA_LENS_COVER, 1);
		else if (event_data == 0xfe)
			input_report_switch(camera_shutter_input_dev, SW_CAMERA_LENS_COVER, 0);
		else
			pr_warn("Unknown camera shutter state - 0x%x\n", event_data);
		input_sync(camera_shutter_input_dev);
		break;
	case HPWMI_SMART_EXPERIENCE_APP:
		break;
	default:
		pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data);
		break;
	}
}
/*
 * hp_wmi_input_setup - create the hotkey input device and WMI notifier
 *
 * Advertises SW_DOCK / SW_TABLET_MODE only when the corresponding query
 * succeeds at setup time, installs the sparse keymap, optionally enables
 * hotkey reporting on 2008-era BIOSes, and finally hooks the WMI event
 * notifier before registering the input device.
 */
static int __init hp_wmi_input_setup(void)
{
	acpi_status status;
	int err, val;

	hp_wmi_input_dev = input_allocate_device();
	if (!hp_wmi_input_dev)
		return -ENOMEM;

	hp_wmi_input_dev->name = "HP WMI hotkeys";
	hp_wmi_input_dev->phys = "wmi/input0";
	hp_wmi_input_dev->id.bustype = BUS_HOST;

	__set_bit(EV_SW, hp_wmi_input_dev->evbit);

	/* Dock */
	val = hp_wmi_get_dock_state();
	if (!(val < 0)) {
		__set_bit(SW_DOCK, hp_wmi_input_dev->swbit);
		input_report_switch(hp_wmi_input_dev, SW_DOCK, val);
	}

	/* Tablet mode */
	val = hp_wmi_get_tablet_mode();
	if (!(val < 0)) {
		__set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
		input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
	}

	err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL);
	if (err)
		goto err_free_dev;

	/* Set initial hardware state */
	input_sync(hp_wmi_input_dev);

	/* 2008-only BIOSes need an explicit opt-in for hotkey events */
	if (!hp_wmi_bios_2009_later() && hp_wmi_bios_2008_later())
		hp_wmi_enable_hotkeys();

	status = wmi_install_notify_handler(HPWMI_EVENT_GUID, hp_wmi_notify, NULL);
	if (ACPI_FAILURE(status)) {
		err = -EIO;
		goto err_free_dev;
	}

	err = input_register_device(hp_wmi_input_dev);
	if (err)
		goto err_uninstall_notifier;

	return 0;

 err_uninstall_notifier:
	wmi_remove_notify_handler(HPWMI_EVENT_GUID);
 err_free_dev:
	input_free_device(hp_wmi_input_dev);
	return err;
}
/* Tear down in reverse order of hp_wmi_input_setup(): notifier first so no
 * event can race with the input device going away. */
static void hp_wmi_input_destroy(void)
{
	wmi_remove_notify_handler(HPWMI_EVENT_GUID);
	input_unregister_device(hp_wmi_input_dev);
}
/*
 * hp_wmi_rfkill_setup - register rfkill devices via the legacy interface
 * @device: hp-wmi platform device used as rfkill parent
 *
 * Reads the wireless capability word, then registers one rfkill device per
 * advertised radio (bit 0 wifi, bit 1 bluetooth, bit 2 wwan).  On failure
 * each already-registered device is unwound via the cascading error labels.
 */
static int __init hp_wmi_rfkill_setup(struct platform_device *device)
{
	int err, wireless;

	wireless = hp_wmi_read_int(HPWMI_WIRELESS_QUERY);
	if (wireless < 0)
		return wireless;

	/* write back the state we just read; presumably tells the BIOS the
	 * OS is taking over radio control - TODO confirm */
	err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, HPWMI_WRITE, &wireless,
				   sizeof(wireless), 0);
	if (err)
		return err;

	if (wireless & 0x1) {
		wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev,
					   RFKILL_TYPE_WLAN,
					   &hp_wmi_rfkill_ops,
					   (void *) HPWMI_WIFI);
		if (!wifi_rfkill)
			return -ENOMEM;
		rfkill_init_sw_state(wifi_rfkill,
				     hp_wmi_get_sw_state(HPWMI_WIFI));
		rfkill_set_hw_state(wifi_rfkill,
				    hp_wmi_get_hw_state(HPWMI_WIFI));
		err = rfkill_register(wifi_rfkill);
		if (err)
			goto register_wifi_error;
	}

	if (wireless & 0x2) {
		bluetooth_rfkill = rfkill_alloc("hp-bluetooth", &device->dev,
						RFKILL_TYPE_BLUETOOTH,
						&hp_wmi_rfkill_ops,
						(void *) HPWMI_BLUETOOTH);
		if (!bluetooth_rfkill) {
			err = -ENOMEM;
			goto register_bluetooth_error;
		}
		rfkill_init_sw_state(bluetooth_rfkill,
				     hp_wmi_get_sw_state(HPWMI_BLUETOOTH));
		rfkill_set_hw_state(bluetooth_rfkill,
				    hp_wmi_get_hw_state(HPWMI_BLUETOOTH));
		err = rfkill_register(bluetooth_rfkill);
		if (err)
			goto register_bluetooth_error;
	}

	if (wireless & 0x4) {
		wwan_rfkill = rfkill_alloc("hp-wwan", &device->dev,
					   RFKILL_TYPE_WWAN,
					   &hp_wmi_rfkill_ops,
					   (void *) HPWMI_WWAN);
		if (!wwan_rfkill) {
			err = -ENOMEM;
			goto register_wwan_error;
		}
		rfkill_init_sw_state(wwan_rfkill,
				     hp_wmi_get_sw_state(HPWMI_WWAN));
		rfkill_set_hw_state(wwan_rfkill,
				    hp_wmi_get_hw_state(HPWMI_WWAN));
		err = rfkill_register(wwan_rfkill);
		if (err)
			goto register_wwan_error;
	}

	return 0;

	/* cascading unwind: each label also unregisters the previous radio;
	 * rfkill_destroy(NULL) is a safe no-op */
register_wwan_error:
	rfkill_destroy(wwan_rfkill);
	wwan_rfkill = NULL;
	if (bluetooth_rfkill)
		rfkill_unregister(bluetooth_rfkill);
register_bluetooth_error:
	rfkill_destroy(bluetooth_rfkill);
	bluetooth_rfkill = NULL;
	if (wifi_rfkill)
		rfkill_unregister(wifi_rfkill);
register_wifi_error:
	rfkill_destroy(wifi_rfkill);
	wifi_rfkill = NULL;
	return err;
}
/*
 * hp_wmi_rfkill2_setup - register rfkill devices via the 0x1b interface
 * @device: hp-wmi platform device used as rfkill parent
 *
 * Enumerates the rfkill2 state block and registers an rfkill device per
 * valid entry, recording the BIOS index and id in rfkill2[] so later
 * refreshes can detect mismatches.  On failure all devices registered so
 * far are unwound.
 */
static int __init hp_wmi_rfkill2_setup(struct platform_device *device)
{
	struct bios_rfkill2_state state;
	int err, i;

	err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state,
				   zero_if_sup(state), sizeof(state));
	if (err)
		return err < 0 ? err : -EINVAL;

	if (state.count > HPWMI_MAX_RFKILL2_DEVICES) {
		pr_warn("unable to parse 0x1b query output\n");
		return -EINVAL;
	}

	for (i = 0; i < state.count; i++) {
		struct rfkill *rfkill;
		enum rfkill_type type;
		char *name;

		switch (state.device[i].radio_type) {
		case HPWMI_WIFI:
			type = RFKILL_TYPE_WLAN;
			name = "hp-wifi";
			break;
		case HPWMI_BLUETOOTH:
			type = RFKILL_TYPE_BLUETOOTH;
			name = "hp-bluetooth";
			break;
		case HPWMI_WWAN:
			type = RFKILL_TYPE_WWAN;
			name = "hp-wwan";
			break;
		case HPWMI_GPS:
			type = RFKILL_TYPE_GPS;
			name = "hp-gps";
			break;
		default:
			pr_warn("unknown device type 0x%x\n",
				state.device[i].radio_type);
			continue;
		}

		/* a zero vendor id marks an empty/invalid slot */
		if (!state.device[i].vendor_id) {
			pr_warn("zero device %d while %d reported\n",
				i, state.count);
			continue;
		}

		rfkill = rfkill_alloc(name, &device->dev, type,
				      &hp_wmi_rfkill2_ops, (void *)(long)i);
		if (!rfkill) {
			err = -ENOMEM;
			goto fail;
		}

		rfkill2[rfkill2_count].id = state.device[i].rfkill_id;
		rfkill2[rfkill2_count].num = i;
		rfkill2[rfkill2_count].rfkill = rfkill;

		rfkill_init_sw_state(rfkill,
				     IS_SWBLOCKED(state.device[i].power));
		rfkill_set_hw_state(rfkill,
				    IS_HWBLOCKED(state.device[i].power));

		if (!(state.device[i].power & HPWMI_POWER_BIOS))
			pr_info("device %s blocked by BIOS\n", name);

		err = rfkill_register(rfkill);
		if (err) {
			rfkill_destroy(rfkill);
			goto fail;
		}

		rfkill2_count++;
	}

	return 0;
fail:
	/* unwind every device registered so far */
	for (; rfkill2_count > 0; rfkill2_count--) {
		rfkill_unregister(rfkill2[rfkill2_count - 1].rfkill);
		rfkill_destroy(rfkill2[rfkill2_count - 1].rfkill);
	}
	return err;
}
/*
 * platform_profile_omen_get - map the Omen EC thermal mode to a profile
 * @pprof: unused handler
 * @profile: out parameter receiving the platform profile
 *
 * Both v0 and v1 firmware mode values map onto the same three profiles.
 * Returns 0 on success, a negative errno from the EC read, or -EINVAL
 * for an unrecognized mode.
 */
static int platform_profile_omen_get(struct platform_profile_handler *pprof,
				     enum platform_profile_option *profile)
{
	int tp = omen_thermal_profile_get();

	if (tp < 0)
		return tp;

	switch (tp) {
	case HP_OMEN_V0_THERMAL_PROFILE_PERFORMANCE:
	case HP_OMEN_V1_THERMAL_PROFILE_PERFORMANCE:
		*profile = PLATFORM_PROFILE_PERFORMANCE;
		return 0;
	case HP_OMEN_V0_THERMAL_PROFILE_DEFAULT:
	case HP_OMEN_V1_THERMAL_PROFILE_DEFAULT:
		*profile = PLATFORM_PROFILE_BALANCED;
		return 0;
	case HP_OMEN_V0_THERMAL_PROFILE_COOL:
	case HP_OMEN_V1_THERMAL_PROFILE_COOL:
		*profile = PLATFORM_PROFILE_COOL;
		return 0;
	}

	return -EINVAL;
}
/*
 * platform_profile_omen_set - apply a platform profile on Omen hardware
 * @pprof: unused handler
 * @profile: requested platform profile
 *
 * Selects the v0 or v1 firmware mode value based on the detected thermal
 * policy version.  Returns 0 on success, -EOPNOTSUPP for unsupported
 * profiles or policy versions, or a negative errno from the WMI write.
 */
static int platform_profile_omen_set(struct platform_profile_handler *pprof,
				     enum platform_profile_option profile)
{
	int version, mode, err;

	version = omen_get_thermal_policy_version();
	if (version < 0 || version > 1)
		return -EOPNOTSUPP;

	switch (profile) {
	case PLATFORM_PROFILE_PERFORMANCE:
		mode = version ? HP_OMEN_V1_THERMAL_PROFILE_PERFORMANCE
			       : HP_OMEN_V0_THERMAL_PROFILE_PERFORMANCE;
		break;
	case PLATFORM_PROFILE_BALANCED:
		mode = version ? HP_OMEN_V1_THERMAL_PROFILE_DEFAULT
			       : HP_OMEN_V0_THERMAL_PROFILE_DEFAULT;
		break;
	case PLATFORM_PROFILE_COOL:
		mode = version ? HP_OMEN_V1_THERMAL_PROFILE_COOL
			       : HP_OMEN_V0_THERMAL_PROFILE_COOL;
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* omen_thermal_profile_set() returns the mode on success */
	err = omen_thermal_profile_set(mode);
	return err < 0 ? err : 0;
}
/* Read the current thermal profile via the generic WMI query; returns the
 * profile value or a negative errno. */
static int thermal_profile_get(void)
{
	return hp_wmi_read_int(HPWMI_THERMAL_PROFILE_QUERY);
}
/* Write @thermal_profile via the generic WMI query.  Returns 0 on success;
 * callers treat any non-zero return (negative errno or positive BIOS status
 * code) as failure. */
static int thermal_profile_set(int thermal_profile)
{
	return hp_wmi_perform_query(HPWMI_THERMAL_PROFILE_QUERY, HPWMI_WRITE, &thermal_profile,
				    sizeof(thermal_profile), 0);
}
/*
 * hp_wmi_platform_profile_get - map the generic HP thermal profile value
 * @pprof: unused handler
 * @profile: out parameter receiving the platform profile
 *
 * Returns 0 on success, a negative errno from the query, or -EINVAL for
 * an unrecognized firmware value.
 */
static int hp_wmi_platform_profile_get(struct platform_profile_handler *pprof,
				       enum platform_profile_option *profile)
{
	int tp = thermal_profile_get();

	if (tp < 0)
		return tp;

	switch (tp) {
	case HP_THERMAL_PROFILE_PERFORMANCE:
		*profile = PLATFORM_PROFILE_PERFORMANCE;
		return 0;
	case HP_THERMAL_PROFILE_DEFAULT:
		*profile = PLATFORM_PROFILE_BALANCED;
		return 0;
	case HP_THERMAL_PROFILE_COOL:
		*profile = PLATFORM_PROFILE_COOL;
		return 0;
	case HP_THERMAL_PROFILE_QUIET:
		*profile = PLATFORM_PROFILE_QUIET;
		return 0;
	}

	return -EINVAL;
}
/*
 * hp_wmi_platform_profile_set - apply a platform profile (generic HP path)
 * @pprof: unused handler
 * @profile: requested platform profile
 *
 * Returns -EOPNOTSUPP for unsupported profiles, otherwise the result of
 * the WMI write (0 on success).
 */
static int hp_wmi_platform_profile_set(struct platform_profile_handler *pprof,
				       enum platform_profile_option profile)
{
	int mode;

	switch (profile) {
	case PLATFORM_PROFILE_PERFORMANCE:
		mode = HP_THERMAL_PROFILE_PERFORMANCE;
		break;
	case PLATFORM_PROFILE_BALANCED:
		mode = HP_THERMAL_PROFILE_DEFAULT;
		break;
	case PLATFORM_PROFILE_COOL:
		mode = HP_THERMAL_PROFILE_COOL;
		break;
	case PLATFORM_PROFILE_QUIET:
		mode = HP_THERMAL_PROFILE_QUIET;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return thermal_profile_set(mode);
}
/*
 * is_victus_thermal_profile - does this board use the Victus thermal modes?
 *
 * Matches the DMI board name against the victus_thermal_profile_boards
 * allow-list.  Returns false when the board name is unavailable.
 */
static bool is_victus_thermal_profile(void)
{
	const char *board = dmi_get_system_info(DMI_BOARD_NAME);

	return board &&
	       match_string(victus_thermal_profile_boards,
			    ARRAY_SIZE(victus_thermal_profile_boards),
			    board) >= 0;
}
/*
 * platform_profile_victus_get - map the Victus EC thermal mode to a profile
 * @pprof: unused handler
 * @profile: out parameter receiving the platform profile
 *
 * Returns 0 on success, a negative errno from the EC read, or -EOPNOTSUPP
 * for an unrecognized mode.
 */
static int platform_profile_victus_get(struct platform_profile_handler *pprof,
				       enum platform_profile_option *profile)
{
	int tp = omen_thermal_profile_get();

	if (tp < 0)
		return tp;

	switch (tp) {
	case HP_VICTUS_THERMAL_PROFILE_PERFORMANCE:
		*profile = PLATFORM_PROFILE_PERFORMANCE;
		return 0;
	case HP_VICTUS_THERMAL_PROFILE_DEFAULT:
		*profile = PLATFORM_PROFILE_BALANCED;
		return 0;
	case HP_VICTUS_THERMAL_PROFILE_QUIET:
		*profile = PLATFORM_PROFILE_QUIET;
		return 0;
	}

	return -EOPNOTSUPP;
}
/*
 * platform_profile_victus_set - apply a platform profile on Victus hardware
 * @pprof: unused handler
 * @profile: requested platform profile
 *
 * Returns 0 on success, -EOPNOTSUPP for unsupported profiles, or a
 * negative errno from the WMI write.
 */
static int platform_profile_victus_set(struct platform_profile_handler *pprof,
				       enum platform_profile_option profile)
{
	int mode, err;

	switch (profile) {
	case PLATFORM_PROFILE_PERFORMANCE:
		mode = HP_VICTUS_THERMAL_PROFILE_PERFORMANCE;
		break;
	case PLATFORM_PROFILE_BALANCED:
		mode = HP_VICTUS_THERMAL_PROFILE_DEFAULT;
		break;
	case PLATFORM_PROFILE_QUIET:
		mode = HP_VICTUS_THERMAL_PROFILE_QUIET;
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* omen_thermal_profile_set() returns the mode on success */
	err = omen_thermal_profile_set(mode);
	return err < 0 ? err : 0;
}
/*
 * thermal_profile_setup - register the appropriate platform_profile handler
 *
 * Picks the Omen, Victus, or generic HP thermal interface based on DMI
 * board matching, re-writes the current profile so the firmware syncs its
 * OEM variables, advertises the profile choices each interface supports,
 * and registers the handler.  Sets platform_profile_support on success.
 */
static int thermal_profile_setup(void)
{
	int err, tp;

	if (is_omen_thermal_profile()) {
		tp = omen_thermal_profile_get();
		if (tp < 0)
			return tp;

		/*
		 * call thermal profile write command to ensure that the
		 * firmware correctly sets the OEM variables
		 */
		err = omen_thermal_profile_set(tp);
		if (err < 0)
			return err;

		platform_profile_handler.profile_get = platform_profile_omen_get;
		platform_profile_handler.profile_set = platform_profile_omen_set;

		set_bit(PLATFORM_PROFILE_COOL, platform_profile_handler.choices);
	} else if (is_victus_thermal_profile()) {
		tp = omen_thermal_profile_get();
		if (tp < 0)
			return tp;

		/*
		 * call thermal profile write command to ensure that the
		 * firmware correctly sets the OEM variables
		 */
		err = omen_thermal_profile_set(tp);
		if (err < 0)
			return err;

		platform_profile_handler.profile_get = platform_profile_victus_get;
		platform_profile_handler.profile_set = platform_profile_victus_set;

		set_bit(PLATFORM_PROFILE_QUIET, platform_profile_handler.choices);
	} else {
		tp = thermal_profile_get();

		if (tp < 0)
			return tp;

		/*
		 * call thermal profile write command to ensure that the
		 * firmware correctly sets the OEM variables for the DPTF
		 */
		err = thermal_profile_set(tp);
		if (err)
			return err;

		platform_profile_handler.profile_get = hp_wmi_platform_profile_get;
		platform_profile_handler.profile_set = hp_wmi_platform_profile_set;

		set_bit(PLATFORM_PROFILE_QUIET, platform_profile_handler.choices);
		set_bit(PLATFORM_PROFILE_COOL, platform_profile_handler.choices);
	}

	/* balanced and performance are common to all three interfaces */
	set_bit(PLATFORM_PROFILE_BALANCED, platform_profile_handler.choices);
	set_bit(PLATFORM_PROFILE_PERFORMANCE, platform_profile_handler.choices);

	err = platform_profile_register(&platform_profile_handler);
	if (err)
		return err;

	platform_profile_support = true;

	return 0;
}
static int hp_wmi_hwmon_init(void);
/*
 * hp_wmi_bios_setup - platform driver probe
 *
 * Registers rfkill (legacy interface first, rfkill2 as fallback) on
 * pre-2009 BIOSes, then hwmon.  thermal_profile_setup() failure is
 * deliberately non-fatal: the rest of the driver stays usable.
 */
static int __init hp_wmi_bios_setup(struct platform_device *device)
{
	int err;
	/* clear detected rfkill devices */
	wifi_rfkill = NULL;
	bluetooth_rfkill = NULL;
	wwan_rfkill = NULL;
	rfkill2_count = 0;

	/*
	 * In pre-2009 BIOS, command 1Bh return 0x4 to indicate that
	 * BIOS no longer controls the power for the wireless
	 * devices. All features supported by this command will no
	 * longer be supported.
	 */
	if (!hp_wmi_bios_2009_later()) {
		if (hp_wmi_rfkill_setup(device))
			hp_wmi_rfkill2_setup(device);
	}

	err = hp_wmi_hwmon_init();

	if (err < 0)
		return err;

	thermal_profile_setup();

	return 0;
}
/*
 * hp_wmi_bios_remove - platform driver remove
 *
 * Unregisters and destroys every rfkill device created at probe time
 * (rfkill2 entries and the three legacy radios) and removes the platform
 * profile handler when it was registered.
 */
static int __exit hp_wmi_bios_remove(struct platform_device *device)
{
	int i;

	for (i = 0; i < rfkill2_count; i++) {
		rfkill_unregister(rfkill2[i].rfkill);
		rfkill_destroy(rfkill2[i].rfkill);
	}

	if (wifi_rfkill) {
		rfkill_unregister(wifi_rfkill);
		rfkill_destroy(wifi_rfkill);
	}
	if (bluetooth_rfkill) {
		rfkill_unregister(bluetooth_rfkill);
		rfkill_destroy(bluetooth_rfkill);
	}
	if (wwan_rfkill) {
		rfkill_unregister(wwan_rfkill);
		rfkill_destroy(wwan_rfkill);
	}

	if (platform_profile_support)
		platform_profile_remove();

	return 0;
}
/*
 * hp_wmi_resume_handler - PM resume/restore callback
 *
 * Re-reads dock/tablet switches and radio states after suspend and pushes
 * them to the input and rfkill layers, which de-duplicate unchanged state.
 */
static int hp_wmi_resume_handler(struct device *device)
{
	/*
	 * Hardware state may have changed while suspended, so trigger
	 * input events for the current state. As this is a switch,
	 * the input layer will only actually pass it on if the state
	 * changed.
	 */
	if (hp_wmi_input_dev) {
		if (test_bit(SW_DOCK, hp_wmi_input_dev->swbit))
			input_report_switch(hp_wmi_input_dev, SW_DOCK,
					    hp_wmi_get_dock_state());
		if (test_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit))
			input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
					    hp_wmi_get_tablet_mode());
		input_sync(hp_wmi_input_dev);
	}

	if (rfkill2_count)
		hp_wmi_rfkill2_refresh();

	if (wifi_rfkill)
		rfkill_set_states(wifi_rfkill,
				  hp_wmi_get_sw_state(HPWMI_WIFI),
				  hp_wmi_get_hw_state(HPWMI_WIFI));
	if (bluetooth_rfkill)
		rfkill_set_states(bluetooth_rfkill,
				  hp_wmi_get_sw_state(HPWMI_BLUETOOTH),
				  hp_wmi_get_hw_state(HPWMI_BLUETOOTH));
	if (wwan_rfkill)
		rfkill_set_states(wwan_rfkill,
				  hp_wmi_get_sw_state(HPWMI_WWAN),
				  hp_wmi_get_hw_state(HPWMI_WWAN));

	return 0;
}
/* restore shares the resume path: hibernation also invalidates switch and
 * radio state */
static const struct dev_pm_ops hp_wmi_pm_ops = {
	.resume  = hp_wmi_resume_handler,
	.restore  = hp_wmi_resume_handler,
};

/* probe is passed separately to platform_driver_probe() in hp_wmi_init() */
static struct platform_driver hp_wmi_driver = {
	.driver = {
		.name = "hp-wmi",
		.pm = &hp_wmi_pm_ops,
		.dev_groups = hp_wmi_groups,
	},
	.remove = __exit_p(hp_wmi_bios_remove),
};
/*
 * hp_wmi_hwmon_is_visible - hwmon attribute visibility callback
 *
 * pwm1_enable is always read/write; a fan channel is exposed read-only
 * only when its speed query works on this machine.
 */
static umode_t hp_wmi_hwmon_is_visible(const void *data,
				       enum hwmon_sensor_types type,
				       u32 attr, int channel)
{
	switch (type) {
	case hwmon_pwm:
		return 0644;
	case hwmon_fan:
		if (hp_wmi_get_fan_speed(channel) >= 0)
			return 0444;
		break;
	default:
		return 0;
	}

	return 0;
}
/*
 * hp_wmi_hwmon_read - hwmon read callback
 *
 * Fan channels report the raw WMI speed.  The pwm channel translates the
 * firmware's max-fan flag into hwmon pwm_enable semantics: firmware 0
 * (automatic) -> hwmon 2, firmware 1 (max speed) -> hwmon 0.
 */
static int hp_wmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
			     u32 attr, int channel, long *val)
{
	int ret;

	switch (type) {
	case hwmon_fan:
		ret = hp_wmi_get_fan_speed(channel);

		if (ret < 0)
			return ret;
		*val = ret;
		return 0;
	case hwmon_pwm:
		switch (hp_wmi_fan_speed_max_get()) {
		case 0:
			/* 0 is automatic fan, which is 2 for hwmon */
			*val = 2;
			return 0;
		case 1:
			/* 1 is max fan, which is 0
			 * (no fan speed control) for hwmon
			 */
			*val = 0;
			return 0;
		default:
			/* shouldn't happen */
			return -ENODATA;
		}
	default:
		return -EINVAL;
	}
}
/*
 * hp_wmi_hwmon_write - hwmon write callback
 *
 * Only pwm1_enable is writable.  hwmon value 0 (no fan speed control)
 * maps to firmware max-fan mode 1; hwmon value 2 (automatic) maps to
 * firmware mode 0.  Manual control (hwmon 1) is not supported.
 */
static int hp_wmi_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
			      u32 attr, int channel, long val)
{
	if (type != hwmon_pwm)
		return -EOPNOTSUPP;

	if (val == 0)
		return hp_wmi_fan_speed_max_set(1);
	if (val == 2)
		return hp_wmi_fan_speed_max_set(0);

	return -EINVAL;
}
/* two fan input channels and one pwm-enable channel */
static const struct hwmon_channel_info * const info[] = {
	HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT, HWMON_F_INPUT),
	HWMON_CHANNEL_INFO(pwm, HWMON_PWM_ENABLE),
	NULL
};

static const struct hwmon_ops ops = {
	.is_visible = hp_wmi_hwmon_is_visible,
	.read = hp_wmi_hwmon_read,
	.write = hp_wmi_hwmon_write,
};

static const struct hwmon_chip_info chip_info = {
	.ops = &ops,
	.info = info,
};
/*
 * hp_wmi_hwmon_init - register the devm-managed hwmon device
 *
 * Parented to the hp-wmi platform device, so it is torn down automatically
 * when that device goes away.  Returns 0 or a negative errno.
 */
static int hp_wmi_hwmon_init(void)
{
	struct device *dev = &hp_wmi_platform_dev->dev;
	struct device *hwmon;

	hwmon = devm_hwmon_device_register_with_info(dev, "hp", &hp_wmi_driver,
						     &chip_info, NULL);

	if (IS_ERR(hwmon)) {
		dev_err(dev, "Could not register hp hwmon device\n");
		return PTR_ERR(hwmon);
	}

	return 0;
}
/*
 * hp_wmi_init - module entry point
 *
 * Requires at least one of the event or BIOS WMI GUIDs.  Probes whether
 * this BIOS wants zero-sized input buffers (zero_insize_support) by
 * checking for HPWMI_RET_INVALID_PARAMETERS, then sets up the hotkey
 * input path and/or the platform device + driver.
 */
static int __init hp_wmi_init(void)
{
	int event_capable = wmi_has_guid(HPWMI_EVENT_GUID);
	int bios_capable = wmi_has_guid(HPWMI_BIOS_GUID);
	int err, tmp = 0;

	if (!bios_capable && !event_capable)
		return -ENODEV;

	if (hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, HPWMI_READ, &tmp,
				 sizeof(tmp), sizeof(tmp)) == HPWMI_RET_INVALID_PARAMETERS)
		zero_insize_support = true;

	if (event_capable) {
		err = hp_wmi_input_setup();
		if (err)
			return err;
	}

	if (bios_capable) {
		hp_wmi_platform_dev =
			platform_device_register_simple("hp-wmi", PLATFORM_DEVID_NONE, NULL, 0);
		if (IS_ERR(hp_wmi_platform_dev)) {
			err = PTR_ERR(hp_wmi_platform_dev);
			goto err_destroy_input;
		}

		err = platform_driver_probe(&hp_wmi_driver, hp_wmi_bios_setup);
		if (err)
			goto err_unregister_device;
	}

	return 0;

err_unregister_device:
	platform_device_unregister(hp_wmi_platform_dev);
err_destroy_input:
	if (event_capable)
		hp_wmi_input_destroy();

	return err;
}
module_init(hp_wmi_init);
/*
 * hp_wmi_exit - module exit point
 *
 * Mirrors hp_wmi_init(): tears down the input/notifier path, the lazily
 * created camera shutter device, and the platform device + driver.
 */
static void __exit hp_wmi_exit(void)
{
	if (wmi_has_guid(HPWMI_EVENT_GUID))
		hp_wmi_input_destroy();

	if (camera_shutter_input_dev)
		input_unregister_device(camera_shutter_input_dev);

	if (hp_wmi_platform_dev) {
		platform_device_unregister(hp_wmi_platform_dev);
		platform_driver_unregister(&hp_wmi_driver);
	}
}
module_exit(hp_wmi_exit);
| linux-master | drivers/platform/x86/hp/hp-wmi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Functions corresponding to sure start object type attributes under
* BIOS for use with hp-bioscfg driver
*
* Copyright (c) 2022 HP Development Company, L.P.
*/
#include "bioscfg.h"
#include <linux/types.h>
/* Maximum number of log entries supported when log entry size is 16
* bytes. This value is calculated by dividing 4096 (page size) by
* log entry size.
*/
#define LOG_MAX_ENTRIES 254
/*
* Current Log entry size. This value size will change in the
* future. The driver reads a total of 128 bytes for each log entry
* provided by BIOS but only the first 16 bytes are used/read.
*/
#define LOG_ENTRY_SIZE 16
/*
* audit_log_entry_count_show - Reports the number of
* existing audit log entries available
* to be read
*/
/*
 * audit_log_entry_count_show - Reports the number of
 * existing audit log entries available
 * to be read
 *
 * Output format: "<count>,<entry-size>,<max-entries>".
 */
static ssize_t audit_log_entry_count_show(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	int ret;
	u32 count = 0;

	ret = hp_wmi_perform_query(HPWMI_SURESTART_GET_LOG_COUNT,
				   HPWMI_SURESTART,
				   &count, 1, sizeof(count));
	if (ret < 0)
		return ret;

	return sysfs_emit(buf, "%d,%d,%d\n", count, LOG_ENTRY_SIZE,
			  LOG_MAX_ENTRIES);
}
/*
* audit_log_entries_show() - Return all entries found in log file
*/
/*
 * audit_log_entries_show() - Return all entries found in log file
 *
 * Reads the audit log entry by entry (newest first) and packs the first
 * LOG_ENTRY_SIZE bytes of each 128-byte response into @buf.  The earlier
 * count check guarantees count * LOG_ENTRY_SIZE <= PAGE_SIZE, so the
 * memcpy never overruns the sysfs page.  Returns the number of bytes
 * written, possibly a partial list on a mid-read failure.
 */
static ssize_t audit_log_entries_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	int ret;
	int i;
	u32 count = 0;
	u8 audit_log_buffer[128];

	// Get the number of event logs
	ret = hp_wmi_perform_query(HPWMI_SURESTART_GET_LOG_COUNT,
				   HPWMI_SURESTART,
				   &count, 1, sizeof(count));
	if (ret < 0)
		return ret;

	/*
	 * The show() api will not work if the audit logs ever go
	 * beyond 4KB
	 */
	if (count * LOG_ENTRY_SIZE > PAGE_SIZE)
		return -EIO;

	/*
	 * We are guaranteed the buffer is 4KB so today all the event
	 * logs will fit
	 */
	for (i = 0; i < count; i++) {
		/* byte 0 on input selects which entry to fetch (1-based) */
		audit_log_buffer[0] = i + 1;

		/*
		 * read audit log entry at a time. 'buf' input value
		 * provides the audit log entry to be read. On
		 * input, Byte 0 = Audit Log entry number from
		 * beginning (1..254)
		 * Entry number 1 is the newest entry whereas the
		 * highest entry number (number of entries) is the
		 * oldest entry.
		 */
		ret = hp_wmi_perform_query(HPWMI_SURESTART_GET_LOG,
					   HPWMI_SURESTART,
					   audit_log_buffer, 1, 128);

		if (ret < 0 || (LOG_ENTRY_SIZE * i) > PAGE_SIZE) {
			/*
			 * Encountered a failure while reading
			 * individual logs. Only a partial list of
			 * audit log will be returned.
			 */
			break;
		} else {
			memcpy(buf, audit_log_buffer, LOG_ENTRY_SIZE);
			buf += LOG_ENTRY_SIZE;
		}
	}

	return i * LOG_ENTRY_SIZE;
}
/* read-only sysfs attributes exposing the Sure Start audit log */
static struct kobj_attribute sure_start_audit_log_entry_count = __ATTR_RO(audit_log_entry_count);
static struct kobj_attribute sure_start_audit_log_entries = __ATTR_RO(audit_log_entries);

static struct attribute *sure_start_attrs[] = {
	&sure_start_audit_log_entry_count.attr,
	&sure_start_audit_log_entries.attr,
	NULL
};

static const struct attribute_group sure_start_attr_group = {
	.attrs = sure_start_attrs,
};
/* Remove the Sure Start attribute group created by
 * hp_populate_sure_start_data(). */
void hp_exit_sure_start_attributes(void)
{
	sysfs_remove_group(bioscfg_drv.sure_start_attr_kobj,
			   &sure_start_attr_group);
}
/* Create the Sure Start sysfs group under @attr_name_kobj and remember the
 * kobject for later removal.  Returns 0 or a negative errno. */
int hp_populate_sure_start_data(struct kobject *attr_name_kobj)
{
	bioscfg_drv.sure_start_attr_kobj = attr_name_kobj;
	return sysfs_create_group(attr_name_kobj, &sure_start_attr_group);
}
| linux-master | drivers/platform/x86/hp/hp-bioscfg/surestart-attributes.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Functions corresponding to integer type attributes under
* BIOS Enumeration GUID for use with hp-bioscfg driver.
*
* Copyright (c) 2022 Hewlett-Packard Inc.
*/
#include "bioscfg.h"
GET_INSTANCE_ID(integer);
/* sysfs show for an integer attribute's current value; the instance is
 * resolved from the kobject name via the GET_INSTANCE_ID(integer) helper. */
static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	int instance_id = get_integer_instance_id(kobj);

	if (instance_id < 0)
		return -EIO;

	return sysfs_emit(buf, "%d\n",
			  bioscfg_drv.integer_data[instance_id].current_value);
}
/**
* validate_integer_input() -
* Validate input of current_value against lower and upper bound
*
* @instance_id: The instance on which input is validated
* @buf: Input value
*/
/*
 * validate_integer_input() -
 * Validate input of current_value against lower and upper bound
 *
 * @instance_id: The instance on which input is validated
 * @buf: Input value
 *
 * Returns 0 when @buf parses as a decimal integer within
 * [lower_bound, upper_bound], -EIO for read-only attributes,
 * -ERANGE when out of bounds, or the kstrtoint() errno.
 */
static int validate_integer_input(int instance_id, char *buf)
{
	int in_val;
	int ret;
	struct integer_data *integer_data = &bioscfg_drv.integer_data[instance_id];

	/* BIOS treats it as a read only attribute */
	if (integer_data->common.is_readonly)
		return -EIO;

	ret = kstrtoint(buf, 10, &in_val);
	if (ret < 0)
		return ret;

	if (in_val < integer_data->lower_bound ||
	    in_val > integer_data->upper_bound)
		return -ERANGE;

	return 0;
}
/*
 * update_integer_value - cache a new current value for an integer attribute
 * @instance_id: index into bioscfg_drv.integer_data
 * @attr_value: decimal string to parse
 *
 * Unparseable values are only logged; the cached value is left untouched.
 */
static void update_integer_value(int instance_id, char *attr_value)
{
	struct integer_data *integer_data = &bioscfg_drv.integer_data[instance_id];
	int value;

	if (kstrtoint(attr_value, 10, &value))
		pr_warn("Invalid integer value found: %s\n", attr_value);
	else
		integer_data->current_value = value;
}
ATTRIBUTE_S_COMMON_PROPERTY_SHOW(display_name, integer);
static struct kobj_attribute integer_display_name =
__ATTR_RO(display_name);
ATTRIBUTE_PROPERTY_STORE(current_value, integer);
static struct kobj_attribute integer_current_val =
__ATTR_RW_MODE(current_value, 0644);
ATTRIBUTE_N_PROPERTY_SHOW(lower_bound, integer);
static struct kobj_attribute integer_lower_bound =
__ATTR_RO(lower_bound);
ATTRIBUTE_N_PROPERTY_SHOW(upper_bound, integer);
static struct kobj_attribute integer_upper_bound =
__ATTR_RO(upper_bound);
ATTRIBUTE_N_PROPERTY_SHOW(scalar_increment, integer);
static struct kobj_attribute integer_scalar_increment =
__ATTR_RO(scalar_increment);
/* sysfs "type" attribute: identifies this attribute class as "integer" */
static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr,
			 char *buf)
{
	return sysfs_emit(buf, "integer\n");
}
static struct kobj_attribute integer_type =
__ATTR_RO(type);
static struct attribute *integer_attrs[] = {
&common_display_langcode.attr,
&integer_display_name.attr,
&integer_current_val.attr,
&integer_lower_bound.attr,
&integer_upper_bound.attr,
&integer_scalar_increment.attr,
&integer_type.attr,
NULL
};
static const struct attribute_group integer_attr_group = {
.attrs = integer_attrs,
};
/*
 * hp_alloc_integer_data - allocate per-instance storage for integer attrs
 *
 * Sizes the array from the WMI instance count; on allocation failure the
 * count is reset to 0 so later iteration code sees an empty set.
 * Returns 0 or -ENOMEM.
 */
int hp_alloc_integer_data(void)
{
	bioscfg_drv.integer_instances_count = hp_get_instance_count(HP_WMI_BIOS_INTEGER_GUID);
	bioscfg_drv.integer_data = kcalloc(bioscfg_drv.integer_instances_count,
					   sizeof(*bioscfg_drv.integer_data), GFP_KERNEL);
	if (!bioscfg_drv.integer_data) {
		bioscfg_drv.integer_instances_count = 0;
		return -ENOMEM;
	}
	return 0;
}
/* Expected Values types associated with each element */
static const acpi_object_type expected_integer_types[] = {
[NAME] = ACPI_TYPE_STRING,
[VALUE] = ACPI_TYPE_STRING,
[PATH] = ACPI_TYPE_STRING,
[IS_READONLY] = ACPI_TYPE_INTEGER,
[DISPLAY_IN_UI] = ACPI_TYPE_INTEGER,
[REQUIRES_PHYSICAL_PRESENCE] = ACPI_TYPE_INTEGER,
[SEQUENCE] = ACPI_TYPE_INTEGER,
[PREREQUISITES_SIZE] = ACPI_TYPE_INTEGER,
[PREREQUISITES] = ACPI_TYPE_STRING,
[SECURITY_LEVEL] = ACPI_TYPE_INTEGER,
[INT_LOWER_BOUND] = ACPI_TYPE_INTEGER,
[INT_UPPER_BOUND] = ACPI_TYPE_INTEGER,
[INT_SCALAR_INCREMENT] = ACPI_TYPE_INTEGER,
};
static int hp_populate_integer_elements_from_package(union acpi_object *integer_obj,
int integer_obj_count,
int instance_id)
{
char *str_value = NULL;
int value_len;
int ret;
u32 int_value = 0;
int elem;
int reqs;
int eloc;
int size;
struct integer_data *integer_data = &bioscfg_drv.integer_data[instance_id];
if (!integer_obj)
return -EINVAL;
for (elem = 1, eloc = 1; elem < integer_obj_count; elem++, eloc++) {
/* ONLY look at the first INTEGER_ELEM_CNT elements */
if (eloc == INT_ELEM_CNT)
goto exit_integer_package;
switch (integer_obj[elem].type) {
case ACPI_TYPE_STRING:
if (elem != PREREQUISITES) {
ret = hp_convert_hexstr_to_str(integer_obj[elem].string.pointer,
integer_obj[elem].string.length,
&str_value, &value_len);
if (ret)
continue;
}
break;
case ACPI_TYPE_INTEGER:
int_value = (u32)integer_obj[elem].integer.value;
break;
default:
pr_warn("Unsupported object type [%d]\n", integer_obj[elem].type);
continue;
}
/* Check that both expected and read object type match */
if (expected_integer_types[eloc] != integer_obj[elem].type) {
pr_err("Error expected type %d for elem %d, but got type %d instead\n",
expected_integer_types[eloc], elem, integer_obj[elem].type);
kfree(str_value);
return -EIO;
}
/* Assign appropriate element value to corresponding field*/
switch (eloc) {
case VALUE:
ret = kstrtoint(str_value, 10, &int_value);
if (ret)
continue;
integer_data->current_value = int_value;
break;
case PATH:
strscpy(integer_data->common.path, str_value,
sizeof(integer_data->common.path));
break;
case IS_READONLY:
integer_data->common.is_readonly = int_value;
break;
case DISPLAY_IN_UI:
integer_data->common.display_in_ui = int_value;
break;
case REQUIRES_PHYSICAL_PRESENCE:
integer_data->common.requires_physical_presence = int_value;
break;
case SEQUENCE:
integer_data->common.sequence = int_value;
break;
case PREREQUISITES_SIZE:
if (int_value > MAX_PREREQUISITES_SIZE) {
pr_warn("Prerequisites size value exceeded the maximum number of elements supported or data may be malformed\n");
int_value = MAX_PREREQUISITES_SIZE;
}
integer_data->common.prerequisites_size = int_value;
/*
* This step is needed to keep the expected
* element list pointing to the right obj[elem].type
* when the size is zero. PREREQUISITES
* object is omitted by BIOS when the size is
* zero.
*/
if (integer_data->common.prerequisites_size == 0)
eloc++;
break;
case PREREQUISITES:
size = min_t(u32, integer_data->common.prerequisites_size, MAX_PREREQUISITES_SIZE);
for (reqs = 0; reqs < size; reqs++) {
if (elem >= integer_obj_count) {
pr_err("Error elem-objects package is too small\n");
return -EINVAL;
}
ret = hp_convert_hexstr_to_str(integer_obj[elem + reqs].string.pointer,
integer_obj[elem + reqs].string.length,
&str_value, &value_len);
if (ret)
continue;
strscpy(integer_data->common.prerequisites[reqs],
str_value,
sizeof(integer_data->common.prerequisites[reqs]));
kfree(str_value);
str_value = NULL;
}
break;
case SECURITY_LEVEL:
integer_data->common.security_level = int_value;
break;
case INT_LOWER_BOUND:
integer_data->lower_bound = int_value;
break;
case INT_UPPER_BOUND:
integer_data->upper_bound = int_value;
break;
case INT_SCALAR_INCREMENT:
integer_data->scalar_increment = int_value;
break;
default:
pr_warn("Invalid element: %d found in Integer attribute or data may be malformed\n", elem);
break;
}
kfree(str_value);
str_value = NULL;
}
exit_integer_package:
kfree(str_value);
return 0;
}
/**
 * hp_populate_integer_package_data() -
 * Populate all properties of an instance under integer attribute
 *
 * @integer_obj: ACPI object with integer data
 * @instance_id: The instance to enumerate
 * @attr_name_kobj: The parent kernel object
 *
 * Returns zero on success or a negative errno on failure.
 */
int hp_populate_integer_package_data(union acpi_object *integer_obj,
				     int instance_id,
				     struct kobject *attr_name_kobj)
{
	struct integer_data *integer_data = &bioscfg_drv.integer_data[instance_id];
	int ret;

	integer_data->attr_name_kobj = attr_name_kobj;

	/*
	 * Propagate parse failures instead of exposing a sysfs group
	 * over unpopulated data (matches the buffer-based variant).
	 */
	ret = hp_populate_integer_elements_from_package(integer_obj,
							integer_obj->package.count,
							instance_id);
	if (ret < 0)
		return ret;

	hp_update_attribute_permissions(integer_data->common.is_readonly,
					&integer_current_val);
	hp_friendly_user_name_update(integer_data->common.path,
				     attr_name_kobj->name,
				     integer_data->common.display_name,
				     sizeof(integer_data->common.display_name));
	return sysfs_create_group(attr_name_kobj, &integer_attr_group);
}
/*
 * hp_populate_integer_elements_from_buffer() - Populate an integer
 * attribute instance from a WMI byte buffer
 *
 * @buffer_ptr:  Start of the instance data within the WMI buffer
 * @buffer_size: Remaining bytes in the buffer; updated as data is consumed
 * @instance_id: Instance of the integer attribute to populate
 *
 * Returns zero on success or a negative errno on failure.
 */
static int hp_populate_integer_elements_from_buffer(u8 *buffer_ptr, u32 *buffer_size,
						    int instance_id)
{
	char *dst = NULL;
	int dst_size = *buffer_size / sizeof(u16);
	struct integer_data *integer_data = &bioscfg_drv.integer_data[instance_id];
	int ret = 0;

	dst = kcalloc(dst_size, sizeof(char), GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	/*
	 * Only data relevant to this driver and its functionality is
	 * read. BIOS defines the order in which each element is
	 * read. Element 0 data is not relevant to this
	 * driver hence it is ignored. For clarity, all element names
	 * (DISPLAY_IN_UI) which defines the order in which is read
	 * and the name matches the variable where the data is stored.
	 *
	 * In earlier implementation, reported errors were ignored
	 * causing the data to remain uninitialized. It is not
	 * possible to determine if data read from BIOS is valid or
	 * not. It is for this reason functions may return a error
	 * without validating the data itself.
	 */

	// VALUE:
	integer_data->current_value = 0;

	ret = hp_get_string_from_buffer(&buffer_ptr, buffer_size, dst, dst_size);
	if (ret < 0) {
		/* Propagate truncated/undersized buffer errors to the caller */
		kfree(dst);
		goto buffer_exit;
	}

	ret = kstrtoint(dst, 10, &integer_data->current_value);
	if (ret)
		pr_warn("Unable to convert string to integer: %s\n", dst);
	kfree(dst);

	// COMMON:
	ret = hp_get_common_data_from_buffer(&buffer_ptr, buffer_size, &integer_data->common);
	if (ret < 0)
		goto buffer_exit;

	// INT_LOWER_BOUND:
	ret = hp_get_integer_from_buffer(&buffer_ptr, buffer_size,
					 &integer_data->lower_bound);
	if (ret < 0)
		goto buffer_exit;

	// INT_UPPER_BOUND:
	ret = hp_get_integer_from_buffer(&buffer_ptr, buffer_size,
					 &integer_data->upper_bound);
	if (ret < 0)
		goto buffer_exit;

	// INT_SCALAR_INCREMENT:
	ret = hp_get_integer_from_buffer(&buffer_ptr, buffer_size,
					 &integer_data->scalar_increment);

buffer_exit:
	return ret;
}
/**
 * hp_populate_integer_buffer_data() -
 * Populate all properties of an instance under integer attribute
 *
 * @buffer_ptr: Buffer pointer
 * @buffer_size: Buffer size
 * @instance_id: The instance to enumerate
 * @attr_name_kobj: The parent kernel object
 *
 * Returns zero on success or a negative errno on failure.
 */
int hp_populate_integer_buffer_data(u8 *buffer_ptr, u32 *buffer_size, int instance_id,
				    struct kobject *attr_name_kobj)
{
	struct integer_data *integer_data = &bioscfg_drv.integer_data[instance_id];
	int ret = 0;

	integer_data->attr_name_kobj = attr_name_kobj;

	/* Populate integer elements */
	ret = hp_populate_integer_elements_from_buffer(buffer_ptr, buffer_size,
						       instance_id);
	if (ret < 0)
		return ret;

	hp_update_attribute_permissions(integer_data->common.is_readonly,
					&integer_current_val);
	hp_friendly_user_name_update(integer_data->common.path,
				     attr_name_kobj->name,
				     integer_data->common.display_name,
				     sizeof(integer_data->common.display_name));
	return sysfs_create_group(attr_name_kobj, &integer_attr_group);
}
/**
 * hp_exit_integer_attributes() - Clear all attribute data
 *
 * Removes the sysfs group of every previously created integer attribute
 * instance, then releases the backing instance array.
 */
void hp_exit_integer_attributes(void)
{
	int i;

	for (i = 0; i < bioscfg_drv.integer_instances_count; i++) {
		struct kobject *kobj = bioscfg_drv.integer_data[i].attr_name_kobj;

		if (kobj)
			sysfs_remove_group(kobj, &integer_attr_group);
	}
	bioscfg_drv.integer_instances_count = 0;

	kfree(bioscfg_drv.integer_data);
	bioscfg_drv.integer_data = NULL;
}
| linux-master | drivers/platform/x86/hp/hp-bioscfg/int-attributes.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Common methods for use with hp-bioscfg driver
*
* Copyright (c) 2022 HP Development Company, L.P.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/wmi.h>
#include "bioscfg.h"
#include "../../firmware_attributes_class.h"
#include <linux/nls.h>
#include <linux/errno.h>
MODULE_AUTHOR("Jorge Lopez <[email protected]>");
MODULE_DESCRIPTION("HP BIOS Configuration Driver");
MODULE_LICENSE("GPL");
/* Driver-wide state; the mutex serializes attribute setup and teardown */
struct bioscfg_priv bioscfg_drv = {
	.mutex = __MUTEX_INITIALIZER(bioscfg_drv.mutex),
};
/* firmware-attributes class under which the bioscfg device is created */
static struct class *fw_attr_class;
/* Shared show handler: every attribute reports the same LANG_CODE_STR */
ssize_t display_name_language_code_show(struct kobject *kobj,
					struct kobj_attribute *attr,
					char *buf)
{
	return sysfs_emit(buf, "%s\n", LANG_CODE_STR);
}
struct kobj_attribute common_display_langcode =
	__ATTR_RO(display_name_language_code);
/**
 * hp_get_integer_from_buffer() - Read one int from a WMI byte buffer
 *
 * @buffer:      In/out cursor into the buffer; advanced past the value read
 * @buffer_size: Remaining bytes; decremented by sizeof(int) on success
 * @integer:     Where the value read is stored
 *
 * Returns zero on success, -EINVAL if fewer than sizeof(int) bytes remain.
 */
int hp_get_integer_from_buffer(u8 **buffer, u32 *buffer_size, u32 *integer)
{
	int *ptr = PTR_ALIGN((int *)*buffer, sizeof(int));

	/* Ensure there is enough space remaining to read the integer */
	if (*buffer_size < sizeof(int))
		return -EINVAL;

	/*
	 * NOTE(review): padding bytes skipped by PTR_ALIGN() above are not
	 * subtracted from *buffer_size - confirm callers never pass a
	 * misaligned cursor, otherwise the remaining-size accounting drifts.
	 */
	*integer = *(ptr++);
	*buffer = (u8 *)ptr;
	*buffer_size -= sizeof(int);
	return 0;
}
/**
 * hp_get_string_from_buffer() - Read one UTF-16 string from a WMI buffer
 *
 * @buffer:      In/out cursor; advanced past the string read
 * @buffer_size: Remaining bytes; decremented as data is consumed
 * @dst:         Destination for the converted string
 * @dst_size:    Size of @dst in bytes
 *
 * The buffer encodes a u16 byte-length followed by UTF-16 characters.
 * Backslash, CR, LF and TAB are backslash-escaped in the output, and
 * double quotes are replaced with single quotes.
 *
 * Returns the number of u16 characters consumed (including the escape
 * inflation) or -EINVAL when the buffer is too small.
 */
int hp_get_string_from_buffer(u8 **buffer, u32 *buffer_size, char *dst, u32 dst_size)
{
	u16 *src = (u16 *)*buffer;
	u16 src_size;
	u16 size;
	int i;
	int conv_dst_size;

	if (*buffer_size < sizeof(u16))
		return -EINVAL;

	src_size = *(src++);
	/* size value in u16 chars */
	size = src_size / sizeof(u16);

	/* Ensure there is enough space remaining to read and convert
	 * the string
	 */
	if (*buffer_size < src_size)
		return -EINVAL;

	/* Grow size by one for every character that will need escaping */
	for (i = 0; i < size; i++)
		if (src[i] == '\\' ||
		    src[i] == '\r' ||
		    src[i] == '\n' ||
		    src[i] == '\t')
			size++;

	/*
	 * Conversion is limited to destination string max number of
	 * bytes.
	 */
	conv_dst_size = size;
	if (size > dst_size)
		conv_dst_size = dst_size - 1;

	/*
	 * convert from UTF-16 unicode to ASCII
	 */
	utf16s_to_utf8s(src, src_size, UTF16_HOST_ENDIAN, dst, conv_dst_size);
	dst[conv_dst_size] = 0;

	for (i = 0; i < conv_dst_size; i++) {
		if (*src == '\\' ||
		    *src == '\r' ||
		    *src == '\n' ||
		    *src == '\t') {
			dst[i++] = '\\';
			if (i == conv_dst_size)
				break;
		}

		if (*src == '\r')
			dst[i] = 'r';
		else if (*src == '\n')
			dst[i] = 'n';
		else if (*src == '\t')
			dst[i] = 't';
		else if (*src == '"')
			dst[i] = '\'';
		else
			dst[i] = *src;
		src++;
	}

	*buffer = (u8 *)src;
	/*
	 * NOTE(review): size includes the escape inflation added above, so
	 * slightly more than the raw string payload is subtracted here -
	 * confirm this matches how BIOS packs consecutive fields.
	 */
	*buffer_size -= size * sizeof(u16);
	return size;
}
/**
 * hp_get_common_data_from_buffer() - Parse the common attribute fields
 *
 * @buffer_ptr:  In/out cursor into the WMI buffer
 * @buffer_size: Remaining bytes; updated as data is consumed
 * @common_data: Destination for the parsed fields
 *
 * Fields are consumed in the fixed order defined by BIOS: path,
 * is_readonly, display_in_ui, requires_physical_presence, sequence,
 * prerequisites_size, prerequisites[], security_level.
 *
 * Returns a negative errno on failure; any non-negative value means
 * success (it is the return of the last field read).
 */
int hp_get_common_data_from_buffer(u8 **buffer_ptr, u32 *buffer_size,
				   struct common_data *common_data)
{
	int ret = 0;
	int reqs;

	// PATH:
	ret = hp_get_string_from_buffer(buffer_ptr, buffer_size, common_data->path,
					sizeof(common_data->path));
	if (ret < 0)
		goto common_exit;

	// IS_READONLY:
	ret = hp_get_integer_from_buffer(buffer_ptr, buffer_size,
					 &common_data->is_readonly);
	if (ret < 0)
		goto common_exit;

	//DISPLAY_IN_UI:
	ret = hp_get_integer_from_buffer(buffer_ptr, buffer_size,
					 &common_data->display_in_ui);
	if (ret < 0)
		goto common_exit;

	// REQUIRES_PHYSICAL_PRESENCE:
	ret = hp_get_integer_from_buffer(buffer_ptr, buffer_size,
					 &common_data->requires_physical_presence);
	if (ret < 0)
		goto common_exit;

	// SEQUENCE:
	ret = hp_get_integer_from_buffer(buffer_ptr, buffer_size,
					 &common_data->sequence);
	if (ret < 0)
		goto common_exit;

	// PREREQUISITES_SIZE:
	ret = hp_get_integer_from_buffer(buffer_ptr, buffer_size,
					 &common_data->prerequisites_size);
	if (ret < 0)
		goto common_exit;

	if (common_data->prerequisites_size > MAX_PREREQUISITES_SIZE) {
		/* Report a message and limit prerequisite size to maximum value */
		pr_warn("Prerequisites size value exceeded the maximum number of elements supported or data may be malformed\n");
		common_data->prerequisites_size = MAX_PREREQUISITES_SIZE;
	}

	// PREREQUISITES:
	for (reqs = 0; reqs < common_data->prerequisites_size; reqs++) {
		ret = hp_get_string_from_buffer(buffer_ptr, buffer_size,
						common_data->prerequisites[reqs],
						sizeof(common_data->prerequisites[reqs]));
		if (ret < 0)
			break;
	}

	// SECURITY_LEVEL:
	ret = hp_get_integer_from_buffer(buffer_ptr, buffer_size,
					 &common_data->security_level);
common_exit:
	return ret;
}
/*
 * hp_enforce_single_line_input() - Reject multi-line sysfs input
 *
 * @buf:   Input bytes (not necessarily NUL-terminated within @count)
 * @count: Number of bytes in @buf
 *
 * A newline is only permitted as the very last character, in which case
 * it is replaced with a NUL terminator.  Returns zero when the input is
 * a single line, -EINVAL otherwise.
 */
int hp_enforce_single_line_input(char *buf, size_t count)
{
	char *newline = memchr(buf, '\n', count);

	if (!newline)
		return 0;

	if (newline != buf + count - 1)
		return -EINVAL;	/* embedded newline: reject */

	*newline = '\0';	/* strip trailing newline */
	return 0;
}
/* Set pending reboot value and generate KOBJ_CHANGE event */
void hp_set_reboot_and_signal_event(void)
{
	bioscfg_drv.pending_reboot = true;
	kobject_uevent(&bioscfg_drv.class_dev->kobj, KOBJ_CHANGE);
}
/**
 * hp_calculate_string_buffer() - determines size of string buffer for
 * use with BIOS communication
 *
 * @str: the string to calculate based upon
 *
 * Returns the number of bytes needed to encode @str as a u16 length
 * field followed by one UTF-16 character per input character.  BIOS
 * expects 4 bytes for an empty string.
 */
size_t hp_calculate_string_buffer(const char *str)
{
	size_t length = strlen(str);

	/* BIOS expects 4 bytes when an empty string is found */
	if (length == 0)
		return 4;

	/* u16 length field + one UTF16 char for each input char
	 * (reuse the cached length rather than calling strlen() twice)
	 */
	return sizeof(u16) + length * sizeof(u16);
}
/**
 * hp_wmi_error_and_message() - Map a BIOS/WMI status code to an errno
 *
 * @error_code: Status code returned by the BIOS WMI interface
 *
 * Logs a rate-limited warning for any non-zero code and returns the
 * corresponding negative errno (zero for SUCCESS).  Unrecognized codes
 * map to -EIO.
 */
int hp_wmi_error_and_message(int error_code)
{
	char *error_msg = NULL;
	int ret;

	switch (error_code) {
	case SUCCESS:
		error_msg = "Success";
		ret = 0;
		break;
	case CMD_FAILED:
		error_msg = "Command failed";
		ret = -EINVAL;
		break;
	case INVALID_SIGN:
		error_msg = "Invalid signature";
		ret = -EINVAL;
		break;
	case INVALID_CMD_VALUE:
		error_msg = "Invalid command value/Feature not supported";
		ret = -EOPNOTSUPP;
		break;
	case INVALID_CMD_TYPE:
		error_msg = "Invalid command type";
		ret = -EINVAL;
		break;
	case INVALID_DATA_SIZE:
		error_msg = "Invalid data size";
		ret = -EINVAL;
		break;
	case INVALID_CMD_PARAM:
		error_msg = "Invalid command parameter";
		ret = -EINVAL;
		break;
	case ENCRYP_CMD_REQUIRED:
		error_msg = "Secure/encrypted command required";
		ret = -EACCES;
		break;
	case NO_SECURE_SESSION:
		error_msg = "No secure session established";
		ret = -EACCES;
		break;
	case SECURE_SESSION_FOUND:
		error_msg = "Secure session already established";
		ret = -EACCES;
		break;
	case SECURE_SESSION_FAILED:
		error_msg = "Secure session failed";
		ret = -EIO;
		break;
	case AUTH_FAILED:
		error_msg = "Other permission/Authentication failed";
		ret = -EACCES;
		break;
	case INVALID_BIOS_AUTH:
		error_msg = "Invalid BIOS administrator password";
		ret = -EINVAL;
		break;
	case NONCE_DID_NOT_MATCH:
		error_msg = "Nonce did not match";
		ret = -EINVAL;
		break;
	case GENERIC_ERROR:
		error_msg = "Generic/Other error";
		ret = -EIO;
		break;
	case BIOS_ADMIN_POLICY_NOT_MET:
		error_msg = "BIOS Admin password does not meet password policy requirements";
		ret = -EINVAL;
		break;
	case BIOS_ADMIN_NOT_SET:
		error_msg = "BIOS Setup password is not set";
		ret = -EPERM;
		break;
	case P21_NO_PROVISIONED:
		error_msg = "P21 is not provisioned";
		ret = -EPERM;
		break;
	case P21_PROVISION_IN_PROGRESS:
		error_msg = "P21 is already provisioned or provisioning is in progress and a signing key has already been sent";
		ret = -EINPROGRESS;
		break;
	case P21_IN_USE:
		error_msg = "P21 in use (cannot deprovision)";
		ret = -EPERM;
		break;
	case HEP_NOT_ACTIVE:
		error_msg = "HEP not activated";
		ret = -EPERM;
		break;
	case HEP_ALREADY_SET:
		error_msg = "HEP Transport already set";
		ret = -EINVAL;
		break;
	case HEP_CHECK_STATE:
		error_msg = "Check the current HEP state";
		ret = -EINVAL;
		break;
	default:
		error_msg = "Generic/Other error";
		ret = -EIO;
		break;
	}

	if (error_code)
		pr_warn_ratelimited("Returned error 0x%x, \"%s\"\n", error_code, error_msg);
	return ret;
}
/* Reports whether a changed setting still requires a reboot to take effect */
static ssize_t pending_reboot_show(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   char *buf)
{
	return sysfs_emit(buf, "%d\n", bioscfg_drv.pending_reboot);
}
static struct kobj_attribute pending_reboot = __ATTR_RO(pending_reboot);
/*
 * create_attributes_level_sysfs_files() - Creates pending_reboot attributes
 * directly under the top-level "attributes" kset.
 */
static int create_attributes_level_sysfs_files(void)
{
	return sysfs_create_file(&bioscfg_drv.main_dir_kset->kobj,
				 &pending_reboot.attr);
}
/* kobject release: each attribute-name kobject is individually kzalloc'ed */
static void attr_name_release(struct kobject *kobj)
{
	kfree(kobj);
}
static const struct kobj_type attr_name_ktype = {
	.release = attr_name_release,
	.sysfs_ops = &kobj_sysfs_ops,
};
/**
 * hp_get_wmiobj_pointer() - Get Content of WMI block for particular instance
 *
 * @instance_id: WMI instance ID
 * @guid_string: WMI GUID (in str form)
 *
 * Fetches the content for WMI block (instance_id) under GUID (guid_string).
 * Returns NULL if the query fails.
 * Caller must kfree the return
 */
union acpi_object *hp_get_wmiobj_pointer(int instance_id, const char *guid_string)
{
	struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
	acpi_status status;

	status = wmi_query_block(guid_string, instance_id, &out);
	return ACPI_SUCCESS(status) ? (union acpi_object *)out.pointer : NULL;
}
/**
 * hp_get_instance_count() - Compute total number of instances under guid_string
 *
 * @guid_string: WMI GUID (in string form)
 *
 * Probes successive instance IDs until the query fails and returns how
 * many instances exist.
 */
int hp_get_instance_count(const char *guid_string)
{
	union acpi_object *obj;
	int count = 0;

	while ((obj = hp_get_wmiobj_pointer(count, guid_string)) != NULL) {
		kfree(obj);
		count++;
	}

	return count;
}
/**
 * hp_alloc_attributes_data() - Allocate attributes data for a particular type
 *
 * @attr_type: Attribute type to allocate
 *
 * Dispatches to the type-specific allocator; types without per-instance
 * storage are a no-op success.
 */
static int hp_alloc_attributes_data(int attr_type)
{
	if (attr_type == HPWMI_STRING_TYPE)
		return hp_alloc_string_data();
	if (attr_type == HPWMI_INTEGER_TYPE)
		return hp_alloc_integer_data();
	if (attr_type == HPWMI_ENUMERATION_TYPE)
		return hp_alloc_enumeration_data();
	if (attr_type == HPWMI_ORDERED_LIST_TYPE)
		return hp_alloc_ordered_list_data();
	if (attr_type == HPWMI_PASSWORD_TYPE)
		return hp_alloc_password_data();

	return 0;
}
int hp_convert_hexstr_to_str(const char *input, u32 input_len, char **str, int *len)
{
int ret = 0;
int new_len = 0;
char tmp[] = "0x00";
char *new_str = NULL;
long ch;
int i;
if (input_len <= 0 || !input || !str || !len)
return -EINVAL;
*len = 0;
*str = NULL;
new_str = kmalloc(input_len, GFP_KERNEL);
if (!new_str)
return -ENOMEM;
for (i = 0; i < input_len; i += 5) {
strncpy(tmp, input + i, strlen(tmp));
if (kstrtol(tmp, 16, &ch) == 0) {
// escape char
if (ch == '\\' ||
ch == '\r' ||
ch == '\n' || ch == '\t') {
if (ch == '\r')
ch = 'r';
else if (ch == '\n')
ch = 'n';
else if (ch == '\t')
ch = 't';
new_str[new_len++] = '\\';
}
new_str[new_len++] = ch;
if (ch == '\0')
break;
}
}
if (new_len) {
new_str[new_len] = '\0';
*str = krealloc(new_str, (new_len + 1) * sizeof(char),
GFP_KERNEL);
if (*str)
*len = new_len;
else
ret = -ENOMEM;
} else {
ret = -EFAULT;
}
if (ret)
kfree(new_str);
return ret;
}
/*
 * hp_encode_outsize_for_pvsz() - map output size to the corresponding
 * WMI method id
 *
 * @outsize: Expected output size in bytes (max 4096)
 *
 * Returns the method id for the smallest size bucket that fits
 * @outsize, or -EINVAL when @outsize exceeds 4096.  Zero or negative
 * sizes map to method 1.
 */
int hp_encode_outsize_for_pvsz(int outsize)
{
	/* upper bound of each size bucket, paired with its method id */
	static const int bucket_limit[] = { 1024, 128, 4, 0 };
	static const int method_id[] = { 5, 4, 3, 2 };
	int i;

	if (outsize > 4096)
		return -EINVAL;

	for (i = 0; i < 4; i++)
		if (outsize > bucket_limit[i])
			return method_id[i];

	return 1;
}
/*
* Update friendly display name for several attributes associated to
* 'Schedule Power-On'
*/
void hp_friendly_user_name_update(char *path, const char *attr_name,
char *attr_display, int attr_size)
{
if (strstr(path, SCHEDULE_POWER_ON))
snprintf(attr_display, attr_size, "%s - %s", SCHEDULE_POWER_ON, attr_name);
else
strscpy(attr_display, attr_name, attr_size);
}
/**
 * hp_update_attribute_permissions() - Update attributes permissions when
 * isReadOnly value is 1
 *
 * @is_readonly: bool value to indicate if it a readonly attribute.
 * @current_val: kobj_attribute corresponding to attribute.
 *
 */
void hp_update_attribute_permissions(bool is_readonly, struct kobj_attribute *current_val)
{
	umode_t mode = 0644;

	if (is_readonly)
		mode = 0444;

	current_val->attr.mode = mode;
}
/**
 * destroy_attribute_objs() - Free a kset of kobjects
 * @kset: The kset to destroy
 *
 * Frees kobjects created for each attribute_name under attribute type kset
 */
static void destroy_attribute_objs(struct kset *kset)
{
	struct kobject *pos, *next;

	list_for_each_entry_safe(pos, next, &kset->list, entry)
		kobject_put(pos);
}
/**
 * release_attributes_data() - Clean-up all sysfs directories and files created
 */
static void release_attributes_data(void)
{
	mutex_lock(&bioscfg_drv.mutex);

	/* Tear down every attribute type before removing the parent ksets */
	hp_exit_string_attributes();
	hp_exit_integer_attributes();
	hp_exit_enumeration_attributes();
	hp_exit_ordered_list_attributes();
	hp_exit_password_attributes();
	hp_exit_sure_start_attributes();
	hp_exit_secure_platform_attributes();

	if (bioscfg_drv.authentication_dir_kset) {
		destroy_attribute_objs(bioscfg_drv.authentication_dir_kset);
		kset_unregister(bioscfg_drv.authentication_dir_kset);
		bioscfg_drv.authentication_dir_kset = NULL;
	}
	if (bioscfg_drv.main_dir_kset) {
		sysfs_remove_file(&bioscfg_drv.main_dir_kset->kobj, &pending_reboot.attr);
		destroy_attribute_objs(bioscfg_drv.main_dir_kset);
		kset_unregister(bioscfg_drv.main_dir_kset);
		bioscfg_drv.main_dir_kset = NULL;
	}

	mutex_unlock(&bioscfg_drv.mutex);
}
/**
 * hp_add_other_attributes() - Initialize HP custom attributes not
 * reported by BIOS and required to support Secure Platform and Sure
 * Start.
 *
 * @attr_type: Custom HP attribute not reported by BIOS
 *
 * Initialize all 2 types of attributes: Platform and Sure Start
 * object. Populates each attribute types respective properties
 * under sysfs files.
 *
 * Returns zero(0) if successful. Otherwise, a negative value.
 */
static int hp_add_other_attributes(int attr_type)
{
	struct kobject *attr_name_kobj;
	int ret;
	char *attr_name;

	mutex_lock(&bioscfg_drv.mutex);

	attr_name_kobj = kzalloc(sizeof(*attr_name_kobj), GFP_KERNEL);
	if (!attr_name_kobj) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	/* Check if attribute type is supported */
	switch (attr_type) {
	case HPWMI_SECURE_PLATFORM_TYPE:
		attr_name_kobj->kset = bioscfg_drv.authentication_dir_kset;
		attr_name = SPM_STR;
		break;
	case HPWMI_SURE_START_TYPE:
		attr_name_kobj->kset = bioscfg_drv.main_dir_kset;
		attr_name = SURE_START_STR;
		break;
	default:
		pr_err("Error: Unknown attr_type: %d\n", attr_type);
		ret = -EINVAL;
		/* kobject not initialized yet: plain kfree, not kobject_put */
		kfree(attr_name_kobj);
		goto err_unlock;
	}

	ret = kobject_init_and_add(attr_name_kobj, &attr_name_ktype,
				   NULL, "%s", attr_name);
	if (ret) {
		pr_err("Error encountered [%d]\n", ret);
		goto err_put_kobj;
	}

	/* Populate attribute data */
	switch (attr_type) {
	case HPWMI_SECURE_PLATFORM_TYPE:
		ret = hp_populate_secure_platform_data(attr_name_kobj);
		break;
	case HPWMI_SURE_START_TYPE:
		ret = hp_populate_sure_start_data(attr_name_kobj);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	if (ret)
		goto err_put_kobj;

	mutex_unlock(&bioscfg_drv.mutex);
	return 0;

err_put_kobj:
	/* drop the reference taken by kobject_init_and_add() */
	kobject_put(attr_name_kobj);
err_unlock:
	mutex_unlock(&bioscfg_drv.mutex);
	return ret;
}
/*
 * hp_init_bios_package_attribute() - Create one attribute instance from
 * an ACPI package object
 *
 * @attr_type:    Attribute type being enumerated
 * @obj:          ACPI package object holding the instance data
 * @guid:         WMI GUID the instance came from
 * @min_elements: Minimum element count required for this type
 * @instance_id:  Driver-local instance id for this type
 *
 * Malformed or duplicate instances are skipped (return 0) so that
 * remaining instances can still be enumerated.  Returns a negative
 * errno only on allocation or kobject failures.
 */
static int hp_init_bios_package_attribute(enum hp_wmi_data_type attr_type,
					  union acpi_object *obj,
					  const char *guid, int min_elements,
					  int instance_id)
{
	struct kobject *attr_name_kobj, *duplicate;
	union acpi_object *elements;
	struct kset *temp_kset;
	char *str_value = NULL;
	int str_len;
	int ret = 0;

	/* Take action appropriate to each ACPI TYPE */
	if (obj->package.count < min_elements) {
		pr_err("ACPI-package does not have enough elements: %d < %d\n",
		       obj->package.count, min_elements);
		goto pack_attr_exit;
	}

	elements = obj->package.elements;

	/* sanity checking */
	if (elements[NAME].type != ACPI_TYPE_STRING) {
		pr_debug("incorrect element type\n");
		goto pack_attr_exit;
	}
	if (strlen(elements[NAME].string.pointer) == 0) {
		pr_debug("empty attribute found\n");
		goto pack_attr_exit;
	}

	if (attr_type == HPWMI_PASSWORD_TYPE)
		temp_kset = bioscfg_drv.authentication_dir_kset;
	else
		temp_kset = bioscfg_drv.main_dir_kset;

	/* convert attribute name to string */
	ret = hp_convert_hexstr_to_str(elements[NAME].string.pointer,
				       elements[NAME].string.length,
				       &str_value, &str_len);
	if (ret) {
		pr_debug("Failed to populate integer package data. Error [0%0x]\n",
			 ret);
		kfree(str_value);
		return ret;
	}

	/* All duplicate attributes found are ignored */
	duplicate = kset_find_obj(temp_kset, str_value);
	if (duplicate) {
		/* kset_find_obj() returns a counted reference; drop it */
		kobject_put(duplicate);
		pr_debug("Duplicate attribute name found - %s\n", str_value);
		goto pack_attr_exit;
	}

	/* build attribute */
	attr_name_kobj = kzalloc(sizeof(*attr_name_kobj), GFP_KERNEL);
	if (!attr_name_kobj) {
		ret = -ENOMEM;
		goto pack_attr_exit;
	}

	attr_name_kobj->kset = temp_kset;

	ret = kobject_init_and_add(attr_name_kobj, &attr_name_ktype,
				   NULL, "%s", str_value);
	if (ret) {
		kobject_put(attr_name_kobj);
		goto pack_attr_exit;
	}

	/* enumerate all of these attributes */
	switch (attr_type) {
	case HPWMI_STRING_TYPE:
		ret = hp_populate_string_package_data(elements,
						      instance_id,
						      attr_name_kobj);
		break;
	case HPWMI_INTEGER_TYPE:
		ret = hp_populate_integer_package_data(elements,
						       instance_id,
						       attr_name_kobj);
		break;
	case HPWMI_ENUMERATION_TYPE:
		ret = hp_populate_enumeration_package_data(elements,
							   instance_id,
							   attr_name_kobj);
		break;
	case HPWMI_ORDERED_LIST_TYPE:
		ret = hp_populate_ordered_list_package_data(elements,
							    instance_id,
							    attr_name_kobj);
		break;
	case HPWMI_PASSWORD_TYPE:
		ret = hp_populate_password_package_data(elements,
							instance_id,
							attr_name_kobj);
		break;
	default:
		pr_debug("Unknown attribute type found: 0x%x\n", attr_type);
		break;
	}

pack_attr_exit:
	kfree(str_value);
	return ret;
}
static int hp_init_bios_buffer_attribute(enum hp_wmi_data_type attr_type,
union acpi_object *obj,
const char *guid, int min_elements,
int instance_id)
{
struct kobject *attr_name_kobj;
struct kset *temp_kset;
char str[MAX_BUFF_SIZE];
char *temp_str = NULL;
char *str_value = NULL;
u8 *buffer_ptr = NULL;
int buffer_size;
int ret = 0;
buffer_size = obj->buffer.length;
buffer_ptr = obj->buffer.pointer;
ret = hp_get_string_from_buffer(&buffer_ptr,
&buffer_size, str, MAX_BUFF_SIZE);
if (ret < 0)
goto buff_attr_exit;
if (attr_type == HPWMI_PASSWORD_TYPE ||
attr_type == HPWMI_SECURE_PLATFORM_TYPE)
temp_kset = bioscfg_drv.authentication_dir_kset;
else
temp_kset = bioscfg_drv.main_dir_kset;
/* All duplicate attributes found are ignored */
if (kset_find_obj(temp_kset, str)) {
pr_debug("Duplicate attribute name found - %s\n", str);
goto buff_attr_exit;
}
/* build attribute */
attr_name_kobj = kzalloc(sizeof(*attr_name_kobj), GFP_KERNEL);
if (!attr_name_kobj) {
ret = -ENOMEM;
goto buff_attr_exit;
}
attr_name_kobj->kset = temp_kset;
temp_str = str;
if (attr_type == HPWMI_SECURE_PLATFORM_TYPE)
temp_str = "SPM";
ret = kobject_init_and_add(attr_name_kobj,
&attr_name_ktype, NULL, "%s", temp_str);
if (ret) {
kobject_put(attr_name_kobj);
goto buff_attr_exit;
}
/* enumerate all of these attributes */
switch (attr_type) {
case HPWMI_STRING_TYPE:
ret = hp_populate_string_buffer_data(buffer_ptr,
&buffer_size,
instance_id,
attr_name_kobj);
break;
case HPWMI_INTEGER_TYPE:
ret = hp_populate_integer_buffer_data(buffer_ptr,
&buffer_size,
instance_id,
attr_name_kobj);
break;
case HPWMI_ENUMERATION_TYPE:
ret = hp_populate_enumeration_buffer_data(buffer_ptr,
&buffer_size,
instance_id,
attr_name_kobj);
break;
case HPWMI_ORDERED_LIST_TYPE:
ret = hp_populate_ordered_list_buffer_data(buffer_ptr,
&buffer_size,
instance_id,
attr_name_kobj);
break;
case HPWMI_PASSWORD_TYPE:
ret = hp_populate_password_buffer_data(buffer_ptr,
&buffer_size,
instance_id,
attr_name_kobj);
break;
default:
pr_debug("Unknown attribute type found: 0x%x\n", attr_type);
break;
}
buff_attr_exit:
kfree(str_value);
return ret;
}
/**
 * hp_init_bios_attributes() - Initialize all attributes for a type
 * @attr_type: The attribute type to initialize
 * @guid: The WMI GUID associated with this type to initialize
 *
 * Initialize all 5 types of attributes: enumeration, integer,
 * string, password, ordered list object. Populates each attribute types
 * respective properties under sysfs files.
 *
 * Returns zero on success, -ENODEV when no instance exists, or another
 * negative errno on failure.
 */
static int hp_init_bios_attributes(enum hp_wmi_data_type attr_type, const char *guid)
{
	union acpi_object *obj = NULL;
	int min_elements;

	/* instance_id needs to be reset for each type GUID
	 * also, instance IDs are unique within GUID but not across
	 */
	int instance_id = 0;
	int cur_instance_id = instance_id;
	int ret = 0;

	ret = hp_alloc_attributes_data(attr_type);
	if (ret)
		return ret;

	switch (attr_type) {
	case HPWMI_STRING_TYPE:
		min_elements = STR_ELEM_CNT;
		break;
	case HPWMI_INTEGER_TYPE:
		min_elements = INT_ELEM_CNT;
		break;
	case HPWMI_ENUMERATION_TYPE:
		min_elements = ENUM_ELEM_CNT;
		break;
	case HPWMI_ORDERED_LIST_TYPE:
		min_elements = ORD_ELEM_CNT;
		break;
	case HPWMI_PASSWORD_TYPE:
		min_elements = PSWD_ELEM_CNT;
		break;
	default:
		pr_err("Error: Unknown attr_type: %d\n", attr_type);
		return -EINVAL;
	}

	/* need to use specific instance_id and guid combination to get right data */
	obj = hp_get_wmiobj_pointer(instance_id, guid);
	if (!obj)
		return -ENODEV;

	mutex_lock(&bioscfg_drv.mutex);
	while (obj) {
		/* Take action appropriate to each ACPI TYPE */
		if (obj->type == ACPI_TYPE_PACKAGE) {
			ret = hp_init_bios_package_attribute(attr_type, obj,
							     guid, min_elements,
							     cur_instance_id);

		} else if (obj->type == ACPI_TYPE_BUFFER) {
			ret = hp_init_bios_buffer_attribute(attr_type, obj,
							    guid, min_elements,
							    cur_instance_id);

		} else {
			pr_err("Expected ACPI-package or buffer type, got: %d\n",
			       obj->type);
			ret = -EIO;
			goto err_attr_init;
		}

		/*
		 * Failure reported in one attribute must not
		 * stop process of the remaining attribute values.
		 */
		if (ret >= 0)
			cur_instance_id++;

		kfree(obj);
		instance_id++;
		obj = hp_get_wmiobj_pointer(instance_id, guid);
	}

err_attr_init:
	mutex_unlock(&bioscfg_drv.mutex);
	/* frees the object that triggered the early exit; NULL-safe */
	kfree(obj);
	return ret;
}
/*
 * hp_init() - Module entry: create the firmware-attributes class device
 * and enumerate every BIOS attribute type exposed over WMI.  Failure to
 * populate an individual attribute type is logged but not fatal.
 */
static int __init hp_init(void)
{
	int ret;
	int hp_bios_capable = wmi_has_guid(HP_WMI_BIOS_GUID);
	int set_bios_settings = wmi_has_guid(HP_WMI_SET_BIOS_SETTING_GUID);

	if (!hp_bios_capable) {
		pr_err("Unable to run on non-HP system\n");
		return -ENODEV;
	}

	if (!set_bios_settings) {
		pr_err("Unable to set BIOS settings on HP systems\n");
		return -ENODEV;
	}

	ret = hp_init_attr_set_interface();
	if (ret)
		return ret;

	ret = fw_attributes_class_get(&fw_attr_class);
	if (ret)
		goto err_unregister_class;

	bioscfg_drv.class_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0),
					      NULL, "%s", DRIVER_NAME);
	if (IS_ERR(bioscfg_drv.class_dev)) {
		ret = PTR_ERR(bioscfg_drv.class_dev);
		goto err_unregister_class;
	}

	bioscfg_drv.main_dir_kset = kset_create_and_add("attributes", NULL,
							&bioscfg_drv.class_dev->kobj);
	if (!bioscfg_drv.main_dir_kset) {
		ret = -ENOMEM;
		pr_debug("Failed to create and add attributes\n");
		goto err_destroy_classdev;
	}

	bioscfg_drv.authentication_dir_kset = kset_create_and_add("authentication", NULL,
								  &bioscfg_drv.class_dev->kobj);
	if (!bioscfg_drv.authentication_dir_kset) {
		ret = -ENOMEM;
		pr_debug("Failed to create and add authentication\n");
		goto err_release_attributes_data;
	}

	/*
	 * sysfs level attributes.
	 * - pending_reboot
	 */
	ret = create_attributes_level_sysfs_files();
	if (ret)
		pr_debug("Failed to create sysfs level attributes\n");

	ret = hp_init_bios_attributes(HPWMI_STRING_TYPE, HP_WMI_BIOS_STRING_GUID);
	if (ret)
		pr_debug("Failed to populate string type attributes\n");

	ret = hp_init_bios_attributes(HPWMI_INTEGER_TYPE, HP_WMI_BIOS_INTEGER_GUID);
	if (ret)
		pr_debug("Failed to populate integer type attributes\n");

	ret = hp_init_bios_attributes(HPWMI_ENUMERATION_TYPE, HP_WMI_BIOS_ENUMERATION_GUID);
	if (ret)
		pr_debug("Failed to populate enumeration type attributes\n");

	ret = hp_init_bios_attributes(HPWMI_ORDERED_LIST_TYPE, HP_WMI_BIOS_ORDERED_LIST_GUID);
	if (ret)
		pr_debug("Failed to populate ordered list object type attributes\n");

	ret = hp_init_bios_attributes(HPWMI_PASSWORD_TYPE, HP_WMI_BIOS_PASSWORD_GUID);
	if (ret)
		pr_debug("Failed to populate password object type attributes\n");

	bioscfg_drv.spm_data.attr_name_kobj = NULL;
	ret = hp_add_other_attributes(HPWMI_SECURE_PLATFORM_TYPE);
	if (ret)
		pr_debug("Failed to populate secure platform object type attribute\n");

	bioscfg_drv.sure_start_attr_kobj = NULL;
	ret = hp_add_other_attributes(HPWMI_SURE_START_TYPE);
	if (ret)
		pr_debug("Failed to populate sure start object type attribute\n");

	return 0;

err_release_attributes_data:
	release_attributes_data();

err_destroy_classdev:
	device_destroy(fw_attr_class, MKDEV(0, 0));

err_unregister_class:
	/*
	 * NOTE(review): this label is also reached when
	 * fw_attributes_class_get() itself failed - confirm that
	 * fw_attributes_class_put() is balanced in that case.
	 */
	fw_attributes_class_put();
	hp_exit_attr_set_interface();

	return ret;
}
/* Module teardown: mirror hp_init() in reverse order */
static void __exit hp_exit(void)
{
	release_attributes_data();
	device_destroy(fw_attr_class, MKDEV(0, 0));
	fw_attributes_class_put();
	hp_exit_attr_set_interface();
}
module_init(hp_init);
module_exit(hp_exit);
| linux-master | drivers/platform/x86/hp/hp-bioscfg/bioscfg.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Functions corresponding to string type attributes under
* HP_WMI_BIOS_STRING_GUID for use with hp-bioscfg driver.
*
* Copyright (c) 2022 HP Development Company, L.P.
*/
#include "bioscfg.h"
#define WMI_STRING_TYPE "HPBIOS_BIOSString"
GET_INSTANCE_ID(string);
/* sysfs show handler for a string attribute's current_value */
static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	int instance_id = get_string_instance_id(kobj);

	if (instance_id < 0)
		return -EIO;

	return sysfs_emit(buf, "%s\n",
			  bioscfg_drv.string_data[instance_id].current_value);
}
/**
* validate_string_input() -
* Validate input of current_value against min and max lengths
*
* @instance_id: The instance on which input is validated
* @buf: Input value
*/
/*
 * validate_string_input - sanity-check a proposed new current_value.
 *
 * Rejects writes to read-only attributes (-EIO) and values whose length
 * falls outside the BIOS-reported [min_length, max_length] window
 * (-ERANGE). Returns 0 when the value is acceptable.
 */
static int validate_string_input(int instance_id, const char *buf)
{
	struct string_data *sdata = &bioscfg_drv.string_data[instance_id];
	int len = strlen(buf);

	/* BIOS treats it as a read only attribute */
	if (sdata->common.is_readonly)
		return -EIO;

	/* Length must stay within the bounds BIOS advertised */
	if (len < sdata->min_length || len > sdata->max_length)
		return -ERANGE;

	return 0;
}
/*
 * update_string_value - refresh the cached current_value for an instance
 * after the new value has been accepted (the store macro handles the
 * actual write to BIOS).
 */
static void update_string_value(int instance_id, char *attr_value)
{
	struct string_data *string_data = &bioscfg_drv.string_data[instance_id];

	/* Write settings to BIOS */
	strscpy(string_data->current_value, attr_value, sizeof(string_data->current_value));
}
/*
 * ATTRIBUTE_S_COMMON_PROPERTY_SHOW(display_name_language_code, string);
 * static struct kobj_attribute string_display_langcode =
 *	__ATTR_RO(display_name_language_code);
 */
/* Per-instance sysfs files generated via the shared attribute macros. */
ATTRIBUTE_S_COMMON_PROPERTY_SHOW(display_name, string);
static struct kobj_attribute string_display_name =
	__ATTR_RO(display_name);

ATTRIBUTE_PROPERTY_STORE(current_value, string);
static struct kobj_attribute string_current_val =
	__ATTR_RW_MODE(current_value, 0644);

ATTRIBUTE_N_PROPERTY_SHOW(min_length, string);
static struct kobj_attribute string_min_length =
	__ATTR_RO(min_length);

ATTRIBUTE_N_PROPERTY_SHOW(max_length, string);
static struct kobj_attribute string_max_length =
	__ATTR_RO(max_length);

/* Constant "type" file identifying this attribute class to userspace. */
static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr,
			 char *buf)
{
	return sysfs_emit(buf, "string\n");
}

static struct kobj_attribute string_type =
	__ATTR_RO(type);

/* All files created under each string attribute's kobject. */
static struct attribute *string_attrs[] = {
	&common_display_langcode.attr,
	&string_display_name.attr,
	&string_current_val.attr,
	&string_min_length.attr,
	&string_max_length.attr,
	&string_type.attr,
	NULL
};

static const struct attribute_group string_attr_group = {
	.attrs = string_attrs,
};
/*
 * hp_alloc_string_data - allocate zeroed per-instance storage for every
 * string attribute reported under HP_WMI_BIOS_STRING_GUID.
 *
 * Returns 0 on success or -ENOMEM; on failure the instance count is
 * reset to zero so later iteration stays safe.
 */
int hp_alloc_string_data(void)
{
	bioscfg_drv.string_instances_count = hp_get_instance_count(HP_WMI_BIOS_STRING_GUID);
	bioscfg_drv.string_data = kcalloc(bioscfg_drv.string_instances_count,
					  sizeof(*bioscfg_drv.string_data), GFP_KERNEL);
	if (!bioscfg_drv.string_data) {
		bioscfg_drv.string_instances_count = 0;
		return -ENOMEM;
	}
	return 0;
}
/*
 * Expected Values types associated with each element.
 * Indexed by the parser's element-position enum (NAME..STR_MAX_LENGTH);
 * used to verify that each ACPI package member has the anticipated type.
 */
static const acpi_object_type expected_string_types[] = {
	[NAME] = ACPI_TYPE_STRING,
	[VALUE] = ACPI_TYPE_STRING,
	[PATH] = ACPI_TYPE_STRING,
	[IS_READONLY] = ACPI_TYPE_INTEGER,
	[DISPLAY_IN_UI] = ACPI_TYPE_INTEGER,
	[REQUIRES_PHYSICAL_PRESENCE] = ACPI_TYPE_INTEGER,
	[SEQUENCE] = ACPI_TYPE_INTEGER,
	[PREREQUISITES_SIZE] = ACPI_TYPE_INTEGER,
	[PREREQUISITES] = ACPI_TYPE_STRING,
	[SECURITY_LEVEL] = ACPI_TYPE_INTEGER,
	[STR_MIN_LENGTH] = ACPI_TYPE_INTEGER,
	[STR_MAX_LENGTH] = ACPI_TYPE_INTEGER,
};
/*
 * hp_populate_string_elements_from_package - decode one string-attribute
 * instance from an ACPI package into bioscfg_drv.string_data[instance_id].
 *
 * @string_obj:       array of ACPI objects forming the package
 * @string_obj_count: number of objects in @string_obj
 * @instance_id:      destination instance slot
 *
 * Walks the package keeping two cursors: @elem (actual object index) and
 * @eloc (expected logical element). They diverge when BIOS omits the
 * optional PREREQUISITES block (size zero). Only the first STR_ELEM_CNT
 * logical elements are examined.
 *
 * Returns 0 on success (elements with conversion errors are skipped),
 * -EINVAL on a NULL/truncated package, -EIO on a type mismatch.
 */
static int hp_populate_string_elements_from_package(union acpi_object *string_obj,
						    int string_obj_count,
						    int instance_id)
{
	char *str_value = NULL;
	int value_len;
	int ret = 0;
	u32 int_value = 0;
	int elem;
	int reqs;
	int eloc;
	int size;
	struct string_data *string_data = &bioscfg_drv.string_data[instance_id];

	if (!string_obj)
		return -EINVAL;

	for (elem = 1, eloc = 1; elem < string_obj_count; elem++, eloc++) {
		/* ONLY look at the first STRING_ELEM_CNT elements */
		if (eloc == STR_ELEM_CNT)
			goto exit_string_package;

		switch (string_obj[elem].type) {
		case ACPI_TYPE_STRING:
			/* PREREQUISITES strings are converted in their own loop below */
			if (elem != PREREQUISITES) {
				ret = hp_convert_hexstr_to_str(string_obj[elem].string.pointer,
							       string_obj[elem].string.length,
							       &str_value, &value_len);
				if (ret)
					continue;
			}
			break;
		case ACPI_TYPE_INTEGER:
			int_value = (u32)string_obj[elem].integer.value;
			break;
		default:
			pr_warn("Unsupported object type [%d]\n", string_obj[elem].type);
			continue;
		}

		/* Check that both expected and read object type match */
		if (expected_string_types[eloc] != string_obj[elem].type) {
			pr_err("Error expected type %d for elem %d, but got type %d instead\n",
			       expected_string_types[eloc], elem, string_obj[elem].type);
			kfree(str_value);
			return -EIO;
		}

		/* Assign appropriate element value to corresponding field*/
		switch (eloc) {
		case VALUE:
			strscpy(string_data->current_value,
				str_value, sizeof(string_data->current_value));
			break;
		case PATH:
			strscpy(string_data->common.path, str_value,
				sizeof(string_data->common.path));
			break;
		case IS_READONLY:
			string_data->common.is_readonly = int_value;
			break;
		case DISPLAY_IN_UI:
			string_data->common.display_in_ui = int_value;
			break;
		case REQUIRES_PHYSICAL_PRESENCE:
			string_data->common.requires_physical_presence = int_value;
			break;
		case SEQUENCE:
			string_data->common.sequence = int_value;
			break;
		case PREREQUISITES_SIZE:
			if (int_value > MAX_PREREQUISITES_SIZE) {
				pr_warn("Prerequisites size value exceeded the maximum number of elements supported or data may be malformed\n");
				int_value = MAX_PREREQUISITES_SIZE;
			}
			string_data->common.prerequisites_size = int_value;

			/*
			 * This step is needed to keep the expected
			 * element list pointing to the right obj[elem].type
			 * when the size is zero. PREREQUISITES
			 * object is omitted by BIOS when the size is
			 * zero.
			 */
			if (string_data->common.prerequisites_size == 0)
				eloc++;
			break;
		case PREREQUISITES:
			size = min_t(u32, string_data->common.prerequisites_size,
				     MAX_PREREQUISITES_SIZE);
			for (reqs = 0; reqs < size; reqs++) {
				/*
				 * Bounds-check the object actually read below
				 * (elem + reqs), not just elem, so malformed
				 * BIOS data cannot cause an out-of-bounds read.
				 */
				if (elem + reqs >= string_obj_count) {
					pr_err("Error elem-objects package is too small\n");
					return -EINVAL;
				}

				ret = hp_convert_hexstr_to_str(string_obj[elem + reqs].string.pointer,
							       string_obj[elem + reqs].string.length,
							       &str_value, &value_len);
				if (ret)
					continue;

				strscpy(string_data->common.prerequisites[reqs],
					str_value,
					sizeof(string_data->common.prerequisites[reqs]));

				kfree(str_value);
				str_value = NULL;
			}
			break;
		case SECURITY_LEVEL:
			string_data->common.security_level = int_value;
			break;
		case STR_MIN_LENGTH:
			string_data->min_length = int_value;
			break;
		case STR_MAX_LENGTH:
			string_data->max_length = int_value;
			break;
		default:
			pr_warn("Invalid element: %d found in String attribute or data may be malformed\n", elem);
			break;
		}

		kfree(str_value);
		str_value = NULL;
	}
exit_string_package:
	kfree(str_value);
	return 0;
}
/**
* hp_populate_string_package_data() -
* Populate all properties of an instance under string attribute
*
* @string_obj: ACPI object with string data
* @instance_id: The instance to enumerate
* @attr_name_kobj: The parent kernel object
*/
int hp_populate_string_package_data(union acpi_object *string_obj,
				    int instance_id,
				    struct kobject *attr_name_kobj)
{
	struct string_data *string_data = &bioscfg_drv.string_data[instance_id];

	string_data->attr_name_kobj = attr_name_kobj;

	/*
	 * NOTE(review): the return value of the package parser is ignored;
	 * on a parse failure the sysfs group below is still created over
	 * possibly incomplete data — confirm this is intentional.
	 */
	hp_populate_string_elements_from_package(string_obj,
						 string_obj->package.count,
						 instance_id);

	/* Make current_value read-only in sysfs when BIOS says so */
	hp_update_attribute_permissions(string_data->common.is_readonly,
					&string_current_val);
	hp_friendly_user_name_update(string_data->common.path,
				     attr_name_kobj->name,
				     string_data->common.display_name,
				     sizeof(string_data->common.display_name));

	return sysfs_create_group(attr_name_kobj, &string_attr_group);
}
/*
 * hp_populate_string_elements_from_buffer - decode one string-attribute
 * instance from a raw BIOS byte buffer; the read order (VALUE, COMMON,
 * STR_MIN_LENGTH, STR_MAX_LENGTH) is dictated by BIOS.
 *
 * Returns 0 on success or the first negative error from a buffer read.
 */
static int hp_populate_string_elements_from_buffer(u8 *buffer_ptr, u32 *buffer_size,
						   int instance_id)
{
	int ret = 0;
	struct string_data *string_data = &bioscfg_drv.string_data[instance_id];

	/*
	 * Only data relevant to this driver and its functionality is
	 * read. BIOS defines the order in which each * element is
	 * read. Element 0 data is not relevant to this
	 * driver hence it is ignored. For clarity, all element names
	 * (DISPLAY_IN_UI) which defines the order in which is read
	 * and the name matches the variable where the data is stored.
	 *
	 * In earlier implementation, reported errors were ignored
	 * causing the data to remain uninitialized. It is not
	 * possible to determine if data read from BIOS is valid or
	 * not. It is for this reason functions may return a error
	 * without validating the data itself.
	 */

	// VALUE:
	ret = hp_get_string_from_buffer(&buffer_ptr, buffer_size, string_data->current_value,
					sizeof(string_data->current_value));
	if (ret < 0)
		goto buffer_exit;

	// COMMON:
	ret = hp_get_common_data_from_buffer(&buffer_ptr, buffer_size, &string_data->common);
	if (ret < 0)
		goto buffer_exit;

	// STR_MIN_LENGTH:
	ret = hp_get_integer_from_buffer(&buffer_ptr, buffer_size,
					 &string_data->min_length);
	if (ret < 0)
		goto buffer_exit;

	// STR_MAX_LENGTH:
	ret = hp_get_integer_from_buffer(&buffer_ptr, buffer_size,
					 &string_data->max_length);

buffer_exit:
	return ret;
}
/**
* hp_populate_string_buffer_data() -
* Populate all properties of an instance under string attribute
*
* @buffer_ptr: Buffer pointer
* @buffer_size: Buffer size
* @instance_id: The instance to enumerate
* @attr_name_kobj: The parent kernel object
*/
int hp_populate_string_buffer_data(u8 *buffer_ptr, u32 *buffer_size,
				   int instance_id,
				   struct kobject *attr_name_kobj)
{
	struct string_data *string_data = &bioscfg_drv.string_data[instance_id];
	int ret = 0;

	string_data->attr_name_kobj = attr_name_kobj;

	/* Unlike the package path, buffer parse errors abort registration */
	ret = hp_populate_string_elements_from_buffer(buffer_ptr, buffer_size,
						      instance_id);
	if (ret < 0)
		return ret;

	/* Make current_value read-only in sysfs when BIOS says so */
	hp_update_attribute_permissions(string_data->common.is_readonly,
					&string_current_val);
	hp_friendly_user_name_update(string_data->common.path,
				     attr_name_kobj->name,
				     string_data->common.display_name,
				     sizeof(string_data->common.display_name));

	return sysfs_create_group(attr_name_kobj, &string_attr_group);
}
/**
* hp_exit_string_attributes() - Clear all attribute data
*
* Clears all data allocated for this group of attributes
*/
/*
 * hp_exit_string_attributes - remove every registered string-attribute
 * sysfs group and free the per-instance storage.
 */
void hp_exit_string_attributes(void)
{
	int i;

	for (i = 0; i < bioscfg_drv.string_instances_count; i++) {
		struct kobject *kobj = bioscfg_drv.string_data[i].attr_name_kobj;

		/* Instances that failed registration have no kobject */
		if (kobj)
			sysfs_remove_group(kobj, &string_attr_group);
	}

	bioscfg_drv.string_instances_count = 0;
	kfree(bioscfg_drv.string_data);
	bioscfg_drv.string_data = NULL;
}
| linux-master | drivers/platform/x86/hp/hp-bioscfg/string-attributes.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Functions corresponding to methods under BIOS interface GUID
* for use with hp-bioscfg driver.
*
* Copyright (c) 2022 Hewlett-Packard Inc.
*/
#include <linux/wmi.h>
#include "bioscfg.h"
/*
* struct bios_args buffer is dynamically allocated. New WMI command types
* were introduced that exceeds 128-byte data size. Changes to handle
* the data size allocation scheme were kept in hp_wmi_perform_query function.
*/
struct bios_args {
	u32 signature;		/* 'SECU' magic expected by BIOS */
	u32 command;		/* enum hp_wmi_command */
	u32 commandtype;	/* enum hp_wmi_commandtype */
	u32 datasize;		/* number of valid bytes in data[] */
	u8 data[];		/* command payload (flexible array) */
};
/**
* hp_set_attribute
*
* @a_name: The attribute name
* @a_value: The attribute value
*
* Sets an attribute to new value
*
* Returns zero on success
* -ENODEV if device is not found
* -EINVAL if the instance of 'Setup Admin' password is not found.
* -ENOMEM unable to allocate memory
*/
int hp_set_attribute(const char *a_name, const char *a_value)
{
	int security_area_size;
	int a_name_size, a_value_size;
	u16 *buffer = NULL;
	u16 *start;
	int buffer_size, instance, ret;
	char *auth_token_choice;

	/* Serialize against other BIOS attribute operations */
	mutex_lock(&bioscfg_drv.mutex);

	instance = hp_get_password_instance_for_type(SETUP_PASSWD);
	if (instance < 0) {
		ret = -EINVAL;
		goto out_set_attribute;
	}

	/* Select which auth token to use; password or [auth token] */
	if (bioscfg_drv.spm_data.auth_token)
		auth_token_choice = bioscfg_drv.spm_data.auth_token;
	else
		auth_token_choice = bioscfg_drv.password_data[instance].current_password;

	/* Buffer layout: UTF-16 name, UTF-16 value, then security area */
	a_name_size = hp_calculate_string_buffer(a_name);
	a_value_size = hp_calculate_string_buffer(a_value);
	security_area_size = hp_calculate_security_buffer(auth_token_choice);
	buffer_size = a_name_size + a_value_size + security_area_size;

	/* NOTE(review): +1 byte of slack on a u16 buffer — presumably guard
	 * padding; confirm it is required. */
	buffer = kmalloc(buffer_size + 1, GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		goto out_set_attribute;
	}

	/* build variables to set */
	start = buffer;
	start = hp_ascii_to_utf16_unicode(start, a_name);
	if (!start) {
		ret = -EINVAL;
		goto out_set_attribute;
	}

	start = hp_ascii_to_utf16_unicode(start, a_value);
	if (!start) {
		ret = -EINVAL;
		goto out_set_attribute;
	}

	ret = hp_populate_security_buffer(start, auth_token_choice);
	if (ret < 0)
		goto out_set_attribute;

	ret = hp_wmi_set_bios_setting(buffer, buffer_size);

out_set_attribute:
	kfree(buffer);
	mutex_unlock(&bioscfg_drv.mutex);
	return ret;
}
/**
* hp_wmi_perform_query
*
* @query: The commandtype (enum hp_wmi_commandtype)
* @command: The command (enum hp_wmi_command)
* @buffer: Buffer used as input and/or output
* @insize: Size of input buffer
* @outsize: Size of output buffer
*
* returns zero on success
* an HP WMI query specific error code (which is positive)
* -EINVAL if the query was not successful at all
* -EINVAL if the output buffer size exceeds buffersize
*
* Note: The buffersize must at least be the maximum of the input and output
* size. E.g. Battery info query is defined to have 1 byte input
* and 128 byte output. The caller would do:
* buffer = kzalloc(128, GFP_KERNEL);
* ret = hp_wmi_perform_query(HPWMI_BATTERY_QUERY, HPWMI_READ,
* buffer, 1, 128)
*/
int hp_wmi_perform_query(int query, enum hp_wmi_command command, void *buffer,
			 u32 insize, u32 outsize)
{
	struct acpi_buffer input, output = { ACPI_ALLOCATE_BUFFER, NULL };
	struct bios_return *bios_return;
	union acpi_object *obj = NULL;
	struct bios_args *args = NULL;
	int mid, actual_outsize, ret;
	size_t bios_args_size;

	/* WMI method id encodes the expected output size */
	mid = hp_encode_outsize_for_pvsz(outsize);
	if (WARN_ON(mid < 0))
		return mid;

	bios_args_size = struct_size(args, data, insize);
	args = kmalloc(bios_args_size, GFP_KERNEL);
	if (!args)
		return -ENOMEM;

	input.length = bios_args_size;
	input.pointer = args;

	/* BIOS expects 'SECU' in hex as the signature value*/
	args->signature = 0x55434553;
	args->command = command;
	args->commandtype = query;
	args->datasize = insize;
	memcpy(args->data, buffer, flex_array_size(args, data, insize));

	ret = wmi_evaluate_method(HP_WMI_BIOS_GUID, 0, mid, &input, &output);
	if (ret)
		goto out_free;

	obj = output.pointer;
	if (!obj) {
		ret = -EINVAL;
		goto out_free;
	}

	/* Returned buffer must at least hold the bios_return header */
	if (obj->type != ACPI_TYPE_BUFFER ||
	    obj->buffer.length < sizeof(*bios_return)) {
		pr_warn("query 0x%x returned wrong type or too small buffer\n", query);
		ret = -EINVAL;
		goto out_free;
	}

	bios_return = (struct bios_return *)obj->buffer.pointer;
	ret = bios_return->return_code;
	if (ret) {
		if (ret != INVALID_CMD_VALUE && ret != INVALID_CMD_TYPE)
			pr_warn("query 0x%x returned error 0x%x\n", query, ret);
		goto out_free;
	}

	/* Ignore output data of zero size */
	if (!outsize)
		goto out_free;

	/* Copy payload after the header; zero-pad up to caller's outsize */
	actual_outsize = min_t(u32, outsize, obj->buffer.length - sizeof(*bios_return));
	memcpy_and_pad(buffer, outsize, obj->buffer.pointer + sizeof(*bios_return),
		       actual_outsize, 0);

out_free:
	/* Maps the firmware/ACPI status to an errno on every exit path,
	 * including success (presumably 0 maps to 0 — see helper). */
	ret = hp_wmi_error_and_message(ret);
	kfree(obj);
	kfree(args);
	return ret;
}
/*
 * utf16_empty_string - write the BIOS encoding of an empty string
 * ("02 00 00 00": a length word of 2 followed by a NUL code unit)
 * and return the position just past it.
 */
static void *utf16_empty_string(u16 *p)
{
	p[0] = 2;
	p[1] = 0;
	return p + 2;
}
/**
* hp_ascii_to_utf16_unicode - Convert ascii string to UTF-16 unicode
*
* BIOS supports UTF-16 characters that are 2 bytes long. No variable
* multi-byte language supported.
*
* @p: Unicode buffer address
* @str: string to convert to unicode
*
* Returns a void pointer to the buffer string
*/
void *hp_ascii_to_utf16_unicode(u16 *p, const u8 *str)
{
	int len = strlen(str);
	int ret;

	/*
	 * Add null character when reading an empty string
	 * "02 00 00 00"
	 */
	if (len == 0)
		return utf16_empty_string(p);

	/* Move pointer len * 2 number of bytes */
	/* Leading length word holds the byte count of the UTF-16 data */
	*p++ = len * 2;
	ret = utf8s_to_utf16s(str, strlen(str), UTF16_HOST_ENDIAN, p, len);
	if (ret < 0) {
		dev_err(bioscfg_drv.class_dev, "UTF16 conversion failed\n");
		return NULL;
	}

	/* NOTE(review): this size check runs after the data was already
	 * written, and the cursor below advances by len rather than the
	 * converted count ret — confirm both are intended. */
	if (ret * sizeof(u16) > U16_MAX) {
		dev_err(bioscfg_drv.class_dev, "Error string too long\n");
		return NULL;
	}

	p += len;
	return p;
}
/**
* hp_wmi_set_bios_setting - Set setting's value in BIOS
*
* @input_buffer: Input buffer address
* @input_size: Input buffer size
*
* Returns: Count of unicode characters written to BIOS if successful, otherwise
* -ENOMEM unable to allocate memory
* -EINVAL buffer not allocated or too small
*/
int hp_wmi_set_bios_setting(u16 *input_buffer, u32 input_size)
{
	union acpi_object *obj;
	struct acpi_buffer input = {input_size, input_buffer};
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	int ret;

	/* NOTE(review): the wmi_evaluate_method() status is overwritten
	 * below without being checked; an evaluation failure surfaces only
	 * as the generic -EINVAL from the NULL-pointer test — confirm. */
	ret = wmi_evaluate_method(HP_WMI_SET_BIOS_SETTING_GUID, 0, 1, &input, &output);

	obj = output.pointer;
	if (!obj)
		return -EINVAL;

	if (obj->type != ACPI_TYPE_INTEGER) {
		ret = -EINVAL;
		goto out_free;
	}

	/* Integer result is the BIOS status code; map it to an errno */
	ret = obj->integer.value;
	if (ret) {
		ret = hp_wmi_error_and_message(ret);
		goto out_free;
	}

out_free:
	kfree(obj);
	return ret;
}
/*
 * Probe for the WMI set-interface device. The lock/unlock pair appears
 * to act purely as a barrier against concurrent attribute operations;
 * no per-device state is set up here.
 */
static int hp_attr_set_interface_probe(struct wmi_device *wdev, const void *context)
{
	mutex_lock(&bioscfg_drv.mutex);
	mutex_unlock(&bioscfg_drv.mutex);
	return 0;
}
/*
 * Remove callback; like probe, the lock/unlock pair only waits out any
 * in-flight attribute operation — there is no state to tear down.
 */
static void hp_attr_set_interface_remove(struct wmi_device *wdev)
{
	mutex_lock(&bioscfg_drv.mutex);
	mutex_unlock(&bioscfg_drv.mutex);
}
/* WMI GUID this driver binds to (the BIOS settings interface). */
static const struct wmi_device_id hp_attr_set_interface_id_table[] = {
	{ .guid_string = HP_WMI_BIOS_GUID},
	{ }
};

static struct wmi_driver hp_attr_set_interface_driver = {
	.driver = {
		.name = DRIVER_NAME,
	},
	.probe = hp_attr_set_interface_probe,
	.remove = hp_attr_set_interface_remove,
	.id_table = hp_attr_set_interface_id_table,
};

/* Register the WMI set-interface driver; called from module init. */
int hp_init_attr_set_interface(void)
{
	return wmi_driver_register(&hp_attr_set_interface_driver);
}

/* Unregister the WMI set-interface driver; called from module exit. */
void hp_exit_attr_set_interface(void)
{
	wmi_driver_unregister(&hp_attr_set_interface_driver);
}

MODULE_DEVICE_TABLE(wmi, hp_attr_set_interface_id_table);
| linux-master | drivers/platform/x86/hp/hp-bioscfg/biosattr-interface.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Functions corresponding to enumeration type attributes under
* BIOS Enumeration GUID for use with hp-bioscfg driver.
*
* Copyright (c) 2022 HP Development Company, L.P.
*/
#include "bioscfg.h"
GET_INSTANCE_ID(enumeration);
/*
 * current_value_show - sysfs read handler for an enumeration attribute's
 * "current_value" file; emits the cached value for this instance.
 */
static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	int instance_id = get_enumeration_instance_id(kobj);

	/* kobject did not map to a known enumeration-attribute instance */
	if (instance_id < 0)
		return -EIO;

	return sysfs_emit(buf, "%s\n",
			  bioscfg_drv.enumeration_data[instance_id].current_value);
}
/**
* validate_enumeration_input() -
* Validate input of current_value against possible values
*
* @instance_id: The instance on which input is validated
* @buf: Input value
*/
/*
 * validate_enumeration_input - check a proposed new current_value
 * against the BIOS-reported list of possible values.
 *
 * Returns 0 when the value is one of the possible values, -EIO for a
 * read-only attribute, -EINVAL when the value is not in the list.
 */
static int validate_enumeration_input(int instance_id, const char *buf)
{
	struct enumeration_data *edata = &bioscfg_drv.enumeration_data[instance_id];
	int i;

	/* Is it a read only attribute */
	if (edata->common.is_readonly)
		return -EIO;

	/* Accept only an exact match with one of the possible values */
	for (i = 0; i < edata->possible_values_size; i++)
		if (!strcmp(edata->possible_values[i], buf))
			return 0;

	return -EINVAL;
}
/*
 * update_enumeration_value - refresh the cached current_value after the
 * new value has been accepted (the store macro performs the BIOS write).
 */
static void update_enumeration_value(int instance_id, char *attr_value)
{
	struct enumeration_data *enum_data = &bioscfg_drv.enumeration_data[instance_id];

	strscpy(enum_data->current_value,
		attr_value,
		sizeof(enum_data->current_value));
}
/* Per-instance sysfs files generated via the shared attribute macros. */
ATTRIBUTE_S_COMMON_PROPERTY_SHOW(display_name, enumeration);
static struct kobj_attribute enumeration_display_name =
	__ATTR_RO(display_name);

ATTRIBUTE_PROPERTY_STORE(current_value, enumeration);
static struct kobj_attribute enumeration_current_val =
	__ATTR_RW(current_value);

ATTRIBUTE_VALUES_PROPERTY_SHOW(possible_values, enumeration, SEMICOLON_SEP);
static struct kobj_attribute enumeration_poss_val =
	__ATTR_RO(possible_values);

/* Constant "type" file identifying this attribute class to userspace. */
static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr,
			 char *buf)
{
	return sysfs_emit(buf, "enumeration\n");
}

static struct kobj_attribute enumeration_type =
	__ATTR_RO(type);

/* All files created under each enumeration attribute's kobject. */
static struct attribute *enumeration_attrs[] = {
	&common_display_langcode.attr,
	&enumeration_display_name.attr,
	&enumeration_current_val.attr,
	&enumeration_poss_val.attr,
	&enumeration_type.attr,
	NULL
};

static const struct attribute_group enumeration_attr_group = {
	.attrs = enumeration_attrs,
};
/*
 * hp_alloc_enumeration_data - allocate zeroed per-instance storage for
 * every enumeration attribute reported under HP_WMI_BIOS_ENUMERATION_GUID.
 *
 * Returns 0 on success or -ENOMEM; on failure the instance count is
 * reset to zero so later iteration stays safe.
 */
int hp_alloc_enumeration_data(void)
{
	bioscfg_drv.enumeration_instances_count =
		hp_get_instance_count(HP_WMI_BIOS_ENUMERATION_GUID);

	bioscfg_drv.enumeration_data = kcalloc(bioscfg_drv.enumeration_instances_count,
					       sizeof(*bioscfg_drv.enumeration_data), GFP_KERNEL);
	if (!bioscfg_drv.enumeration_data) {
		bioscfg_drv.enumeration_instances_count = 0;
		return -ENOMEM;
	}
	return 0;
}
/*
 * Expected Values types associated with each element.
 * Indexed by the parser's element-position enum (NAME..ENUM_POSSIBLE_VALUES);
 * used to verify that each ACPI package member has the anticipated type.
 */
static const acpi_object_type expected_enum_types[] = {
	[NAME] = ACPI_TYPE_STRING,
	[VALUE] = ACPI_TYPE_STRING,
	[PATH] = ACPI_TYPE_STRING,
	[IS_READONLY] = ACPI_TYPE_INTEGER,
	[DISPLAY_IN_UI] = ACPI_TYPE_INTEGER,
	[REQUIRES_PHYSICAL_PRESENCE] = ACPI_TYPE_INTEGER,
	[SEQUENCE] = ACPI_TYPE_INTEGER,
	[PREREQUISITES_SIZE] = ACPI_TYPE_INTEGER,
	[PREREQUISITES] = ACPI_TYPE_STRING,
	[SECURITY_LEVEL] = ACPI_TYPE_INTEGER,
	[ENUM_CURRENT_VALUE] = ACPI_TYPE_STRING,
	[ENUM_SIZE] = ACPI_TYPE_INTEGER,
	[ENUM_POSSIBLE_VALUES] = ACPI_TYPE_STRING,
};
/*
 * hp_populate_enumeration_elements_from_package - decode one enumeration
 * attribute instance from an ACPI package into
 * bioscfg_drv.enumeration_data[instance_id].
 *
 * Walks the package keeping two cursors: elem (actual object index) and
 * eloc (expected logical element); they diverge when BIOS omits the
 * optional PREREQUISITES or POSSIBLE_VALUES blocks (size zero). Only the
 * first ENUM_ELEM_CNT logical elements are examined.
 */
static int hp_populate_enumeration_elements_from_package(union acpi_object *enum_obj,
							 int enum_obj_count,
							 int instance_id)
{
	char *str_value = NULL;
	int value_len;
	u32 size = 0;
	u32 int_value = 0;
	int elem = 0;
	int reqs;
	int pos_values;
	int ret;
	int eloc;
	struct enumeration_data *enum_data = &bioscfg_drv.enumeration_data[instance_id];

	for (elem = 1, eloc = 1; elem < enum_obj_count; elem++, eloc++) {
		/* ONLY look at the first ENUM_ELEM_CNT elements */
		if (eloc == ENUM_ELEM_CNT)
			goto exit_enumeration_package;

		switch (enum_obj[elem].type) {
		case ACPI_TYPE_STRING:
			/* Multi-string blocks are converted in their own loops below.
			 * NOTE(review): elem (object index) is compared against
			 * eloc-space constants here — confirm that is intended. */
			if (PREREQUISITES != elem && ENUM_POSSIBLE_VALUES != elem) {
				ret = hp_convert_hexstr_to_str(enum_obj[elem].string.pointer,
							       enum_obj[elem].string.length,
							       &str_value, &value_len);
				if (ret)
					return -EINVAL;
			}
			break;
		case ACPI_TYPE_INTEGER:
			int_value = (u32)enum_obj[elem].integer.value;
			break;
		default:
			pr_warn("Unsupported object type [%d]\n", enum_obj[elem].type);
			continue;
		}

		/* Check that both expected and read object type match */
		if (expected_enum_types[eloc] != enum_obj[elem].type) {
			pr_err("Error expected type %d for elem %d, but got type %d instead\n",
			       expected_enum_types[eloc], elem, enum_obj[elem].type);
			kfree(str_value);
			return -EIO;
		}

		/* Assign appropriate element value to corresponding field */
		switch (eloc) {
		case NAME:
		case VALUE:
			break;
		case PATH:
			strscpy(enum_data->common.path, str_value,
				sizeof(enum_data->common.path));
			break;
		case IS_READONLY:
			enum_data->common.is_readonly = int_value;
			break;
		case DISPLAY_IN_UI:
			enum_data->common.display_in_ui = int_value;
			break;
		case REQUIRES_PHYSICAL_PRESENCE:
			enum_data->common.requires_physical_presence = int_value;
			break;
		case SEQUENCE:
			enum_data->common.sequence = int_value;
			break;
		case PREREQUISITES_SIZE:
			if (int_value > MAX_PREREQUISITES_SIZE) {
				pr_warn("Prerequisites size value exceeded the maximum number of elements supported or data may be malformed\n");
				int_value = MAX_PREREQUISITES_SIZE;
			}
			enum_data->common.prerequisites_size = int_value;

			/*
			 * This step is needed to keep the expected
			 * element list pointing to the right obj[elem].type
			 * when the size is zero. PREREQUISITES
			 * object is omitted by BIOS when the size is
			 * zero.
			 */
			if (int_value == 0)
				eloc++;
			break;
		case PREREQUISITES:
			size = min_t(u32, enum_data->common.prerequisites_size, MAX_PREREQUISITES_SIZE);
			for (reqs = 0; reqs < size; reqs++) {
				/* NOTE(review): only elem is bounds-checked
				 * while enum_obj[elem + reqs] is read —
				 * verify elem + reqs cannot exceed
				 * enum_obj_count with malformed data. */
				if (elem >= enum_obj_count) {
					pr_err("Error enum-objects package is too small\n");
					return -EINVAL;
				}

				ret = hp_convert_hexstr_to_str(enum_obj[elem + reqs].string.pointer,
							       enum_obj[elem + reqs].string.length,
							       &str_value, &value_len);
				if (ret)
					return -EINVAL;

				strscpy(enum_data->common.prerequisites[reqs],
					str_value,
					sizeof(enum_data->common.prerequisites[reqs]));

				kfree(str_value);
				str_value = NULL;
			}
			break;

		case SECURITY_LEVEL:
			enum_data->common.security_level = int_value;
			break;

		case ENUM_CURRENT_VALUE:
			strscpy(enum_data->current_value,
				str_value, sizeof(enum_data->current_value));
			break;
		case ENUM_SIZE:
			if (int_value > MAX_VALUES_SIZE) {
				pr_warn("Possible number values size value exceeded the maximum number of elements supported or data may be malformed\n");
				int_value = MAX_VALUES_SIZE;
			}
			enum_data->possible_values_size = int_value;

			/*
			 * This step is needed to keep the expected
			 * element list pointing to the right obj[elem].type
			 * when the size is zero. POSSIBLE_VALUES
			 * object is omitted by BIOS when the size is zero.
			 */
			if (int_value == 0)
				eloc++;
			break;

		case ENUM_POSSIBLE_VALUES:
			size = enum_data->possible_values_size;

			for (pos_values = 0; pos_values < size && pos_values < MAX_VALUES_SIZE;
			     pos_values++) {
				/* NOTE(review): same bounds-check concern as
				 * PREREQUISITES above (elem vs elem + pos_values). */
				if (elem >= enum_obj_count) {
					pr_err("Error enum-objects package is too small\n");
					return -EINVAL;
				}

				ret = hp_convert_hexstr_to_str(enum_obj[elem + pos_values].string.pointer,
							       enum_obj[elem + pos_values].string.length,
							       &str_value, &value_len);
				if (ret)
					return -EINVAL;

				/*
				 * ignore strings when possible values size
				 * is greater than MAX_VALUES_SIZE
				 */
				if (size < MAX_VALUES_SIZE)
					strscpy(enum_data->possible_values[pos_values],
						str_value,
						sizeof(enum_data->possible_values[pos_values]));

				kfree(str_value);
				str_value = NULL;
			}
			break;
		default:
			pr_warn("Invalid element: %d found in Enumeration attribute or data may be malformed\n", elem);
			break;
		}

		kfree(str_value);
		str_value = NULL;
	}
exit_enumeration_package:
	kfree(str_value);
	return 0;
}
/**
* hp_populate_enumeration_package_data() -
* Populate all properties of an instance under enumeration attribute
*
* @enum_obj: ACPI object with enumeration data
* @instance_id: The instance to enumerate
* @attr_name_kobj: The parent kernel object
*/
int hp_populate_enumeration_package_data(union acpi_object *enum_obj,
					 int instance_id,
					 struct kobject *attr_name_kobj)
{
	struct enumeration_data *enum_data = &bioscfg_drv.enumeration_data[instance_id];

	enum_data->attr_name_kobj = attr_name_kobj;

	/*
	 * NOTE(review): the return value of the package parser is ignored;
	 * on a parse failure the sysfs group below is still created over
	 * possibly incomplete data — confirm this is intentional.
	 */
	hp_populate_enumeration_elements_from_package(enum_obj,
						      enum_obj->package.count,
						      instance_id);
	hp_update_attribute_permissions(enum_data->common.is_readonly,
					&enumeration_current_val);
	/*
	 * Several attributes have names such "MONDAY". Friendly
	 * user nane is generated to make the name more descriptive
	 */
	hp_friendly_user_name_update(enum_data->common.path,
				     attr_name_kobj->name,
				     enum_data->common.display_name,
				     sizeof(enum_data->common.display_name));

	return sysfs_create_group(attr_name_kobj, &enumeration_attr_group);
}
/*
 * hp_populate_enumeration_elements_from_buffer - decode one enumeration
 * attribute instance from a raw BIOS byte buffer; the read order (VALUE,
 * COMMON, ENUM_CURRENT_VALUE, ENUM_SIZE, ENUM_POSSIBLE_VALUES) is
 * dictated by BIOS.
 *
 * Returns 0 on success or the first negative error from a buffer read.
 */
static int hp_populate_enumeration_elements_from_buffer(u8 *buffer_ptr, u32 *buffer_size,
							int instance_id)
{
	int values;
	struct enumeration_data *enum_data = &bioscfg_drv.enumeration_data[instance_id];
	int ret = 0;

	/*
	 * Only data relevant to this driver and its functionality is
	 * read. BIOS defines the order in which each * element is
	 * read. Element 0 data is not relevant to this
	 * driver hence it is ignored. For clarity, all element names
	 * (DISPLAY_IN_UI) which defines the order in which is read
	 * and the name matches the variable where the data is stored.
	 *
	 * In earlier implementation, reported errors were ignored
	 * causing the data to remain uninitialized. It is not
	 * possible to determine if data read from BIOS is valid or
	 * not. It is for this reason functions may return a error
	 * without validating the data itself.
	 */

	// VALUE:
	ret = hp_get_string_from_buffer(&buffer_ptr, buffer_size, enum_data->current_value,
					sizeof(enum_data->current_value));
	if (ret < 0)
		goto buffer_exit;

	// COMMON:
	ret = hp_get_common_data_from_buffer(&buffer_ptr, buffer_size, &enum_data->common);
	if (ret < 0)
		goto buffer_exit;

	// ENUM_CURRENT_VALUE:
	ret = hp_get_string_from_buffer(&buffer_ptr, buffer_size,
					enum_data->current_value,
					sizeof(enum_data->current_value));
	if (ret < 0)
		goto buffer_exit;

	// ENUM_SIZE:
	ret = hp_get_integer_from_buffer(&buffer_ptr, buffer_size,
					 &enum_data->possible_values_size);
	/*
	 * Bail out on a failed read before using the size below; every
	 * other read in this function is checked the same way.
	 */
	if (ret < 0)
		goto buffer_exit;

	if (enum_data->possible_values_size > MAX_VALUES_SIZE) {
		/* Report a message and limit possible values size to maximum value */
		pr_warn("Enum Possible size value exceeded the maximum number of elements supported or data may be malformed\n");
		enum_data->possible_values_size = MAX_VALUES_SIZE;
	}

	// ENUM_POSSIBLE_VALUES:
	for (values = 0; values < enum_data->possible_values_size; values++) {
		ret = hp_get_string_from_buffer(&buffer_ptr, buffer_size,
						enum_data->possible_values[values],
						sizeof(enum_data->possible_values[values]));
		if (ret < 0)
			break;
	}

buffer_exit:
	return ret;
}
/**
* hp_populate_enumeration_buffer_data() -
* Populate all properties of an instance under enumeration attribute
*
* @buffer_ptr: Buffer pointer
* @buffer_size: Buffer size
* @instance_id: The instance to enumerate
* @attr_name_kobj: The parent kernel object
*/
int hp_populate_enumeration_buffer_data(u8 *buffer_ptr, u32 *buffer_size,
					int instance_id,
					struct kobject *attr_name_kobj)
{
	struct enumeration_data *enum_data = &bioscfg_drv.enumeration_data[instance_id];
	int ret = 0;

	enum_data->attr_name_kobj = attr_name_kobj;

	/* Populate enumeration elements */
	/* Unlike the package path, buffer parse errors abort registration */
	ret = hp_populate_enumeration_elements_from_buffer(buffer_ptr, buffer_size,
							   instance_id);
	if (ret < 0)
		return ret;

	hp_update_attribute_permissions(enum_data->common.is_readonly,
					&enumeration_current_val);
	/*
	 * Several attributes have names such "MONDAY". A Friendlier
	 * user nane is generated to make the name more descriptive
	 */
	hp_friendly_user_name_update(enum_data->common.path,
				     attr_name_kobj->name,
				     enum_data->common.display_name,
				     sizeof(enum_data->common.display_name));

	return sysfs_create_group(attr_name_kobj, &enumeration_attr_group);
}
/**
* hp_exit_enumeration_attributes() - Clear all attribute data
*
* Clears all data allocated for this group of attributes
*/
/*
 * hp_exit_enumeration_attributes - remove every registered enumeration
 * attribute sysfs group and free the per-instance storage.
 */
void hp_exit_enumeration_attributes(void)
{
	int i;

	for (i = 0; i < bioscfg_drv.enumeration_instances_count; i++) {
		struct kobject *kobj = bioscfg_drv.enumeration_data[i].attr_name_kobj;

		/* Instances that failed registration have no kobject */
		if (kobj)
			sysfs_remove_group(kobj, &enumeration_attr_group);
	}

	bioscfg_drv.enumeration_instances_count = 0;
	kfree(bioscfg_drv.enumeration_data);
	bioscfg_drv.enumeration_data = NULL;
}
| linux-master | drivers/platform/x86/hp/hp-bioscfg/enum-attributes.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Functions corresponding to secure platform management object type
* attributes under BIOS PASSWORD for use with hp-bioscfg driver
*
* Copyright (c) 2022 HP Development Company, L.P.
*/
#include "bioscfg.h"
/* Human-readable names indexed by the provisioning state byte. */
static const char * const spm_state_types[] = {
	"not provisioned",
	"provisioned",
	"provisioning in progress",
};

/* Human-readable names for the provisioning mechanism value. */
static const char * const spm_mechanism_types[] = {
	"not provisioned",
	"signing-key",
	"endorsement-key",
};

/*
 * BIOS-defined layout returned by HPWMI_SECUREPLATFORM_GET_STATE.
 * Field order and reserved padding must match the firmware structure.
 */
struct secureplatform_provisioning_data {
	u8 state;			/* index into spm_state_types */
	u8 version[2];			/* major, minor */
	u8 reserved1;
	u32 features;			/* features-in-use bitmask */
	u32 nonce;
	u8 reserved2[28];
	u8 sk_mod[MAX_KEY_MOD_SIZE];	/* signing key modulus */
	u8 kek_mod[MAX_KEY_MOD_SIZE];	/* key-encryption (endorsement) key modulus */
};
/**
* hp_calculate_security_buffer() - determines size of security buffer
* for authentication scheme
*
* @authentication: the authentication content
*
* Currently only supported type is Admin password
*/
/*
 * hp_calculate_security_buffer - size in bytes of the security area
 * needed for the given credential.
 *
 * A NULL or empty credential needs just the length word plus a NUL code
 * unit. Otherwise one u16 length word plus one UTF-16 code unit per
 * character is needed, and plain passwords (no BEAM_PREFIX) additionally
 * carry the UTF_PREFIX marker.
 */
size_t hp_calculate_security_buffer(const char *authentication)
{
	size_t len, size;

	if (!authentication || !*authentication)
		return 2 * sizeof(u16);

	len = strlen(authentication);
	size = (len + 1) * sizeof(u16);

	if (!strstarts(authentication, BEAM_PREFIX))
		size += strlen(UTF_PREFIX) * sizeof(u16);

	return size;
}
/**
* hp_populate_security_buffer() - builds a security buffer for
* authentication scheme
*
* @authbuf: the security buffer
* @authentication: the authentication content
*
* Currently only supported type is PLAIN TEXT
*/
int hp_populate_security_buffer(u16 *authbuf, const char *authentication)
{
	u16 *auth = authbuf;
	char *strprefix = NULL;
	int ret = 0;

	if (strstarts(authentication, BEAM_PREFIX)) {
		/*
		 * BEAM_PREFIX is append to authbuf when a signature
		 * is provided and Sure Admin is enabled in BIOS
		 */
		/* BEAM_PREFIX found, convert part to unicode */
		auth = hp_ascii_to_utf16_unicode(auth, authentication);
		if (!auth)
			return -EINVAL;
	} else {
		/*
		 * UTF-16 prefix is append to the * authbuf when a BIOS
		 * admin password is configured in BIOS
		 */

		/* append UTF_PREFIX to part and then convert it to unicode */
		strprefix = kasprintf(GFP_KERNEL, "%s%s", UTF_PREFIX,
				      authentication);
		if (!strprefix)
			return -ENOMEM;

		auth = hp_ascii_to_utf16_unicode(auth, strprefix);
		kfree(strprefix);

		if (!auth) {
			ret = -EINVAL;
			goto out_buffer;
		}
	}

out_buffer:
	return ret;
}
/*
 * update_spm_state() - refresh cached SPM mechanism/enabled flags from
 * the firmware provisioning record.  Returns 0 or a negative errno.
 */
static ssize_t update_spm_state(void)
{
	struct secureplatform_provisioning_data data;
	int err;

	err = hp_wmi_perform_query(HPWMI_SECUREPLATFORM_GET_STATE,
				   HPWMI_SECUREPLATFORM, &data, 0,
				   sizeof(data));
	if (err < 0)
		return err;

	bioscfg_drv.spm_data.mechanism = data.state;

	/* any non-zero provisioning state means SPM is active */
	if (bioscfg_drv.spm_data.mechanism != 0)
		bioscfg_drv.spm_data.is_enabled = 1;

	return 0;
}
/*
 * statusbin() - fetch the raw provisioning record into @buf.
 * Returns the record size on success or a negative errno.
 */
static ssize_t statusbin(struct kobject *kobj,
			 struct kobj_attribute *attr,
			 struct secureplatform_provisioning_data *buf)
{
	int err;

	err = hp_wmi_perform_query(HPWMI_SECUREPLATFORM_GET_STATE,
				   HPWMI_SECUREPLATFORM, buf, 0,
				   sizeof(*buf));

	return err < 0 ? err : sizeof(struct secureplatform_provisioning_data);
}
/*
* status_show - Reads SPM status
*/
static ssize_t status_show(struct kobject *kobj, struct kobj_attribute
*attr, char *buf)
{
int ret, i;
int len = 0;
struct secureplatform_provisioning_data data;
ret = statusbin(kobj, attr, &data);
if (ret < 0)
return ret;
/*
* 'status' is a read-only file that returns ASCII text in
* JSON format reporting the status information.
*
* "State": "not provisioned | provisioned | provisioning in progress ",
* "Version": " Major. Minor ",
* "Nonce": <16-bit unsigned number display in base 10>,
* "FeaturesInUse": <16-bit unsigned number display in base 10>,
* "EndorsementKeyMod": "<256 bytes in base64>",
* "SigningKeyMod": "<256 bytes in base64>"
*/
len += sysfs_emit_at(buf, len, "{\n");
len += sysfs_emit_at(buf, len, "\t\"State\": \"%s\",\n",
spm_state_types[data.state]);
len += sysfs_emit_at(buf, len, "\t\"Version\": \"%d.%d\"",
data.version[0], data.version[1]);
/*
* state == 0 means secure platform management
* feature is not configured in BIOS.
*/
if (data.state == 0) {
len += sysfs_emit_at(buf, len, "\n");
goto status_exit;
} else {
len += sysfs_emit_at(buf, len, ",\n");
}
len += sysfs_emit_at(buf, len, "\t\"Nonce\": %d,\n", data.nonce);
len += sysfs_emit_at(buf, len, "\t\"FeaturesInUse\": %d,\n", data.features);
len += sysfs_emit_at(buf, len, "\t\"EndorsementKeyMod\": \"");
for (i = 255; i >= 0; i--)
len += sysfs_emit_at(buf, len, " %u", data.kek_mod[i]);
len += sysfs_emit_at(buf, len, " \",\n");
len += sysfs_emit_at(buf, len, "\t\"SigningKeyMod\": \"");
for (i = 255; i >= 0; i--)
len += sysfs_emit_at(buf, len, " %u", data.sk_mod[i]);
/* Return buf contents */
len += sysfs_emit_at(buf, len, " \"\n");
status_exit:
len += sysfs_emit_at(buf, len, "}\n");
return len;
}
/* read-only "status" attribute backed by status_show() */
static struct kobj_attribute password_spm_status = __ATTR_RO(status);
/* generates is_enabled_show() for bioscfg_drv.spm_data.is_enabled */
ATTRIBUTE_SPM_N_PROPERTY_SHOW(is_enabled, spm);
static struct kobj_attribute password_spm_is_key_enabled = __ATTR_RO(is_enabled);
static ssize_t key_mechanism_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%s\n",
spm_mechanism_types[bioscfg_drv.spm_data.mechanism]);
}
static struct kobj_attribute password_spm_key_mechanism = __ATTR_RO(key_mechanism);
/*
 * sk_store() - provision the SPM signing key from a sysfs write.
 *
 * @buf/@count: raw key payload, possibly newline-terminated.
 * Returns @count on success or a negative errno.
 */
static ssize_t sk_store(struct kobject *kobj,
			struct kobj_attribute *attr,
			const char *buf, size_t count)
{
	int ret;
	int length;

	length = count;
	/* reject empty writes before reading buf[length - 1] */
	if (!length)
		return -EINVAL;
	if (buf[length - 1] == '\n')
		length--;
	if (!length)
		return -EINVAL;

	/* allocate space and copy current signing key */
	bioscfg_drv.spm_data.signing_key = kmemdup(buf, length, GFP_KERNEL);
	if (!bioscfg_drv.spm_data.signing_key)
		return -ENOMEM;

	/*
	 * submit signing key payload; pass the copied size (length), not
	 * count, so the query never reads past the kmemdup'd buffer when
	 * a trailing newline was stripped.
	 */
	ret = hp_wmi_perform_query(HPWMI_SECUREPLATFORM_SET_SK,
				   HPWMI_SECUREPLATFORM,
				   (void *)bioscfg_drv.spm_data.signing_key,
				   length, 0);
	if (!ret) {
		bioscfg_drv.spm_data.mechanism = SIGNING_KEY;
		hp_set_reboot_and_signal_event();
	}

	kfree(bioscfg_drv.spm_data.signing_key);
	bioscfg_drv.spm_data.signing_key = NULL;

	return ret ? ret : count;
}

static struct kobj_attribute password_spm_signing_key = __ATTR_WO(sk);
/*
 * kek_store() - provision the SPM endorsement (key-encryption) key.
 *
 * @buf/@count: raw key payload, possibly newline-terminated.
 * Returns @count on success or a negative errno.
 */
static ssize_t kek_store(struct kobject *kobj,
			 struct kobj_attribute *attr,
			 const char *buf, size_t count)
{
	int ret;
	int length;

	length = count;
	/* reject empty writes before reading buf[length - 1] */
	if (!length)
		return -EINVAL;
	if (buf[length - 1] == '\n')
		length--;
	if (!length)
		return -EINVAL;

	/* allocate space and copy current endorsement key */
	bioscfg_drv.spm_data.endorsement_key = kmemdup(buf, length, GFP_KERNEL);
	if (!bioscfg_drv.spm_data.endorsement_key)
		return -ENOMEM;

	/* submit only the bytes that were actually copied (no over-read) */
	ret = hp_wmi_perform_query(HPWMI_SECUREPLATFORM_SET_KEK,
				   HPWMI_SECUREPLATFORM,
				   (void *)bioscfg_drv.spm_data.endorsement_key,
				   length, 0);
	if (!ret) {
		bioscfg_drv.spm_data.mechanism = ENDORSEMENT_KEY;
		hp_set_reboot_and_signal_event();
	}

	kfree(bioscfg_drv.spm_data.endorsement_key);
	bioscfg_drv.spm_data.endorsement_key = NULL;

	return ret ? ret : count;
}

static struct kobj_attribute password_spm_endorsement_key = __ATTR_WO(kek);
/* role is a fixed identifier for the SPM authentication folder */
static ssize_t role_show(struct kobject *kobj, struct kobj_attribute *attr,
			 char *buf)
{
	return sysfs_emit(buf, "%s\n", BIOS_SPM);
}

static struct kobj_attribute password_spm_role = __ATTR_RO(role);
/*
 * auth_token_store() - cache the SPM authentication token for later
 * provisioned BIOS-setting writes.
 *
 * Returns @count on success or a negative errno.
 */
static ssize_t auth_token_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t count)
{
	int length;

	length = count;
	/* reject empty writes before reading buf[length - 1] */
	if (!length)
		return -EINVAL;
	if (buf[length - 1] == '\n')
		length--;
	if (!length)
		return -EINVAL;

	/* drop any token from a previous write so it cannot leak */
	kfree(bioscfg_drv.spm_data.auth_token);

	/* allocate space and copy current auth token */
	bioscfg_drv.spm_data.auth_token = kmemdup(buf, length, GFP_KERNEL);
	if (!bioscfg_drv.spm_data.auth_token)
		return -ENOMEM;

	return count;
}

static struct kobj_attribute password_spm_auth_token = __ATTR_WO(auth_token);
/* sysfs attributes exposed under the Secure Platform Management folder */
static struct attribute *secure_platform_attrs[] = {
	&password_spm_is_key_enabled.attr,
	&password_spm_signing_key.attr,
	&password_spm_endorsement_key.attr,
	&password_spm_key_mechanism.attr,
	&password_spm_status.attr,
	&password_spm_role.attr,
	&password_spm_auth_token.attr,
	NULL,
};
static const struct attribute_group secure_platform_attr_group = {
	.attrs = secure_platform_attrs,
};
/*
 * hp_exit_secure_platform_attributes() - tear down the SPM sysfs group
 * and release all cached key/token buffers.
 */
void hp_exit_secure_platform_attributes(void)
{
	if (bioscfg_drv.spm_data.attr_name_kobj)
		sysfs_remove_group(bioscfg_drv.spm_data.attr_name_kobj,
				   &secure_platform_attr_group);

	kfree(bioscfg_drv.spm_data.auth_token);
	bioscfg_drv.spm_data.auth_token = NULL;

	kfree(bioscfg_drv.spm_data.signing_key);
	bioscfg_drv.spm_data.signing_key = NULL;

	kfree(bioscfg_drv.spm_data.endorsement_key);
	bioscfg_drv.spm_data.endorsement_key = NULL;
}
/*
 * hp_populate_secure_platform_data() - initialise the SPM state and
 * register its sysfs attribute group under @attr_name_kobj.
 */
int hp_populate_secure_platform_data(struct kobject *attr_name_kobj)
{
	bioscfg_drv.spm_data.attr_name_kobj = attr_name_kobj;

	strscpy(bioscfg_drv.spm_data.attribute_name, SPM_STR,
		sizeof(bioscfg_drv.spm_data.attribute_name));

	bioscfg_drv.spm_data.endorsement_key = NULL;
	bioscfg_drv.spm_data.signing_key = NULL;
	bioscfg_drv.spm_data.auth_token = NULL;
	bioscfg_drv.spm_data.is_enabled = 0;
	bioscfg_drv.spm_data.mechanism = 0;
	bioscfg_drv.pending_reboot = false;

	/* best-effort refresh from firmware; failure leaves defaults */
	update_spm_state();

	return sysfs_create_group(attr_name_kobj, &secure_platform_attr_group);
}
| linux-master | drivers/platform/x86/hp/hp-bioscfg/spmobj-attributes.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Functions corresponding to password object type attributes under
* BIOS PASSWORD for use with hp-bioscfg driver.
*
* Copyright (c) 2022 HP Development Company, L.P.
*/
#include "bioscfg.h"
#include <asm-generic/posix_types.h>
/* generates get_password_instance_id(kobj) used by the handlers below */
GET_INSTANCE_ID(password);
/*
 * Clear all passwords copied to memory for a particular
 * authentication instance
 */
static int clear_passwords(const int instance)
{
	struct password_data *data = &bioscfg_drv.password_data[instance];

	if (data->is_enabled) {
		memset(data->current_password, 0, sizeof(data->current_password));
		memset(data->new_password, 0, sizeof(data->new_password));
	}

	return 0;
}
/*
 * Clear all credentials copied to memory for both Power-ON and Setup
 * BIOS instances
 */
int hp_clear_all_credentials(void)
{
	int instance;

	/* wipe every instance's in-memory password copies */
	for (instance = 0; instance < bioscfg_drv.password_instances_count; instance++)
		clear_passwords(instance);

	/* release the cached SPM authentication token */
	kfree(bioscfg_drv.spm_data.auth_token);
	bioscfg_drv.spm_data.auth_token = NULL;

	return 0;
}
/*
 * hp_get_password_instance_for_type() - find the password instance whose
 * display name matches @name.  Returns the index or -EINVAL.
 */
int hp_get_password_instance_for_type(const char *name)
{
	int idx;

	for (idx = 0; idx < bioscfg_drv.password_instances_count; idx++) {
		if (strcmp(bioscfg_drv.password_data[idx].common.display_name, name) == 0)
			return idx;
	}

	return -EINVAL;
}
/*
 * validate_password_input() - check a candidate password against the
 * instance's length constraints.  A single trailing newline is ignored.
 *
 * Returns SUCCESS or INVALID_BIOS_AUTH.
 */
static int validate_password_input(int instance_id, const char *buf)
{
	int length;
	struct password_data *password_data = &bioscfg_drv.password_data[instance_id];

	length = strlen(buf);
	/* guard the empty-string case before reading buf[length - 1] */
	if (length && buf[length - 1] == '\n')
		length--;

	if (length > MAX_PASSWD_SIZE)
		return INVALID_BIOS_AUTH;

	if (password_data->min_password_length > length ||
	    password_data->max_password_length < length)
		return INVALID_BIOS_AUTH;

	return SUCCESS;
}
/* generates is_enabled_show() for password_data[i].is_enabled */
ATTRIBUTE_N_PROPERTY_SHOW(is_enabled, password);
static struct kobj_attribute password_is_password_set = __ATTR_RO(is_enabled);
/*
 * store_password_instance() - shared handler for the current/new
 * password sysfs stores.
 *
 * @kobj:	kobject of the password instance being written
 * @buf:	user-supplied password (may carry a trailing newline)
 * @count:	number of bytes in @buf
 * @is_current:	true to store into current_password, false for new_password
 *
 * Returns @count on success or a negative errno.
 */
static int store_password_instance(struct kobject *kobj, const char *buf,
				   size_t count, bool is_current)
{
	char *buf_cp;
	int id = -EINVAL;
	int ret;

	buf_cp = kstrdup(buf, GFP_KERNEL);
	if (!buf_cp)
		return -ENOMEM;

	ret = hp_enforce_single_line_input(buf_cp, count);
	if (!ret) {
		id = get_password_instance_id(kobj);
		if (id >= 0)
			ret = validate_password_input(id, buf_cp);
		else
			/* propagate the lookup error instead of leaving
			 * ret == 0 and indexing the array with a negative id */
			ret = id;
	}

	if (!ret) {
		if (is_current)
			strscpy(bioscfg_drv.password_data[id].current_password,
				buf_cp,
				sizeof(bioscfg_drv.password_data[id].current_password));
		else
			strscpy(bioscfg_drv.password_data[id].new_password,
				buf_cp,
				sizeof(bioscfg_drv.password_data[id].new_password));
	}

	kfree(buf_cp);
	return ret < 0 ? ret : count;
}
/* writes the instance's current (already-set) password */
static ssize_t current_password_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	/* true: target the current_password field */
	return store_password_instance(kobj, buf, count, true);
}

static struct kobj_attribute password_current_password = __ATTR_WO(current_password);
/* writes the instance's pending new password */
static ssize_t new_password_store(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  const char *buf, size_t count)
{
	/*
	 * false: target the new_password field.  Passing true here (as a
	 * copy-paste of current_password_store) would silently overwrite
	 * the stored current password instead.
	 */
	return store_password_instance(kobj, buf, count, false);
}

static struct kobj_attribute password_new_password = __ATTR_WO(new_password);
/* generate read-only min/max password length attributes */
ATTRIBUTE_N_PROPERTY_SHOW(min_password_length, password);
static struct kobj_attribute password_min_password_length = __ATTR_RO(min_password_length);
ATTRIBUTE_N_PROPERTY_SHOW(max_password_length, password);
static struct kobj_attribute password_max_password_length = __ATTR_RO(max_password_length);
static ssize_t role_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
if (!strcmp(kobj->name, SETUP_PASSWD))
return sysfs_emit(buf, "%s\n", BIOS_ADMIN);
if (!strcmp(kobj->name, POWER_ON_PASSWD))
return sysfs_emit(buf, "%s\n", POWER_ON);
return -EIO;
}
static struct kobj_attribute password_role = __ATTR_RO(role);
/* only plain-password authentication is reported */
static ssize_t mechanism_show(struct kobject *kobj, struct kobj_attribute *attr,
			      char *buf)
{
	int instance = get_password_instance_id(kobj);

	if (instance < 0)
		return instance;

	if (bioscfg_drv.password_data[instance].mechanism != PASSWORD)
		return -EINVAL;

	return sysfs_emit(buf, "%s\n", PASSWD_MECHANISM_TYPES);
}

static struct kobj_attribute password_mechanism = __ATTR_RO(mechanism);
/* generates encodings_show() listing the supported password encodings */
ATTRIBUTE_VALUES_PROPERTY_SHOW(encodings, password, SEMICOLON_SEP);
static struct kobj_attribute password_encodings_val = __ATTR_RO(encodings);
/* sysfs attributes exposed for each password instance */
static struct attribute *password_attrs[] = {
	&password_is_password_set.attr,
	&password_min_password_length.attr,
	&password_max_password_length.attr,
	&password_current_password.attr,
	&password_new_password.attr,
	&password_role.attr,
	&password_mechanism.attr,
	&password_encodings_val.attr,
	NULL
};
static const struct attribute_group password_attr_group = {
	.attrs = password_attrs
};
/*
 * hp_alloc_password_data() - allocate one password_data slot per WMI
 * password instance.  Returns 0 or -ENOMEM.
 */
int hp_alloc_password_data(void)
{
	int count = hp_get_instance_count(HP_WMI_BIOS_PASSWORD_GUID);

	bioscfg_drv.password_data = kcalloc(count,
					    sizeof(*bioscfg_drv.password_data),
					    GFP_KERNEL);
	if (!bioscfg_drv.password_data) {
		bioscfg_drv.password_instances_count = 0;
		return -ENOMEM;
	}

	bioscfg_drv.password_instances_count = count;
	return 0;
}
/* Expected Values types associated with each element */
/* Indexed by the logical element position ('eloc' in the package parser);
 * used to verify each ACPI object's type before it is consumed. */
static const acpi_object_type expected_password_types[] = {
	[NAME] = ACPI_TYPE_STRING,
	[VALUE] = ACPI_TYPE_STRING,
	[PATH] = ACPI_TYPE_STRING,
	[IS_READONLY] = ACPI_TYPE_INTEGER,
	[DISPLAY_IN_UI] = ACPI_TYPE_INTEGER,
	[REQUIRES_PHYSICAL_PRESENCE] = ACPI_TYPE_INTEGER,
	[SEQUENCE] = ACPI_TYPE_INTEGER,
	[PREREQUISITES_SIZE] = ACPI_TYPE_INTEGER,
	[PREREQUISITES] = ACPI_TYPE_STRING,
	[SECURITY_LEVEL] = ACPI_TYPE_INTEGER,
	[PSWD_MIN_LENGTH] = ACPI_TYPE_INTEGER,
	[PSWD_MAX_LENGTH] = ACPI_TYPE_INTEGER,
	[PSWD_SIZE] = ACPI_TYPE_INTEGER,
	[PSWD_ENCODINGS] = ACPI_TYPE_STRING,
	[PSWD_IS_SET] = ACPI_TYPE_INTEGER,
};
/*
 * hp_populate_password_elements_from_package() - decode one password
 * instance from an ACPI package into bioscfg_drv.password_data[].
 *
 * @password_obj:	array of ACPI objects for this instance
 * @password_obj_count:	number of objects in @password_obj
 * @instance_id:	index of the instance being populated
 *
 * 'elem' indexes the ACPI array while 'eloc' tracks the logical field
 * (per expected_password_types[]); they diverge when BIOS omits a
 * zero-sized optional list (PREREQUISITES / PSWD_ENCODINGS).
 * Returns 0 on success, -EINVAL for a NULL package, -EIO on a type
 * mismatch.
 */
static int hp_populate_password_elements_from_package(union acpi_object *password_obj,
						      int password_obj_count,
						      int instance_id)
{
	char *str_value = NULL;
	int value_len;
	int ret;
	u32 size;
	u32 int_value = 0;
	int elem;
	int reqs;
	int eloc;
	int pos_values;
	struct password_data *password_data = &bioscfg_drv.password_data[instance_id];

	if (!password_obj)
		return -EINVAL;

	/* element 0 is not relevant to this driver, start at 1 */
	for (elem = 1, eloc = 1; elem < password_obj_count; elem++, eloc++) {
		/* ONLY look at the first PASSWORD_ELEM_CNT elements */
		if (eloc == PSWD_ELEM_CNT)
			goto exit_package;
		switch (password_obj[elem].type) {
		case ACPI_TYPE_STRING:
			/* multi-string lists are converted in their own
			 * case below, one entry at a time */
			if (PREREQUISITES != elem && PSWD_ENCODINGS != elem) {
				ret = hp_convert_hexstr_to_str(password_obj[elem].string.pointer,
							       password_obj[elem].string.length,
							       &str_value, &value_len);
				if (ret)
					continue;
			}
			break;
		case ACPI_TYPE_INTEGER:
			int_value = (u32)password_obj[elem].integer.value;
			break;
		default:
			pr_warn("Unsupported object type [%d]\n", password_obj[elem].type);
			continue;
		}
		/* Check that both expected and read object type match */
		if (expected_password_types[eloc] != password_obj[elem].type) {
			pr_err("Error expected type %d for elem %d, but got type %d instead\n",
			       expected_password_types[eloc], elem, password_obj[elem].type);
			kfree(str_value);
			return -EIO;
		}
		/* Assign appropriate element value to corresponding field*/
		switch (eloc) {
		case VALUE:
			/* the password value itself is intentionally not stored */
			break;
		case PATH:
			strscpy(password_data->common.path, str_value,
				sizeof(password_data->common.path));
			break;
		case IS_READONLY:
			password_data->common.is_readonly = int_value;
			break;
		case DISPLAY_IN_UI:
			password_data->common.display_in_ui = int_value;
			break;
		case REQUIRES_PHYSICAL_PRESENCE:
			password_data->common.requires_physical_presence = int_value;
			break;
		case SEQUENCE:
			password_data->common.sequence = int_value;
			break;
		case PREREQUISITES_SIZE:
			if (int_value > MAX_PREREQUISITES_SIZE) {
				pr_warn("Prerequisites size value exceeded the maximum number of elements supported or data may be malformed\n");
				int_value = MAX_PREREQUISITES_SIZE;
			}
			password_data->common.prerequisites_size = int_value;
			/* This step is needed to keep the expected
			 * element list pointing to the right obj[elem].type
			 * when the size is zero. PREREQUISITES
			 * object is omitted by BIOS when the size is
			 * zero.
			 */
			if (int_value == 0)
				eloc++;
			break;
		case PREREQUISITES:
			size = min_t(u32, password_data->common.prerequisites_size,
				     MAX_PREREQUISITES_SIZE);
			for (reqs = 0; reqs < size; reqs++) {
				ret = hp_convert_hexstr_to_str(password_obj[elem + reqs].string.pointer,
							       password_obj[elem + reqs].string.length,
							       &str_value, &value_len);
				if (ret)
					break;
				strscpy(password_data->common.prerequisites[reqs],
					str_value,
					sizeof(password_data->common.prerequisites[reqs]));
				kfree(str_value);
				str_value = NULL;
			}
			break;
		case SECURITY_LEVEL:
			password_data->common.security_level = int_value;
			break;
		case PSWD_MIN_LENGTH:
			password_data->min_password_length = int_value;
			break;
		case PSWD_MAX_LENGTH:
			password_data->max_password_length = int_value;
			break;
		case PSWD_SIZE:
			if (int_value > MAX_ENCODINGS_SIZE) {
				pr_warn("Password Encoding size value exceeded the maximum number of elements supported or data may be malformed\n");
				int_value = MAX_ENCODINGS_SIZE;
			}
			password_data->encodings_size = int_value;
			/* This step is needed to keep the expected
			 * element list pointing to the right obj[elem].type
			 * when the size is zero. PSWD_ENCODINGS
			 * object is omitted by BIOS when the size is
			 * zero.
			 */
			if (int_value == 0)
				eloc++;
			break;
		case PSWD_ENCODINGS:
			size = min_t(u32, password_data->encodings_size, MAX_ENCODINGS_SIZE);
			for (pos_values = 0; pos_values < size; pos_values++) {
				ret = hp_convert_hexstr_to_str(password_obj[elem + pos_values].string.pointer,
							       password_obj[elem + pos_values].string.length,
							       &str_value, &value_len);
				if (ret)
					break;
				strscpy(password_data->encodings[pos_values],
					str_value,
					sizeof(password_data->encodings[pos_values]));
				kfree(str_value);
				str_value = NULL;
			}
			break;
		case PSWD_IS_SET:
			password_data->is_enabled = int_value;
			break;
		default:
			pr_warn("Invalid element: %d found in Password attribute or data may be malformed\n", elem);
			break;
		}
		kfree(str_value);
		str_value = NULL;
	}
exit_package:
	kfree(str_value);
	return 0;
}
/**
 * hp_populate_password_package_data()
 *	Populate all properties for an instance under password attribute
 *
 * @password_obj: ACPI object with password data
 * @instance_id: The instance to enumerate
 * @attr_name_kobj: The parent kernel object
 */
int hp_populate_password_package_data(union acpi_object *password_obj, int instance_id,
				      struct kobject *attr_name_kobj)
{
	struct password_data *password_data = &bioscfg_drv.password_data[instance_id];

	password_data->attr_name_kobj = attr_name_kobj;

	hp_populate_password_elements_from_package(password_obj,
						   password_obj->package.count,
						   instance_id);

	hp_friendly_user_name_update(password_data->common.path,
				     attr_name_kobj->name,
				     password_data->common.display_name,
				     sizeof(password_data->common.display_name));

	/*
	 * Every instance (SETUP or Power-ON) registers the same
	 * password_attr_group; the previous name-dependent branch selected
	 * identical code on both paths and has been removed.
	 */
	return sysfs_create_group(attr_name_kobj, &password_attr_group);
}
/*
 * hp_populate_password_elements_from_buffer() - decode one password
 * instance from the BIOS-defined flat buffer layout.
 *
 * @buffer_ptr:  advancing cursor into the BIOS buffer
 * @buffer_size: remaining bytes, decremented by the helpers
 * @instance_id: index of the instance being populated
 *
 * Returns 0 on success or a negative errno from the first failing read.
 */
static int hp_populate_password_elements_from_buffer(u8 *buffer_ptr, u32 *buffer_size,
						     int instance_id)
{
	int values;
	int isreadonly;
	struct password_data *password_data = &bioscfg_drv.password_data[instance_id];
	int ret = 0;

	/*
	 * Only data relevant to this driver and its functionality is
	 * read. BIOS defines the order in which each * element is
	 * read. Element 0 data is not relevant to this
	 * driver hence it is ignored. For clarity, all element names
	 * (DISPLAY_IN_UI) which defines the order in which is read
	 * and the name matches the variable where the data is stored.
	 *
	 * In earlier implementation, reported errors were ignored
	 * causing the data to remain uninitialized. It is not
	 * possible to determine if data read from BIOS is valid or
	 * not. It is for this reason functions may return a error
	 * without validating the data itself.
	 */

	// VALUE:
	ret = hp_get_string_from_buffer(&buffer_ptr, buffer_size, password_data->current_password,
					sizeof(password_data->current_password));
	if (ret < 0)
		goto buffer_exit;

	// COMMON:
	ret = hp_get_common_data_from_buffer(&buffer_ptr, buffer_size,
					     &password_data->common);
	if (ret < 0)
		goto buffer_exit;

	// PSWD_MIN_LENGTH:
	ret = hp_get_integer_from_buffer(&buffer_ptr, buffer_size,
					 &password_data->min_password_length);
	if (ret < 0)
		goto buffer_exit;

	// PSWD_MAX_LENGTH:
	ret = hp_get_integer_from_buffer(&buffer_ptr, buffer_size,
					 &password_data->max_password_length);
	if (ret < 0)
		goto buffer_exit;

	// PSWD_SIZE:
	ret = hp_get_integer_from_buffer(&buffer_ptr, buffer_size,
					 &password_data->encodings_size);
	if (ret < 0)
		goto buffer_exit;

	if (password_data->encodings_size > MAX_ENCODINGS_SIZE) {
		/* Report a message and limit possible values size to maximum value */
		pr_warn("Password Encoding size value exceeded the maximum number of elements supported or data may be malformed\n");
		password_data->encodings_size = MAX_ENCODINGS_SIZE;
	}

	// PSWD_ENCODINGS:
	/* NOTE(review): a failed read here only breaks the loop; the error
	 * is then overwritten by the PSWD_IS_SET read below — confirm this
	 * best-effort behavior is intended. */
	for (values = 0; values < password_data->encodings_size; values++) {
		ret = hp_get_string_from_buffer(&buffer_ptr, buffer_size,
						password_data->encodings[values],
						sizeof(password_data->encodings[values]));
		if (ret < 0)
			break;
	}

	// PSWD_IS_SET:
	ret = hp_get_integer_from_buffer(&buffer_ptr, buffer_size, &isreadonly);
	if (ret < 0)
		goto buffer_exit;

	password_data->is_enabled = isreadonly ? true : false;

buffer_exit:
	return ret;
}
/**
 * hp_populate_password_buffer_data()
 *	Populate all properties for an instance under password object attribute
 *
 * @buffer_ptr: Buffer pointer
 * @buffer_size: Buffer size
 * @instance_id: The instance to enumerate
 * @attr_name_kobj: The parent kernel object
 */
int hp_populate_password_buffer_data(u8 *buffer_ptr, u32 *buffer_size, int instance_id,
				     struct kobject *attr_name_kobj)
{
	struct password_data *password_data = &bioscfg_drv.password_data[instance_id];
	int ret;

	password_data->attr_name_kobj = attr_name_kobj;

	/* Populate Password attributes */
	ret = hp_populate_password_elements_from_buffer(buffer_ptr, buffer_size,
							instance_id);
	if (ret < 0)
		return ret;

	hp_friendly_user_name_update(password_data->common.path,
				     attr_name_kobj->name,
				     password_data->common.display_name,
				     sizeof(password_data->common.display_name));

	/*
	 * Every instance registers the same password_attr_group; the
	 * previous name-dependent branch selected identical code on both
	 * paths and has been removed.
	 */
	return sysfs_create_group(attr_name_kobj, &password_attr_group);
}
/**
 * hp_exit_password_attributes() - Clear all attribute data
 *
 * Clears all data allocated for this group of attributes
 */
void hp_exit_password_attributes(void)
{
	int instance_id;

	for (instance_id = 0; instance_id < bioscfg_drv.password_instances_count;
	     instance_id++) {
		struct kobject *attr_name_kobj =
			bioscfg_drv.password_data[instance_id].attr_name_kobj;

		/*
		 * Every instance (SETUP or Power-ON) registered the same
		 * password_attr_group, so a single removal path suffices —
		 * the previous name-dependent if/else ran identical code.
		 */
		if (attr_name_kobj)
			sysfs_remove_group(attr_name_kobj, &password_attr_group);
	}
	bioscfg_drv.password_instances_count = 0;

	kfree(bioscfg_drv.password_data);
	bioscfg_drv.password_data = NULL;
}
| linux-master | drivers/platform/x86/hp/hp-bioscfg/passwdobj-attributes.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Functions corresponding to ordered list type attributes under
* BIOS ORDERED LIST GUID for use with hp-bioscfg driver.
*
* Copyright (c) 2022 HP Development Company, L.P.
*/
#include "bioscfg.h"
/* generates get_ordered_list_instance_id(kobj) used by the handlers below */
GET_INSTANCE_ID(ordered_list);
/* reports the instance's current ordered-list value (';'-separated) */
static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	int instance = get_ordered_list_instance_id(kobj);

	if (instance < 0)
		return -EIO;

	return sysfs_emit(buf, "%s\n",
			  bioscfg_drv.ordered_list_data[instance].current_value);
}
/*
 * replace_char_str() - replace every occurrence of *repl_char in the
 * NUL-terminated @buffer with *repl_with, in place.
 * Returns 0, or -EINVAL for an empty string.
 */
static int replace_char_str(u8 *buffer, char *repl_char, char *repl_with)
{
	char *cursor = buffer;

	if (!*cursor)
		return -EINVAL;

	for (; *cursor; cursor++) {
		if (*cursor == *repl_char)
			*cursor = *repl_with;
	}

	return 0;
}
/**
 * validate_ordered_list_input() -
 * Validate input of current_value against possible values
 *
 * @instance: The instance on which input is validated
 * @buf: Input value
 */
static int validate_ordered_list_input(int instance, char *buf)
{
	/*
	 * BIOS performs the real validation; here we only translate the
	 * sysfs separator (';') into the comma separator BIOS expects
	 * when reporting ordered-list values.
	 */
	return replace_char_str(buf, SEMICOLON_SEP, COMMA_SEP);
}
/* caches the accepted value as the instance's current_value */
static void update_ordered_list_value(int instance, char *attr_value)
{
	strscpy(bioscfg_drv.ordered_list_data[instance].current_value,
		attr_value,
		sizeof(bioscfg_drv.ordered_list_data[instance].current_value));
}
/* generate display_name / current_value / elements sysfs handlers */
ATTRIBUTE_S_COMMON_PROPERTY_SHOW(display_name, ordered_list);
static struct kobj_attribute ordered_list_display_name =
	__ATTR_RO(display_name);
ATTRIBUTE_PROPERTY_STORE(current_value, ordered_list);
/* writable; mode may be tightened by hp_update_attribute_permissions() */
static struct kobj_attribute ordered_list_current_val =
	__ATTR_RW_MODE(current_value, 0644);
ATTRIBUTE_VALUES_PROPERTY_SHOW(elements, ordered_list, SEMICOLON_SEP);
static struct kobj_attribute ordered_list_elements_val =
	__ATTR_RO(elements);
/* fixed attribute-type identifier consumed by userspace */
static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr,
			 char *buf)
{
	return sysfs_emit(buf, "%s\n", "ordered-list");
}

static struct kobj_attribute ordered_list_type =
	__ATTR_RO(type);
/* sysfs attributes exposed for each ordered-list instance */
static struct attribute *ordered_list_attrs[] = {
	&common_display_langcode.attr,
	&ordered_list_display_name.attr,
	&ordered_list_current_val.attr,
	&ordered_list_elements_val.attr,
	&ordered_list_type.attr,
	NULL
};
static const struct attribute_group ordered_list_attr_group = {
	.attrs = ordered_list_attrs,
};
/*
 * hp_alloc_ordered_list_data() - allocate one ordered_list_data slot per
 * WMI ordered-list instance.  Returns 0 or -ENOMEM.
 */
int hp_alloc_ordered_list_data(void)
{
	int count = hp_get_instance_count(HP_WMI_BIOS_ORDERED_LIST_GUID);

	bioscfg_drv.ordered_list_data = kcalloc(count,
						sizeof(*bioscfg_drv.ordered_list_data),
						GFP_KERNEL);
	if (!bioscfg_drv.ordered_list_data) {
		bioscfg_drv.ordered_list_instances_count = 0;
		return -ENOMEM;
	}

	bioscfg_drv.ordered_list_instances_count = count;
	return 0;
}
/* Expected Values types associated with each element */
/* Indexed by the logical element position ('eloc' in the package parser);
 * used to verify each ACPI object's type before it is consumed. */
static const acpi_object_type expected_order_types[] = {
	[NAME] = ACPI_TYPE_STRING,
	[VALUE] = ACPI_TYPE_STRING,
	[PATH] = ACPI_TYPE_STRING,
	[IS_READONLY] = ACPI_TYPE_INTEGER,
	[DISPLAY_IN_UI] = ACPI_TYPE_INTEGER,
	[REQUIRES_PHYSICAL_PRESENCE] = ACPI_TYPE_INTEGER,
	[SEQUENCE] = ACPI_TYPE_INTEGER,
	[PREREQUISITES_SIZE] = ACPI_TYPE_INTEGER,
	[PREREQUISITES] = ACPI_TYPE_STRING,
	[SECURITY_LEVEL] = ACPI_TYPE_INTEGER,
	[ORD_LIST_SIZE] = ACPI_TYPE_INTEGER,
	[ORD_LIST_ELEMENTS] = ACPI_TYPE_STRING,
};
/*
 * hp_populate_ordered_list_elements_from_package() - decode one
 * ordered-list instance from an ACPI package into
 * bioscfg_drv.ordered_list_data[].
 *
 * @order_obj:       array of ACPI objects for this instance
 * @order_obj_count: number of objects in @order_obj (not iterated
 *                   directly; the loop is bounded by ORD_ELEM_CNT)
 * @instance_id:     index of the instance being populated
 *
 * 'elem' indexes the ACPI array while 'eloc' tracks the logical field
 * (per expected_order_types[]); they diverge when BIOS omits a
 * zero-sized optional list.  Returns 0 on success, -EINVAL for a NULL
 * package, -EIO on a type mismatch.
 */
static int hp_populate_ordered_list_elements_from_package(union acpi_object *order_obj,
							  int order_obj_count,
							  int instance_id)
{
	char *str_value = NULL;
	int value_len = 0;
	int ret;
	u32 size;
	u32 int_value = 0;
	int elem;
	int olist_elem;
	int reqs;
	int eloc;
	char *tmpstr = NULL;
	char *part_tmp = NULL;
	int tmp_len = 0;
	char *part = NULL;
	struct ordered_list_data *ordered_list_data = &bioscfg_drv.ordered_list_data[instance_id];

	if (!order_obj)
		return -EINVAL;

	for (elem = 1, eloc = 1; eloc < ORD_ELEM_CNT; elem++, eloc++) {
		switch (order_obj[elem].type) {
		case ACPI_TYPE_STRING:
			/*
			 * NOTE(review): the skip test uses 'elem' while the
			 * consumer switch below dispatches on 'eloc'; when no
			 * optional list was omitted they coincide, but if they
			 * diverge the ORD_LIST_ELEMENTS case may see str_value
			 * still NULL — confirm hp_convert_hexstr_to_str()
			 * tolerates that.
			 */
			if (elem != PREREQUISITES && elem != ORD_LIST_ELEMENTS) {
				ret = hp_convert_hexstr_to_str(order_obj[elem].string.pointer,
							       order_obj[elem].string.length,
							       &str_value, &value_len);
				if (ret)
					continue;
			}
			break;
		case ACPI_TYPE_INTEGER:
			int_value = (u32)order_obj[elem].integer.value;
			break;
		default:
			pr_warn("Unsupported object type [%d]\n", order_obj[elem].type);
			continue;
		}
		/* Check that both expected and read object type match */
		if (expected_order_types[eloc] != order_obj[elem].type) {
			pr_err("Error expected type %d for elem %d, but got type %d instead\n",
			       expected_order_types[eloc], elem, order_obj[elem].type);
			kfree(str_value);
			return -EIO;
		}
		/* Assign appropriate element value to corresponding field*/
		switch (eloc) {
		case VALUE:
			strscpy(ordered_list_data->current_value,
				str_value, sizeof(ordered_list_data->current_value));
			/* BIOS reports comma-separated; sysfs uses ';' */
			replace_char_str(ordered_list_data->current_value, COMMA_SEP, SEMICOLON_SEP);
			break;
		case PATH:
			strscpy(ordered_list_data->common.path, str_value,
				sizeof(ordered_list_data->common.path));
			break;
		case IS_READONLY:
			ordered_list_data->common.is_readonly = int_value;
			break;
		case DISPLAY_IN_UI:
			ordered_list_data->common.display_in_ui = int_value;
			break;
		case REQUIRES_PHYSICAL_PRESENCE:
			ordered_list_data->common.requires_physical_presence = int_value;
			break;
		case SEQUENCE:
			ordered_list_data->common.sequence = int_value;
			break;
		case PREREQUISITES_SIZE:
			if (int_value > MAX_PREREQUISITES_SIZE) {
				pr_warn("Prerequisites size value exceeded the maximum number of elements supported or data may be malformed\n");
				int_value = MAX_PREREQUISITES_SIZE;
			}
			ordered_list_data->common.prerequisites_size = int_value;
			/*
			 * This step is needed to keep the expected
			 * element list pointing to the right obj[elem].type
			 * when the size is zero. PREREQUISITES
			 * object is omitted by BIOS when the size is
			 * zero.
			 */
			if (int_value == 0)
				eloc++;
			break;
		case PREREQUISITES:
			size = min_t(u32, ordered_list_data->common.prerequisites_size,
				     MAX_PREREQUISITES_SIZE);
			for (reqs = 0; reqs < size; reqs++) {
				ret = hp_convert_hexstr_to_str(order_obj[elem + reqs].string.pointer,
							       order_obj[elem + reqs].string.length,
							       &str_value, &value_len);
				if (ret)
					continue;
				strscpy(ordered_list_data->common.prerequisites[reqs],
					str_value,
					sizeof(ordered_list_data->common.prerequisites[reqs]));
				kfree(str_value);
				str_value = NULL;
			}
			break;
		case SECURITY_LEVEL:
			ordered_list_data->common.security_level = int_value;
			break;
		case ORD_LIST_SIZE:
			if (int_value > MAX_ELEMENTS_SIZE) {
				pr_warn("Order List size value exceeded the maximum number of elements supported or data may be malformed\n");
				int_value = MAX_ELEMENTS_SIZE;
			}
			ordered_list_data->elements_size = int_value;
			/*
			 * This step is needed to keep the expected
			 * element list pointing to the right obj[elem].type
			 * when the size is zero. ORD_LIST_ELEMENTS
			 * object is omitted by BIOS when the size is
			 * zero.
			 */
			if (int_value == 0)
				eloc++;
			break;
		case ORD_LIST_ELEMENTS:
			/*
			 * Ordered list data is stored in hex and comma separated format
			 * Convert the data and split it to show each element
			 */
			ret = hp_convert_hexstr_to_str(str_value, value_len, &tmpstr, &tmp_len);
			if (ret)
				goto exit_list;
			part_tmp = tmpstr;
			part = strsep(&part_tmp, COMMA_SEP);
			for (olist_elem = 0; olist_elem < MAX_ELEMENTS_SIZE && part; olist_elem++) {
				strscpy(ordered_list_data->elements[olist_elem],
					part,
					sizeof(ordered_list_data->elements[olist_elem]));
				part = strsep(&part_tmp, COMMA_SEP);
			}
			/* size reflects entries actually parsed, not the
			 * count BIOS reported earlier */
			ordered_list_data->elements_size = olist_elem;
			kfree(str_value);
			str_value = NULL;
			break;
		default:
			pr_warn("Invalid element: %d found in Ordered_List attribute or data may be malformed\n", elem);
			break;
		}
		kfree(tmpstr);
		tmpstr = NULL;
		kfree(str_value);
		str_value = NULL;
	}
exit_list:
	kfree(tmpstr);
	kfree(str_value);
	return 0;
}
/**
 * hp_populate_ordered_list_package_data() -
 * Populate all properties of an instance under ordered_list attribute
 *
 * @order_obj: ACPI object with ordered_list data
 * @instance_id: The instance to enumerate
 * @attr_name_kobj: The parent kernel object
 */
int hp_populate_ordered_list_package_data(union acpi_object *order_obj, int instance_id,
					  struct kobject *attr_name_kobj)
{
	struct ordered_list_data *data = &bioscfg_drv.ordered_list_data[instance_id];

	data->attr_name_kobj = attr_name_kobj;

	hp_populate_ordered_list_elements_from_package(order_obj,
						       order_obj->package.count,
						       instance_id);

	/* derive the user-visible name, then apply read-only permissions */
	hp_friendly_user_name_update(data->common.path,
				     attr_name_kobj->name,
				     data->common.display_name,
				     sizeof(data->common.display_name));
	hp_update_attribute_permissions(data->common.is_readonly,
					&ordered_list_current_val);

	return sysfs_create_group(attr_name_kobj, &ordered_list_attr_group);
}
/*
 * hp_populate_ordered_list_elements_from_buffer() - decode one
 * ordered-list instance from the BIOS-defined flat buffer layout.
 *
 * @buffer_ptr:  advancing cursor into the BIOS buffer
 * @buffer_size: remaining bytes, decremented by the helpers
 * @instance_id: index of the instance being populated
 *
 * Returns 0 on success or a negative errno from the first failing read.
 */
static int hp_populate_ordered_list_elements_from_buffer(u8 *buffer_ptr, u32 *buffer_size,
							 int instance_id)
{
	int values;
	struct ordered_list_data *ordered_list_data = &bioscfg_drv.ordered_list_data[instance_id];
	int ret = 0;

	/*
	 * Only data relevant to this driver and its functionality is
	 * read. BIOS defines the order in which each * element is
	 * read. Element 0 data is not relevant to this
	 * driver hence it is ignored. For clarity, all element names
	 * (DISPLAY_IN_UI) which defines the order in which is read
	 * and the name matches the variable where the data is stored.
	 *
	 * In earlier implementation, reported errors were ignored
	 * causing the data to remain uninitialized. It is not
	 * possible to determine if data read from BIOS is valid or
	 * not. It is for this reason functions may return a error
	 * without validating the data itself.
	 */

	// VALUE:
	ret = hp_get_string_from_buffer(&buffer_ptr, buffer_size, ordered_list_data->current_value,
					sizeof(ordered_list_data->current_value));
	if (ret < 0)
		goto buffer_exit;

	/* BIOS reports comma-separated; sysfs uses ';' */
	replace_char_str(ordered_list_data->current_value, COMMA_SEP, SEMICOLON_SEP);

	// COMMON:
	ret = hp_get_common_data_from_buffer(&buffer_ptr, buffer_size,
					     &ordered_list_data->common);
	if (ret < 0)
		goto buffer_exit;

	// ORD_LIST_SIZE:
	ret = hp_get_integer_from_buffer(&buffer_ptr, buffer_size,
					 &ordered_list_data->elements_size);
	/*
	 * Bail out on a failed read (the sibling password parser does
	 * this); otherwise elements_size may be garbage and the loop
	 * below would read bogus strings.
	 */
	if (ret < 0)
		goto buffer_exit;

	if (ordered_list_data->elements_size > MAX_ELEMENTS_SIZE) {
		/* Report a message and limit elements size to maximum value */
		pr_warn("Ordered List size value exceeded the maximum number of elements supported or data may be malformed\n");
		ordered_list_data->elements_size = MAX_ELEMENTS_SIZE;
	}

	// ORD_LIST_ELEMENTS:
	for (values = 0; values < ordered_list_data->elements_size; values++) {
		ret = hp_get_string_from_buffer(&buffer_ptr, buffer_size,
						ordered_list_data->elements[values],
						sizeof(ordered_list_data->elements[values]));
		if (ret < 0)
			break;
	}

buffer_exit:
	return ret;
}
/**
* hp_populate_ordered_list_buffer_data() - Populate all properties of an
* instance under ordered list attribute
*
* @buffer_ptr: Buffer pointer
* @buffer_size: Buffer size
* @instance_id: The instance to enumerate
* @attr_name_kobj: The parent kernel object
*/
int hp_populate_ordered_list_buffer_data(u8 *buffer_ptr, u32 *buffer_size, int instance_id,
					 struct kobject *attr_name_kobj)
{
	struct ordered_list_data *ordered_list_data = &bioscfg_drv.ordered_list_data[instance_id];
	int ret = 0;

	/* remember the parent kobject so the exit path can remove the group */
	ordered_list_data->attr_name_kobj = attr_name_kobj;

	/* Populate ordered list elements */
	ret = hp_populate_ordered_list_elements_from_buffer(buffer_ptr, buffer_size,
							    instance_id);
	if (ret < 0)
		return ret;

	/* restrict sysfs permissions when BIOS marks the attribute read-only */
	hp_update_attribute_permissions(ordered_list_data->common.is_readonly,
					&ordered_list_current_val);
	/* derive the user-visible display name from the BIOS path */
	hp_friendly_user_name_update(ordered_list_data->common.path,
				     attr_name_kobj->name,
				     ordered_list_data->common.display_name,
				     sizeof(ordered_list_data->common.display_name));

	return sysfs_create_group(attr_name_kobj, &ordered_list_attr_group);
}
/**
* hp_exit_ordered_list_attributes() - Clear all attribute data
*
* Clears all data allocated for this group of attributes
*/
void hp_exit_ordered_list_attributes(void)
{
	int i;

	/* remove the sysfs group of every instance that was created */
	for (i = 0; i < bioscfg_drv.ordered_list_instances_count; i++) {
		struct kobject *kobj;

		kobj = bioscfg_drv.ordered_list_data[i].attr_name_kobj;
		if (!kobj)
			continue;
		sysfs_remove_group(kobj, &ordered_list_attr_group);
	}

	/* release the instance array and reset the bookkeeping */
	bioscfg_drv.ordered_list_instances_count = 0;
	kfree(bioscfg_drv.ordered_list_data);
	bioscfg_drv.ordered_list_data = NULL;
}
| linux-master | drivers/platform/x86/hp/hp-bioscfg/order-list-attributes.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Siemens SIMATIC IPC driver for CMOS battery monitoring
*
* Copyright (c) Siemens AG, 2023
*
* Authors:
* Henning Schild <[email protected]>
*/
#include <linux/gpio/machine.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include "simatic-ipc-batt.h"
/*
 * GPIO lines used for CMOS battery monitoring on BX-21A.
 * Consumer indices match simatic_ipc_batt_probe():
 * 0 = "CMOSBattery empty", 1 = "CMOSBattery low", 2 = "CMOSBattery meter".
 */
static struct gpiod_lookup_table simatic_ipc_batt_gpio_table_bx_21a = {
	.table = {
		GPIO_LOOKUP_IDX("INTC1020:04", 18, NULL, 0, GPIO_ACTIVE_HIGH),
		GPIO_LOOKUP_IDX("INTC1020:04", 19, NULL, 1, GPIO_ACTIVE_HIGH),
		GPIO_LOOKUP_IDX("INTC1020:01", 66, NULL, 2, GPIO_ACTIVE_HIGH),
		{} /* Terminating entry */
	},
};
/* glue remove: detach the BX-21A GPIO lookup table via the core helper */
static int simatic_ipc_batt_elkhartlake_remove(struct platform_device *pdev)
{
	return simatic_ipc_batt_remove(pdev, &simatic_ipc_batt_gpio_table_bx_21a);
}
/* glue probe: hand the BX-21A GPIO lookup table to the core helper */
static int simatic_ipc_batt_elkhartlake_probe(struct platform_device *pdev)
{
	return simatic_ipc_batt_probe(pdev, &simatic_ipc_batt_gpio_table_bx_21a);
}
static struct platform_driver simatic_ipc_batt_driver = {
	.probe = simatic_ipc_batt_elkhartlake_probe,
	.remove = simatic_ipc_batt_elkhartlake_remove,
	.driver = {
		.name = KBUILD_MODNAME,
	},
};

module_platform_driver(simatic_ipc_batt_driver);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
/* load the core battery driver and the pinctrl GPIO provider first */
MODULE_SOFTDEP("pre: simatic-ipc-batt platform:elkhartlake-pinctrl");
MODULE_AUTHOR("Henning Schild <[email protected]>");
| linux-master | drivers/platform/x86/siemens/simatic-ipc-batt-elkhartlake.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Siemens SIMATIC IPC driver for CMOS battery monitoring
*
* Copyright (c) Siemens AG, 2023
*
* Authors:
* Gerd Haeussler <[email protected]>
* Henning Schild <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/gpio/machine.h>
#include <linux/gpio/consumer.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/platform_data/x86/simatic-ipc-base.h>
#include <linux/sizes.h>
#include "simatic-ipc-batt.h"
/* re-sample the battery state at most once per day */
#define BATT_DELAY_MS	(1000 * 60 * 60 * 24) /* 24 h delay */

/* reported levels (presumably mV per hwmon "in" convention — verify) */
#define SIMATIC_IPC_BATT_LEVEL_FULL	3000
#define SIMATIC_IPC_BATT_LEVEL_CRIT	2750
#define SIMATIC_IPC_BATT_LEVEL_EMPTY	0

/* single battery per system, hence a single static state instance */
static struct simatic_ipc_batt {
	u8 devmode;				/* SIMATIC_IPC_DEVICE_* variant */
	long current_state;			/* last sampled level */
	struct gpio_desc *gpios[3];		/* [0]=empty, [1]=low, [2]=meter enable */
	unsigned long last_updated_jiffies;	/* time of last sample, 0 = never */
} priv;
/*
 * Sample the battery state via GPIOs.  If an optional "meter" enable
 * line exists it is asserted first so the status inputs carry a valid
 * measurement, and deasserted again afterwards.
 */
static long simatic_ipc_batt_read_gpio(void)
{
	long r = SIMATIC_IPC_BATT_LEVEL_FULL;

	if (priv.gpios[2]) {
		gpiod_set_value(priv.gpios[2], 1);
		/* let the measurement circuit settle before reading */
		msleep(150);
	}

	/* "empty" takes precedence over "low" */
	if (gpiod_get_value_cansleep(priv.gpios[0]))
		r = SIMATIC_IPC_BATT_LEVEL_EMPTY;
	else if (gpiod_get_value_cansleep(priv.gpios[1]))
		r = SIMATIC_IPC_BATT_LEVEL_CRIT;

	if (priv.gpios[2])
		gpiod_set_value(priv.gpios[2], 0);

	return r;
}
/* 227E devices report the battery state via a single I/O port */
#define SIMATIC_IPC_BATT_PORT_BASE	0x404D
static struct resource simatic_ipc_batt_io_res =
	DEFINE_RES_IO_NAMED(SIMATIC_IPC_BATT_PORT_BASE, SZ_1, KBUILD_MODNAME);

/*
 * Sample the battery state from the I/O port (bit 7 = empty, bit 6 = low).
 * Returns a level constant, or -EBUSY if the port region is unavailable.
 */
static long simatic_ipc_batt_read_io(struct device *dev)
{
	long r = SIMATIC_IPC_BATT_LEVEL_FULL;
	struct resource *res = &simatic_ipc_batt_io_res;
	u8 val;

	/* muxed request: the region may be shared with other users */
	if (!request_muxed_region(res->start, resource_size(res), res->name)) {
		dev_err(dev, "Unable to register IO resource at %pR\n", res);
		return -EBUSY;
	}

	val = inb(SIMATIC_IPC_BATT_PORT_BASE);
	release_region(simatic_ipc_batt_io_res.start, resource_size(&simatic_ipc_batt_io_res));

	if (val & (1 << 7))
		r = SIMATIC_IPC_BATT_LEVEL_EMPTY;
	else if (val & (1 << 6))
		r = SIMATIC_IPC_BATT_LEVEL_CRIT;

	return r;
}
/*
 * Return the cached battery level, refreshing it at most every
 * BATT_DELAY_MS and always on the first call.  Logs a warning on each
 * refresh that reports the battery as no longer full.
 */
static long simatic_ipc_batt_read_value(struct device *dev)
{
	unsigned long next_update;

	next_update = priv.last_updated_jiffies + msecs_to_jiffies(BATT_DELAY_MS);
	/* last_updated_jiffies == 0 forces the initial sample */
	if (time_after(jiffies, next_update) || !priv.last_updated_jiffies) {
		if (priv.devmode == SIMATIC_IPC_DEVICE_227E)
			priv.current_state = simatic_ipc_batt_read_io(dev);
		else
			priv.current_state = simatic_ipc_batt_read_gpio();
		priv.last_updated_jiffies = jiffies;
		if (priv.current_state < SIMATIC_IPC_BATT_LEVEL_FULL)
			dev_warn(dev, "CMOS battery needs to be replaced.\n");
	}

	return priv.current_state;
}
/* hwmon ->read callback: serve the input level and the static lcrit limit */
static int simatic_ipc_batt_read(struct device *dev, enum hwmon_sensor_types type,
				 u32 attr, int channel, long *val)
{
	switch (attr) {
	case hwmon_in_input:
		*val = simatic_ipc_batt_read_value(dev);
		break;
	case hwmon_in_lcrit:
		/* fixed lower-critical threshold */
		*val = SIMATIC_IPC_BATT_LEVEL_CRIT;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
/* expose only the input level and its lower-critical limit, read-only */
static umode_t simatic_ipc_batt_is_visible(const void *data, enum hwmon_sensor_types type,
					   u32 attr, int channel)
{
	switch (attr) {
	case hwmon_in_input:
	case hwmon_in_lcrit:
		return 0444;
	default:
		return 0;
	}
}
static const struct hwmon_ops simatic_ipc_batt_ops = {
	.is_visible = simatic_ipc_batt_is_visible,
	.read = simatic_ipc_batt_read,
};

/* one voltage channel: current reading plus lower-critical limit */
static const struct hwmon_channel_info *simatic_ipc_batt_info[] = {
	HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LCRIT),
	NULL
};

static const struct hwmon_chip_info simatic_ipc_batt_chip_info = {
	.ops = &simatic_ipc_batt_ops,
	.info = simatic_ipc_batt_info,
};
/*
 * Common remove path shared by the per-platform glue drivers.
 * @table is the GPIO lookup registered in probe, or NULL for the
 * port-I/O (227E) variant.
 * NOTE(review): assumes gpiod_remove_lookup_table() tolerates a NULL
 * table — verify against the gpiolib implementation.
 */
int simatic_ipc_batt_remove(struct platform_device *pdev, struct gpiod_lookup_table *table)
{
	gpiod_remove_lookup_table(table);
	return 0;
}
EXPORT_SYMBOL_GPL(simatic_ipc_batt_remove);
/*
 * Common probe path shared by the per-platform glue drivers.
 * For GPIO-based variants, binds @table to this device and requests the
 * status lines; then registers the hwmon device and takes an initial
 * battery reading.
 */
int simatic_ipc_batt_probe(struct platform_device *pdev, struct gpiod_lookup_table *table)
{
	struct simatic_ipc_platform *plat;
	struct device *dev = &pdev->dev;
	struct device *hwmon_dev;
	unsigned long flags;
	int err;

	plat = pdev->dev.platform_data;
	priv.devmode = plat->devmode;

	switch (priv.devmode) {
	case SIMATIC_IPC_DEVICE_127E:
	case SIMATIC_IPC_DEVICE_227G:
	case SIMATIC_IPC_DEVICE_BX_39A:
	case SIMATIC_IPC_DEVICE_BX_21A:
	case SIMATIC_IPC_DEVICE_BX_59A:
		/* GPIO-based variants: bind the lookup table to this device */
		table->dev_id = dev_name(dev);
		gpiod_add_lookup_table(table);
		break;
	case SIMATIC_IPC_DEVICE_227E:
		/* port-I/O variant: no GPIOs to set up */
		goto nogpio;
	default:
		return -ENODEV;
	}

	/* consumer indices 0..2 match the lookup-table entries */
	priv.gpios[0] = devm_gpiod_get_index(dev, "CMOSBattery empty", 0, GPIOD_IN);
	if (IS_ERR(priv.gpios[0])) {
		err = PTR_ERR(priv.gpios[0]);
		priv.gpios[0] = NULL;
		goto out;
	}
	priv.gpios[1] = devm_gpiod_get_index(dev, "CMOSBattery low", 1, GPIOD_IN);
	if (IS_ERR(priv.gpios[1])) {
		err = PTR_ERR(priv.gpios[1]);
		priv.gpios[1] = NULL;
		goto out;
	}

	/* the "meter" enable line is optional (third table entry present?) */
	if (table->table[2].key) {
		/* initial/idle level of the enable line differs per variant */
		flags = GPIOD_OUT_HIGH;
		if (priv.devmode == SIMATIC_IPC_DEVICE_BX_21A ||
		    priv.devmode == SIMATIC_IPC_DEVICE_BX_59A)
			flags = GPIOD_OUT_LOW;
		priv.gpios[2] = devm_gpiod_get_index(dev, "CMOSBattery meter", 2, flags);
		if (IS_ERR(priv.gpios[2])) {
			err = PTR_ERR(priv.gpios[2]);
			priv.gpios[2] = NULL;
			goto out;
		}
	} else {
		priv.gpios[2] = NULL;
	}

nogpio:
	hwmon_dev = devm_hwmon_device_register_with_info(dev, KBUILD_MODNAME,
							 &priv,
							 &simatic_ipc_batt_chip_info,
							 NULL);
	if (IS_ERR(hwmon_dev)) {
		err = PTR_ERR(hwmon_dev);
		goto out;
	}

	/* warn about aging battery even if userspace never reads hwmon */
	simatic_ipc_batt_read_value(dev);

	return 0;
out:
	simatic_ipc_batt_remove(pdev, table);
	return err;
}
EXPORT_SYMBOL_GPL(simatic_ipc_batt_probe);
/* fallback driver for the port-I/O (227E) variant — no GPIO lookup table */
static int simatic_ipc_batt_io_remove(struct platform_device *pdev)
{
	return simatic_ipc_batt_remove(pdev, NULL);
}

static int simatic_ipc_batt_io_probe(struct platform_device *pdev)
{
	return simatic_ipc_batt_probe(pdev, NULL);
}

static struct platform_driver simatic_ipc_batt_driver = {
	.probe = simatic_ipc_batt_io_probe,
	.remove = simatic_ipc_batt_io_remove,
	.driver = {
		.name = KBUILD_MODNAME,
	},
};

module_platform_driver(simatic_ipc_batt_driver);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("Henning Schild <[email protected]>");
| linux-master | drivers/platform/x86/siemens/simatic-ipc-batt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Siemens SIMATIC IPC driver for CMOS battery monitoring
*
* Copyright (c) Siemens AG, 2023
*
* Authors:
* Henning Schild <[email protected]>
*/
#include <linux/gpio/machine.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/platform_data/x86/simatic-ipc-base.h>
#include "simatic-ipc-batt.h"
/* table selected in probe based on the detected device variant */
static struct gpiod_lookup_table *batt_lookup_table;

/*
 * Consumer indices match simatic_ipc_batt_probe():
 * 0 = "CMOSBattery empty", 1 = "CMOSBattery low", 2 = "CMOSBattery meter".
 */
static struct gpiod_lookup_table simatic_ipc_batt_gpio_table_227g = {
	.table = {
		GPIO_LOOKUP_IDX("gpio-f7188x-7", 6, NULL, 0, GPIO_ACTIVE_HIGH),
		GPIO_LOOKUP_IDX("gpio-f7188x-7", 5, NULL, 1, GPIO_ACTIVE_HIGH),
		GPIO_LOOKUP_IDX("INTC1020:01", 66, NULL, 2, GPIO_ACTIVE_HIGH),
		{} /* Terminating entry */
	},
};

/* BX-39A has no "meter" enable line — only the two status inputs */
static struct gpiod_lookup_table simatic_ipc_batt_gpio_table_bx_39a = {
	.table = {
		GPIO_LOOKUP_IDX("gpio-f7188x-6", 4, NULL, 0, GPIO_ACTIVE_HIGH),
		GPIO_LOOKUP_IDX("gpio-f7188x-6", 3, NULL, 1, GPIO_ACTIVE_HIGH),
		{} /* Terminating entry */
	},
};

static struct gpiod_lookup_table simatic_ipc_batt_gpio_table_bx_59a = {
	.table = {
		GPIO_LOOKUP_IDX("gpio-f7188x-7", 6, NULL, 0, GPIO_ACTIVE_HIGH),
		GPIO_LOOKUP_IDX("gpio-f7188x-7", 5, NULL, 1, GPIO_ACTIVE_HIGH),
		GPIO_LOOKUP_IDX("INTC1056:00", 438, NULL, 2, GPIO_ACTIVE_HIGH),
		{} /* Terminating entry */
	}
};
/* glue remove: detach whichever lookup table probe registered */
static int simatic_ipc_batt_f7188x_remove(struct platform_device *pdev)
{
	return simatic_ipc_batt_remove(pdev, batt_lookup_table);
}
/* glue probe: pick the GPIO lookup table matching the device variant */
static int simatic_ipc_batt_f7188x_probe(struct platform_device *pdev)
{
	const struct simatic_ipc_platform *plat = pdev->dev.platform_data;

	switch (plat->devmode) {
	case SIMATIC_IPC_DEVICE_227G:
		batt_lookup_table = &simatic_ipc_batt_gpio_table_227g;
		break;
	case SIMATIC_IPC_DEVICE_BX_39A:
		batt_lookup_table = &simatic_ipc_batt_gpio_table_bx_39a;
		break;
	case SIMATIC_IPC_DEVICE_BX_59A:
		batt_lookup_table = &simatic_ipc_batt_gpio_table_bx_59a;
		break;
	default:
		return -ENODEV;
	}

	return simatic_ipc_batt_probe(pdev, batt_lookup_table);
}
static struct platform_driver simatic_ipc_batt_driver = {
	.probe = simatic_ipc_batt_f7188x_probe,
	.remove = simatic_ipc_batt_f7188x_remove,
	.driver = {
		.name = KBUILD_MODNAME,
	},
};

module_platform_driver(simatic_ipc_batt_driver);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
/* load the core battery driver and the GPIO providers first */
MODULE_SOFTDEP("pre: simatic-ipc-batt gpio_f7188x platform:elkhartlake-pinctrl platform:alderlake-pinctrl");
MODULE_AUTHOR("Henning Schild <[email protected]>");
| linux-master | drivers/platform/x86/siemens/simatic-ipc-batt-f7188x.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Siemens SIMATIC IPC platform driver
*
* Copyright (c) Siemens AG, 2018-2023
*
* Authors:
* Henning Schild <[email protected]>
* Jan Kiszka <[email protected]>
* Gerd Haeussler <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/dmi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_data/x86/simatic-ipc.h>
#include <linux/platform_device.h>
/* child platform devices created in register_platform_devices() */
static struct platform_device *ipc_led_platform_device;
static struct platform_device *ipc_wdt_platform_device;
static struct platform_device *ipc_batt_platform_device;

/* only act on Siemens hardware */
static const struct dmi_system_id simatic_ipc_whitelist[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "SIEMENS AG"),
		},
	},
	{}
};

/* shared platform data handed to every child device (devmode is reused) */
static struct simatic_ipc_platform platform_data;
#define SIMATIC_IPC_MAX_EXTRA_MODULES 2

/*
 * Per-station configuration: which LED, watchdog and battery device
 * variant to instantiate, plus helper modules to request up front.
 * extra_modules is NULL-terminated when shorter than
 * SIMATIC_IPC_MAX_EXTRA_MODULES.
 */
static struct {
	u32 station_id;
	u8 led_mode;
	u8 wdt_mode;
	u8 batt_mode;
	char *extra_modules[SIMATIC_IPC_MAX_EXTRA_MODULES];
} device_modes[] = {
	{SIMATIC_IPC_IPC127E,
		SIMATIC_IPC_DEVICE_127E, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_127E,
		{ "emc1403", NULL }},
	{SIMATIC_IPC_IPC227D,
		SIMATIC_IPC_DEVICE_227D, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_NONE,
		{ "emc1403", NULL }},
	{SIMATIC_IPC_IPC227E,
		SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_227E, SIMATIC_IPC_DEVICE_227E,
		{ "emc1403", NULL }},
	{SIMATIC_IPC_IPC227G,
		SIMATIC_IPC_DEVICE_227G, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_227G,
		{ "nct6775", "w83627hf_wdt" }},
	{SIMATIC_IPC_IPC277G,
		SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_227G,
		{ "nct6775", "w83627hf_wdt" }},
	{SIMATIC_IPC_IPC277E,
		SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_227E, SIMATIC_IPC_DEVICE_227E,
		{ "emc1403", NULL }},
	{SIMATIC_IPC_IPC427D,
		SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_NONE,
		{ "emc1403", NULL }},
	{SIMATIC_IPC_IPC427E,
		SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_NONE,
		{ "emc1403", NULL }},
	{SIMATIC_IPC_IPC477E,
		SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_NONE,
		{ "emc1403", NULL }},
	{SIMATIC_IPC_IPCBX_39A,
		SIMATIC_IPC_DEVICE_227G, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_BX_39A,
		{ "nct6775", "w83627hf_wdt" }},
	{SIMATIC_IPC_IPCPX_39A,
		SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_BX_39A,
		{ "nct6775", "w83627hf_wdt" }},
	{SIMATIC_IPC_IPCBX_21A,
		SIMATIC_IPC_DEVICE_BX_21A, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_BX_21A,
		{ "emc1403", NULL }},
	{SIMATIC_IPC_IPCBX_56A,
		SIMATIC_IPC_DEVICE_BX_59A, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_BX_59A,
		{ "emc1403", "w83627hf_wdt" }},
	{SIMATIC_IPC_IPCBX_59A,
		SIMATIC_IPC_DEVICE_BX_59A, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_BX_59A,
		{ "emc1403", "w83627hf_wdt" }},
};
/*
 * Instantiate the battery, LED and watchdog child devices configured for
 * @station_id.  Returns 0, -EINVAL for unknown stations, or the error of
 * a failed registration (after unwinding earlier registrations).
 */
static int register_platform_devices(u32 station_id)
{
	u8 ledmode = SIMATIC_IPC_DEVICE_NONE;
	u8 wdtmode = SIMATIC_IPC_DEVICE_NONE;
	u8 battmode = SIMATIC_IPC_DEVICE_NONE;
	char *pdevname;
	int err;
	int i;

	/* map the station id to the per-device variants */
	for (i = 0; i < ARRAY_SIZE(device_modes); i++) {
		if (device_modes[i].station_id == station_id) {
			ledmode = device_modes[i].led_mode;
			wdtmode = device_modes[i].wdt_mode;
			battmode = device_modes[i].batt_mode;
			break;
		}
	}

	if (battmode != SIMATIC_IPC_DEVICE_NONE) {
		/* some variants are served by dedicated glue drivers */
		pdevname = KBUILD_MODNAME "_batt";
		if (battmode == SIMATIC_IPC_DEVICE_127E)
			pdevname = KBUILD_MODNAME "_batt_apollolake";
		if (battmode == SIMATIC_IPC_DEVICE_BX_21A)
			pdevname = KBUILD_MODNAME "_batt_elkhartlake";
		if (battmode == SIMATIC_IPC_DEVICE_227G ||
		    battmode == SIMATIC_IPC_DEVICE_BX_39A ||
		    battmode == SIMATIC_IPC_DEVICE_BX_59A)
			pdevname = KBUILD_MODNAME "_batt_f7188x";
		platform_data.devmode = battmode;
		ipc_batt_platform_device =
			platform_device_register_data(NULL, pdevname,
				PLATFORM_DEVID_NONE, &platform_data,
				sizeof(struct simatic_ipc_platform));
		if (IS_ERR(ipc_batt_platform_device)) {
			err = PTR_ERR(ipc_batt_platform_device);
			/* never leave an ERR_PTR behind for the exit path */
			ipc_batt_platform_device = NULL;
			return err;
		}
		pr_debug("device=%s created\n",
			 ipc_batt_platform_device->name);
	}

	if (ledmode != SIMATIC_IPC_DEVICE_NONE) {
		pdevname = KBUILD_MODNAME "_leds";
		if (ledmode == SIMATIC_IPC_DEVICE_127E)
			pdevname = KBUILD_MODNAME "_leds_gpio_apollolake";
		if (ledmode == SIMATIC_IPC_DEVICE_227G || ledmode == SIMATIC_IPC_DEVICE_BX_59A)
			pdevname = KBUILD_MODNAME "_leds_gpio_f7188x";
		if (ledmode == SIMATIC_IPC_DEVICE_BX_21A)
			pdevname = KBUILD_MODNAME "_leds_gpio_elkhartlake";
		platform_data.devmode = ledmode;
		ipc_led_platform_device =
			platform_device_register_data(NULL,
				pdevname, PLATFORM_DEVID_NONE,
				&platform_data,
				sizeof(struct simatic_ipc_platform));
		if (IS_ERR(ipc_led_platform_device)) {
			err = PTR_ERR(ipc_led_platform_device);
			ipc_led_platform_device = NULL;
			goto out_unregister;
		}
		pr_debug("device=%s created\n",
			 ipc_led_platform_device->name);
	}

	if (wdtmode != SIMATIC_IPC_DEVICE_NONE) {
		platform_data.devmode = wdtmode;
		ipc_wdt_platform_device =
			platform_device_register_data(NULL,
				KBUILD_MODNAME "_wdt", PLATFORM_DEVID_NONE,
				&platform_data,
				sizeof(struct simatic_ipc_platform));
		if (IS_ERR(ipc_wdt_platform_device)) {
			err = PTR_ERR(ipc_wdt_platform_device);
			ipc_wdt_platform_device = NULL;
			goto out_unregister;
		}
		pr_debug("device=%s created\n",
			 ipc_wdt_platform_device->name);
	}

	if (ledmode == SIMATIC_IPC_DEVICE_NONE &&
	    wdtmode == SIMATIC_IPC_DEVICE_NONE &&
	    battmode == SIMATIC_IPC_DEVICE_NONE) {
		pr_warn("unsupported IPC detected, station id=%08x\n",
			station_id);
		return -EINVAL;
	}

	return 0;

out_unregister:
	/*
	 * Fix: previously a failed registration returned immediately and
	 * leaked the devices registered before it — module init then
	 * fails, so the exit path never runs.
	 * platform_device_unregister() is a no-op for NULL.
	 */
	platform_device_unregister(ipc_led_platform_device);
	ipc_led_platform_device = NULL;
	platform_device_unregister(ipc_batt_platform_device);
	ipc_batt_platform_device = NULL;
	return err;
}
/* best-effort load of the helper modules listed for this station id */
static void request_additional_modules(u32 station_id)
{
	char **modules = NULL;
	unsigned int idx;

	/* locate the module list belonging to this station */
	for (idx = 0; idx < ARRAY_SIZE(device_modes); idx++) {
		if (device_modes[idx].station_id != station_id)
			continue;
		modules = device_modes[idx].extra_modules;
		break;
	}

	if (!modules)
		return;

	/* the list is NULL-terminated when shorter than its array */
	for (idx = 0; idx < SIMATIC_IPC_MAX_EXTRA_MODULES && modules[idx]; idx++)
		request_module(modules[idx]);
}
static int __init simatic_ipc_init_module(void)
{
	const struct dmi_system_id *match;
	u32 station_id;
	int err;

	/* bail out silently on non-Siemens machines */
	match = dmi_first_match(simatic_ipc_whitelist);
	if (!match)
		return 0;

	/* scan DMI for the OEM entry carrying the station id */
	/* NOTE(review): assumes the helper always writes station_id on
	 * success — it lives in the shared header; verify. */
	err = dmi_walk(simatic_ipc_find_dmi_entry_helper, &station_id);

	if (err || station_id == SIMATIC_IPC_INVALID_STATION_ID) {
		pr_warn("DMI entry %d not found\n", SIMATIC_IPC_DMI_ENTRY_OEM);
		/* stay loaded but inactive on unknown stations */
		return 0;
	}

	request_additional_modules(station_id);

	return register_platform_devices(station_id);
}
/*
 * Tear down the child devices created at init time.
 * NOTE(review): relies on platform_device_unregister() tolerating NULL
 * for devices that were never created — verify.
 */
static void __exit simatic_ipc_exit_module(void)
{
	platform_device_unregister(ipc_led_platform_device);
	ipc_led_platform_device = NULL;

	platform_device_unregister(ipc_wdt_platform_device);
	ipc_wdt_platform_device = NULL;

	platform_device_unregister(ipc_batt_platform_device);
	ipc_batt_platform_device = NULL;
}

module_init(simatic_ipc_init_module);
module_exit(simatic_ipc_exit_module);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Gerd Haeussler <[email protected]>");
MODULE_ALIAS("dmi:*:svnSIEMENSAG:*");
| linux-master | drivers/platform/x86/siemens/simatic-ipc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Siemens SIMATIC IPC driver for CMOS battery monitoring
*
* Copyright (c) Siemens AG, 2023
*
* Authors:
* Henning Schild <[email protected]>
*/
#include <linux/gpio/machine.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include "simatic-ipc-batt.h"
/*
 * GPIO lines used for CMOS battery monitoring on IPC127E.
 * Consumer indices match simatic_ipc_batt_probe():
 * 0 = "CMOSBattery empty", 1 = "CMOSBattery low", 2 = "CMOSBattery meter".
 */
static struct gpiod_lookup_table simatic_ipc_batt_gpio_table_127e = {
	.table = {
		GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 55, NULL, 0, GPIO_ACTIVE_HIGH),
		GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 61, NULL, 1, GPIO_ACTIVE_HIGH),
		GPIO_LOOKUP_IDX("apollolake-pinctrl.1", 41, NULL, 2, GPIO_ACTIVE_HIGH),
		{} /* Terminating entry */
	},
};
/* glue remove: detach the 127E GPIO lookup table via the core helper */
static int simatic_ipc_batt_apollolake_remove(struct platform_device *pdev)
{
	return simatic_ipc_batt_remove(pdev, &simatic_ipc_batt_gpio_table_127e);
}
/* glue probe: hand the 127E GPIO lookup table to the core helper */
static int simatic_ipc_batt_apollolake_probe(struct platform_device *pdev)
{
	return simatic_ipc_batt_probe(pdev, &simatic_ipc_batt_gpio_table_127e);
}
static struct platform_driver simatic_ipc_batt_driver = {
	.probe = simatic_ipc_batt_apollolake_probe,
	.remove = simatic_ipc_batt_apollolake_remove,
	.driver = {
		.name = KBUILD_MODNAME,
	},
};

module_platform_driver(simatic_ipc_batt_driver);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
/* load the core battery driver and the pinctrl GPIO provider first */
MODULE_SOFTDEP("pre: simatic-ipc-batt platform:apollolake-pinctrl");
MODULE_AUTHOR("Henning Schild <[email protected]>");
| linux-master | drivers/platform/x86/siemens/simatic-ipc-batt-apollolake.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* dell_rbu.c
* Bios Update driver for Dell systems
* Author: Dell Inc
* Abhay Salunke <[email protected]>
*
* Copyright (C) 2005 Dell Inc.
*
* Remote BIOS Update (rbu) driver is used for updating DELL BIOS by
* creating entries in the /sys file systems on Linux 2.6 and higher
* kernels. The driver supports two mechanism to update the BIOS namely
* contiguous and packetized. Both these methods still require having some
* application to set the CMOS bit indicating the BIOS to update itself
* after a reboot.
*
* Contiguous method:
* This driver writes the incoming data in a monolithic image by allocating
* contiguous physical pages large enough to accommodate the incoming BIOS
* image size.
*
* Packetized method:
* The driver writes the incoming packet image by allocating a new packet
* on every time the packet data is written. This driver requires an
* application to break the BIOS image in to fixed sized packet chunks.
*
* See Documentation/admin-guide/dell_rbu.rst for more info.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/blkdev.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/moduleparam.h>
#include <linux/firmware.h>
#include <linux/dma-mapping.h>
#include <asm/set_memory.h>
MODULE_AUTHOR("Abhay Salunke <[email protected]>");
MODULE_DESCRIPTION("Driver for updating BIOS image on DELL systems");
MODULE_LICENSE("GPL");
MODULE_VERSION("3.2");
/* BIOS only scans for the image below 4 GiB physical */
#define BIOS_SCAN_LIMIT 0xffffffff
#define MAX_IMAGE_LENGTH 16
static struct _rbu_data {
	void *image_update_buffer;		/* mono mode: contiguous BIOS image */
	unsigned long image_update_buffer_size;
	unsigned long bios_image_size;
	int image_update_ordernum;		/* page order of the mono buffer */
	spinlock_t lock;			/* protects all fields and the packet list */
	unsigned long packet_read_count;	/* bytes already streamed to userspace */
	unsigned long num_packets;
	unsigned long packetsize;		/* fixed chunk size chosen by userspace */
	unsigned long imagesize;		/* total size of the packetized image */
	int entry_created;
} rbu_data;

/* "mono", "packet" or "init" (module parameter; also switchable at runtime) */
static char image_type[MAX_IMAGE_LENGTH + 1] = "mono";
module_param_string(image_type, image_type, sizeof (image_type), 0);
MODULE_PARM_DESC(image_type, "BIOS image type. choose- mono or packet or init");

/* packets below this physical address would be clobbered by BIOS (errata) */
static unsigned long allocation_floor = 0x100000;
module_param(allocation_floor, ulong, 0644);
MODULE_PARM_DESC(allocation_floor, "Minimum address for allocations when using Packet mode");

struct packet_data {
	struct list_head list;
	size_t length;		/* payload bytes in this packet */
	void *data;		/* page allocation, set uncachable in create_packet() */
	int ordernum;		/* allocation order for free_pages() */
};

/* sentinel head node of the packet list */
static struct packet_data packet_data_head;

static struct platform_device *rbu_device;
static int context;
/* reset the packet list head and all packet-mode bookkeeping */
static void init_packet_head(void)
{
	INIT_LIST_HEAD(&packet_data_head.list);
	rbu_data.packet_read_count = 0;
	rbu_data.num_packets = 0;
	rbu_data.packetsize = 0;
	rbu_data.imagesize = 0;
}
/*
 * Allocate and queue one BIOS-image packet holding @length bytes copied
 * from @data.  Called with rbu_data.lock held; the lock is dropped
 * around the blocking allocations and re-acquired before returning.
 */
static int create_packet(void *data, size_t length)
{
	struct packet_data *newpacket;
	int ordernum = 0;
	int retval = 0;
	unsigned int packet_array_size = 0;
	void **invalid_addr_packet_array = NULL;
	void *packet_data_temp_buf = NULL;
	unsigned int idx = 0;

	pr_debug("entry\n");

	if (!rbu_data.packetsize) {
		pr_debug("packetsize not specified\n");
		retval = -EINVAL;
		goto out_noalloc;
	}

	/* drop the spinlock: the allocations below may sleep */
	spin_unlock(&rbu_data.lock);

	newpacket = kzalloc(sizeof (struct packet_data), GFP_KERNEL);
	if (!newpacket) {
		pr_warn("failed to allocate new packet\n");
		retval = -ENOMEM;
		spin_lock(&rbu_data.lock);
		goto out_noalloc;
	}

	ordernum = get_order(length);

	/*
	 * BIOS errata mean we cannot allocate packets below 1MB or they will
	 * be overwritten by BIOS.
	 *
	 * array to temporarily hold packets
	 * that are below the allocation floor
	 *
	 * NOTE: very simplistic because we only need the floor to be at 1MB
	 *       due to BIOS errata. This shouldn't be used for higher floors
	 *       or you will run out of mem trying to allocate the array.
	 */
	packet_array_size = max_t(unsigned int, allocation_floor / rbu_data.packetsize, 1);
	invalid_addr_packet_array = kcalloc(packet_array_size, sizeof(void *),
						GFP_KERNEL);

	if (!invalid_addr_packet_array) {
		pr_warn("failed to allocate invalid_addr_packet_array\n");
		retval = -ENOMEM;
		spin_lock(&rbu_data.lock);
		goto out_alloc_packet;
	}

	/* keep allocating until we get pages at or above the floor */
	while (!packet_data_temp_buf) {
		packet_data_temp_buf = (unsigned char *)
			__get_free_pages(GFP_KERNEL, ordernum);
		if (!packet_data_temp_buf) {
			pr_warn("failed to allocate new packet\n");
			retval = -ENOMEM;
			spin_lock(&rbu_data.lock);
			goto out_alloc_packet_array;
		}

		if ((unsigned long)virt_to_phys(packet_data_temp_buf)
				< allocation_floor) {
			/* park the too-low allocation; freed at the end */
			pr_debug("packet 0x%lx below floor at 0x%lx\n",
					(unsigned long)virt_to_phys(
						packet_data_temp_buf),
					allocation_floor);
			invalid_addr_packet_array[idx++] = packet_data_temp_buf;
			packet_data_temp_buf = NULL;
		}
	}
	/*
	 * set to uncachable or it may never get written back before reboot
	 */
	set_memory_uc((unsigned long)packet_data_temp_buf, 1 << ordernum);

	spin_lock(&rbu_data.lock);

	newpacket->data = packet_data_temp_buf;

	pr_debug("newpacket at physical addr %lx\n",
		(unsigned long)virt_to_phys(newpacket->data));

	/* packets may not have fixed size */
	newpacket->length = length;
	newpacket->ordernum = ordernum;
	++rbu_data.num_packets;

	/* initialize the newly created packet headers */
	INIT_LIST_HEAD(&newpacket->list);
	list_add_tail(&newpacket->list, &packet_data_head.list);

	memcpy(newpacket->data, data, length);

	pr_debug("exit\n");

out_alloc_packet_array:
	/* always free packet array */
	while (idx--) {
		pr_debug("freeing unused packet below floor 0x%lx\n",
			(unsigned long)virt_to_phys(invalid_addr_packet_array[idx]));
		free_pages((unsigned long)invalid_addr_packet_array[idx], ordernum);
	}
	kfree(invalid_addr_packet_array);

out_alloc_packet:
	/* if error, free data */
	if (retval)
		kfree(newpacket);

out_noalloc:
	return retval;
}
/*
 * Split @length bytes at @data into rbu_data.packetsize chunks (the
 * last one may be shorter) and queue them via create_packet().
 * Called with rbu_data.lock held.  Returns 0 or the first packet
 * allocation error.
 */
static int packetize_data(const u8 *data, size_t length)
{
	int rc = 0;
	int done = 0;
	int packet_length;
	u8 *temp;
	u8 *end = (u8 *) data + length;
	pr_debug("data length %zd\n", length);
	if (!rbu_data.packetsize) {
		pr_warn("packetsize not specified\n");
		return -EIO;
	}

	temp = (u8 *) data;

	/* packetize the hunk */
	while (!done) {
		if ((temp + rbu_data.packetsize) < end)
			packet_length = rbu_data.packetsize;
		else {
			/* this is the last packet */
			packet_length = end - temp;
			done = 1;
		}

		if ((rc = create_packet(temp, packet_length)))
			return rc;

		pr_debug("%p:%td\n", temp, (end - temp));
		temp += packet_length;
	}

	/* record the full image size for the read-back path */
	rbu_data.imagesize = length;

	return rc;
}
/*
 * Copy this packet's share of the ongoing read into @data.
 * @bytes_read is the absolute offset already delivered to userspace;
 * @*list_read_count accumulates the packet bytes walked so far.
 * Returns the number of bytes copied from this packet (0 if the read
 * position lies beyond it).
 */
static int do_packet_read(char *data, struct packet_data *newpacket,
	int length, int bytes_read, int *list_read_count)
{
	void *ptemp_buf;
	int bytes_copied = 0;
	int j = 0;

	*list_read_count += newpacket->length;

	if (*list_read_count > bytes_read) {
		/* point to the start of unread data */
		j = newpacket->length - (*list_read_count - bytes_read);
		/* point to the offset in the packet buffer */
		ptemp_buf = (u8 *) newpacket->data + j;
		/*
		 * check if there is enough room in
		 * the incoming buffer
		 */
		if (length > (*list_read_count - bytes_read))
			/*
			 * copy what ever is there in this
			 * packet and move on
			 */
			bytes_copied = (*list_read_count - bytes_read);
		else
			/* copy the remaining */
			bytes_copied = length;
		memcpy(data, ptemp_buf, bytes_copied);
	}
	return bytes_copied;
}
/*
 * Stream the packetized image back to userspace across successive
 * reads, tracking progress in rbu_data.packet_read_count.  Called with
 * rbu_data.lock held.  Returns 0 or -ENOMEM when no packets exist.
 */
static int packet_read_list(char *data, size_t * pread_length)
{
	struct packet_data *newpacket;
	int temp_count = 0;
	int bytes_copied = 0;
	int bytes_read = 0;
	int remaining_bytes = 0;
	char *pdest = data;

	/* check if we have any packets */
	if (0 == rbu_data.num_packets)
		return -ENOMEM;

	remaining_bytes = *pread_length;
	bytes_read = rbu_data.packet_read_count;

	/*
	 * Fix: list_for_each_entry() must be given the list head itself.
	 * Passing head->next made the walk start at the *second* packet,
	 * skip the first one and eventually treat the head sentinel as a
	 * packet.
	 */
	list_for_each_entry(newpacket, &packet_data_head.list, list) {
		bytes_copied = do_packet_read(pdest, newpacket,
			remaining_bytes, bytes_read, &temp_count);
		remaining_bytes -= bytes_copied;
		bytes_read += bytes_copied;
		pdest += bytes_copied;
		/*
		 * check if we reached end of buffer before reaching the
		 * last packet
		 */
		if (remaining_bytes == 0)
			break;
	}
	/* finally set the bytes read */
	*pread_length = bytes_read - rbu_data.packet_read_count;
	rbu_data.packet_read_count = bytes_read;
	return 0;
}
/*
 * Free every queued packet (zeroing the stale BIOS data first) and
 * reset the packet-mode bookkeeping.  Called with rbu_data.lock held.
 */
static void packet_empty_list(void)
{
	struct packet_data *newpacket, *tmp;

	/*
	 * Fix: iterate from the list head, not head->next.  The old form
	 * skipped the first packet (leaking it) and visited the head
	 * sentinel — whose ->data is NULL — as if it were a packet,
	 * making the memset() below a NULL dereference.
	 */
	list_for_each_entry_safe(newpacket, tmp, &packet_data_head.list, list) {
		list_del(&newpacket->list);

		/*
		 * zero out the RBU packet memory before freeing
		 * to make sure there are no stale RBU packets left in memory
		 */
		memset(newpacket->data, 0, rbu_data.packetsize);
		set_memory_wb((unsigned long)newpacket->data,
			1 << newpacket->ordernum);
		free_pages((unsigned long) newpacket->data,
			newpacket->ordernum);
		kfree(newpacket);
	}
	rbu_data.packet_read_count = 0;
	rbu_data.num_packets = 0;
	rbu_data.imagesize = 0;
}
/*
* img_update_free: Frees the buffer allocated for storing BIOS image
* Always called with lock held and returned with lock held
*/
static void img_update_free(void)
{
	/* nothing allocated yet */
	if (!rbu_data.image_update_buffer)
		return;
	/*
	 * zero out this buffer before freeing it to get rid of any stale
	 * BIOS image copied in memory.
	 */
	memset(rbu_data.image_update_buffer, 0,
		rbu_data.image_update_buffer_size);
	free_pages((unsigned long) rbu_data.image_update_buffer,
		rbu_data.image_update_ordernum);

	/*
	 * Re-initialize the rbu_data variables after a free
	 */
	rbu_data.image_update_ordernum = -1;
	rbu_data.image_update_buffer = NULL;
	rbu_data.image_update_buffer_size = 0;
	rbu_data.bios_image_size = 0;
}
/*
* img_update_realloc: This function allocates the contiguous pages to
* accommodate the requested size of data. The memory address and size
* values are stored globally and on every call to this function the new
* size is checked to see if more data is required than the existing size.
* If true the previous memory is freed and new allocation is done to
* accommodate the new size. If the incoming size is less then than the
* already allocated size, then that memory is reused. This function is
* called with lock held and returns with lock held.
*/
static int img_update_realloc(unsigned long size)
{
	unsigned char *image_update_buffer = NULL;
	unsigned long img_buf_phys_addr;
	int ordernum;

	/*
	 * check if the buffer of sufficient size has been
	 * already allocated
	 */
	if (rbu_data.image_update_buffer_size >= size) {
		/*
		 * check for corruption
		 */
		if ((size != 0) && (rbu_data.image_update_buffer == NULL)) {
			pr_err("corruption check failed\n");
			return -EINVAL;
		}
		/*
		 * we have a valid pre-allocated buffer with
		 * sufficient size
		 */
		return 0;
	}

	/*
	 * free any previously allocated buffer
	 */
	img_update_free();

	/* drop the lock: __get_free_pages() may sleep */
	spin_unlock(&rbu_data.lock);

	ordernum = get_order(size);
	/* GFP_DMA32 keeps the image below 4 GiB where BIOS can scan it */
	image_update_buffer =
		(unsigned char *)__get_free_pages(GFP_DMA32, ordernum);

	spin_lock(&rbu_data.lock);
	if (!image_update_buffer) {
		pr_debug("Not enough memory for image update: size = %ld\n", size);
		return -ENOMEM;
	}

	img_buf_phys_addr = (unsigned long)virt_to_phys(image_update_buffer);
	if (WARN_ON_ONCE(img_buf_phys_addr > BIOS_SCAN_LIMIT))
		return -EINVAL;	/* can't happen per definition */

	rbu_data.image_update_buffer = image_update_buffer;
	rbu_data.image_update_buffer_size = size;
	rbu_data.bios_image_size = rbu_data.image_update_buffer_size;
	rbu_data.image_update_ordernum = ordernum;
	return 0;
}
/*
 * sysfs read helper for packet mode.  Called with rbu_data.lock held;
 * @pos/@count come from the bin_attribute read.  Resets the stream
 * position once the final chunk has been delivered.
 */
static ssize_t read_packet_data(char *buffer, loff_t pos, size_t count)
{
	int retval;
	size_t bytes_left;
	size_t data_length;
	char *ptempBuf = buffer;

	/* check to see if we have something to return */
	if (rbu_data.num_packets == 0) {
		pr_debug("no packets written\n");
		retval = -ENOMEM;
		goto read_rbu_data_exit;
	}

	if (pos > rbu_data.imagesize) {
		retval = 0;
		pr_warn("data underrun\n");
		goto read_rbu_data_exit;
	}

	bytes_left = rbu_data.imagesize - pos;
	data_length = min(bytes_left, count);

	if ((retval = packet_read_list(ptempBuf, &data_length)) < 0)
		goto read_rbu_data_exit;

	if ((pos + count) > rbu_data.imagesize) {
		/* this was the last copy: rewind for the next full read */
		rbu_data.packet_read_count = 0;
		retval = bytes_left;
	} else
		retval = count;

read_rbu_data_exit:
	return retval;
}
/*
 * read_rbu_mono_data - read back the monolithic BIOS image buffer.
 *
 * Returns bytes copied via memory_read_from_buffer(), or -ENOMEM when no
 * image has been staged yet.  Caller holds rbu_data.lock.
 */
static ssize_t read_rbu_mono_data(char *buffer, loff_t pos, size_t count)
{
    /* check to see if we have something to return */
    if ((rbu_data.image_update_buffer == NULL) ||
        (rbu_data.bios_image_size == 0)) {
        pr_debug("image_update_buffer %p, bios_image_size %lu\n",
            rbu_data.image_update_buffer,
            rbu_data.bios_image_size);
        return -ENOMEM;
    }

    return memory_read_from_buffer(buffer, count, &pos,
            rbu_data.image_update_buffer, rbu_data.bios_image_size);
}
/*
 * data_read - sysfs "data" binary attribute read handler.
 *
 * Dispatches to the mono or packet reader based on the current
 * image_type, under rbu_data.lock.  Unknown types read as 0 bytes.
 */
static ssize_t data_read(struct file *filp, struct kobject *kobj,
             struct bin_attribute *bin_attr,
             char *buffer, loff_t pos, size_t count)
{
    ssize_t ret_count = 0;

    spin_lock(&rbu_data.lock);

    if (!strcmp(image_type, "mono"))
        ret_count = read_rbu_mono_data(buffer, pos, count);
    else if (!strcmp(image_type, "packet"))
        ret_count = read_packet_data(buffer, pos, count);
    else
        pr_debug("invalid image type specified\n");

    spin_unlock(&rbu_data.lock);
    return ret_count;
}
static BIN_ATTR_RO(data, 0);
/*
 * callbackfn_rbu - completion callback for request_firmware_nowait().
 * @fw:      the loaded firmware blob (NULL if the request was aborted).
 * @context: unused.
 *
 * Stages the firmware either as a monolithic image or as packets,
 * depending on image_type, then releases the firmware.  Clears
 * entry_created so a later "init" write can re-request.
 */
static void callbackfn_rbu(const struct firmware *fw, void *context)
{
    rbu_data.entry_created = 0;

    if (!fw)
        return;

    if (!fw->size)
        goto out;

    spin_lock(&rbu_data.lock);
    if (!strcmp(image_type, "mono")) {
        if (!img_update_realloc(fw->size))
            memcpy(rbu_data.image_update_buffer,
                fw->data, fw->size);
    } else if (!strcmp(image_type, "packet")) {
        /*
         * we need to free previous packets if a
         * new hunk of packets needs to be downloaded
         */
        packet_empty_list();
        if (packetize_data(fw->data, fw->size))
            /* Incase something goes wrong when we are
             * in middle of packetizing the data, we
             * need to free up whatever packets might
             * have been created before we quit.
             */
            packet_empty_list();
    } else
        pr_debug("invalid image type specified\n");
    spin_unlock(&rbu_data.lock);

out:
    release_firmware(fw);
}
/*
 * image_type_read - sysfs "image_type" read handler.
 *
 * Emits the current image type string ("mono", "packet" or "init")
 * followed by a newline.  Reads past offset 0 return EOF.
 */
static ssize_t image_type_read(struct file *filp, struct kobject *kobj,
                   struct bin_attribute *bin_attr,
                   char *buffer, loff_t pos, size_t count)
{
    if (pos)
        return 0;

    return scnprintf(buffer, count, "%s\n", image_type);
}
/*
 * image_type_write - sysfs "image_type" write handler.
 *
 * Accepts "mono", "packet" or "init" (terminated by the first newline
 * or space).  "init" re-requests the firmware asynchronously to
 * recreate the /sys/class/firmware/dell_rbu entries if they went
 * missing.  Any previously staged packets/image are freed on every
 * accepted write.  Returns @count on success or a negative errno.
 *
 * Fix: the failure path used to log @rc (still the byte count at that
 * point) instead of the actual request_firmware_nowait() error code.
 */
static ssize_t image_type_write(struct file *filp, struct kobject *kobj,
                struct bin_attribute *bin_attr,
                char *buffer, loff_t pos, size_t count)
{
    int rc = count;
    int req_firm_rc = 0;
    int i;

    spin_lock(&rbu_data.lock);
    /*
     * Find the first newline or space
     */
    for (i = 0; i < count; ++i)
        if (buffer[i] == '\n' || buffer[i] == ' ') {
            buffer[i] = '\0';
            break;
        }
    if (i == count)
        buffer[count] = '\0';

    if (strstr(buffer, "mono"))
        strcpy(image_type, "mono");
    else if (strstr(buffer, "packet"))
        strcpy(image_type, "packet");
    else if (strstr(buffer, "init")) {
        /*
         * If due to the user error the driver gets in a bad
         * state where even though it is loaded , the
         * /sys/class/firmware/dell_rbu entries are missing.
         * to cover this situation the user can recreate entries
         * by writing init to image_type.
         */
        if (!rbu_data.entry_created) {
            spin_unlock(&rbu_data.lock);
            req_firm_rc = request_firmware_nowait(THIS_MODULE,
                FW_ACTION_NOUEVENT, "dell_rbu",
                &rbu_device->dev, GFP_KERNEL, &context,
                callbackfn_rbu);
            if (req_firm_rc) {
                /* report the real error, not the byte count */
                pr_err("request_firmware_nowait failed %d\n",
                       req_firm_rc);
                rc = -EIO;
            } else
                rbu_data.entry_created = 1;
            spin_lock(&rbu_data.lock);
        }
    } else {
        pr_warn("image_type is invalid\n");
        spin_unlock(&rbu_data.lock);
        return -EINVAL;
    }

    /* we must free all previous allocations */
    packet_empty_list();
    img_update_free();
    spin_unlock(&rbu_data.lock);

    return rc;
}
static BIN_ATTR_RW(image_type, 0);
/*
 * packet_size_read - sysfs "packet_size" read handler.
 *
 * Prints the configured packet size; reads past offset 0 return EOF.
 */
static ssize_t packet_size_read(struct file *filp, struct kobject *kobj,
                struct bin_attribute *bin_attr,
                char *buffer, loff_t pos, size_t count)
{
    int size = 0;

    if (!pos) {
        spin_lock(&rbu_data.lock);
        size = scnprintf(buffer, count, "%lu\n", rbu_data.packetsize);
        spin_unlock(&rbu_data.lock);
    }
    return size;
}
/*
 * packet_size_write - sysfs "packet_size" write handler.
 *
 * Frees any staged packets and updates the packet size if the input
 * parses as an unsigned decimal below 0xffffffff.  Always consumes the
 * whole write.
 *
 * Fix: the sscanf() return value was ignored, so a non-numeric write
 * compared the *uninitialized* local @temp — undefined behavior and a
 * possible garbage packetsize.
 */
static ssize_t packet_size_write(struct file *filp, struct kobject *kobj,
                 struct bin_attribute *bin_attr,
                 char *buffer, loff_t pos, size_t count)
{
    unsigned long temp;

    spin_lock(&rbu_data.lock);
    packet_empty_list();
    if (sscanf(buffer, "%lu", &temp) == 1 && temp < 0xffffffff)
        rbu_data.packetsize = temp;
    spin_unlock(&rbu_data.lock);
    return count;
}
static BIN_ATTR_RW(packet_size, 0);
static struct bin_attribute *rbu_bin_attrs[] = {
&bin_attr_data,
&bin_attr_image_type,
&bin_attr_packet_size,
NULL
};
static const struct attribute_group rbu_group = {
.bin_attrs = rbu_bin_attrs,
};
/*
 * dcdrbu_init - module init: register the dell_rbu platform device and
 * its sysfs attribute group.  Returns 0 on success or a negative errno;
 * the platform device is unregistered on sysfs group failure.
 */
static int __init dcdrbu_init(void)
{
    int rc;

    spin_lock_init(&rbu_data.lock);
    init_packet_head();
    rbu_device = platform_device_register_simple("dell_rbu", PLATFORM_DEVID_NONE, NULL, 0);
    if (IS_ERR(rbu_device)) {
        pr_err("platform_device_register_simple failed\n");
        return PTR_ERR(rbu_device);
    }

    rc = sysfs_create_group(&rbu_device->dev.kobj, &rbu_group);
    if (rc)
        goto out_devreg;

    rbu_data.entry_created = 0;
    return 0;

out_devreg:
    platform_device_unregister(rbu_device);
    return rc;
}
/*
 * dcdrbu_exit - module exit: free any staged packets/image, then tear
 * down the sysfs group and platform device.
 */
static __exit void dcdrbu_exit(void)
{
    spin_lock(&rbu_data.lock);
    packet_empty_list();
    img_update_free();
    spin_unlock(&rbu_data.lock);
    sysfs_remove_group(&rbu_device->dev.kobj, &rbu_group);
    platform_device_unregister(rbu_device);
}
module_exit(dcdrbu_exit);
module_init(dcdrbu_init);
| linux-master | drivers/platform/x86/dell/dell_rbu.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* WMI hotkeys support for Dell All-In-One series
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/acpi.h>
#include <linux/string.h>
MODULE_DESCRIPTION("WMI hotkeys driver for Dell All-In-One series");
MODULE_LICENSE("GPL");
#define EVENT_GUID1 "284A0E6B-380E-472A-921F-E52786257FB4"
#define EVENT_GUID2 "02314822-307C-4F66-BF0E-48AEAEB26CC8"
/* Layout of the newer Dell AIO WMI event buffer (little-endian). */
struct dell_wmi_event {
    u16 length;     /* number of u16 words following the header */
    /* 0x000: A hot key pressed or an event occurred
     * 0x00F: A sequence of hot keys are pressed */
    u16 type;
    u16 event[];    /* scancode(s); only event[0] is consumed here */
};
static const char *dell_wmi_aio_guids[] = {
EVENT_GUID1,
EVENT_GUID2,
NULL
};
MODULE_ALIAS("wmi:"EVENT_GUID1);
MODULE_ALIAS("wmi:"EVENT_GUID2);
static const struct key_entry dell_wmi_aio_keymap[] = {
{ KE_KEY, 0xc0, { KEY_VOLUMEUP } },
{ KE_KEY, 0xc1, { KEY_VOLUMEDOWN } },
{ KE_KEY, 0xe030, { KEY_VOLUMEUP } },
{ KE_KEY, 0xe02e, { KEY_VOLUMEDOWN } },
{ KE_KEY, 0xe020, { KEY_MUTE } },
{ KE_KEY, 0xe027, { KEY_DISPLAYTOGGLE } },
{ KE_KEY, 0xe006, { KEY_BRIGHTNESSUP } },
{ KE_KEY, 0xe005, { KEY_BRIGHTNESSDOWN } },
{ KE_KEY, 0xe00b, { KEY_SWITCHVIDEOMODE } },
{ KE_END, 0 }
};
static struct input_dev *dell_wmi_aio_input_dev;
/*
* The new WMI event data format will follow the dell_wmi_event structure
* So, we will check if the buffer matches the format
*/
/*
 * dell_wmi_aio_event_check - does the raw buffer look like a
 * struct dell_wmi_event?
 *
 * Requires at least 6 bytes (length + type + one event word), a type of
 * 0x0 or 0xf and a declared length of at least 2 words.
 */
static bool dell_wmi_aio_event_check(u8 *buffer, int length)
{
    struct dell_wmi_event *event = (struct dell_wmi_event *)buffer;

    if (!event || length < 6)
        return false;

    return (event->type == 0 || event->type == 0xf) &&
           event->length >= 2;
}
/*
 * dell_wmi_aio_notify - WMI event handler.
 *
 * Fetches the event data and reports the scancode through the sparse
 * keymap.  Well-behaved machines return an integer; others return a
 * buffer that either matches struct dell_wmi_event or just carries the
 * scancode in its first byte.
 */
static void dell_wmi_aio_notify(u32 value, void *context)
{
    struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
    union acpi_object *obj;
    struct dell_wmi_event *event;
    acpi_status status;

    status = wmi_get_event_data(value, &response);
    if (status != AE_OK) {
        pr_info("bad event status 0x%x\n", status);
        return;
    }

    obj = (union acpi_object *)response.pointer;
    if (obj) {
        unsigned int scancode = 0;

        switch (obj->type) {
        case ACPI_TYPE_INTEGER:
            /* Most All-In-One correctly return integer scancode */
            scancode = obj->integer.value;
            sparse_keymap_report_event(dell_wmi_aio_input_dev,
                scancode, 1, true);
            break;
        case ACPI_TYPE_BUFFER:
            if (dell_wmi_aio_event_check(obj->buffer.pointer,
                obj->buffer.length)) {
                event = (struct dell_wmi_event *)
                    obj->buffer.pointer;
                scancode = event->event[0];
            } else {
                /* Broken machines return the scancode in a
                   buffer */
                if (obj->buffer.pointer &&
                    obj->buffer.length > 0)
                    scancode = obj->buffer.pointer[0];
            }
            if (scancode)
                sparse_keymap_report_event(
                    dell_wmi_aio_input_dev,
                    scancode, 1, true);
            break;
        }
    }
    kfree(obj);
}
/*
 * dell_wmi_aio_input_setup - allocate and register the hotkey input
 * device with the sparse keymap.  Returns 0 on success or a negative
 * errno; the device is freed on any failure.
 */
static int __init dell_wmi_aio_input_setup(void)
{
    int err;

    dell_wmi_aio_input_dev = input_allocate_device();

    if (!dell_wmi_aio_input_dev)
        return -ENOMEM;

    dell_wmi_aio_input_dev->name = "Dell AIO WMI hotkeys";
    dell_wmi_aio_input_dev->phys = "wmi/input0";
    dell_wmi_aio_input_dev->id.bustype = BUS_HOST;

    err = sparse_keymap_setup(dell_wmi_aio_input_dev,
            dell_wmi_aio_keymap, NULL);
    if (err) {
        pr_err("Unable to setup input device keymap\n");
        goto err_free_dev;
    }
    err = input_register_device(dell_wmi_aio_input_dev);
    if (err) {
        pr_info("Unable to register input device\n");
        goto err_free_dev;
    }
    return 0;

err_free_dev:
    input_free_device(dell_wmi_aio_input_dev);
    return err;
}
/*
 * dell_wmi_aio_find - return the first known event GUID present on
 * this machine, or NULL if none of them is exported by the firmware.
 */
static const char *dell_wmi_aio_find(void)
{
    const char **guid;

    for (guid = dell_wmi_aio_guids; *guid; guid++)
        if (wmi_has_guid(*guid))
            return *guid;

    return NULL;
}
/*
 * dell_wmi_aio_init - module init: bail out unless a known GUID is
 * present, then set up the input device and install the WMI notify
 * handler.  Returns 0 on success or a negative errno.
 */
static int __init dell_wmi_aio_init(void)
{
    int err;
    const char *guid;

    guid = dell_wmi_aio_find();
    if (!guid) {
        pr_warn("No known WMI GUID found\n");
        return -ENXIO;
    }

    err = dell_wmi_aio_input_setup();
    if (err)
        return err;

    err = wmi_install_notify_handler(guid, dell_wmi_aio_notify, NULL);
    if (err) {
        pr_err("Unable to register notify handler - %d\n", err);
        input_unregister_device(dell_wmi_aio_input_dev);
        return err;
    }

    return 0;
}
/*
 * dell_wmi_aio_exit - module exit: remove the notify handler and
 * unregister the input device.  The GUID lookup cannot fail here
 * because init only succeeded if one was found.
 */
static void __exit dell_wmi_aio_exit(void)
{
    const char *guid;

    guid = dell_wmi_aio_find();
    wmi_remove_notify_handler(guid);
    input_unregister_device(dell_wmi_aio_input_dev);
}
module_init(dell_wmi_aio_init);
module_exit(dell_wmi_aio_exit);
| linux-master | drivers/platform/x86/dell/dell-wmi-aio.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux driver for WMI sensor information on Dell notebooks.
*
* Copyright (C) 2022 Armin Wolf <[email protected]>
*/
#define pr_format(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/device/driver.h>
#include <linux/dev_printk.h>
#include <linux/errno.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/hwmon.h>
#include <linux/kstrtox.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/limits.h>
#include <linux/pm.h>
#include <linux/power_supply.h>
#include <linux/printk.h>
#include <linux/seq_file.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/wmi.h>
#include <acpi/battery.h>
#include <asm/unaligned.h>
#define DRIVER_NAME "dell-wmi-ddv"
#define DELL_DDV_SUPPORTED_VERSION_MIN 2
#define DELL_DDV_SUPPORTED_VERSION_MAX 3
#define DELL_DDV_GUID "8A42EA14-4F2A-FD45-6422-0087F7A7E608"
#define DELL_EPPID_LENGTH 20
#define DELL_EPPID_EXT_LENGTH 23
static bool force;
module_param_unsafe(force, bool, 0);
MODULE_PARM_DESC(force, "Force loading without checking for supported WMI interface versions");
/* WMI method IDs of the Dell Data Vault (DDV) interface. */
enum dell_ddv_method {
    DELL_DDV_BATTERY_DESIGN_CAPACITY	= 0x01,
    DELL_DDV_BATTERY_FULL_CHARGE_CAPACITY	= 0x02,
    DELL_DDV_BATTERY_MANUFACTURE_NAME	= 0x03,
    DELL_DDV_BATTERY_MANUFACTURE_DATE	= 0x04,
    DELL_DDV_BATTERY_SERIAL_NUMBER		= 0x05,
    DELL_DDV_BATTERY_CHEMISTRY_VALUE	= 0x06,
    DELL_DDV_BATTERY_TEMPERATURE		= 0x07,
    DELL_DDV_BATTERY_CURRENT		= 0x08,
    DELL_DDV_BATTERY_VOLTAGE		= 0x09,
    DELL_DDV_BATTERY_MANUFACTURER_ACCESS	= 0x0A,
    DELL_DDV_BATTERY_RELATIVE_CHARGE_STATE	= 0x0B,
    DELL_DDV_BATTERY_CYCLE_COUNT		= 0x0C,
    DELL_DDV_BATTERY_EPPID			= 0x0D,
    DELL_DDV_BATTERY_RAW_ANALYTICS_START	= 0x0E,
    DELL_DDV_BATTERY_RAW_ANALYTICS		= 0x0F,
    DELL_DDV_BATTERY_DESIGN_VOLTAGE		= 0x10,
    DELL_DDV_BATTERY_RAW_ANALYTICS_A_BLOCK	= 0x11, /* version 3 */
    DELL_DDV_INTERFACE_VERSION		= 0x12,
    DELL_DDV_FAN_SENSOR_INFORMATION		= 0x20,
    DELL_DDV_THERMAL_SENSOR_INFORMATION	= 0x22,
};
struct fan_sensor_entry {
u8 type;
__le16 rpm;
} __packed;
struct thermal_sensor_entry {
u8 type;
s8 now;
s8 min;
s8 max;
u8 unknown;
} __packed;
struct combined_channel_info {
struct hwmon_channel_info info;
u32 config[];
};
struct combined_chip_info {
struct hwmon_chip_info chip;
const struct hwmon_channel_info *info[];
};
struct dell_wmi_ddv_sensors {
bool active;
struct mutex lock; /* protect caching */
unsigned long timestamp;
union acpi_object *obj;
u64 entries;
};
struct dell_wmi_ddv_data {
struct acpi_battery_hook hook;
struct device_attribute temp_attr;
struct device_attribute eppid_attr;
struct dell_wmi_ddv_sensors fans;
struct dell_wmi_ddv_sensors temps;
struct wmi_device *wdev;
};
static const char * const fan_labels[] = {
"CPU Fan",
"Chassis Motherboard Fan",
"Video Fan",
"Power Supply Fan",
"Chipset Fan",
"Memory Fan",
"PCI Fan",
"HDD Fan",
};
static const char * const fan_dock_labels[] = {
"Docking Chassis/Motherboard Fan",
"Docking Video Fan",
"Docking Power Supply Fan",
"Docking Chipset Fan",
};
/*
 * dell_wmi_ddv_query_type - evaluate a DDV WMI method and demand a
 * specific ACPI object type in the reply.
 * @wdev:   the WMI device.
 * @method: DDV method ID.
 * @arg:    32-bit input argument (e.g. battery index).
 * @result: on success, the caller-owned ACPI object (kfree() it).
 * @type:   expected acpi_object_type of the reply.
 *
 * Returns 0 on success, -EIO on evaluation failure, -ENODATA on empty
 * reply or -ENOMSG on type mismatch.
 */
static int dell_wmi_ddv_query_type(struct wmi_device *wdev, enum dell_ddv_method method, u32 arg,
                   union acpi_object **result, acpi_object_type type)
{
    struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
    const struct acpi_buffer in = {
        .length = sizeof(arg),
        .pointer = &arg,
    };
    union acpi_object *obj;
    acpi_status ret;

    ret = wmidev_evaluate_method(wdev, 0x0, method, &in, &out);
    if (ACPI_FAILURE(ret))
        return -EIO;

    obj = out.pointer;
    if (!obj)
        return -ENODATA;

    if (obj->type != type) {
        kfree(obj);
        return -ENOMSG;
    }

    *result = obj;
    return 0;
}
/*
 * dell_wmi_ddv_query_integer - evaluate a DDV method returning an
 * integer and narrow it to u32.
 *
 * Returns 0 and stores the value in @res, or a negative errno
 * (-ERANGE if the firmware value does not fit in 32 bits; @res is
 * left untouched in that case).
 */
static int dell_wmi_ddv_query_integer(struct wmi_device *wdev, enum dell_ddv_method method,
                      u32 arg, u32 *res)
{
    union acpi_object *obj;
    u64 value;
    int err;

    err = dell_wmi_ddv_query_type(wdev, method, arg, &obj, ACPI_TYPE_INTEGER);
    if (err < 0)
        return err;

    value = obj->integer.value;
    kfree(obj);

    if (value > U32_MAX)
        return -ERANGE;

    *res = (u32)value;
    return 0;
}
/*
 * dell_wmi_ddv_query_buffer - evaluate a DDV method that returns a
 * (size, buffer) package and validate it.
 *
 * On success *result is the caller-owned package whose element 0 is
 * the payload length and element 1 the raw buffer.  Returns -ENOMSG on
 * malformed packages, -ENODATA on empty payload, -EMSGSIZE when the
 * declared size exceeds the actual ACPI buffer.
 */
static int dell_wmi_ddv_query_buffer(struct wmi_device *wdev, enum dell_ddv_method method,
                     u32 arg, union acpi_object **result)
{
    union acpi_object *obj;
    u64 buffer_size;
    int ret;

    ret = dell_wmi_ddv_query_type(wdev, method, arg, &obj, ACPI_TYPE_PACKAGE);
    if (ret < 0)
        return ret;

    if (obj->package.count != 2 ||
        obj->package.elements[0].type != ACPI_TYPE_INTEGER ||
        obj->package.elements[1].type != ACPI_TYPE_BUFFER) {
        ret = -ENOMSG;

        goto err_free;
    }

    buffer_size = obj->package.elements[0].integer.value;

    if (!buffer_size) {
        ret = -ENODATA;

        goto err_free;
    }

    if (buffer_size > obj->package.elements[1].buffer.length) {
        dev_warn(&wdev->dev,
             FW_WARN "WMI buffer size (%llu) exceeds ACPI buffer size (%d)\n",
             buffer_size, obj->package.elements[1].buffer.length);
        ret = -EMSGSIZE;

        goto err_free;
    }

    *result = obj;

    return 0;

err_free:
    kfree(obj);

    return ret;
}
/* Convenience wrapper: evaluate a DDV method expecting an ACPI string.
 * The caller owns *result and must kfree() it. */
static int dell_wmi_ddv_query_string(struct wmi_device *wdev, enum dell_ddv_method method,
                     u32 arg, union acpi_object **result)
{
    return dell_wmi_ddv_query_type(wdev, method, arg, result, ACPI_TYPE_STRING);
}
/*
* Needs to be called with lock held, except during initialization.
*/
/*
 * dell_wmi_ddv_update_sensors - refresh the cached sensor buffer.
 * @wdev:       the WMI device.
 * @method:     fan or thermal information method ID.
 * @sensors:    cache to refresh.
 * @entry_size: size of a single sensor entry in the buffer.
 *
 * Reuses a cached result younger than one second; otherwise queries the
 * firmware and validates that the payload is a whole number of entries
 * plus a single 0xff terminator byte.
 *
 * Needs to be called with lock held, except during initialization.
 */
static int dell_wmi_ddv_update_sensors(struct wmi_device *wdev, enum dell_ddv_method method,
                       struct dell_wmi_ddv_sensors *sensors, size_t entry_size)
{
    u64 buffer_size, rem, entries;
    union acpi_object *obj;
    u8 *buffer;
    int ret;

    if (sensors->obj) {
        /* cache entries are valid for HZ jiffies (one second) */
        if (time_before(jiffies, sensors->timestamp + HZ))
            return 0;

        kfree(sensors->obj);
        sensors->obj = NULL;
    }

    ret = dell_wmi_ddv_query_buffer(wdev, method, 0, &obj);
    if (ret < 0)
        return ret;

    /* buffer format sanity check */
    buffer_size = obj->package.elements[0].integer.value;
    buffer = obj->package.elements[1].buffer.pointer;
    entries = div64_u64_rem(buffer_size, entry_size, &rem);
    if (rem != 1 || buffer[buffer_size - 1] != 0xff) {
        ret = -ENOMSG;
        goto err_free;
    }

    if (!entries) {
        ret = -ENODATA;
        goto err_free;
    }

    sensors->obj = obj;
    sensors->entries = entries;
    sensors->timestamp = jiffies;

    return 0;

err_free:
    kfree(obj);

    return ret;
}
/* All registered hwmon attributes are world-readable and read-only. */
static umode_t dell_wmi_ddv_is_visible(const void *drvdata, enum hwmon_sensor_types type, u32 attr,
                       int channel)
{
    return 0444;
}
/*
 * dell_wmi_ddv_fan_read_channel - read one fan channel from the cache.
 *
 * Refreshes the fan cache (caller holds data->fans.lock), then returns
 * the RPM for hwmon_fan_input.  -ENXIO if the channel index exceeds the
 * number of entries the firmware currently reports.
 */
static int dell_wmi_ddv_fan_read_channel(struct dell_wmi_ddv_data *data, u32 attr, int channel,
                     long *val)
{
    struct fan_sensor_entry *entry;
    int ret;

    ret = dell_wmi_ddv_update_sensors(data->wdev, DELL_DDV_FAN_SENSOR_INFORMATION,
                      &data->fans, sizeof(*entry));
    if (ret < 0)
        return ret;

    if (channel >= data->fans.entries)
        return -ENXIO;

    entry = (struct fan_sensor_entry *)data->fans.obj->package.elements[1].buffer.pointer;
    switch (attr) {
    case hwmon_fan_input:
        /* entries are packed little-endian and may be unaligned */
        *val = get_unaligned_le16(&entry[channel].rpm);
        return 0;
    default:
        break;
    }

    return -EOPNOTSUPP;
}
/*
 * dell_wmi_ddv_temp_read_channel - read one thermal channel from the
 * cache.  Firmware reports degrees Celsius as signed bytes; hwmon wants
 * millidegrees, hence the *1000.  Caller holds data->temps.lock.
 */
static int dell_wmi_ddv_temp_read_channel(struct dell_wmi_ddv_data *data, u32 attr, int channel,
                      long *val)
{
    struct thermal_sensor_entry *entry;
    int ret;

    ret = dell_wmi_ddv_update_sensors(data->wdev, DELL_DDV_THERMAL_SENSOR_INFORMATION,
                      &data->temps, sizeof(*entry));
    if (ret < 0)
        return ret;

    if (channel >= data->temps.entries)
        return -ENXIO;

    entry = (struct thermal_sensor_entry *)data->temps.obj->package.elements[1].buffer.pointer;
    switch (attr) {
    case hwmon_temp_input:
        *val = entry[channel].now * 1000;
        return 0;
    case hwmon_temp_min:
        *val = entry[channel].min * 1000;
        return 0;
    case hwmon_temp_max:
        *val = entry[channel].max * 1000;
        return 0;
    default:
        break;
    }

    return -EOPNOTSUPP;
}
/*
 * dell_wmi_ddv_read - hwmon .read callback; dispatches to the fan or
 * thermal channel reader under the matching cache lock.
 */
static int dell_wmi_ddv_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
                 int channel, long *val)
{
    struct dell_wmi_ddv_data *data = dev_get_drvdata(dev);
    int ret;

    switch (type) {
    case hwmon_fan:
        mutex_lock(&data->fans.lock);
        ret = dell_wmi_ddv_fan_read_channel(data, attr, channel, val);
        mutex_unlock(&data->fans.lock);
        return ret;
    case hwmon_temp:
        mutex_lock(&data->temps.lock);
        ret = dell_wmi_ddv_temp_read_channel(data, attr, channel, val);
        mutex_unlock(&data->temps.lock);
        return ret;
    default:
        break;
    }

    return -EOPNOTSUPP;
}
/*
 * dell_wmi_ddv_fan_read_string - map a fan channel's firmware type byte
 * to a human-readable label.  Types 0x00-0x07 are internal fans,
 * 0x11-0x14 dock fans; anything else is labeled "Unknown Fan".
 * Caller holds data->fans.lock.
 */
static int dell_wmi_ddv_fan_read_string(struct dell_wmi_ddv_data *data, int channel,
                    const char **str)
{
    struct fan_sensor_entry *entry;
    int ret;
    u8 type;

    ret = dell_wmi_ddv_update_sensors(data->wdev, DELL_DDV_FAN_SENSOR_INFORMATION,
                      &data->fans, sizeof(*entry));
    if (ret < 0)
        return ret;

    if (channel >= data->fans.entries)
        return -ENXIO;

    entry = (struct fan_sensor_entry *)data->fans.obj->package.elements[1].buffer.pointer;
    type = entry[channel].type;
    switch (type) {
    case 0x00 ... 0x07:
        *str = fan_labels[type];
        break;
    case 0x11 ... 0x14:
        *str = fan_dock_labels[type - 0x11];
        break;
    default:
        *str = "Unknown Fan";
        break;
    }

    return 0;
}
/*
 * dell_wmi_ddv_temp_read_string - map a thermal channel's firmware type
 * byte to a human-readable label.  Unknown types yield "Unknown".
 * Caller holds data->temps.lock.
 */
static int dell_wmi_ddv_temp_read_string(struct dell_wmi_ddv_data *data, int channel,
                     const char **str)
{
    struct thermal_sensor_entry *entry;
    int ret;

    ret = dell_wmi_ddv_update_sensors(data->wdev, DELL_DDV_THERMAL_SENSOR_INFORMATION,
                      &data->temps, sizeof(*entry));
    if (ret < 0)
        return ret;

    if (channel >= data->temps.entries)
        return -ENXIO;

    entry = (struct thermal_sensor_entry *)data->temps.obj->package.elements[1].buffer.pointer;
    switch (entry[channel].type) {
    case 0x00:
        *str = "CPU";
        break;
    case 0x11:
        *str = "Video";
        break;
    case 0x22:
        *str = "Memory"; /* sometimes called DIMM */
        break;
    case 0x33:
        *str = "Other";
        break;
    case 0x44:
        *str = "Ambient"; /* sometimes called SKIN */
        break;
    case 0x52:
        *str = "SODIMM";
        break;
    case 0x55:
        *str = "HDD";
        break;
    case 0x62:
        *str = "SODIMM 2";
        break;
    case 0x73:
        *str = "NB";
        break;
    case 0x83:
        *str = "Charger";
        break;
    case 0xbb:
        *str = "Memory 3";
        break;
    default:
        *str = "Unknown";
        break;
    }

    return 0;
}
/*
 * dell_wmi_ddv_read_string - hwmon .read_string callback; returns the
 * fan or temperature label under the matching cache lock.
 */
static int dell_wmi_ddv_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr,
                    int channel, const char **str)
{
    struct dell_wmi_ddv_data *data = dev_get_drvdata(dev);
    int ret;

    switch (type) {
    case hwmon_fan:
        switch (attr) {
        case hwmon_fan_label:
            mutex_lock(&data->fans.lock);
            ret = dell_wmi_ddv_fan_read_string(data, channel, str);
            mutex_unlock(&data->fans.lock);
            return ret;
        default:
            break;
        }
        break;
    case hwmon_temp:
        switch (attr) {
        case hwmon_temp_label:
            mutex_lock(&data->temps.lock);
            ret = dell_wmi_ddv_temp_read_string(data, channel, str);
            mutex_unlock(&data->temps.lock);
            return ret;
        default:
            break;
        }
        break;
    default:
        break;
    }

    return -EOPNOTSUPP;
}
static const struct hwmon_ops dell_wmi_ddv_ops = {
.is_visible = dell_wmi_ddv_is_visible,
.read = dell_wmi_ddv_read,
.read_string = dell_wmi_ddv_read_string,
};
/*
 * dell_wmi_ddv_channel_create - devm-allocate a hwmon_channel_info with
 * @count channels all sharing @config.  The config array is allocated
 * with one extra zeroed slot as the required terminator.
 * Returns the channel info or an ERR_PTR on allocation failure.
 */
static struct hwmon_channel_info *dell_wmi_ddv_channel_create(struct device *dev, u64 count,
                                  enum hwmon_sensor_types type,
                                  u32 config)
{
    struct combined_channel_info *cinfo;
    int i;

    /* use struct_size() to calculate needed size */
    cinfo = devm_kzalloc(dev, struct_size(cinfo, config, count + 1), GFP_KERNEL);
    if (!cinfo)
        return ERR_PTR(-ENOMEM);

    cinfo->info.type = type;
    cinfo->info.config = cinfo->config;

    for (i = 0; i < count; i++)
        cinfo->config[i] = config;

    return &cinfo->info;
}
/*
 * dell_wmi_ddv_hwmon_cache_invalidate - drop a sensor cache so the next
 * read re-queries the firmware (used after resume).  No-op for caches
 * that were never activated.
 */
static void dell_wmi_ddv_hwmon_cache_invalidate(struct dell_wmi_ddv_sensors *sensors)
{
    if (!sensors->active)
        return;

    mutex_lock(&sensors->lock);
    kfree(sensors->obj);
    sensors->obj = NULL;
    mutex_unlock(&sensors->lock);
}
/*
 * dell_wmi_ddv_hwmon_cache_destroy - devm action: deactivate a sensor
 * cache, destroy its lock and free the cached ACPI object.
 */
static void dell_wmi_ddv_hwmon_cache_destroy(void *data)
{
    struct dell_wmi_ddv_sensors *sensors = data;

    sensors->active = false;
    mutex_destroy(&sensors->lock);
    kfree(sensors->obj);
}
/*
 * dell_wmi_ddv_channel_init - probe one sensor type and build its hwmon
 * channel info.
 *
 * Does an initial firmware query to learn the channel count, activates
 * the cache (with a devm cleanup action) and creates a channel info of
 * that size.  Returns the channel info or an ERR_PTR; on channel
 * creation failure the cache cleanup action is released immediately.
 */
static struct hwmon_channel_info *dell_wmi_ddv_channel_init(struct wmi_device *wdev,
                                enum dell_ddv_method method,
                                struct dell_wmi_ddv_sensors *sensors,
                                size_t entry_size,
                                enum hwmon_sensor_types type,
                                u32 config)
{
    struct hwmon_channel_info *info;
    int ret;

    ret = dell_wmi_ddv_update_sensors(wdev, method, sensors, entry_size);
    if (ret < 0)
        return ERR_PTR(ret);

    mutex_init(&sensors->lock);
    sensors->active = true;

    ret = devm_add_action_or_reset(&wdev->dev, dell_wmi_ddv_hwmon_cache_destroy, sensors);
    if (ret < 0)
        return ERR_PTR(ret);

    info = dell_wmi_ddv_channel_create(&wdev->dev, sensors->entries, type, config);
    if (IS_ERR(info))
        devm_release_action(&wdev->dev, dell_wmi_ddv_hwmon_cache_destroy, sensors);

    return info;
}
/*
 * dell_wmi_ddv_hwmon_add - register the hwmon device.
 *
 * Builds up to three channel infos (chip, fan, temp) inside a devres
 * group so everything is released together on any failure.  Fan/temp
 * init failures are tolerated individually; if neither sensor type is
 * available (index < 2: only the chip channel), registration is
 * skipped and 0 is returned.
 */
static int dell_wmi_ddv_hwmon_add(struct dell_wmi_ddv_data *data)
{
    struct wmi_device *wdev = data->wdev;
    struct combined_chip_info *cinfo;
    struct hwmon_channel_info *info;
    struct device *hdev;
    int index = 0;
    int ret;

    if (!devres_open_group(&wdev->dev, dell_wmi_ddv_hwmon_add, GFP_KERNEL))
        return -ENOMEM;

    cinfo = devm_kzalloc(&wdev->dev, struct_size(cinfo, info, 4), GFP_KERNEL);
    if (!cinfo) {
        ret = -ENOMEM;

        goto err_release;
    }

    cinfo->chip.ops = &dell_wmi_ddv_ops;
    cinfo->chip.info = cinfo->info;

    info = dell_wmi_ddv_channel_create(&wdev->dev, 1, hwmon_chip, HWMON_C_REGISTER_TZ);
    if (IS_ERR(info)) {
        ret = PTR_ERR(info);

        goto err_release;
    }

    cinfo->info[index] = info;
    index++;

    info = dell_wmi_ddv_channel_init(wdev, DELL_DDV_FAN_SENSOR_INFORMATION, &data->fans,
                     sizeof(struct fan_sensor_entry), hwmon_fan,
                     (HWMON_F_INPUT | HWMON_F_LABEL));
    if (!IS_ERR(info)) {
        cinfo->info[index] = info;
        index++;
    }

    info = dell_wmi_ddv_channel_init(wdev, DELL_DDV_THERMAL_SENSOR_INFORMATION, &data->temps,
                     sizeof(struct thermal_sensor_entry), hwmon_temp,
                     (HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
                      HWMON_T_LABEL));
    if (!IS_ERR(info)) {
        cinfo->info[index] = info;
        index++;
    }

    if (index < 2) {
        /* Finding no available sensors is not an error */
        ret = 0;

        goto err_release;
    }

    hdev = devm_hwmon_device_register_with_info(&wdev->dev, "dell_ddv", data, &cinfo->chip,
                            NULL);
    if (IS_ERR(hdev)) {
        ret = PTR_ERR(hdev);

        goto err_release;
    }

    devres_close_group(&wdev->dev, dell_wmi_ddv_hwmon_add);

    return 0;

err_release:
    devres_release_group(&wdev->dev, dell_wmi_ddv_hwmon_add);

    return ret;
}
/*
 * dell_wmi_ddv_battery_index - derive the DDV battery index from the
 * battery's ACPI _UID (a decimal string).
 *
 * Returns 0 and stores the index, -ENODEV when no UID is present, or
 * the kstrtou32() parse error.
 */
static int dell_wmi_ddv_battery_index(struct acpi_device *acpi_dev, u32 *index)
{
    const char *uid = acpi_device_uid(acpi_dev);

    return uid ? kstrtou32(uid, 10, index) : -ENODEV;
}
/*
 * temp_show - battery "temp" sysfs attribute.
 *
 * Queries the battery temperature (firmware reports deci-Kelvin) and
 * converts it to tenths of a degree Celsius.
 */
static ssize_t temp_show(struct device *dev, struct device_attribute *attr, char *buf)
{
    struct dell_wmi_ddv_data *data = container_of(attr, struct dell_wmi_ddv_data, temp_attr);
    u32 index, value;
    int ret;

    ret = dell_wmi_ddv_battery_index(to_acpi_device(dev->parent), &index);
    if (ret < 0)
        return ret;

    ret = dell_wmi_ddv_query_integer(data->wdev, DELL_DDV_BATTERY_TEMPERATURE, index, &value);
    if (ret < 0)
        return ret;

    /* Use 2731 instead of 2731.5 to avoid unnecessary rounding */
    return sysfs_emit(buf, "%d\n", value - 2731);
}
/*
 * eppid_show - battery "eppid" sysfs attribute.
 *
 * Returns the Dell ePPID string; warns once if the firmware reports a
 * length other than the known 20/23-character formats.
 */
static ssize_t eppid_show(struct device *dev, struct device_attribute *attr, char *buf)
{
    struct dell_wmi_ddv_data *data = container_of(attr, struct dell_wmi_ddv_data, eppid_attr);
    union acpi_object *obj;
    u32 index;
    int ret;

    ret = dell_wmi_ddv_battery_index(to_acpi_device(dev->parent), &index);
    if (ret < 0)
        return ret;

    ret = dell_wmi_ddv_query_string(data->wdev, DELL_DDV_BATTERY_EPPID, index, &obj);
    if (ret < 0)
        return ret;

    if (obj->string.length != DELL_EPPID_LENGTH && obj->string.length != DELL_EPPID_EXT_LENGTH)
        dev_info_once(&data->wdev->dev, FW_INFO "Suspicious ePPID length (%d)\n",
                  obj->string.length);

    ret = sysfs_emit(buf, "%s\n", obj->string.pointer);

    kfree(obj);

    return ret;
}
/*
 * dell_wmi_ddv_add_battery - ACPI battery hook: attach the temp and
 * eppid attributes to a newly registered battery.  Batteries without a
 * usable index are silently skipped (returning an error here would
 * unregister the whole hook).
 */
static int dell_wmi_ddv_add_battery(struct power_supply *battery, struct acpi_battery_hook *hook)
{
    struct dell_wmi_ddv_data *data = container_of(hook, struct dell_wmi_ddv_data, hook);
    u32 index;
    int ret;

    /* Return 0 instead of error to avoid being unloaded */
    ret = dell_wmi_ddv_battery_index(to_acpi_device(battery->dev.parent), &index);
    if (ret < 0)
        return 0;

    ret = device_create_file(&battery->dev, &data->temp_attr);
    if (ret < 0)
        return ret;

    ret = device_create_file(&battery->dev, &data->eppid_attr);
    if (ret < 0) {
        /* roll back the first attribute on partial failure */
        device_remove_file(&battery->dev, &data->temp_attr);

        return ret;
    }

    return 0;
}
/* ACPI battery hook: detach both driver attributes from the battery. */
static int dell_wmi_ddv_remove_battery(struct power_supply *battery, struct acpi_battery_hook *hook)
{
    struct dell_wmi_ddv_data *data = container_of(hook, struct dell_wmi_ddv_data, hook);

    device_remove_file(&battery->dev, &data->temp_attr);
    device_remove_file(&battery->dev, &data->eppid_attr);

    return 0;
}
/* devm action: unregister the ACPI battery hook on driver teardown. */
static void dell_wmi_ddv_battery_remove(void *data)
{
    struct acpi_battery_hook *hook = data;

    battery_hook_unregister(hook);
}
/*
 * dell_wmi_ddv_battery_add - set up the battery hook and its per-driver
 * sysfs attributes, and arrange for automatic unregistration.
 */
static int dell_wmi_ddv_battery_add(struct dell_wmi_ddv_data *data)
{
    data->hook.name = "Dell DDV Battery Extension";
    data->hook.add_battery = dell_wmi_ddv_add_battery;
    data->hook.remove_battery = dell_wmi_ddv_remove_battery;

    sysfs_attr_init(&data->temp_attr.attr);
    data->temp_attr.attr.name = "temp";
    data->temp_attr.attr.mode = 0444;
    data->temp_attr.show = temp_show;

    sysfs_attr_init(&data->eppid_attr.attr);
    data->eppid_attr.attr.name = "eppid";
    data->eppid_attr.attr.mode = 0444;
    data->eppid_attr.show = eppid_show;

    battery_hook_register(&data->hook);

    return devm_add_action_or_reset(&data->wdev->dev, dell_wmi_ddv_battery_remove, &data->hook);
}
/*
 * dell_wmi_ddv_buffer_read - debugfs helper: dump the raw payload of a
 * (size, buffer) DDV method into a seq_file.
 */
static int dell_wmi_ddv_buffer_read(struct seq_file *seq, enum dell_ddv_method method)
{
    struct device *dev = seq->private;
    struct dell_wmi_ddv_data *data = dev_get_drvdata(dev);
    union acpi_object *obj;
    u64 size;
    u8 *buf;
    int ret;

    ret = dell_wmi_ddv_query_buffer(data->wdev, method, 0, &obj);
    if (ret < 0)
        return ret;

    size = obj->package.elements[0].integer.value;
    buf = obj->package.elements[1].buffer.pointer;
    ret = seq_write(seq, buf, size);
    kfree(obj);

    return ret;
}
/* debugfs show callbacks: dump the raw fan / thermal sensor buffers. */
static int dell_wmi_ddv_fan_read(struct seq_file *seq, void *offset)
{
    return dell_wmi_ddv_buffer_read(seq, DELL_DDV_FAN_SENSOR_INFORMATION);
}

static int dell_wmi_ddv_temp_read(struct seq_file *seq, void *offset)
{
    return dell_wmi_ddv_buffer_read(seq, DELL_DDV_THERMAL_SENSOR_INFORMATION);
}
/* devm action: remove the driver's debugfs directory. */
static void dell_wmi_ddv_debugfs_remove(void *data)
{
    struct dentry *entry = data;

    debugfs_remove(entry);
}

/*
 * dell_wmi_ddv_debugfs_init - create a per-device debugfs directory
 * with raw fan/thermal buffer dumps.  debugfs failures are non-fatal
 * by design, so no return values are checked here.
 */
static void dell_wmi_ddv_debugfs_init(struct wmi_device *wdev)
{
    struct dentry *entry;
    char name[64];

    scnprintf(name, ARRAY_SIZE(name), "%s-%s", DRIVER_NAME, dev_name(&wdev->dev));
    entry = debugfs_create_dir(name, NULL);

    debugfs_create_devm_seqfile(&wdev->dev, "fan_sensor_information", entry,
                    dell_wmi_ddv_fan_read);
    debugfs_create_devm_seqfile(&wdev->dev, "thermal_sensor_information", entry,
                    dell_wmi_ddv_temp_read);

    devm_add_action_or_reset(&wdev->dev, dell_wmi_ddv_debugfs_remove, entry);
}
/*
 * dell_wmi_ddv_probe - WMI driver probe.
 *
 * Verifies the firmware interface version (unless the "force" module
 * parameter is set), then sets up debugfs, the battery hook and the
 * hwmon interface.  Battery/hwmon setup failures are logged but do not
 * fail the probe.
 */
static int dell_wmi_ddv_probe(struct wmi_device *wdev, const void *context)
{
    struct dell_wmi_ddv_data *data;
    u32 version;
    int ret;

    ret = dell_wmi_ddv_query_integer(wdev, DELL_DDV_INTERFACE_VERSION, 0, &version);
    if (ret < 0)
        return ret;

    dev_dbg(&wdev->dev, "WMI interface version: %d\n", version);
    if (version < DELL_DDV_SUPPORTED_VERSION_MIN || version > DELL_DDV_SUPPORTED_VERSION_MAX) {
        if (!force)
            return -ENODEV;

        dev_warn(&wdev->dev, "Loading despite unsupported WMI interface version (%u)\n",
             version);
    }

    data = devm_kzalloc(&wdev->dev, sizeof(*data), GFP_KERNEL);
    if (!data)
        return -ENOMEM;

    dev_set_drvdata(&wdev->dev, data);
    data->wdev = wdev;

    dell_wmi_ddv_debugfs_init(wdev);

    if (IS_REACHABLE(CONFIG_ACPI_BATTERY)) {
        ret = dell_wmi_ddv_battery_add(data);
        if (ret < 0)
            dev_warn(&wdev->dev, "Unable to register ACPI battery hook: %d\n", ret);
    }

    if (IS_REACHABLE(CONFIG_HWMON)) {
        ret = dell_wmi_ddv_hwmon_add(data);
        if (ret < 0)
            dev_warn(&wdev->dev, "Unable to register hwmon interface: %d\n", ret);
    }

    return 0;
}
/*
 * dell_wmi_ddv_resume - PM resume: drop cached sensor data so stale
 * pre-suspend readings are not served after wakeup.
 */
static int dell_wmi_ddv_resume(struct device *dev)
{
    struct dell_wmi_ddv_data *data = dev_get_drvdata(dev);

    /* Force re-reading of all active sensors */
    dell_wmi_ddv_hwmon_cache_invalidate(&data->fans);
    dell_wmi_ddv_hwmon_cache_invalidate(&data->temps);

    return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(dell_wmi_ddv_dev_pm_ops, NULL, dell_wmi_ddv_resume);
static const struct wmi_device_id dell_wmi_ddv_id_table[] = {
{ DELL_DDV_GUID, NULL },
{ }
};
MODULE_DEVICE_TABLE(wmi, dell_wmi_ddv_id_table);
static struct wmi_driver dell_wmi_ddv_driver = {
.driver = {
.name = DRIVER_NAME,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = pm_sleep_ptr(&dell_wmi_ddv_dev_pm_ops),
},
.id_table = dell_wmi_ddv_id_table,
.probe = dell_wmi_ddv_probe,
};
module_wmi_driver(dell_wmi_ddv_driver);
MODULE_AUTHOR("Armin Wolf <[email protected]>");
MODULE_DESCRIPTION("Dell WMI sensor driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/x86/dell/dell-wmi-ddv.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* SMI methods for use with dell-smbios
*
* Copyright (c) Red Hat <[email protected]>
* Copyright (c) 2014 Gabriele Mazzotta <[email protected]>
* Copyright (c) 2014 Pali Rohár <[email protected]>
* Copyright (c) 2017 Dell Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/dmi.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include "dcdbas.h"
#include "dell-smbios.h"
static int da_command_address;
static int da_command_code;
static struct smi_buffer smi_buf;
static struct calling_interface_buffer *buffer;
static struct platform_device *platform_device;
static DEFINE_MUTEX(smm_mutex);
/*
 * parse_da_table - extract the SMI command I/O address and code from
 * the Dell calling interface SMBIOS structure (type 0xda).
 */
static void parse_da_table(const struct dmi_header *dm)
{
    struct calling_interface_structure *table =
        container_of(dm, struct calling_interface_structure, header);

    /* 4 bytes of table header, plus 7 bytes of Dell header, plus at least
     * 6 bytes of entry
     */
    if (dm->length < 17)
        return;

    da_command_address = table->cmdIOAddress;
    da_command_code = table->cmdIOCode;
}
/*
 * find_cmd_address - dmi_walk() callback; picks the Dell calling
 * interface structure (type 0xda) out of the SMBIOS tables.
 */
static void find_cmd_address(const struct dmi_header *dm, void *dummy)
{
    if (dm->type == 0xda)	/* Calling interface */
        parse_da_table(dm);
}
/*
 * dell_smbios_smm_call - issue an SMBIOS request through SMM.
 *
 * Copies the caller's request into the DMA-capable SMI buffer, triggers
 * the SMI via dcdbas, and copies the (firmware-modified) buffer back.
 * Serialized by smm_mutex since there is only one shared SMI buffer.
 */
static int dell_smbios_smm_call(struct calling_interface_buffer *input)
{
    struct smi_cmd command;
    size_t size;

    size = sizeof(struct calling_interface_buffer);
    command.magic = SMI_CMD_MAGIC;
    command.command_address = da_command_address;
    command.command_code = da_command_code;
    command.ebx = smi_buf.dma;
    command.ecx = 0x42534931;

    mutex_lock(&smm_mutex);
    memcpy(buffer, input, size);
    dcdbas_smi_request(&command);
    memcpy(input, buffer, size);
    mutex_unlock(&smm_mutex);
    return 0;
}
/* When enabled this indicates that SMM won't work */
/*
 * test_wsmt_enabled - detect whether WSMT blocks SMM access.
 *
 * Issues a dummy token read over SMM: if WSMT is disabled the firmware
 * overwrites the sentinel output value; if it is enabled the sentinel
 * survives and SMM is unusable.
 *
 * Fix: the buffer must be zeroed *before* the request fields are
 * filled in; the original code set cmd_class/cmd_select first and then
 * wiped them with memset() (it only happened to work because both
 * constants are zero).
 */
static bool test_wsmt_enabled(void)
{
    struct calling_interface_token *wsmt;

    /* if token doesn't exist, SMM will work */
    wsmt = dell_smbios_find_token(WSMT_EN_TOKEN);
    if (!wsmt)
        return false;

    /* If token exists, try to access over SMM but set a dummy return.
     * - If WSMT disabled it will be overwritten by SMM
     * - If WSMT enabled then dummy value will remain
     */
    memset(buffer, 0, sizeof(struct calling_interface_buffer));
    buffer->cmd_class = CLASS_TOKEN_READ;
    buffer->cmd_select = SELECT_TOKEN_STD;
    buffer->input[0] = wsmt->location;
    buffer->output[0] = 99;
    dell_smbios_smm_call(buffer);
    if (buffer->output[0] == 99)
        return true;

    return false;
}
/*
 * init_dell_smbios_smm - set up the SMM backend for dell-smbios.
 *
 * Allocates the below-4GB SMI buffer, locates the SMI command address
 * from SMBIOS, bails out if WSMT forbids SMM, then registers a
 * "dell-smbios" platform device and the SMM call method.
 * Returns 0 on success or a negative errno with everything unwound.
 */
int init_dell_smbios_smm(void)
{
    int ret;
    /*
     * Allocate buffer below 4GB for SMI data--only 32-bit physical addr
     * is passed to SMI handler.
     */
    ret = dcdbas_smi_alloc(&smi_buf, PAGE_SIZE);
    if (ret)
        return ret;
    buffer = (void *)smi_buf.virt;

    dmi_walk(find_cmd_address, NULL);

    if (test_wsmt_enabled()) {
        pr_debug("Disabling due to WSMT enabled\n");
        ret = -ENODEV;
        goto fail_wsmt;
    }

    platform_device = platform_device_alloc("dell-smbios", 1);
    if (!platform_device) {
        ret = -ENOMEM;
        goto fail_platform_device_alloc;
    }

    ret = platform_device_add(platform_device);
    if (ret)
        goto fail_platform_device_add;

    ret = dell_smbios_register_device(&platform_device->dev,
                      &dell_smbios_smm_call);
    if (ret)
        goto fail_register;

    return 0;

fail_register:
    platform_device_del(platform_device);

fail_platform_device_add:
    platform_device_put(platform_device);

fail_wsmt:
fail_platform_device_alloc:
    dcdbas_smi_free(&smi_buf);
    return ret;
}
/* Undo init_dell_smbios_smm(); a no-op when init never fully succeeded. */
void exit_dell_smbios_smm(void)
{
        if (!platform_device)
                return;

        dell_smbios_unregister_device(&platform_device->dev);
        platform_device_unregister(platform_device);
        dcdbas_smi_free(&smi_buf);
}
| linux-master | drivers/platform/x86/dell/dell-smbios-smm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* WMI methods for use with dell-smbios
*
* Copyright (c) 2017 Dell Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/dmi.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/wmi.h>
#include "dell-smbios.h"
#include "dell-wmi-descriptor.h"
static DEFINE_MUTEX(call_mutex);	/* serializes SMBIOS calls and buffer use */
static DEFINE_MUTEX(list_mutex);	/* protects wmi_list */
static int wmi_supported;		/* set when DMI advertises ACPI-WMI */

/* Layout of the DMI 0xB1 "misc BIOS flags" table (only flags0 is used) */
struct misc_bios_flags_structure {
        struct dmi_header header;
        u16 flags0;
} __packed;
#define FLAG_HAS_ACPI_WMI 0x02

#define DELL_WMI_SMBIOS_GUID "A80593CE-A997-11DA-B012-B622A1EF5492"

/* Per WMI-device state */
struct wmi_smbios_priv {
        struct dell_wmi_smbios_buffer *buf;	/* page-allocated staging buffer */
        struct list_head list;			/* node in wmi_list */
        struct wmi_device *wdev;
        struct device *child;
        u32 req_buf_size;	/* buf size, including the leading u64 length */
};
static LIST_HEAD(wmi_list);

/* First registered WMI device, or NULL; caller holds call_mutex */
static inline struct wmi_smbios_priv *get_first_smbios_priv(void)
{
        return list_first_entry_or_null(&wmi_list,
                                        struct wmi_smbios_priv,
                                        list);
}
static int run_smbios_call(struct wmi_device *wdev)
{
struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
struct wmi_smbios_priv *priv;
struct acpi_buffer input;
union acpi_object *obj;
acpi_status status;
priv = dev_get_drvdata(&wdev->dev);
input.length = priv->req_buf_size - sizeof(u64);
input.pointer = &priv->buf->std;
dev_dbg(&wdev->dev, "evaluating: %u/%u [%x,%x,%x,%x]\n",
priv->buf->std.cmd_class, priv->buf->std.cmd_select,
priv->buf->std.input[0], priv->buf->std.input[1],
priv->buf->std.input[2], priv->buf->std.input[3]);
status = wmidev_evaluate_method(wdev, 0, 1, &input, &output);
if (ACPI_FAILURE(status))
return -EIO;
obj = (union acpi_object *)output.pointer;
if (obj->type != ACPI_TYPE_BUFFER) {
dev_dbg(&wdev->dev, "received type: %d\n", obj->type);
if (obj->type == ACPI_TYPE_INTEGER)
dev_dbg(&wdev->dev, "SMBIOS call failed: %llu\n",
obj->integer.value);
kfree(output.pointer);
return -EIO;
}
memcpy(input.pointer, obj->buffer.pointer, obj->buffer.length);
dev_dbg(&wdev->dev, "result: [%08x,%08x,%08x,%08x]\n",
priv->buf->std.output[0], priv->buf->std.output[1],
priv->buf->std.output[2], priv->buf->std.output[3]);
kfree(output.pointer);
return 0;
}
/*
 * In-kernel SMBIOS call entry point for the WMI backend.  Stages the
 * caller's buffer, runs the WMI method, and copies the result back.
 */
static int dell_smbios_wmi_call(struct calling_interface_buffer *buffer)
{
        const size_t std_size = sizeof(struct calling_interface_buffer);
        struct wmi_smbios_priv *priv;
        int ret;

        mutex_lock(&call_mutex);
        priv = get_first_smbios_priv();
        if (priv) {
                /* zero the extended area beyond the standard buffer */
                memset(&priv->buf->ext, 0,
                       priv->req_buf_size - sizeof(u64) - std_size);
                memcpy(&priv->buf->std, buffer, std_size);
                ret = run_smbios_call(priv->wdev);
                memcpy(buffer, &priv->buf->std, std_size);
        } else {
                ret = -ENODEV;
        }
        mutex_unlock(&call_mutex);

        return ret;
}
/*
 * ioctl filter for the WMI character device: validates and dispatches
 * userspace SMBIOS requests (DELL_WMI_SMBIOS_CMD).  Returns 0 on
 * success, or -ENODEV/-EFAULT/-ENOIOCTLCMD on failure.
 */
static long dell_smbios_wmi_filter(struct wmi_device *wdev, unsigned int cmd,
                                   struct wmi_ioctl_buffer *arg)
{
        struct wmi_smbios_priv *priv;
        int ret = 0;

        switch (cmd) {
        case DELL_WMI_SMBIOS_CMD:
                mutex_lock(&call_mutex);
                priv = dev_get_drvdata(&wdev->dev);
                if (!priv) {
                        ret = -ENODEV;
                        goto fail_smbios_cmd;
                }
                memcpy(priv->buf, arg, priv->req_buf_size);
                /* reject class/select pairs the shared filter disallows */
                if (dell_smbios_call_filter(&wdev->dev, &priv->buf->std)) {
                        dev_err(&wdev->dev, "Invalid call %d/%d:%8x\n",
                                priv->buf->std.cmd_class,
                                priv->buf->std.cmd_select,
                                priv->buf->std.input[0]);
                        ret = -EFAULT;
                        goto fail_smbios_cmd;
                }
                ret = run_smbios_call(priv->wdev);
                if (ret)
                        goto fail_smbios_cmd;
                /* copy firmware outputs back for the caller */
                memcpy(arg, priv->buf, priv->req_buf_size);
fail_smbios_cmd:
                mutex_unlock(&call_mutex);
                break;
        default:
                ret = -ENOIOCTLCMD;
        }
        return ret;
}
/*
 * Probe one Dell SMBIOS WMI device: validate the WMI descriptor, size
 * and allocate the staging buffer, and register as an SMBIOS backend.
 */
static int dell_smbios_wmi_probe(struct wmi_device *wdev, const void *context)
{
        struct wmi_driver *wdriver =
                container_of(wdev->dev.driver, struct wmi_driver, driver);
        struct wmi_smbios_priv *priv;
        u32 hotfix;
        int count;
        int ret;

        ret = dell_wmi_get_descriptor_valid();
        if (ret)
                return ret;

        priv = devm_kzalloc(&wdev->dev, sizeof(struct wmi_smbios_priv),
                            GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        /* WMI buffer size will be either 4k or 32k depending on machine */
        if (!dell_wmi_get_size(&priv->req_buf_size))
                return -EPROBE_DEFER;	/* descriptor driver presumably not bound yet */

        /* some SMBIOS calls fail unless BIOS contains hotfix */
        if (!dell_wmi_get_hotfix(&hotfix))
                return -EPROBE_DEFER;
        if (!hotfix) {
                dev_warn(&wdev->dev,
                        "WMI SMBIOS userspace interface not supported(%u), try upgrading to a newer BIOS\n",
                        hotfix);
                /* disables the userspace ioctl path; kernel calls still work */
                wdriver->filter_callback = NULL;
        }

        /* add in the length object we will use internally with ioctl */
        priv->req_buf_size += sizeof(u64);
        ret = set_required_buffer_size(wdev, priv->req_buf_size);
        if (ret)
                return ret;

        count = get_order(priv->req_buf_size);
        priv->buf = (void *)__get_free_pages(GFP_KERNEL, count);
        if (!priv->buf)
                return -ENOMEM;

        /* ID is used by dell-smbios to set priority of drivers */
        wdev->dev.id = 1;
        ret = dell_smbios_register_device(&wdev->dev, &dell_smbios_wmi_call);
        if (ret)
                goto fail_register;

        priv->wdev = wdev;
        dev_set_drvdata(&wdev->dev, priv);
        mutex_lock(&list_mutex);
        list_add_tail(&priv->list, &wmi_list);
        mutex_unlock(&list_mutex);

        return 0;

fail_register:
        free_pages((unsigned long)priv->buf, count);
        return ret;
}
/* Tear down one WMI device: unlink, unregister, and free the buffer. */
static void dell_smbios_wmi_remove(struct wmi_device *wdev)
{
        struct wmi_smbios_priv *priv = dev_get_drvdata(&wdev->dev);

        /* hold call_mutex so no SMBIOS call can race the teardown */
        mutex_lock(&call_mutex);
        mutex_lock(&list_mutex);
        list_del(&priv->list);
        mutex_unlock(&list_mutex);
        dell_smbios_unregister_device(&wdev->dev);
        free_pages((unsigned long)priv->buf, get_order(priv->req_buf_size));
        mutex_unlock(&call_mutex);
}
/* WMI GUID this driver binds to */
static const struct wmi_device_id dell_smbios_wmi_id_table[] = {
        { .guid_string = DELL_WMI_SMBIOS_GUID },
        { },
};
static void parse_b1_table(const struct dmi_header *dm)
{
struct misc_bios_flags_structure *flags =
container_of(dm, struct misc_bios_flags_structure, header);
/* 4 bytes header, 8 bytes flags */
if (dm->length < 12)
return;
if (dm->handle != 0xb100)
return;
if ((flags->flags0 & FLAG_HAS_ACPI_WMI))
wmi_supported = 1;
}
static void find_b1(const struct dmi_header *dm, void *dummy)
{
switch (dm->type) {
case 0xb1: /* misc bios flags */
parse_b1_table(dm);
break;
}
}
static struct wmi_driver dell_smbios_wmi_driver = {
        .driver = {
                .name = "dell-smbios",
        },
        .probe = dell_smbios_wmi_probe,
        .remove = dell_smbios_wmi_remove,
        .id_table = dell_smbios_wmi_id_table,
        .filter_callback = dell_smbios_wmi_filter,	/* userspace ioctl access */
};
/* Register the WMI driver iff DMI advertises ACPI-WMI support. */
int init_dell_smbios_wmi(void)
{
        dmi_walk(find_b1, NULL);

        return wmi_supported ? wmi_driver_register(&dell_smbios_wmi_driver)
                             : -ENODEV;
}
/* Unregister the WMI driver; a no-op when init declined to register. */
void exit_dell_smbios_wmi(void)
{
        if (!wmi_supported)
                return;

        wmi_driver_unregister(&dell_smbios_wmi_driver);
}
MODULE_DEVICE_TABLE(wmi, dell_smbios_wmi_id_table);
| linux-master | drivers/platform/x86/dell/dell-smbios-wmi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* dell-smo8800.c - Dell Latitude ACPI SMO88XX freefall sensor driver
*
* Copyright (C) 2012 Sonal Santan <[email protected]>
* Copyright (C) 2014 Pali Rohár <[email protected]>
*
* This is loosely based on lis3lv02d driver.
*/
#define DRIVER_NAME "smo8800"
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
/* Per-device state for one SMO88xx freefall sensor */
struct smo8800_device {
        u32 irq;                     /* acpi device irq */
        atomic_t counter;            /* count after last read */
        struct miscdevice miscdev;   /* for /dev/freefall */
        unsigned long misc_opened;   /* whether the device is open */
        wait_queue_head_t misc_wait; /* Wait queue for the misc dev */
        struct device *dev;          /* acpi device */
};
/* Hard-IRQ half: count the event, wake readers, defer logging to a thread. */
static irqreturn_t smo8800_interrupt_quick(int irq, void *data)
{
        struct smo8800_device *sensor = data;

        atomic_inc(&sensor->counter);
        wake_up_interruptible(&sensor->misc_wait);

        return IRQ_WAKE_THREAD;
}
/* Threaded half: safe context for the (slow) kernel log message. */
static irqreturn_t smo8800_interrupt_thread(int irq, void *data)
{
        struct smo8800_device *sensor = data;

        dev_info(sensor->dev, "detected free fall\n");

        return IRQ_HANDLED;
}
/*
 * Blocking read for /dev/freefall: sleep until at least one free-fall
 * interrupt has arrived since the read started, then return one byte
 * holding the event count (saturated at 255).
 */
static ssize_t smo8800_misc_read(struct file *file, char __user *buf,
                                 size_t count, loff_t *pos)
{
        struct smo8800_device *smo8800 = container_of(file->private_data,
                                         struct smo8800_device, miscdev);

        u32 data = 0;
        unsigned char byte_data;
        ssize_t retval = 1;

        if (count < 1)
                return -EINVAL;

        atomic_set(&smo8800->counter, 0);
        /* condition both fetches and clears the counter atomically */
        retval = wait_event_interruptible(smo8800->misc_wait,
                                (data = atomic_xchg(&smo8800->counter, 0)));
        if (retval)
                return retval;	/* interrupted by a signal */

        retval = 1;	/* exactly one byte is returned */
        byte_data = min_t(u32, data, 255);

        if (put_user(byte_data, buf))
                retval = -EFAULT;

        return retval;
}
/* Open /dev/freefall; only a single opener is allowed at a time. */
static int smo8800_misc_open(struct inode *inode, struct file *file)
{
        struct smo8800_device *sensor =
                container_of(file->private_data, struct smo8800_device, miscdev);

        if (test_and_set_bit(0, &sensor->misc_opened))
                return -EBUSY;	/* already open */

        atomic_set(&sensor->counter, 0);
        return 0;
}
/* Release /dev/freefall so it can be opened again. */
static int smo8800_misc_release(struct inode *inode, struct file *file)
{
        struct smo8800_device *sensor =
                container_of(file->private_data, struct smo8800_device, miscdev);

        clear_bit(0, &sensor->misc_opened);
        return 0;
}
/* File operations for /dev/freefall (reads block until an event arrives) */
static const struct file_operations smo8800_misc_fops = {
        .owner = THIS_MODULE,
        .read = smo8800_misc_read,
        .open = smo8800_misc_open,
        .release = smo8800_misc_release,
};
/*
 * Bind to an ACPI SMO88xx device: register /dev/freefall first, then
 * hook up the free-fall interrupt.  On IRQ failure the misc device is
 * deregistered again.
 */
static int smo8800_probe(struct platform_device *device)
{
        int err;
        struct smo8800_device *smo8800;

        smo8800 = devm_kzalloc(&device->dev, sizeof(*smo8800), GFP_KERNEL);
        if (!smo8800) {
                dev_err(&device->dev, "failed to allocate device data\n");
                return -ENOMEM;
        }

        smo8800->dev = &device->dev;
        smo8800->miscdev.minor = MISC_DYNAMIC_MINOR;
        smo8800->miscdev.name = "freefall";
        smo8800->miscdev.fops = &smo8800_misc_fops;

        init_waitqueue_head(&smo8800->misc_wait);

        err = misc_register(&smo8800->miscdev);
        if (err) {
                dev_err(&device->dev, "failed to register misc dev: %d\n", err);
                return err;
        }

        platform_set_drvdata(device, smo8800);

        err = platform_get_irq(device, 0);
        if (err < 0)
                goto error;
        smo8800->irq = err;

        err = request_threaded_irq(smo8800->irq, smo8800_interrupt_quick,
                                   smo8800_interrupt_thread,
                                   IRQF_TRIGGER_RISING | IRQF_ONESHOT,
                                   DRIVER_NAME, smo8800);
        if (err) {
                dev_err(&device->dev,
                        "failed to request thread for IRQ %d: %d\n",
                        smo8800->irq, err);
                goto error;
        }

        dev_dbg(&device->dev, "device /dev/freefall registered with IRQ %d\n",
                smo8800->irq);
        return 0;

error:
        /* undo the earlier misc_register() */
        misc_deregister(&smo8800->miscdev);
        return err;
}
/* Unbind: release the IRQ first, then remove /dev/freefall. */
static void smo8800_remove(struct platform_device *device)
{
        struct smo8800_device *sensor = platform_get_drvdata(device);

        free_irq(sensor->irq, sensor);
        misc_deregister(&sensor->miscdev);
        dev_dbg(&device->dev, "device /dev/freefall unregistered\n");
}
/* NOTE: Keep this list in sync with drivers/i2c/busses/i2c-i801.c */
/* ACPI IDs of supported SMO88xx accelerometer variants */
static const struct acpi_device_id smo8800_ids[] = {
        { "SMO8800", 0 },
        { "SMO8801", 0 },
        { "SMO8810", 0 },
        { "SMO8811", 0 },
        { "SMO8820", 0 },
        { "SMO8821", 0 },
        { "SMO8830", 0 },
        { "SMO8831", 0 },
        { "", 0 },
};
MODULE_DEVICE_TABLE(acpi, smo8800_ids);
/* Platform driver glue; devices are enumerated via the ACPI table above */
static struct platform_driver smo8800_driver = {
        .probe = smo8800_probe,
        .remove_new = smo8800_remove,
        .driver = {
                .name = DRIVER_NAME,
                .acpi_match_table = smo8800_ids,
        },
};
module_platform_driver(smo8800_driver);
MODULE_DESCRIPTION("Dell Latitude freefall driver (ACPI SMO88XX)");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sonal Santan, Pali Rohár");
| linux-master | drivers/platform/x86/dell/dell-smo8800.c |
/*
* Copyright (C) 2010 Dell Inc.
* Louis Davis <[email protected]>
* Jim Dailey <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
*/
#include <linux/acpi.h>
#include <linux/leds.h>
#include <linux/slab.h>
#include <linux/module.h>
MODULE_AUTHOR("Louis Davis/Jim Dailey");
MODULE_DESCRIPTION("Dell LED Control Driver");
MODULE_LICENSE("GPL");
#define DELL_LED_BIOS_GUID "F6E4FE6E-909D-47cb-8BAB-C9F6F2F8D396"
MODULE_ALIAS("wmi:" DELL_LED_BIOS_GUID);
/* Error Result Codes: */
#define INVALID_DEVICE_ID	250
#define INVALID_PARAMETER	251
#define INVALID_BUFFER		252
#define INTERFACE_ERROR		253
#define UNSUPPORTED_COMMAND	254
#define UNSPECIFIED_ERROR	255

/* Device ID */
#define DEVICE_ID_PANEL_BACK	1

/* LED Commands */
#define CMD_LED_ON	16
#define CMD_LED_OFF	17
#define CMD_LED_BLINK	18

/*
 * In/out buffer for the DELL_LED_BIOS_GUID WMI method.  result_code is
 * seeded by the caller and overwritten by the firmware with one of the
 * result codes above (0 on success).
 */
struct bios_args {
        unsigned char length;		/* command payload length */
        unsigned char result_code;	/* in: seed, out: firmware status */
        unsigned char device_id;	/* DEVICE_ID_* */
        unsigned char command;		/* CMD_LED_* */
        unsigned char on_time;		/* blink on-time, 125 ms units */
        unsigned char off_time;		/* blink off-time, 125 ms units */
};
/*
 * Invoke the Dell LED WMI method with the given command bytes.
 * Returns the firmware result code (0 = success, else one of the error
 * codes above), a positive acpi_status on WMI failure, or -EINVAL on
 * malformed output.  NOTE(review): the error domains are mixed; callers
 * here only test for non-zero.
 */
static int dell_led_perform_fn(u8 length, u8 result_code, u8 device_id,
                               u8 command, u8 on_time, u8 off_time)
{
        struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
        struct bios_args *bios_return;
        struct acpi_buffer input;
        union acpi_object *obj;
        acpi_status status;
        u8 return_code;

        struct bios_args args = {
                .length = length,
                .result_code = result_code,
                .device_id = device_id,
                .command = command,
                .on_time = on_time,
                .off_time = off_time
        };

        input.length = sizeof(struct bios_args);
        input.pointer = &args;

        status = wmi_evaluate_method(DELL_LED_BIOS_GUID, 0, 1, &input, &output);
        if (ACPI_FAILURE(status))
                return status;

        obj = output.pointer;

        if (!obj)
                return -EINVAL;
        if (obj->type != ACPI_TYPE_BUFFER) {
                kfree(obj);
                return -EINVAL;
        }

        /* firmware echoes the args back with result_code filled in */
        bios_return = ((struct bios_args *)obj->buffer.pointer);
        return_code = bios_return->result_code;

        kfree(obj);

        return return_code;
}
/* Turn the panel-back LED fully on; 3-byte command, no extra arguments. */
static int led_on(void)
{
        return dell_led_perform_fn(3, INTERFACE_ERROR, DEVICE_ID_PANEL_BACK,
                                   CMD_LED_ON, 0, 0);
}
/* Turn the panel-back LED off; 3-byte command, no extra arguments. */
static int led_off(void)
{
        return dell_led_perform_fn(3, INTERFACE_ERROR, DEVICE_ID_PANEL_BACK,
                                   CMD_LED_OFF, 0, 0);
}
/* Hardware blink; on/off durations are given in eighths of a second. */
static int led_blink(unsigned char on_eighths, unsigned char off_eighths)
{
        return dell_led_perform_fn(5, INTERFACE_ERROR, DEVICE_ID_PANEL_BACK,
                                   CMD_LED_BLINK, on_eighths, off_eighths);
}
/* LED-class brightness callback: the LED is binary, anything non-off is on. */
static void dell_led_set(struct led_classdev *led_cdev,
                         enum led_brightness value)
{
        if (value != LED_OFF)
                led_on();
        else
                led_off();
}
/*
 * LED-class blink callback.  The Dell LED delay is based on 125 ms
 * intervals; round each requested delay up to the next interval, clamp
 * to the 1-255 range the firmware accepts, and report the actual delays
 * back through the in/out parameters.
 */
static int dell_led_blink(struct led_classdev *led_cdev,
                          unsigned long *delay_on, unsigned long *delay_off)
{
        unsigned long on8, off8;

        on8 = clamp_t(unsigned long, DIV_ROUND_UP(*delay_on, 125), 1, 255);
        *delay_on = on8 * 125;

        off8 = clamp_t(unsigned long, DIV_ROUND_UP(*delay_off, 125), 1, 255);
        *delay_off = off8 * 125;

        led_blink(on8, off8);

        return 0;
}
/* "dell::lid" LED: simple on/off device with hardware-timed blink */
static struct led_classdev dell_led = {
        .name           = "dell::lid",
        .brightness     = LED_OFF,
        .max_brightness = 1,
        .brightness_set = dell_led_set,
        .blink_set      = dell_led_blink,
        .flags          = LED_CORE_SUSPENDRESUME,
};
/*
 * Module init: require the WMI GUID, force the LED to a known (off)
 * state — which also proves the method works — then register the LED.
 */
static int __init dell_led_init(void)
{
        if (!wmi_has_guid(DELL_LED_BIOS_GUID))
                return -ENODEV;

        if (led_off() != 0)
                return -ENODEV;

        return led_classdev_register(NULL, &dell_led);
}
/* Module exit: unregister the LED class device and leave the LED off. */
static void __exit dell_led_exit(void)
{
        led_classdev_unregister(&dell_led);
        led_off();
}
module_init(dell_led_init);
module_exit(dell_led_exit);
| linux-master | drivers/platform/x86/dell/dell-wmi-led.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Dell WMI hotkeys
*
* Copyright (C) 2008 Red Hat <[email protected]>
* Copyright (C) 2014-2015 Pali Rohár <[email protected]>
*
* Portions based on wistron_btns.c:
* Copyright (C) 2005 Miloslav Trmac <[email protected]>
* Copyright (C) 2005 Bernhard Rosenkraenzer <[email protected]>
* Copyright (C) 2005 Dmitry Torokhov <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/acpi.h>
#include <linux/string.h>
#include <linux/dmi.h>
#include <linux/wmi.h>
#include <acpi/video.h>
#include "dell-smbios.h"
#include "dell-wmi-descriptor.h"
#include "dell-wmi-privacy.h"
MODULE_AUTHOR("Matthew Garrett <[email protected]>");
MODULE_AUTHOR("Pali Rohár <[email protected]>");
MODULE_DESCRIPTION("Dell laptop WMI hotkeys driver");
MODULE_LICENSE("GPL");
#define DELL_EVENT_GUID "9DBB5994-A997-11DA-B012-B622A1EF5492"

/* Set via DMI quirk; when unset, the 0xe025 hotkey event is ignored */
static bool wmi_requires_smbios_request;

/* Per WMI-device state */
struct dell_wmi_priv {
        struct input_dev *input_dev;		/* sparse-keymap hotkey device */
        struct input_dev *tabletswitch_dev;	/* lazily created SW_TABLET_MODE dev */
        u32 interface_version;			/* 0 needs the stale-buffer workaround */
};
/* DMI callback: flag that this model needs the SMBIOS-request quirk. */
static int __init dmi_matched(const struct dmi_system_id *dmi)
{
        wmi_requires_smbios_request = 1;
        return 1;	/* non-zero: dmi_check_system() stops scanning */
}
/* Models that need the SMBIOS-request quirk (see wmi_requires_smbios_request) */
static const struct dmi_system_id dell_wmi_smbios_list[] __initconst = {
        {
                .callback = dmi_matched,
                .ident = "Dell Inspiron M5110",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron M5110"),
                },
        },
        {
                .callback = dmi_matched,
                .ident = "Dell Vostro V131",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"),
                },
        },
        { }
};
/*
* Keymap for WMI events of type 0x0000
*
* Certain keys are flagged as KE_IGNORE. All of these are either
* notifications (rather than requests for change) or are also sent
* via the keyboard controller so should not be sent again.
*/
/* Sparse-keymap codes are (type << 16) | scancode; see dell_wmi_process_key() */
static const struct key_entry dell_wmi_keymap_type_0000[] = {
        { KE_IGNORE, 0x003a, { KEY_CAPSLOCK } },

        /* Key code is followed by brightness level */
        { KE_KEY, 0xe005, { KEY_BRIGHTNESSDOWN } },
        { KE_KEY, 0xe006, { KEY_BRIGHTNESSUP } },

        /* Battery health status button */
        { KE_KEY, 0xe007, { KEY_BATTERY } },

        /* Radio devices state change, key code is followed by other values */
        { KE_IGNORE, 0xe008, { KEY_RFKILL } },

        { KE_KEY, 0xe009, { KEY_EJECTCD } },

        /* Key code is followed by: next, active and attached devices */
        { KE_KEY, 0xe00b, { KEY_SWITCHVIDEOMODE } },

        /* Key code is followed by keyboard illumination level */
        { KE_IGNORE, 0xe00c, { KEY_KBDILLUMTOGGLE } },

        /* BIOS error detected */
        { KE_IGNORE, 0xe00d, { KEY_RESERVED } },

        /* Battery was removed or inserted */
        { KE_IGNORE, 0xe00e, { KEY_RESERVED } },

        /* Wifi Catcher */
        { KE_KEY, 0xe011, { KEY_WLAN } },

        /* Ambient light sensor toggle */
        { KE_IGNORE, 0xe013, { KEY_RESERVED } },

        { KE_IGNORE, 0xe020, { KEY_MUTE } },

        /* Unknown, defined in ACPI DSDT */
        /* { KE_IGNORE, 0xe023, { KEY_RESERVED } }, */

        /* Untested, Dell Instant Launch key on Inspiron 7520 */
        /* { KE_IGNORE, 0xe024, { KEY_RESERVED } }, */

        /* Dell Instant Launch key */
        { KE_KEY, 0xe025, { KEY_PROG4 } },

        /* Audio panel key */
        { KE_IGNORE, 0xe026, { KEY_RESERVED } },

        /* LCD Display On/Off Control key */
        { KE_KEY, 0xe027, { KEY_DISPLAYTOGGLE } },

        /* Untested, Multimedia key on Dell Vostro 3560 */
        /* { KE_IGNORE, 0xe028, { KEY_RESERVED } }, */

        /* Dell Instant Launch key */
        { KE_KEY, 0xe029, { KEY_PROG4 } },

        /* Untested, Windows Mobility Center button on Inspiron 7520 */
        /* { KE_IGNORE, 0xe02a, { KEY_RESERVED } }, */

        /* Unknown, defined in ACPI DSDT */
        /* { KE_IGNORE, 0xe02b, { KEY_RESERVED } }, */

        /* Untested, Dell Audio With Preset Switch button on Inspiron 7520 */
        /* { KE_IGNORE, 0xe02c, { KEY_RESERVED } }, */

        { KE_IGNORE, 0xe02e, { KEY_VOLUMEDOWN } },
        { KE_IGNORE, 0xe030, { KEY_VOLUMEUP } },
        { KE_IGNORE, 0xe033, { KEY_KBDILLUMUP } },
        { KE_IGNORE, 0xe034, { KEY_KBDILLUMDOWN } },
        { KE_IGNORE, 0xe03a, { KEY_CAPSLOCK } },

        /* NIC Link is Up */
        { KE_IGNORE, 0xe043, { KEY_RESERVED } },

        /* NIC Link is Down */
        { KE_IGNORE, 0xe044, { KEY_RESERVED } },

        /*
         * This entry is very suspicious!
         * Originally Matthew Garrett created this dell-wmi driver specially for
         * "button with a picture of a battery" which has event code 0xe045.
         * Later Mario Limonciello from Dell told us that event code 0xe045 is
         * reported by Num Lock and should be ignored because key is send also
         * by keyboard controller.
         * So for now we will ignore this event to prevent potential double
         * Num Lock key press.
         */
        { KE_IGNORE, 0xe045, { KEY_NUMLOCK } },

        /* Scroll lock and also going to tablet mode on portable devices */
        { KE_IGNORE, 0xe046, { KEY_SCROLLLOCK } },

        /* Untested, going from tablet mode on portable devices */
        /* { KE_IGNORE, 0xe047, { KEY_RESERVED } }, */

        /* Dell Support Center key */
        { KE_IGNORE, 0xe06e, { KEY_RESERVED } },

        { KE_IGNORE, 0xe0f7, { KEY_MUTE } },
        { KE_IGNORE, 0xe0f8, { KEY_VOLUMEDOWN } },
        { KE_IGNORE, 0xe0f9, { KEY_VOLUMEUP } },
};
/* One entry of the DMI 0xB2 hotkey table: firmware scancode -> BIOS keycode */
struct dell_bios_keymap_entry {
        u16 scancode;
        u16 keycode;
};

/* DMI 0xB2 table: DMI header followed by a variable number of entries */
struct dell_bios_hotkey_table {
        struct dmi_header header;
        struct dell_bios_keymap_entry keymap[];
};

/* Output of the DMI scan in handle_dmi_entry() */
struct dell_dmi_results {
        int err;		/* negative errno on allocation failure */
        int keymap_size;	/* number of valid entries in keymap */
        struct key_entry *keymap;	/* kcalloc'd; consumer frees */
};
/* Uninitialized entries here are KEY_RESERVED == 0. */
/* Indexed by the BIOS keycode from the 0xB2 table (see handle_dmi_entry) */
static const u16 bios_to_linux_keycode[256] = {
        [0]	= KEY_MEDIA,
        [1]	= KEY_NEXTSONG,
        [2]	= KEY_PLAYPAUSE,
        [3]	= KEY_PREVIOUSSONG,
        [4]	= KEY_STOPCD,
        [5]	= KEY_UNKNOWN,
        [6]	= KEY_UNKNOWN,
        [7]	= KEY_UNKNOWN,
        [8]	= KEY_WWW,
        [9]	= KEY_UNKNOWN,
        [10]	= KEY_VOLUMEDOWN,
        [11]	= KEY_MUTE,
        [12]	= KEY_VOLUMEUP,
        [13]	= KEY_UNKNOWN,
        [14]	= KEY_BATTERY,
        [15]	= KEY_EJECTCD,
        [16]	= KEY_UNKNOWN,
        [17]	= KEY_SLEEP,
        [18]	= KEY_PROG1,
        [19]	= KEY_BRIGHTNESSDOWN,
        [20]	= KEY_BRIGHTNESSUP,
        [21]	= KEY_BRIGHTNESS_AUTO,
        [22]	= KEY_KBDILLUMTOGGLE,
        [23]	= KEY_UNKNOWN,
        [24]	= KEY_SWITCHVIDEOMODE,
        [25]	= KEY_UNKNOWN,
        [26]	= KEY_UNKNOWN,
        [27]	= KEY_SWITCHVIDEOMODE,
        [28]	= KEY_UNKNOWN,
        [29]	= KEY_UNKNOWN,
        [30]	= KEY_PROG2,
        [31]	= KEY_UNKNOWN,
        [32]	= KEY_UNKNOWN,
        [33]	= KEY_UNKNOWN,
        [34]	= KEY_UNKNOWN,
        [35]	= KEY_UNKNOWN,
        [36]	= KEY_UNKNOWN,
        [37]	= KEY_UNKNOWN,
        [38]	= KEY_MICMUTE,
        [255]	= KEY_PROG3,
};
/*
* Keymap for WMI events of type 0x0010
*
* These are applied if the 0xB2 DMI hotkey table is present and doesn't
* override them.
*/
/* Entries here may be overridden by the DMI 0xB2 table in dell_wmi_input_setup() */
static const struct key_entry dell_wmi_keymap_type_0010[] = {
        /* Fn-lock switched to function keys */
        { KE_IGNORE, 0x0, { KEY_RESERVED } },

        /* Fn-lock switched to multimedia keys */
        { KE_IGNORE, 0x1, { KEY_RESERVED } },

        /* Keyboard backlight change notification */
        { KE_IGNORE, 0x3f, { KEY_RESERVED } },

        /* Backlight brightness level */
        { KE_KEY, 0x57, { KEY_BRIGHTNESSDOWN } },
        { KE_KEY, 0x58, { KEY_BRIGHTNESSUP } },

        /*Speaker Mute*/
        { KE_KEY, 0x109, { KEY_MUTE} },

        /* Mic mute */
        { KE_KEY, 0x150, { KEY_MICMUTE } },

        /* Fn-lock */
        { KE_IGNORE, 0x151, { KEY_RESERVED } },

        /* Change keyboard illumination */
        { KE_IGNORE, 0x152, { KEY_KBDILLUMTOGGLE } },

        /*
         * Radio disable (notify only -- there is no model for which the
         * WMI event is supposed to trigger an action).
         */
        { KE_IGNORE, 0x153, { KEY_RFKILL } },

        /* RGB keyboard backlight control */
        { KE_IGNORE, 0x154, { KEY_RESERVED } },

        /*
         * Stealth mode toggle. This will "disable all lights and sounds".
         * The action is performed by the BIOS and EC; the WMI event is just
         * a notification. On the XPS 13 9350, this is Fn+F7, and there's
         * a BIOS setting to enable and disable the hotkey.
         */
        { KE_IGNORE, 0x155, { KEY_RESERVED } },

        /* Rugged magnetic dock attach/detach events */
        { KE_IGNORE, 0x156, { KEY_RESERVED } },
        { KE_IGNORE, 0x157, { KEY_RESERVED } },

        /* Rugged programmable (P1/P2/P3 keys) */
        { KE_KEY, 0x850, { KEY_PROG1 } },
        { KE_KEY, 0x851, { KEY_PROG2 } },
        { KE_KEY, 0x852, { KEY_PROG3 } },

        /*
         * Radio disable (notify only -- there is no model for which the
         * WMI event is supposed to trigger an action).
         */
        { KE_IGNORE, 0xe008, { KEY_RFKILL } },

        /* Fn-lock */
        { KE_IGNORE, 0xe035, { KEY_RESERVED } },
};
/*
* Keymap for WMI events of type 0x0011
*/
/* Type 0x0011 events are status notifications rather than key presses */
static const struct key_entry dell_wmi_keymap_type_0011[] = {
        /* Reflex keyboard switch on 2n1 devices */
        { KE_IGNORE, 0xe070, { KEY_RESERVED } },

        /* Battery unplugged */
        { KE_IGNORE, 0xfff0, { KEY_RESERVED } },

        /* Battery inserted */
        { KE_IGNORE, 0xfff1, { KEY_RESERVED } },

        /*
         * Detachable keyboard detached / undocked
         * Note SW_TABLET_MODE is already reported through the intel_vbtn
         * driver for this, so we ignore it.
         */
        { KE_IGNORE, 0xfff2, { KEY_RESERVED } },

        /* Detachable keyboard attached / docked */
        { KE_IGNORE, 0xfff3, { KEY_RESERVED } },

        /* Keyboard backlight level changed */
        { KE_IGNORE, KBD_LED_OFF_TOKEN,      { KEY_RESERVED } },
        { KE_IGNORE, KBD_LED_ON_TOKEN,       { KEY_RESERVED } },
        { KE_IGNORE, KBD_LED_AUTO_TOKEN,     { KEY_RESERVED } },
        { KE_IGNORE, KBD_LED_AUTO_25_TOKEN,  { KEY_RESERVED } },
        { KE_IGNORE, KBD_LED_AUTO_50_TOKEN,  { KEY_RESERVED } },
        { KE_IGNORE, KBD_LED_AUTO_75_TOKEN,  { KEY_RESERVED } },
        { KE_IGNORE, KBD_LED_AUTO_100_TOKEN, { KEY_RESERVED } },
};
/*
* Keymap for WMI events of type 0x0012
* They are events with extended data
*/
/* Type 0x0012 records carry extended data words after the scancode */
static const struct key_entry dell_wmi_keymap_type_0012[] = {
        /* Backlight brightness change event */
        { KE_IGNORE, 0x0003, { KEY_RESERVED } },

        /* Ultra-performance mode switch request */
        { KE_IGNORE, 0x000d, { KEY_RESERVED } },

        /* Fn-lock button pressed */
        { KE_IGNORE, 0xe035, { KEY_RESERVED } },
};
/*
 * Report a switch state change on *subdev, lazily allocating and
 * registering the input device on first use.  On allocation or
 * registration failure the event is dropped with a warning.
 */
static void dell_wmi_switch_event(struct input_dev **subdev,
                                  const char *devname,
                                  int switchid,
                                  int value)
{
        if (!*subdev) {
                struct input_dev *dev = input_allocate_device();

                if (!dev) {
                        pr_warn("could not allocate device for %s\n", devname);
                        return;
                }
                __set_bit(EV_SW, (dev)->evbit);
                __set_bit(switchid, (dev)->swbit);

                (dev)->name = devname;
                (dev)->id.bustype = BUS_HOST;

                if (input_register_device(dev)) {
                        input_free_device(dev);
                        pr_warn("could not register device for %s\n", devname);
                        return;
                }
                *subdev = dev;
        }

        input_report_switch(*subdev, switchid, value);
        input_sync(*subdev);
}
/*
 * Translate one WMI scancode into an input event.  buffer/remaining
 * describe any extended data words following the scancode.  Returns the
 * number of extra buffer words consumed (0 or 1).
 */
static int dell_wmi_process_key(struct wmi_device *wdev, int type, int code, u16 *buffer,
                                int remaining)
{
        struct dell_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
        const struct key_entry *key;
        int used = 0;
        int value = 1;

        /* keymap entries were registered with (type << 16) | scancode */
        key = sparse_keymap_entry_from_scancode(priv->input_dev,
                                                (type << 16) | code);
        if (!key) {
                pr_info("Unknown key with type 0x%04x and code 0x%04x pressed\n",
                        type, code);
                return 0;
        }
        pr_debug("Key with type 0x%04x and code 0x%04x pressed\n", type, code);

        /* Don't report brightness notifications that will also come via ACPI */
        if ((key->keycode == KEY_BRIGHTNESSUP ||
             key->keycode == KEY_BRIGHTNESSDOWN) &&
            acpi_video_handles_brightness_key_presses())
                return 0;

        /* models without the DMI quirk don't want 0xe025 reported */
        if (type == 0x0000 && code == 0xe025 && !wmi_requires_smbios_request)
                return 0;

        if (key->keycode == KEY_KBDILLUMTOGGLE) {
                dell_laptop_call_notifier(
                        DELL_LAPTOP_KBD_BACKLIGHT_BRIGHTNESS_CHANGED, NULL);
        } else if (type == 0x0011 && code == 0xe070 && remaining > 0) {
                /* first data word: 0 = tablet, non-zero = laptop */
                dell_wmi_switch_event(&priv->tabletswitch_dev,
                                      "Dell tablet mode switch",
                                      SW_TABLET_MODE, !buffer[0]);
                return 1;
        } else if (type == 0x0012 && code == 0x000d && remaining > 0) {
                value = (buffer[2] == 2);
                used = 1;
        }

        sparse_keymap_report_entry(priv->input_dev, key, value, true);

        return used;
}
/*
 * WMI event handler.  The payload is a buffer of 16-bit words holding
 * one or more records of the form [len, type, data...], where len
 * counts the words that follow it.
 */
static void dell_wmi_notify(struct wmi_device *wdev,
                            union acpi_object *obj)
{
        struct dell_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
        u16 *buffer_entry, *buffer_end;
        acpi_size buffer_size;
        int len, i;

        if (obj->type != ACPI_TYPE_BUFFER) {
                pr_warn("bad response type %x\n", obj->type);
                return;
        }

        pr_debug("Received WMI event (%*ph)\n",
                 obj->buffer.length, obj->buffer.pointer);

        buffer_entry = (u16 *)obj->buffer.pointer;
        buffer_size = obj->buffer.length/2;
        buffer_end = buffer_entry + buffer_size;

        /*
         * BIOS/ACPI on devices with WMI interface version 0 does not clear
         * buffer before filling it. So next time when BIOS/ACPI send WMI event
         * which is smaller as previous then it contains garbage in buffer from
         * previous event.
         *
         * BIOS/ACPI on devices with WMI interface version 1 clears buffer and
         * sometimes send more events in buffer at one call.
         *
         * So to prevent reading garbage from buffer we will process only first
         * one event on devices with WMI interface version 0.
         */
        if (priv->interface_version == 0 && buffer_entry < buffer_end)
                if (buffer_end > buffer_entry + buffer_entry[0] + 1)
                        buffer_end = buffer_entry + buffer_entry[0] + 1;

        while (buffer_entry < buffer_end) {

                len = buffer_entry[0];
                if (len == 0)
                        break;

                len++;	/* include the length word itself */

                if (buffer_entry + len > buffer_end) {
                        pr_warn("Invalid length of WMI event\n");
                        break;
                }

                pr_debug("Process buffer (%*ph)\n", len*2, buffer_entry);

                switch (buffer_entry[1]) {
                case 0x0000: /* One key pressed or event occurred */
                        if (len > 2)
                                dell_wmi_process_key(wdev, buffer_entry[1],
                                                     buffer_entry[2],
                                                     buffer_entry + 3,
                                                     len - 3);
                        /* Extended data is currently ignored */
                        break;
                case 0x0010: /* Sequence of keys pressed */
                case 0x0011: /* Sequence of events occurred */
                        /* process_key() returns how many data words it consumed */
                        for (i = 2; i < len; ++i)
                                i += dell_wmi_process_key(wdev, buffer_entry[1],
                                                          buffer_entry[i],
                                                          buffer_entry + i,
                                                          len - i - 1);
                        break;
                case 0x0012:
                        if ((len > 4) && dell_privacy_process_event(buffer_entry[1], buffer_entry[3],
                                                                    buffer_entry[4]))
                                /* dell_privacy_process_event has handled the event */;
                        else if (len > 2)
                                dell_wmi_process_key(wdev, buffer_entry[1], buffer_entry[2],
                                                     buffer_entry + 3, len - 3);
                        break;
                default: /* Unknown event */
                        pr_info("Unknown WMI event type 0x%x\n",
                                (int)buffer_entry[1]);
                        break;
                }

                buffer_entry += len;

        }

}
/* Return true when scancode already appears in the first len keymap entries. */
static bool have_scancode(u32 scancode, const struct key_entry *keymap, int len)
{
        const struct key_entry *entry;

        for (entry = keymap; entry < keymap + len; entry++)
                if (entry->code == scancode)
                        return true;

        return false;
}
/*
 * dmi_walk() callback: locate the Dell hotkey table (DMI type 0xB2) and
 * translate it into a sparse-keymap key_entry array in *opaque (a
 * struct dell_dmi_results).  Only the first 0xB2 table found is used.
 */
static void handle_dmi_entry(const struct dmi_header *dm, void *opaque)
{
        struct dell_dmi_results *results = opaque;
        struct dell_bios_hotkey_table *table;
        int hotkey_num, i, pos = 0;
        struct key_entry *keymap;

        if (results->err || results->keymap)
                return;		/* We already found the hotkey table. */

        /* The Dell hotkey table is type 0xB2.  Scan until we find it. */
        if (dm->type != 0xb2)
                return;

        table = container_of(dm, struct dell_bios_hotkey_table, header);

        hotkey_num = (table->header.length -
                      sizeof(struct dell_bios_hotkey_table)) /
                                sizeof(struct dell_bios_keymap_entry);
        if (hotkey_num < 1) {
                /*
                 * Historically, dell-wmi would ignore a DMI entry of
                 * fewer than 7 bytes.  Sizes between 4 and 8 bytes are
                 * nonsensical (both the header and all entries are 4
                 * bytes), so we approximate the old behavior by
                 * ignoring tables with fewer than one entry.
                 */
                return;
        }

        keymap = kcalloc(hotkey_num, sizeof(struct key_entry), GFP_KERNEL);
        if (!keymap) {
                results->err = -ENOMEM;
                return;
        }

        for (i = 0; i < hotkey_num; i++) {
                const struct dell_bios_keymap_entry *bios_entry =
                                        &table->keymap[i];

                /* Uninitialized entries are 0 aka KEY_RESERVED. */
                u16 keycode = (bios_entry->keycode <
                               ARRAY_SIZE(bios_to_linux_keycode)) ?
                        bios_to_linux_keycode[bios_entry->keycode] :
                        (bios_entry->keycode == 0xffff ? KEY_UNKNOWN : KEY_RESERVED);

                /*
                 * Log if we find an entry in the DMI table that we don't
                 * understand.  If this happens, we should figure out what
                 * the entry means and add it to bios_to_linux_keycode.
                 */
                if (keycode == KEY_RESERVED) {
                        pr_info("firmware scancode 0x%x maps to unrecognized keycode 0x%x\n",
                                bios_entry->scancode, bios_entry->keycode);
                        continue;
                }

                if (keycode == KEY_KBDILLUMTOGGLE)
                        keymap[pos].type = KE_IGNORE;
                else
                        keymap[pos].type = KE_KEY;
                keymap[pos].code = bios_entry->scancode;
                keymap[pos].keycode = keycode;
                pos++;
        }

        results->keymap = keymap;
        results->keymap_size = pos;	/* may be < hotkey_num after skips */
}
/*
 * Allocate and register the "Dell WMI hotkeys" input device for @wdev.
 *
 * The sparse keymap is built by concatenating, in order:
 *   1. type-0x0010 scancodes found in the DMI hotkey table (if any),
 *   2. extra type-0x0010 entries not present in DMI,
 *   3. the static type-0x0011 and type-0x0012 tables,
 *   4. the "legacy" type-0x0000 table,
 * with the event type encoded in the upper 16 bits of each .code.
 *
 * Returns 0 on success or a negative errno; on failure no input device is
 * left allocated or registered.
 */
static int dell_wmi_input_setup(struct wmi_device *wdev)
{
	struct dell_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
	struct dell_dmi_results dmi_results = {};
	struct key_entry *keymap;
	int err, i, pos = 0;

	priv->input_dev = input_allocate_device();
	if (!priv->input_dev)
		return -ENOMEM;

	priv->input_dev->name = "Dell WMI hotkeys";
	priv->input_dev->id.bustype = BUS_HOST;
	priv->input_dev->dev.parent = &wdev->dev;

	if (dmi_walk(handle_dmi_entry, &dmi_results)) {
		/*
		 * Historically, dell-wmi ignored dmi_walk errors.  A failure
		 * is certainly surprising, but it probably just indicates
		 * a very old laptop.
		 */
		pr_warn("no DMI; using the old-style hotkey interface\n");
	}

	/* handle_dmi_entry() reports allocation failure through .err. */
	if (dmi_results.err) {
		err = dmi_results.err;
		goto err_free_dev;
	}

	/* Worst-case size: every table fully appended, plus the KE_END entry. */
	keymap = kcalloc(dmi_results.keymap_size +
			 ARRAY_SIZE(dell_wmi_keymap_type_0000) +
			 ARRAY_SIZE(dell_wmi_keymap_type_0010) +
			 ARRAY_SIZE(dell_wmi_keymap_type_0011) +
			 ARRAY_SIZE(dell_wmi_keymap_type_0012) +
			 1,
			 sizeof(struct key_entry), GFP_KERNEL);
	if (!keymap) {
		kfree(dmi_results.keymap);
		err = -ENOMEM;
		goto err_free_dev;
	}

	/* Append table with events of type 0x0010 which comes from DMI */
	for (i = 0; i < dmi_results.keymap_size; i++) {
		keymap[pos] = dmi_results.keymap[i];
		keymap[pos].code |= (0x0010 << 16);
		pos++;
	}

	kfree(dmi_results.keymap);

	/* Append table with extra events of type 0x0010 which are not in DMI */
	for (i = 0; i < ARRAY_SIZE(dell_wmi_keymap_type_0010); i++) {
		const struct key_entry *entry = &dell_wmi_keymap_type_0010[i];

		/*
		 * Check if we've already found this scancode.  This takes
		 * quadratic time, but it doesn't matter unless the list
		 * of extra keys gets very long.
		 */
		if (dmi_results.keymap_size &&
		    have_scancode(entry->code | (0x0010 << 16),
				  keymap, dmi_results.keymap_size)
		   )
			continue;

		keymap[pos] = *entry;
		keymap[pos].code |= (0x0010 << 16);
		pos++;
	}

	/* Append table with events of type 0x0011 */
	for (i = 0; i < ARRAY_SIZE(dell_wmi_keymap_type_0011); i++) {
		keymap[pos] = dell_wmi_keymap_type_0011[i];
		keymap[pos].code |= (0x0011 << 16);
		pos++;
	}

	/* Append table with events of type 0x0012 */
	for (i = 0; i < ARRAY_SIZE(dell_wmi_keymap_type_0012); i++) {
		keymap[pos] = dell_wmi_keymap_type_0012[i];
		keymap[pos].code |= (0x0012 << 16);
		pos++;
	}

	/*
	 * Now append also table with "legacy" events of type 0x0000. Some of
	 * them are reported also on laptops which have scancodes in DMI.
	 */
	for (i = 0; i < ARRAY_SIZE(dell_wmi_keymap_type_0000); i++) {
		keymap[pos] = dell_wmi_keymap_type_0000[i];
		pos++;
	}

	/* Terminator expected by the sparse-keymap library. */
	keymap[pos].type = KE_END;

	err = sparse_keymap_setup(priv->input_dev, keymap, NULL);
	/*
	 * Sparse keymap library makes a copy of keymap so we don't need the
	 * original one that was allocated.
	 */
	kfree(keymap);
	if (err)
		goto err_free_dev;

	err = input_register_device(priv->input_dev);
	if (err)
		goto err_free_dev;

	return 0;

 err_free_dev:
	input_free_device(priv->input_dev);
	return err;
}
/* Unregister the input devices created for this WMI device. */
static void dell_wmi_input_destroy(struct wmi_device *wdev)
{
	struct dell_wmi_priv *data = dev_get_drvdata(&wdev->dev);

	input_unregister_device(data->input_dev);

	/* The tablet-mode switch device only exists on some machines. */
	if (data->tabletswitch_dev)
		input_unregister_device(data->tabletswitch_dev);
}
/*
* According to Dell SMBIOS documentation:
*
* 17 3 Application Program Registration
*
* cbArg1 Application ID 1 = 0x00010000
* cbArg2 Application ID 2
* QUICKSET/DCP = 0x51534554 "QSET"
* ALS Driver = 0x416c7353 "AlsS"
* Latitude ON = 0x4c6f6e52 "LonR"
* cbArg3 Application version or revision number
* cbArg4 0 = Unregister application
* 1 = Register application
* cbRes1 Standard return codes (0, -1, -2)
*/
/*
 * Register (or unregister) with the BIOS application-registration SMBIOS
 * call so that hotkey WMI events are delivered.  See the table in the
 * comment above for the meaning of the magic input values: input[0] is
 * "Application ID 1" (0x00010000), input[1] is the QUICKSET/DCP ID
 * ("QSET"), input[3] selects register (1) / unregister (0).
 *
 * Returns 0 on success or a negative errno translated from the SMBIOS
 * status code by dell_smbios_error().
 */
static int dell_wmi_events_set_enabled(bool enable)
{
	struct calling_interface_buffer *buffer;
	int ret;

	buffer = kzalloc(sizeof(struct calling_interface_buffer), GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;
	buffer->cmd_class = CLASS_INFO;
	buffer->cmd_select = SELECT_APP_REGISTRATION;
	buffer->input[0] = 0x10000;		/* Application ID 1 */
	buffer->input[1] = 0x51534554;		/* "QSET" */
	buffer->input[3] = enable;
	ret = dell_smbios_call(buffer);
	if (ret == 0)
		ret = buffer->output[0];	/* BIOS status code */
	kfree(buffer);

	return dell_smbios_error(ret);
}
/*
 * Bind to the Dell event GUID: validate the WMI descriptor, allocate the
 * per-device state and set up the hotkey input device.
 */
static int dell_wmi_probe(struct wmi_device *wdev, const void *context)
{
	struct dell_wmi_priv *priv;
	int err;

	err = dell_wmi_get_descriptor_valid();
	if (err)
		return err;

	priv = devm_kzalloc(&wdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	dev_set_drvdata(&wdev->dev, priv);

	/* The descriptor driver may not have probed yet — retry later. */
	if (!dell_wmi_get_interface_version(&priv->interface_version))
		return -EPROBE_DEFER;

	return dell_wmi_input_setup(wdev);
}
static void dell_wmi_remove(struct wmi_device *wdev)
{
dell_wmi_input_destroy(wdev);
}
static const struct wmi_device_id dell_wmi_id_table[] = {
{ .guid_string = DELL_EVENT_GUID },
{ },
};
static struct wmi_driver dell_wmi_driver = {
.driver = {
.name = "dell-wmi",
},
.id_table = dell_wmi_id_table,
.probe = dell_wmi_probe,
.remove = dell_wmi_remove,
.notify = dell_wmi_notify,
};
/*
 * Module init: enable BIOS WMI events where the platform requires it,
 * then register the privacy and WMI drivers.
 *
 * Fix: the original left the privacy driver registered (and SMBIOS events
 * enabled) if wmi_driver_register() failed; unwind both on error.
 */
static int __init dell_wmi_init(void)
{
	int err;

	dmi_check_system(dell_wmi_smbios_list);

	if (wmi_requires_smbios_request) {
		err = dell_wmi_events_set_enabled(true);
		if (err) {
			pr_err("Failed to enable WMI events\n");
			return err;
		}
	}

	err = dell_privacy_register_driver();
	if (err)
		goto err_disable_events;

	err = wmi_driver_register(&dell_wmi_driver);
	if (err)
		goto err_unregister_privacy;

	return 0;

err_unregister_privacy:
	dell_privacy_unregister_driver();
err_disable_events:
	if (wmi_requires_smbios_request)
		dell_wmi_events_set_enabled(false);
	return err;
}
/*
 * Module exit: mirror dell_wmi_init() — tell the BIOS to stop emitting
 * hotkey events (where we asked for them), then unregister both drivers.
 */
static void __exit dell_wmi_exit(void)
{
	if (wmi_requires_smbios_request)
		dell_wmi_events_set_enabled(false);
	wmi_driver_unregister(&dell_wmi_driver);
	dell_privacy_unregister_driver();
}
MODULE_DEVICE_TABLE(wmi, dell_wmi_id_table);
| linux-master | drivers/platform/x86/dell/dell-wmi-base.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Dell WMI descriptor driver
*
* Copyright (C) 2017 Dell Inc. All Rights Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/wmi.h>
#include "dell-wmi-descriptor.h"
#define DELL_WMI_DESCRIPTOR_GUID "8D9DDCBC-A997-11DA-B012-B622A1EF5492"
struct descriptor_priv {
struct list_head list;
u32 interface_version;
u32 size;
u32 hotfix;
};
static int descriptor_valid = -EPROBE_DEFER;
static LIST_HEAD(wmi_list);
static DEFINE_MUTEX(list_mutex);
/*
 * Report whether a valid Dell WMI descriptor has been seen:
 * -ENODEV if the GUID is absent, otherwise the cached probe result
 * (-EPROBE_DEFER until the descriptor has been parsed).
 */
int dell_wmi_get_descriptor_valid(void)
{
	return wmi_has_guid(DELL_WMI_DESCRIPTOR_GUID) ? descriptor_valid
						      : -ENODEV;
}
EXPORT_SYMBOL_GPL(dell_wmi_get_descriptor_valid);
/*
 * Fetch the interface version from the first probed descriptor device.
 * Returns true and fills *version on success, false if none has probed.
 */
bool dell_wmi_get_interface_version(u32 *version)
{
	struct descriptor_priv *priv;
	bool found;

	mutex_lock(&list_mutex);
	priv = list_first_entry_or_null(&wmi_list, struct descriptor_priv,
					list);
	found = priv != NULL;
	if (found)
		*version = priv->interface_version;
	mutex_unlock(&list_mutex);

	return found;
}
EXPORT_SYMBOL_GPL(dell_wmi_get_interface_version);
/*
 * Fetch the WMI buffer size from the first probed descriptor device.
 * Returns true and fills *size on success, false if none has probed.
 */
bool dell_wmi_get_size(u32 *size)
{
	struct descriptor_priv *priv;
	bool found;

	mutex_lock(&list_mutex);
	priv = list_first_entry_or_null(&wmi_list, struct descriptor_priv,
					list);
	found = priv != NULL;
	if (found)
		*size = priv->size;
	mutex_unlock(&list_mutex);

	return found;
}
EXPORT_SYMBOL_GPL(dell_wmi_get_size);
/*
 * Fetch the hotfix number from the first probed descriptor device.
 * Returns true and fills *hotfix on success, false if none has probed.
 */
bool dell_wmi_get_hotfix(u32 *hotfix)
{
	struct descriptor_priv *priv;
	bool found;

	mutex_lock(&list_mutex);
	priv = list_first_entry_or_null(&wmi_list, struct descriptor_priv,
					list);
	found = priv != NULL;
	if (found)
		*hotfix = priv->hotfix;
	mutex_unlock(&list_mutex);

	return found;
}
EXPORT_SYMBOL_GPL(dell_wmi_get_hotfix);
/*
* Descriptor buffer is 128 byte long and contains:
*
* Name Offset Length Value
* Vendor Signature 0 4 "DELL"
* Object Signature 4 4 " WMI"
* WMI Interface Version 8 4 <version>
* WMI buffer length 12 4 <length>
* WMI hotfix number 16 4 <hotfix>
*/
/*
 * Read and validate the 128-byte Dell WMI descriptor (layout documented in
 * the comment above), cache the interface version / buffer size / hotfix
 * in a per-device struct and publish it on wmi_list for the accessors.
 *
 * The module-wide descriptor_valid state is updated as a side effect so
 * dell_wmi_get_descriptor_valid() can report validation failures.
 *
 * Fix: the signature check previously read the data through
 * obj->string.pointer even though the object is ACPI_TYPE_BUFFER; it only
 * worked because the union members alias.  Read through obj->buffer
 * consistently.
 */
static int dell_wmi_descriptor_probe(struct wmi_device *wdev,
				     const void *context)
{
	struct descriptor_priv *priv;
	union acpi_object *obj;
	u32 *buffer;
	int ret;

	obj = wmidev_block_query(wdev, 0);
	if (!obj) {
		dev_err(&wdev->dev, "failed to read Dell WMI descriptor\n");
		ret = -EIO;
		goto out;
	}

	if (obj->type != ACPI_TYPE_BUFFER) {
		dev_err(&wdev->dev, "Dell descriptor has wrong type\n");
		ret = -EINVAL;
		descriptor_valid = ret;
		goto out;
	}

	/* Although it's not technically a failure, this would lead to
	 * unexpected behavior
	 */
	if (obj->buffer.length != 128) {
		dev_err(&wdev->dev,
			"Dell descriptor buffer has unexpected length (%d)\n",
			obj->buffer.length);
		ret = -EINVAL;
		descriptor_valid = ret;
		goto out;
	}

	buffer = (u32 *)obj->buffer.pointer;

	/* First 8 bytes must be the "DELL WMI" vendor/object signature. */
	if (strncmp((const char *)obj->buffer.pointer, "DELL WMI", 8) != 0) {
		dev_err(&wdev->dev, "Dell descriptor buffer has invalid signature (%8ph)\n",
			buffer);
		ret = -EINVAL;
		descriptor_valid = ret;
		goto out;
	}
	descriptor_valid = 0;

	/* Only versions 0 and 1 are known; others are tolerated with a warning. */
	if (buffer[2] != 0 && buffer[2] != 1)
		dev_warn(&wdev->dev, "Dell descriptor buffer has unknown version (%lu)\n",
			 (unsigned long) buffer[2]);

	priv = devm_kzalloc(&wdev->dev, sizeof(struct descriptor_priv),
			    GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto out;
	}

	priv->interface_version = buffer[2];
	priv->size = buffer[3];
	priv->hotfix = buffer[4];
	ret = 0;
	dev_set_drvdata(&wdev->dev, priv);
	mutex_lock(&list_mutex);
	list_add_tail(&priv->list, &wmi_list);
	mutex_unlock(&list_mutex);

	dev_dbg(&wdev->dev, "Detected Dell WMI interface version %lu, buffer size %lu, hotfix %lu\n",
		(unsigned long) priv->interface_version,
		(unsigned long) priv->size,
		(unsigned long) priv->hotfix);

out:
	kfree(obj);
	return ret;
}
/* Remove this device's entry from wmi_list; memory is devm-managed. */
static void dell_wmi_descriptor_remove(struct wmi_device *wdev)
{
	struct descriptor_priv *entry = dev_get_drvdata(&wdev->dev);

	mutex_lock(&list_mutex);
	list_del(&entry->list);
	mutex_unlock(&list_mutex);
}
static const struct wmi_device_id dell_wmi_descriptor_id_table[] = {
{ .guid_string = DELL_WMI_DESCRIPTOR_GUID },
{ },
};
static struct wmi_driver dell_wmi_descriptor_driver = {
.driver = {
.name = "dell-wmi-descriptor",
},
.probe = dell_wmi_descriptor_probe,
.remove = dell_wmi_descriptor_remove,
.id_table = dell_wmi_descriptor_id_table,
};
module_wmi_driver(dell_wmi_descriptor_driver);
MODULE_DEVICE_TABLE(wmi, dell_wmi_descriptor_id_table);
MODULE_AUTHOR("Mario Limonciello <[email protected]>");
MODULE_DESCRIPTION("Dell WMI descriptor driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/x86/dell/dell-wmi-descriptor.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
Dell Airplane Mode Switch driver
Copyright (C) 2014-2015 Pali Rohár <[email protected]>
*/
#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/rfkill.h>
#include <linux/input.h>
#include "dell-rbtn.h"
enum rbtn_type {
RBTN_UNKNOWN,
RBTN_TOGGLE,
RBTN_SLIDER,
};
struct rbtn_data {
enum rbtn_type type;
struct rfkill *rfkill;
struct input_dev *input_dev;
bool suspended;
};
/*
* acpi functions
*/
/*
 * Query the CRBT ACPI method to classify the radio button hardware:
 * 0/1 -> toggle button, 2/3 -> slider, anything else -> unknown.
 */
static enum rbtn_type rbtn_check(struct acpi_device *device)
{
	unsigned long long output;
	acpi_status status;

	status = acpi_evaluate_integer(device->handle, "CRBT", NULL, &output);
	if (ACPI_FAILURE(status))
		return RBTN_UNKNOWN;

	if (output <= 1)
		return RBTN_TOGGLE;
	if (output <= 3)
		return RBTN_SLIDER;

	return RBTN_UNKNOWN;
}
/*
 * Read the radio state via the GRBT ACPI method.  The raw value is
 * inverted before being returned; -EINVAL on ACPI failure.
 */
static int rbtn_get(struct acpi_device *device)
{
	unsigned long long output;
	acpi_status status;

	status = acpi_evaluate_integer(device->handle, "GRBT", NULL, &output);

	return ACPI_FAILURE(status) ? -EINVAL : !output;
}
/*
 * Tell the firmware (via the ARBT ACPI method) whether the driver owns
 * radio-button handling.  @enable = true acquires, false releases.
 *
 * Idiom: use the acpi_execute_simple_method() helper instead of building
 * a one-integer acpi_object_list by hand — behavior is identical.
 */
static int rbtn_acquire(struct acpi_device *device, bool enable)
{
	acpi_status status;

	status = acpi_execute_simple_method(device->handle, "ARBT", enable);
	if (ACPI_FAILURE(status))
		return -EINVAL;

	return 0;
}
/*
* rfkill device
*/
/*
 * rfkill .query callback: refresh the soft and hard blocked state from
 * the firmware.  @data is the ACPI device passed at rfkill_alloc() time.
 * A firmware read error leaves the previous rfkill state untouched.
 */
static void rbtn_rfkill_query(struct rfkill *rfkill, void *data)
{
	struct acpi_device *device = data;
	int state;

	state = rbtn_get(device);
	if (state < 0)
		return;

	/* The slider is hardware-controlled: report it as both sw and hw state. */
	rfkill_set_states(rfkill, state, state);
}
/*
 * rfkill .set_block callback: the slider is a physical switch, so the
 * soft-blocked state cannot be changed from software.
 */
static int rbtn_rfkill_set_block(void *data, bool blocked)
{
	/* NOTE: setting soft rfkill state is not supported */
	return -EINVAL;
}
static const struct rfkill_ops rbtn_ops = {
.query = rbtn_rfkill_query,
.set_block = rbtn_rfkill_set_block,
};
/*
 * Create and register the rfkill device for a slider-type button.
 * Idempotent: returns 0 immediately if the device already exists.
 */
static int rbtn_rfkill_init(struct acpi_device *device)
{
	struct rbtn_data *rbtn_data = device->driver_data;
	int err;

	if (rbtn_data->rfkill)
		return 0;

	/*
	 * NOTE: rbtn controls all radio devices, not only WLAN
	 * but rfkill interface does not support "ANY" type
	 * so "WLAN" type is used
	 */
	rbtn_data->rfkill = rfkill_alloc("dell-rbtn", &device->dev,
					 RFKILL_TYPE_WLAN, &rbtn_ops, device);
	if (!rbtn_data->rfkill)
		return -ENOMEM;

	err = rfkill_register(rbtn_data->rfkill);
	if (err) {
		rfkill_destroy(rbtn_data->rfkill);
		rbtn_data->rfkill = NULL;
	}

	return err;
}
/* Unregister and free the rfkill device, if one was created. */
static void rbtn_rfkill_exit(struct acpi_device *device)
{
	struct rbtn_data *rbtn_data = device->driver_data;
	struct rfkill *rfkill = rbtn_data->rfkill;

	if (!rfkill)
		return;

	rfkill_unregister(rfkill);
	rfkill_destroy(rfkill);
	rbtn_data->rfkill = NULL;
}
/*
 * Handle a slider-change notification by re-reading the firmware state
 * into the rfkill device (no-op if rfkill was auto-removed).
 */
static void rbtn_rfkill_event(struct acpi_device *device)
{
	struct rbtn_data *rbtn_data = device->driver_data;

	if (rbtn_data->rfkill)
		rbtn_rfkill_query(rbtn_data->rfkill, device);
}
/*
* input device
*/
/*
 * Create and register the input device used for toggle-type buttons;
 * it emits KEY_RFKILL presses.  Returns 0 or a negative errno.
 *
 * Idiom: use input_set_capability() instead of poking evbit/keybit by
 * hand — it sets both the EV_KEY bit and the key bit.
 */
static int rbtn_input_init(struct rbtn_data *rbtn_data)
{
	int ret;

	rbtn_data->input_dev = input_allocate_device();
	if (!rbtn_data->input_dev)
		return -ENOMEM;

	rbtn_data->input_dev->name = "DELL Wireless hotkeys";
	rbtn_data->input_dev->phys = "dellabce/input0";
	rbtn_data->input_dev->id.bustype = BUS_HOST;
	input_set_capability(rbtn_data->input_dev, EV_KEY, KEY_RFKILL);

	ret = input_register_device(rbtn_data->input_dev);
	if (ret) {
		input_free_device(rbtn_data->input_dev);
		rbtn_data->input_dev = NULL;
		return ret;
	}

	return 0;
}
/* Unregister (and implicitly free) the toggle-button input device. */
static void rbtn_input_exit(struct rbtn_data *rbtn_data)
{
	input_unregister_device(rbtn_data->input_dev);
	rbtn_data->input_dev = NULL;
}
/* Emit a full KEY_RFKILL press/release pair for a button notification. */
static void rbtn_input_event(struct rbtn_data *rbtn_data)
{
	input_report_key(rbtn_data->input_dev, KEY_RFKILL, 1);
	input_sync(rbtn_data->input_dev);
	input_report_key(rbtn_data->input_dev, KEY_RFKILL, 0);
	input_sync(rbtn_data->input_dev);
}
/*
* acpi driver
*/
static int rbtn_add(struct acpi_device *device);
static void rbtn_remove(struct acpi_device *device);
static void rbtn_notify(struct acpi_device *device, u32 event);
static const struct acpi_device_id rbtn_ids[] = {
{ "DELRBTN", 0 },
{ "DELLABCE", 0 },
/*
* This driver can also handle the "DELLABC6" device that
* appears on the XPS 13 9350, but that device is disabled by
* the DSDT unless booted with acpi_osi="!Windows 2012"
* acpi_osi="!Windows 2013".
*
* According to Mario at Dell:
*
* DELLABC6 is a custom interface that was created solely to
* have airplane mode support for Windows 7. For Windows 10
* the proper interface is to use that which is handled by
* intel-hid. A OEM airplane mode driver is not used.
*
* Since the kernel doesn't identify as Windows 7 it would be
* incorrect to do attempt to use that interface.
*
* Even if we override _OSI and bind to DELLABC6, we end up with
* inconsistent behavior in which userspace can get out of sync
* with the rfkill state as it conflicts with events from
* intel-hid.
*
* The upshot is that it is better to just ignore DELLABC6
* devices.
*/
{ "", 0 },
};
#ifdef CONFIG_PM_SLEEP
/*
 * Deferred-work callback (queued on the ACPI notify workqueue from
 * rbtn_resume()) that re-enables event handling; see the comment in
 * rbtn_resume() for why this must run after any spurious resume event.
 */
static void ACPI_SYSTEM_XFACE rbtn_clear_suspended_flag(void *context)
{
	struct rbtn_data *rbtn_data = context;

	rbtn_data->suspended = false;
}
/* PM suspend hook: mark the device so resume-time notifications are ignored. */
static int rbtn_suspend(struct device *dev)
{
	struct acpi_device *device = to_acpi_device(dev);
	struct rbtn_data *rbtn_data = acpi_driver_data(device);

	rbtn_data->suspended = true;

	return 0;
}
/*
 * PM resume hook: schedule clearing of the "suspended" flag on the ACPI
 * notify workqueue so it happens after any spurious wakeup notification
 * has been delivered (and ignored) by rbtn_notify().
 */
static int rbtn_resume(struct device *dev)
{
	struct acpi_device *device = to_acpi_device(dev);
	struct rbtn_data *rbtn_data = acpi_driver_data(device);
	acpi_status status;

	/*
	 * Upon resume, some BIOSes send an ACPI notification thet triggers
	 * an unwanted input event. In order to ignore it, we use a flag
	 * that we set at suspend and clear once we have received the extra
	 * ACPI notification. Since ACPI notifications are delivered
	 * asynchronously to drivers, we clear the flag from the workqueue
	 * used to deliver the notifications. This should be enough
	 * to have the flag cleared only after we received the extra
	 * notification, if any.
	 */
	status = acpi_os_execute(OSL_NOTIFY_HANDLER,
			 rbtn_clear_suspended_flag, rbtn_data);
	if (ACPI_FAILURE(status))
		/* Fall back to clearing synchronously if queueing failed. */
		rbtn_clear_suspended_flag(rbtn_data);

	return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(rbtn_pm_ops, rbtn_suspend, rbtn_resume);
static struct acpi_driver rbtn_driver = {
.name = "dell-rbtn",
.ids = rbtn_ids,
.drv.pm = &rbtn_pm_ops,
.ops = {
.add = rbtn_add,
.remove = rbtn_remove,
.notify = rbtn_notify,
},
.owner = THIS_MODULE,
};
/*
* notifier export functions
*/
static bool auto_remove_rfkill = true;
static ATOMIC_NOTIFIER_HEAD(rbtn_chain_head);
static int rbtn_inc_count(struct device *dev, void *data)
{
struct acpi_device *device = to_acpi_device(dev);
struct rbtn_data *rbtn_data = device->driver_data;
int *count = data;
if (rbtn_data->type == RBTN_SLIDER)
(*count)++;
return 0;
}
/*
 * driver_for_each_device() callback: create (@data truthy) or remove the
 * rfkill interface of each slider-type device.
 */
static int rbtn_switch_dev(struct device *dev, void *data)
{
	struct acpi_device *adev = to_acpi_device(dev);
	struct rbtn_data *rbtn_data = adev->driver_data;

	if (rbtn_data->type == RBTN_SLIDER) {
		if (data)
			rbtn_rfkill_init(adev);
		else
			rbtn_rfkill_exit(adev);
	}

	return 0;
}
/*
 * Register @nb for radio-button events.  Fails with -ENODEV when no
 * slider-type device is bound.  When the first listener registers (and
 * auto_remove_rfkill is set), the driver's own rfkill devices are torn
 * down so the listener can take over radio management.
 *
 * NOTE(review): the "first" test peeks at rbtn_chain_head.head without
 * locking; presumably callers never race here — confirm before relying
 * on concurrent registration.
 */
int dell_rbtn_notifier_register(struct notifier_block *nb)
{
	bool first;
	int count;
	int ret;

	count = 0;
	ret = driver_for_each_device(&rbtn_driver.drv, NULL, &count,
				     rbtn_inc_count);
	if (ret || count == 0)
		return -ENODEV;

	first = !rbtn_chain_head.head;

	ret = atomic_notifier_chain_register(&rbtn_chain_head, nb);
	if (ret != 0)
		return ret;

	if (auto_remove_rfkill && first)
		ret = driver_for_each_device(&rbtn_driver.drv, NULL,
					     (void *)false, rbtn_switch_dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dell_rbtn_notifier_register);
EXPORT_SYMBOL_GPL(dell_rbtn_notifier_register);
/*
 * Unregister @nb.  When the last listener goes away (and
 * auto_remove_rfkill is set), the driver's rfkill devices are recreated
 * so radio state is exposed again.
 */
int dell_rbtn_notifier_unregister(struct notifier_block *nb)
{
	int ret;

	ret = atomic_notifier_chain_unregister(&rbtn_chain_head, nb);
	if (ret != 0)
		return ret;

	if (auto_remove_rfkill && !rbtn_chain_head.head)
		ret = driver_for_each_device(&rbtn_driver.drv, NULL,
					     (void *)true, rbtn_switch_dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dell_rbtn_notifier_unregister);
EXPORT_SYMBOL_GPL(dell_rbtn_notifier_unregister);
/*
* acpi driver functions
*/
/*
 * ACPI .add: classify the device (toggle vs slider), tell the firmware we
 * take ownership (ARBT 1), and create the matching user interface — an
 * input device for toggles, an rfkill device for sliders (unless a
 * notifier listener already owns radio management).  On any failure the
 * firmware acquisition is released again.
 */
static int rbtn_add(struct acpi_device *device)
{
	struct rbtn_data *rbtn_data;
	enum rbtn_type type;
	int ret = 0;

	type = rbtn_check(device);
	if (type == RBTN_UNKNOWN) {
		dev_info(&device->dev, "Unknown device type\n");
		return -EINVAL;
	}

	rbtn_data = devm_kzalloc(&device->dev, sizeof(*rbtn_data), GFP_KERNEL);
	if (!rbtn_data)
		return -ENOMEM;

	ret = rbtn_acquire(device, true);
	if (ret < 0) {
		dev_err(&device->dev, "Cannot enable device\n");
		return ret;
	}

	rbtn_data->type = type;
	device->driver_data = rbtn_data;

	switch (rbtn_data->type) {
	case RBTN_TOGGLE:
		ret = rbtn_input_init(rbtn_data);
		break;
	case RBTN_SLIDER:
		/* A registered listener owns the radios; skip rfkill then. */
		if (auto_remove_rfkill && rbtn_chain_head.head)
			ret = 0;
		else
			ret = rbtn_rfkill_init(device);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	if (ret)
		rbtn_acquire(device, false);

	return ret;
}
/*
 * ACPI .remove: tear down the type-specific interface and release
 * firmware ownership (ARBT 0).
 */
static void rbtn_remove(struct acpi_device *device)
{
	struct rbtn_data *rbtn_data = device->driver_data;

	if (rbtn_data->type == RBTN_TOGGLE)
		rbtn_input_exit(rbtn_data);
	else if (rbtn_data->type == RBTN_SLIDER)
		rbtn_rfkill_exit(device);

	rbtn_acquire(device, false);
}
/*
 * ACPI .notify: handle event 0x80 (button activity).  Toggles emit a
 * KEY_RFKILL press; sliders refresh rfkill state and fan the event out
 * to registered notifier listeners.  Events arriving while suspended are
 * dropped (see rbtn_suspend()/rbtn_resume()).
 */
static void rbtn_notify(struct acpi_device *device, u32 event)
{
	struct rbtn_data *rbtn_data = device->driver_data;

	/*
	 * Some BIOSes send a notification at resume.
	 * Ignore it to prevent unwanted input events.
	 */
	if (rbtn_data->suspended) {
		dev_dbg(&device->dev, "ACPI notification ignored\n");
		return;
	}

	if (event != 0x80) {
		dev_info(&device->dev, "Received unknown event (0x%x)\n",
			 event);
		return;
	}

	switch (rbtn_data->type) {
	case RBTN_TOGGLE:
		rbtn_input_event(rbtn_data);
		break;
	case RBTN_SLIDER:
		rbtn_rfkill_event(device);
		atomic_notifier_call_chain(&rbtn_chain_head, event, device);
		break;
	default:
		break;
	}
}
/*
* module functions
*/
module_acpi_driver(rbtn_driver);
module_param(auto_remove_rfkill, bool, 0444);
MODULE_PARM_DESC(auto_remove_rfkill, "Automatically remove rfkill devices when "
"other modules start receiving events "
"from this module and re-add them when "
"the last module stops receiving events "
"(default true)");
MODULE_DEVICE_TABLE(acpi, rbtn_ids);
MODULE_DESCRIPTION("Dell Airplane Mode Switch driver");
MODULE_AUTHOR("Pali Rohár <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/x86/dell/dell-rbtn.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* dcdbas.c: Dell Systems Management Base Driver
*
* The Dell Systems Management Base Driver provides a sysfs interface for
* systems management software to perform System Management Interrupts (SMIs)
* and Host Control Actions (power cycle or power off after OS shutdown) on
* Dell systems.
*
* See Documentation/driver-api/dcdbas.rst for more information.
*
* Copyright (C) 1995-2006 Dell Inc.
*/
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/dma-mapping.h>
#include <linux/dmi.h>
#include <linux/errno.h>
#include <linux/cpu.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include "dcdbas.h"
#define DRIVER_NAME "dcdbas"
#define DRIVER_VERSION "5.6.0-3.4"
#define DRIVER_DESCRIPTION "Dell Systems Management Base Driver"
static struct platform_device *dcdbas_pdev;
static unsigned long max_smi_data_buf_size = MAX_SMI_DATA_BUF_SIZE;
static DEFINE_MUTEX(smi_data_lock);
static u8 *bios_buffer;
static struct smi_buffer smi_buf;
static unsigned int host_control_action;
static unsigned int host_control_smi_type;
static unsigned int host_control_on_shutdown;
static bool wsmt_enabled;
/*
 * Allocate a DMA-coherent SMI buffer of @size bytes for the dcdbas
 * platform device, filling in @smi_buffer (virt/dma/size).
 * Returns 0 on success or -ENOMEM.  The memory is zeroed by
 * dma_alloc_coherent().
 */
int dcdbas_smi_alloc(struct smi_buffer *smi_buffer, unsigned long size)
{
	smi_buffer->virt = dma_alloc_coherent(&dcdbas_pdev->dev, size,
					      &smi_buffer->dma, GFP_KERNEL);
	if (!smi_buffer->virt) {
		dev_dbg(&dcdbas_pdev->dev,
			"%s: failed to allocate memory size %lu\n",
			__func__, size);
		return -ENOMEM;
	}
	smi_buffer->size = size;

	dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n",
		__func__, (u32)smi_buffer->dma, smi_buffer->size);

	return 0;
}
EXPORT_SYMBOL_GPL(dcdbas_smi_alloc);
EXPORT_SYMBOL_GPL(dcdbas_smi_alloc);
/*
 * Free a buffer previously obtained from dcdbas_smi_alloc() and reset
 * the descriptor fields.  Safe to call on an already-freed/empty buffer.
 */
void dcdbas_smi_free(struct smi_buffer *smi_buffer)
{
	if (!smi_buffer->virt)
		return;

	dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n",
		__func__, (u32)smi_buffer->dma, smi_buffer->size);
	dma_free_coherent(&dcdbas_pdev->dev, smi_buffer->size,
			  smi_buffer->virt, smi_buffer->dma);
	smi_buffer->virt = NULL;
	smi_buffer->dma = 0;
	smi_buffer->size = 0;
}
EXPORT_SYMBOL_GPL(dcdbas_smi_free);
EXPORT_SYMBOL_GPL(dcdbas_smi_free);
/**
* smi_data_buf_free: free SMI data buffer
*/
/**
 * smi_data_buf_free: free SMI data buffer
 *
 * No-op when WSMT is enabled: the buffer then maps firmware-reserved
 * memory (see dcdbas_check_wsmt()) and must not be DMA-freed.
 */
static void smi_data_buf_free(void)
{
	if (!smi_buf.virt || wsmt_enabled)
		return;

	dcdbas_smi_free(&smi_buf);
}
/**
* smi_data_buf_realloc: grow SMI data buffer if needed
*/
/**
 * smi_data_buf_realloc: grow SMI data buffer if needed
 *
 * Ensures the global smi_buf holds at least @size bytes, preserving any
 * existing contents.  Returns 0 if already large enough or after a
 * successful grow; -EINVAL if @size exceeds max_smi_data_buf_size;
 * -ENOMEM on allocation failure (old buffer is kept intact).
 * Caller must hold smi_data_lock.
 */
static int smi_data_buf_realloc(unsigned long size)
{
	struct smi_buffer tmp;
	int ret;

	if (smi_buf.size >= size)
		return 0;

	if (size > max_smi_data_buf_size)
		return -EINVAL;

	/* new buffer is needed */
	ret = dcdbas_smi_alloc(&tmp, size);
	if (ret)
		return ret;

	/* memory zeroed by dma_alloc_coherent */
	if (smi_buf.virt)
		memcpy(tmp.virt, smi_buf.virt, smi_buf.size);

	/* free any existing buffer */
	smi_data_buf_free();

	/* set up new buffer for use */
	smi_buf = tmp;

	return 0;
}
/*
 * sysfs: expose the SMI buffer physical (DMA) address in hex.
 * Idiom: sysfs_emit() bounds the write to the sysfs page buffer.
 */
static ssize_t smi_data_buf_phys_addr_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	return sysfs_emit(buf, "%x\n", (u32)smi_buf.dma);
}
/*
 * sysfs: expose the current SMI buffer size in bytes.
 * Idiom: sysfs_emit() bounds the write to the sysfs page buffer.
 */
static ssize_t smi_data_buf_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return sysfs_emit(buf, "%lu\n", smi_buf.size);
}
/*
 * sysfs: grow the SMI data buffer to at least the requested size.
 *
 * Fix: simple_strtoul() is deprecated and silently treated malformed
 * input as 0; kstrtoul() rejects it with an error instead.
 */
static ssize_t smi_data_buf_size_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	unsigned long buf_size;
	ssize_t ret;

	ret = kstrtoul(buf, 10, &buf_size);
	if (ret)
		return ret;

	/* make sure SMI data buffer is at least buf_size */
	mutex_lock(&smi_data_lock);
	ret = smi_data_buf_realloc(buf_size);
	mutex_unlock(&smi_data_lock);
	if (ret)
		return ret;

	return count;
}
/*
 * sysfs binary read: copy from the SMI data buffer at offset @pos.
 * memory_read_from_buffer() clamps the read to the buffer size.
 */
static ssize_t smi_data_read(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr,
			     char *buf, loff_t pos, size_t count)
{
	ssize_t ret;

	mutex_lock(&smi_data_lock);
	ret = memory_read_from_buffer(buf, count, &pos, smi_buf.virt,
				      smi_buf.size);
	mutex_unlock(&smi_data_lock);
	return ret;
}
/*
 * sysfs binary write: copy user data into the SMI buffer at offset @pos,
 * growing the buffer as needed (up to max_smi_data_buf_size).
 *
 * NOTE(review): pos is a loff_t; the (pos + count) bound check relies on
 * the sysfs layer never passing a negative/huge offset — confirm.
 */
static ssize_t smi_data_write(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t pos, size_t count)
{
	ssize_t ret;

	if ((pos + count) > max_smi_data_buf_size)
		return -EINVAL;

	mutex_lock(&smi_data_lock);

	ret = smi_data_buf_realloc(pos + count);
	if (ret)
		goto out;

	memcpy(smi_buf.virt + pos, buf, count);
	ret = count;
out:
	mutex_unlock(&smi_data_lock);
	return ret;
}
/*
 * sysfs: expose the pending host-control action.
 * Idiom: sysfs_emit() bounds the write to the sysfs page buffer.
 */
static ssize_t host_control_action_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return sysfs_emit(buf, "%u\n", host_control_action);
}
/*
 * sysfs: set the host-control action to perform at shutdown, making sure
 * the SMI buffer can hold an APM command first.
 *
 * Fix: simple_strtoul() is deprecated and silently treated malformed
 * input as 0; kstrtouint() rejects it with an error instead.
 */
static ssize_t host_control_action_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	unsigned int action;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &action);
	if (ret)
		return ret;

	/* make sure buffer is available for host control command */
	mutex_lock(&smi_data_lock);
	ret = smi_data_buf_realloc(sizeof(struct apm_cmd));
	mutex_unlock(&smi_data_lock);
	if (ret)
		return ret;

	host_control_action = action;
	return count;
}
/*
 * sysfs: expose the configured host-control SMI type.
 * Idiom: sysfs_emit() bounds the write to the sysfs page buffer.
 */
static ssize_t host_control_smi_type_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	return sysfs_emit(buf, "%u\n", host_control_smi_type);
}
/*
 * sysfs: select which SMI mechanism host_control_smi() uses.
 *
 * Fix: replace deprecated simple_strtoul() (which ignored parse errors)
 * with kstrtouint().
 */
static ssize_t host_control_smi_type_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned int type;
	int ret;

	ret = kstrtouint(buf, 10, &type);
	if (ret)
		return ret;

	host_control_smi_type = type;
	return count;
}
/*
 * sysfs: expose whether a host-control action runs at shutdown.
 * Idiom: sysfs_emit() bounds the write to the sysfs page buffer.
 */
static ssize_t host_control_on_shutdown_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	return sysfs_emit(buf, "%u\n", host_control_on_shutdown);
}
/*
 * sysfs: enable/disable running the host-control action at shutdown.
 *
 * Fix: replace deprecated simple_strtoul() (which ignored parse errors)
 * with kstrtouint().
 */
static ssize_t host_control_on_shutdown_store(struct device *dev,
					      struct device_attribute *attr,
					      const char *buf, size_t count)
{
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	host_control_on_shutdown = val;
	return count;
}
/*
 * smp_call_on_cpu() payload: trigger the SMI described by @par
 * (struct smi_cmd).  Must run on CPU 0 — the firmware expects the SMI
 * from the boot processor — so we double-check and bail with -EBUSY
 * otherwise.
 */
static int raise_smi(void *par)
{
	struct smi_cmd *smi_cmd = par;

	if (smp_processor_id() != 0) {
		dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
			__func__);
		return -EBUSY;
	}

	/* generate SMI */
	/* inb to force posted write through and make SMI happen now */
	asm volatile (
		"outb %b0,%w1\n"
		"inb %w1"
		: /* no output args */
		: "a" (smi_cmd->command_code),
		  "d" (smi_cmd->command_address),
		  "b" (smi_cmd->ebx),
		  "c" (smi_cmd->ecx)
		: "memory"
	);

	return 0;
}
/**
* dcdbas_smi_request: generate SMI request
*
* Called with smi_data_lock.
*/
/**
 * dcdbas_smi_request: generate SMI request
 *
 * Validates the caller-supplied magic, then pins execution to CPU 0
 * (required by the firmware) and raises the SMI.  Returns 0 on success,
 * -EBADR on bad magic, or the error from smp_call_on_cpu()/raise_smi().
 *
 * Called with smi_data_lock.
 */
int dcdbas_smi_request(struct smi_cmd *smi_cmd)
{
	int ret;

	if (smi_cmd->magic != SMI_CMD_MAGIC) {
		dev_info(&dcdbas_pdev->dev, "%s: invalid magic value\n",
			 __func__);
		return -EBADR;
	}

	/* SMI requires CPU 0 */
	cpus_read_lock();
	ret = smp_call_on_cpu(0, raise_smi, smi_cmd, true);
	cpus_read_unlock();

	return ret;
}
EXPORT_SYMBOL(dcdbas_smi_request);
EXPORT_SYMBOL(dcdbas_smi_request);
/**
* smi_request_store:
*
* The valid values are:
* 0: zero SMI data buffer
* 1: generate calling interface SMI
* 2: generate raw SMI
*
* User application writes smi_cmd to smi_data before telling driver
* to generate SMI.
*/
static ssize_t smi_request_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct smi_cmd *smi_cmd;
	/*
	 * NOTE(review): simple_strtoul() is deprecated and maps malformed
	 * input to 0 (i.e. "zero the buffer") — consider kstrtoul().
	 */
	unsigned long val = simple_strtoul(buf, NULL, 10);
	ssize_t ret;

	mutex_lock(&smi_data_lock);

	/* The buffer must already hold a complete smi_cmd written via smi_data. */
	if (smi_buf.size < sizeof(struct smi_cmd)) {
		ret = -ENODEV;
		goto out;
	}
	smi_cmd = (struct smi_cmd *)smi_buf.virt;

	switch (val) {
	case 2:
		/* Raw SMI */
		ret = dcdbas_smi_request(smi_cmd);
		if (!ret)
			ret = count;
		break;
	case 1:
		/*
		 * Calling Interface SMI
		 *
		 * Provide physical address of command buffer field within
		 * the struct smi_cmd to BIOS.
		 *
		 * Because the address that smi_cmd (smi_buf.virt) points to
		 * will be from memremap() of a non-memory address if WSMT
		 * is present, we can't use virt_to_phys() on smi_cmd, so
		 * we have to use the physical address that was saved when
		 * the virtual address for smi_cmd was received.
		 */
		smi_cmd->ebx = (u32)smi_buf.dma +
				offsetof(struct smi_cmd, command_buffer);
		ret = dcdbas_smi_request(smi_cmd);
		if (!ret)
			ret = count;
		break;
	case 0:
		/* Clear the SMI data buffer. */
		memset(smi_buf.virt, 0, smi_buf.size);
		ret = count;
		break;
	default:
		ret = -EINVAL;
		break;
	}

out:
	mutex_unlock(&smi_data_lock);
	return ret;
}
/**
* host_control_smi: generate host control SMI
*
* Caller must set up the host control command in smi_buf.virt.
*/
static int host_control_smi(void)
{
	struct apm_cmd *apm_cmd;
	u8 *data;
	unsigned long flags;
	u32 num_ticks;
	s8 cmd_status;
	u8 index;

	apm_cmd = (struct apm_cmd *)smi_buf.virt;
	apm_cmd->status = ESM_STATUS_CMD_UNSUCCESSFUL;

	switch (host_control_smi_type) {
	case HC_SMITYPE_TYPE1:
		/* rtc_lock serializes access to the shared CMOS index/data ports */
		spin_lock_irqsave(&rtc_lock, flags);

		/* write SMI data buffer physical address */
		/* one byte at a time through the CMOS index/data register pair */
		data = (u8 *)&smi_buf.dma;
		for (index = PE1300_CMOS_CMD_STRUCT_PTR;
		     index < (PE1300_CMOS_CMD_STRUCT_PTR + 4);
		     index++, data++) {
			outb(index,
			     (CMOS_BASE_PORT + CMOS_PAGE2_INDEX_PORT_PIIX4));
			outb(*data,
			     (CMOS_BASE_PORT + CMOS_PAGE2_DATA_PORT_PIIX4));
		}

		/* first set status to -1 as called by spec */
		cmd_status = ESM_STATUS_CMD_UNSUCCESSFUL;
		outb((u8) cmd_status, PCAT_APM_STATUS_PORT);

		/* generate SMM call */
		outb(ESM_APM_CMD, PCAT_APM_CONTROL_PORT);
		spin_unlock_irqrestore(&rtc_lock, flags);

		/* wait a few to see if it executed */
		/* busy-poll the status port; firmware overwrites it on completion */
		num_ticks = TIMEOUT_USEC_SHORT_SEMA_BLOCKING;
		while ((s8)inb(PCAT_APM_STATUS_PORT) == ESM_STATUS_CMD_UNSUCCESSFUL) {
			num_ticks--;
			if (num_ticks == EXPIRED_TIMER)
				return -ETIME;
		}
		break;

	case HC_SMITYPE_TYPE2:
	case HC_SMITYPE_TYPE3:
		spin_lock_irqsave(&rtc_lock, flags);

		/* write SMI data buffer physical address */
		data = (u8 *)&smi_buf.dma;
		for (index = PE1400_CMOS_CMD_STRUCT_PTR;
		     index < (PE1400_CMOS_CMD_STRUCT_PTR + 4);
		     index++, data++) {
			outb(index, (CMOS_BASE_PORT + CMOS_PAGE1_INDEX_PORT));
			outb(*data, (CMOS_BASE_PORT + CMOS_PAGE1_DATA_PORT));
		}

		/* generate SMM call */
		/* type 2 and type 3 differ only in which control port is used */
		if (host_control_smi_type == HC_SMITYPE_TYPE3)
			outb(ESM_APM_CMD, PCAT_APM_CONTROL_PORT);
		else
			outb(ESM_APM_CMD, PE1400_APM_CONTROL_PORT);

		/* restore RTC index pointer since it was written to above */
		CMOS_READ(RTC_REG_C);
		spin_unlock_irqrestore(&rtc_lock, flags);

		/* read control port back to serialize write */
		cmd_status = inb(PE1400_APM_CONTROL_PORT);

		/* wait a few to see if it executed */
		/* here completion is signalled in the in-memory command status */
		num_ticks = TIMEOUT_USEC_SHORT_SEMA_BLOCKING;
		while (apm_cmd->status == ESM_STATUS_CMD_UNSUCCESSFUL) {
			num_ticks--;
			if (num_ticks == EXPIRED_TIMER)
				return -ETIME;
		}
		break;

	default:
		dev_dbg(&dcdbas_pdev->dev, "%s: invalid SMI type %u\n",
			__func__, host_control_smi_type);
		return -ENOSYS;
	}

	return 0;
}
/**
* dcdbas_host_control: initiate host control
*
* This function is called by the driver after the system has
* finished shutting down if the user application specified a
* host control action to perform on shutdown. It is safe to
* use smi_buf.virt at this point because the system has finished
* shutting down and no userspace apps are running.
*/
static void dcdbas_host_control(void)
{
	struct apm_cmd *apm_cmd;
	u8 action;

	if (host_control_action == HC_ACTION_NONE)
		return;

	/* Consume the request so it is performed at most once. */
	action = host_control_action;
	host_control_action = HC_ACTION_NONE;

	if (!smi_buf.virt) {
		dev_dbg(&dcdbas_pdev->dev, "%s: no SMI buffer\n", __func__);
		return;
	}

	if (smi_buf.size < sizeof(struct apm_cmd)) {
		dev_dbg(&dcdbas_pdev->dev, "%s: SMI buffer too small\n",
			__func__);
		return;
	}

	apm_cmd = (struct apm_cmd *)smi_buf.virt;

	/* power off takes precedence */
	if (action & HC_ACTION_HOST_CONTROL_POWEROFF) {
		/*
		 * NOTE(review): both branches issue ESM_APM_POWER_CYCLE;
		 * presumably parm[0]==0 means "stay off" vs. a 20-unit
		 * power-cycle delay — confirm against the ESM spec.
		 */
		apm_cmd->command = ESM_APM_POWER_CYCLE;
		apm_cmd->reserved = 0;
		*((s16 *)&apm_cmd->parameters.shortreq.parm[0]) = (s16) 0;
		host_control_smi();
	} else if (action & HC_ACTION_HOST_CONTROL_POWERCYCLE) {
		apm_cmd->command = ESM_APM_POWER_CYCLE;
		apm_cmd->reserved = 0;
		*((s16 *)&apm_cmd->parameters.shortreq.parm[0]) = (s16) 20;
		host_control_smi();
	}
}
/* WSMT */
/* Sum @length bytes of @buffer modulo 256 (used to validate the EPS table). */
static u8 checksum(u8 *buffer, u8 length)
{
	u8 sum = 0;
	u8 i;

	for (i = 0; i < length; i++)
		sum += buffer[i];

	return sum;
}
/*
 * Interpret @addr as a candidate SMM entry-point structure: it is valid
 * when it carries the SMM_EPS_SIG anchor and its bytes sum to 0 mod 256.
 * Returns the typed pointer on success, NULL otherwise.
 */
static inline struct smm_eps_table *check_eps_table(u8 *addr)
{
	struct smm_eps_table *eps = (struct smm_eps_table *)addr;

	if (strncmp(eps->smm_comm_buff_anchor, SMM_EPS_SIG, 4) != 0)
		return NULL;

	if (checksum(addr, eps->length) != 0)
		return NULL;

	return eps;
}
/*
 * dcdbas_check_wsmt() - use a firmware-protected SMI buffer when WSMT
 * mandates fixed communication buffers.
 *
 * Return: 0 if WSMT is absent or does not enforce fixed comm buffers
 * (the driver will allocate its own buffer), 1 if the firmware buffer
 * was found and mapped, negative errno on failure.
 */
static int dcdbas_check_wsmt(void)
{
	const struct dmi_device *dev = NULL;
	struct acpi_table_wsmt *wsmt = NULL;
	struct smm_eps_table *eps = NULL;
	u64 bios_buf_paddr;
	u64 remap_size;
	u8 *addr;
	acpi_get_table(ACPI_SIG_WSMT, 0, (struct acpi_table_header **)&wsmt);
	if (!wsmt)
		return 0;
	/* Check if WSMT ACPI table shows that protection is enabled */
	if (!(wsmt->protection_flags & ACPI_WSMT_FIXED_COMM_BUFFERS) ||
	    !(wsmt->protection_flags & ACPI_WSMT_COMM_BUFFER_NESTED_PTR_PROTECTION))
		return 0;
	/*
	 * BIOS could provide the address/size of the protected buffer
	 * in an SMBIOS string or in an EPS structure in 0xFxxxx.
	 */
	/* Check SMBIOS for buffer address */
	while ((dev = dmi_find_device(DMI_DEV_TYPE_OEM_STRING, NULL, dev)))
		if (sscanf(dev->name, "30[%16llx;%8llx]", &bios_buf_paddr,
		    &remap_size) == 2)
			goto remap;
	/* Scan for EPS (entry point structure) */
	for (addr = (u8 *)__va(0xf0000);
	     addr < (u8 *)__va(0x100000 - sizeof(struct smm_eps_table));
	     addr += 16) {
		eps = check_eps_table(addr);
		if (eps)
			break;
	}
	if (!eps) {
		dev_dbg(&dcdbas_pdev->dev, "found WSMT, but no firmware buffer found\n");
		return -ENODEV;
	}
	bios_buf_paddr = eps->smm_comm_buff_addr;
	remap_size = eps->num_of_4k_pages * PAGE_SIZE;
remap:
	/*
	 * Get physical address of buffer and map to virtual address.
	 * Table gives size in 4K pages, regardless of actual system page size.
	 */
	if (upper_32_bits(bios_buf_paddr + 8)) {
		dev_warn(&dcdbas_pdev->dev, "found WSMT, but buffer address is above 4GB\n");
		return -EINVAL;
	}
	/*
	 * Limit remap size to MAX_SMI_DATA_BUF_SIZE + 8 (since the first 8
	 * bytes are used for a semaphore, not the data buffer itself).
	 */
	if (remap_size > MAX_SMI_DATA_BUF_SIZE + 8)
		remap_size = MAX_SMI_DATA_BUF_SIZE + 8;
	bios_buffer = memremap(bios_buf_paddr, remap_size, MEMREMAP_WB);
	if (!bios_buffer) {
		dev_warn(&dcdbas_pdev->dev, "found WSMT, but failed to map buffer\n");
		return -ENOMEM;
	}
	/* First 8 bytes is for a semaphore, not part of the smi_buf.virt */
	smi_buf.dma = bios_buf_paddr + 8;
	smi_buf.virt = bios_buffer + 8;
	smi_buf.size = remap_size - 8;
	max_smi_data_buf_size = smi_buf.size;
	wsmt_enabled = true;
	dev_info(&dcdbas_pdev->dev,
		 "WSMT found, using firmware-provided SMI buffer.\n");
	return 1;
}
/**
* dcdbas_reboot_notify: handle reboot notification for host control
*/
static int dcdbas_reboot_notify(struct notifier_block *nb, unsigned long code,
void *unused)
{
switch (code) {
case SYS_DOWN:
case SYS_HALT:
case SYS_POWER_OFF:
if (host_control_on_shutdown) {
/* firmware is going to perform host control action */
printk(KERN_WARNING "Please wait for shutdown "
"action to complete...\n");
dcdbas_host_control();
}
break;
}
return NOTIFY_DONE;
}
/* Run last on reboot (INT_MIN priority) so the host control SMI is the
 * final action before the platform goes down.
 */
static struct notifier_block dcdbas_reboot_nb = {
	.notifier_call = dcdbas_reboot_notify,
	.next = NULL,
	.priority = INT_MIN
};
/* binary sysfs attribute exposing the raw SMI data buffer */
static DCDBAS_BIN_ATTR_RW(smi_data);
static struct bin_attribute *dcdbas_bin_attrs[] = {
	&bin_attr_smi_data,
	NULL
};
/* sysfs attributes controlling buffer sizing, SMI requests and the
 * host control (shutdown action) settings
 */
static DCDBAS_DEV_ATTR_RW(smi_data_buf_size);
static DCDBAS_DEV_ATTR_RO(smi_data_buf_phys_addr);
static DCDBAS_DEV_ATTR_WO(smi_request);
static DCDBAS_DEV_ATTR_RW(host_control_action);
static DCDBAS_DEV_ATTR_RW(host_control_smi_type);
static DCDBAS_DEV_ATTR_RW(host_control_on_shutdown);
static struct attribute *dcdbas_dev_attrs[] = {
	&dev_attr_smi_data_buf_size.attr,
	&dev_attr_smi_data_buf_phys_addr.attr,
	&dev_attr_smi_request.attr,
	&dev_attr_host_control_action.attr,
	&dev_attr_host_control_smi_type.attr,
	&dev_attr_host_control_on_shutdown.attr,
	NULL
};
static const struct attribute_group dcdbas_attr_group = {
	.attrs = dcdbas_dev_attrs,
	.bin_attrs = dcdbas_bin_attrs,
};
/*
 * dcdbas_probe() - platform device probe
 *
 * Resets host control state, adopts the WSMT firmware buffer if one is
 * mandated, restricts DMA to 32 bits (BIOS SMI requirement), creates the
 * sysfs interface and registers the reboot notifier.
 */
static int dcdbas_probe(struct platform_device *dev)
{
	int error;
	host_control_action = HC_ACTION_NONE;
	host_control_smi_type = HC_SMITYPE_NONE;
	dcdbas_pdev = dev;
	/* Check if ACPI WSMT table specifies protected SMI buffer address */
	error = dcdbas_check_wsmt();
	if (error < 0)
		return error;
	/*
	 * BIOS SMI calls require buffer addresses be in 32-bit address space.
	 * This is done by setting the DMA mask below.
	 */
	error = dma_set_coherent_mask(&dcdbas_pdev->dev, DMA_BIT_MASK(32));
	if (error)
		return error;
	error = sysfs_create_group(&dev->dev.kobj, &dcdbas_attr_group);
	if (error)
		return error;
	register_reboot_notifier(&dcdbas_reboot_nb);
	dev_info(&dev->dev, "%s (version %s)\n",
		 DRIVER_DESCRIPTION, DRIVER_VERSION);
	return 0;
}
/* Undo dcdbas_probe(): drop the reboot notifier and the sysfs group.
 * The SMI buffer itself is freed in dcdbas_exit() — see the comment there.
 */
static void dcdbas_remove(struct platform_device *dev)
{
	unregister_reboot_notifier(&dcdbas_reboot_nb);
	sysfs_remove_group(&dev->dev.kobj, &dcdbas_attr_group);
}
static struct platform_driver dcdbas_driver = {
	.driver		= {
		.name	= DRIVER_NAME,
	},
	.probe		= dcdbas_probe,
	.remove_new	= dcdbas_remove,
};
/* the driver registers its own platform device; no firmware enumeration */
static const struct platform_device_info dcdbas_dev_info __initconst = {
	.name		= DRIVER_NAME,
	.id		= PLATFORM_DEVID_NONE,
	.dma_mask	= DMA_BIT_MASK(32),
};
static struct platform_device *dcdbas_pdev_reg;
/**
 * dcdbas_init: initialize driver
 *
 * Registers the platform driver, then creates the matching platform
 * device so probe runs; unwinds the driver registration on failure.
 */
static int __init dcdbas_init(void)
{
	int error;
	error = platform_driver_register(&dcdbas_driver);
	if (error)
		return error;
	dcdbas_pdev_reg = platform_device_register_full(&dcdbas_dev_info);
	if (IS_ERR(dcdbas_pdev_reg)) {
		error = PTR_ERR(dcdbas_pdev_reg);
		goto err_unregister_driver;
	}
	return 0;
err_unregister_driver:
	platform_driver_unregister(&dcdbas_driver);
	return error;
}
/**
 * dcdbas_exit: perform driver cleanup
 */
static void __exit dcdbas_exit(void)
{
	/*
	 * make sure functions that use dcdbas_pdev are called
	 * before platform_device_unregister
	 */
	unregister_reboot_notifier(&dcdbas_reboot_nb);
	/*
	 * We have to free the buffer here instead of dcdbas_remove
	 * because only in module exit function we can be sure that
	 * all sysfs attributes belonging to this module have been
	 * released.
	 */
	if (dcdbas_pdev)
		smi_data_buf_free();
	/* unmap the WSMT firmware buffer, if one was adopted */
	if (bios_buffer)
		memunmap(bios_buffer);
	platform_device_unregister(dcdbas_pdev_reg);
	platform_driver_unregister(&dcdbas_driver);
}
subsys_initcall_sync(dcdbas_init);
module_exit(dcdbas_exit);
MODULE_DESCRIPTION(DRIVER_DESCRIPTION " (version " DRIVER_VERSION ")");
MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR("Dell Inc.");
MODULE_LICENSE("GPL");
/* Any System or BIOS claiming to be by Dell */
MODULE_ALIAS("dmi:*:[bs]vnD[Ee][Ll][Ll]*:*");
| linux-master | drivers/platform/x86/dell/dcdbas.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Common functions for kernel modules using Dell SMBIOS
*
* Copyright (c) Red Hat <[email protected]>
* Copyright (c) 2014 Gabriele Mazzotta <[email protected]>
* Copyright (c) 2014 Pali Rohár <[email protected]>
*
* Based on documentation in the libsmbios package:
* Copyright (C) 2005-2014 Dell Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/dmi.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "dell-smbios.h"
/* bitmask of SMBIOS command classes advertised by the DMI 0xDA table */
static u32 da_supported_commands;
/* entry count of da_tokens (includes entries later zeroed as duplicates) */
static int da_num_tokens;
static struct platform_device *platform_device;
/* token table parsed from the DMI 0xDA table; NULL if none was found */
static struct calling_interface_token *da_tokens;
/* per-token sysfs attributes (one location + one value per token) */
static struct device_attribute *token_location_attrs;
static struct device_attribute *token_value_attrs;
static struct attribute **token_attrs;
/* serializes backend list access and module teardown */
static DEFINE_MUTEX(smbios_mutex);
/* one registered SMBIOS calling backend */
struct smbios_device {
	struct list_head list;
	struct device *device;
	/* issues a calling-interface request through this backend */
	int (*call_fn)(struct calling_interface_buffer *arg);
};
/* a class/select pair plus the capability required to invoke it */
struct smbios_call {
	u32 need_capability;
	int cmd_class;
	int cmd_select;
};
/* calls that are whitelisted for given capabilities */
static struct smbios_call call_whitelist[] = {
	/* generally tokens are allowed, but may be further filtered or
	 * restricted by token blacklist or whitelist
	 */
	{CAP_SYS_ADMIN, CLASS_TOKEN_READ, SELECT_TOKEN_STD},
	{CAP_SYS_ADMIN, CLASS_TOKEN_READ, SELECT_TOKEN_AC},
	{CAP_SYS_ADMIN, CLASS_TOKEN_READ, SELECT_TOKEN_BAT},
	{CAP_SYS_ADMIN, CLASS_TOKEN_WRITE, SELECT_TOKEN_STD},
	{CAP_SYS_ADMIN, CLASS_TOKEN_WRITE, SELECT_TOKEN_AC},
	{CAP_SYS_ADMIN, CLASS_TOKEN_WRITE, SELECT_TOKEN_BAT},
	/* used by userspace: fwupdate */
	{CAP_SYS_ADMIN, CLASS_ADMIN_PROP, SELECT_ADMIN_PROP},
	/* used by userspace: fwupd */
	{CAP_SYS_ADMIN, CLASS_INFO, SELECT_DOCK},
	{CAP_SYS_ADMIN, CLASS_FLASH_INTERFACE, SELECT_FLASH_INTERFACE},
};
/* calls that are explicitly blacklisted (need_capability unused here);
 * cmd_select == -1 blacklists every select code within the class
 */
static struct smbios_call call_blacklist[] = {
	{0x0000, 1, 7}, /* manufacturing use */
	{0x0000, 6, 5}, /* manufacturing use */
	{0x0000, 11, 3}, /* write once */
	{0x0000, 11, 7}, /* write once */
	{0x0000, 11, 11}, /* write once */
	{0x0000, 19, -1}, /* diagnostics */
	/* handled by kernel: dell-laptop */
	{0x0000, CLASS_INFO, SELECT_RFKILL},
	{0x0000, CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT},
};
/* inclusive [min, max] token ID range plus the capability needed for it */
struct token_range {
	u32 need_capability;
	u16 min;
	u16 max;
};
/* tokens that are whitelisted for given capabilities */
static struct token_range token_whitelist[] = {
	/* used by userspace: fwupdate */
	{CAP_SYS_ADMIN, CAPSULE_EN_TOKEN, CAPSULE_DIS_TOKEN},
	/* can indicate to userspace that WMI is needed */
	{0x0000, WSMT_EN_TOKEN, WSMT_DIS_TOKEN}
};
/* tokens that are explicitly blacklisted */
static struct token_range token_blacklist[] = {
	{0x0000, 0x0058, 0x0059}, /* ME use */
	{0x0000, 0x00CD, 0x00D0}, /* raid shadow copy */
	{0x0000, 0x013A, 0x01FF}, /* sata shadow copy */
	{0x0000, 0x0175, 0x0176}, /* write once */
	{0x0000, 0x0195, 0x0197}, /* diagnostics */
	{0x0000, 0x01DC, 0x01DD}, /* manufacturing use */
	{0x0000, 0x027D, 0x0284}, /* diagnostics */
	{0x0000, 0x02E3, 0x02E3}, /* manufacturing use */
	{0x0000, 0x02FF, 0x02FF}, /* manufacturing use */
	{0x0000, 0x0300, 0x0302}, /* manufacturing use */
	{0x0000, 0x0325, 0x0326}, /* manufacturing use */
	{0x0000, 0x0332, 0x0335}, /* fan control */
	{0x0000, 0x0350, 0x0350}, /* manufacturing use */
	{0x0000, 0x0363, 0x0363}, /* manufacturing use */
	{0x0000, 0x0368, 0x0368}, /* manufacturing use */
	{0x0000, 0x03F6, 0x03F7}, /* manufacturing use */
	{0x0000, 0x049E, 0x049F}, /* manufacturing use */
	{0x0000, 0x04A0, 0x04A3}, /* diagnostics */
	{0x0000, 0x04E6, 0x04E7}, /* manufacturing use */
	{0x0000, 0x4000, 0x7FFF}, /* internal BIOS use */
	{0x0000, 0x9000, 0x9001}, /* internal BIOS use */
	{0x0000, 0xA000, 0xBFFF}, /* write only */
	{0x0000, 0xEFF0, 0xEFFF}, /* internal BIOS use */
	/* handled by kernel: dell-laptop */
	{0x0000, BRIGHTNESS_TOKEN, BRIGHTNESS_TOKEN},
	{0x0000, KBD_LED_OFF_TOKEN, KBD_LED_AUTO_TOKEN},
	{0x0000, KBD_LED_AC_TOKEN, KBD_LED_AC_TOKEN},
	{0x0000, KBD_LED_AUTO_25_TOKEN, KBD_LED_AUTO_75_TOKEN},
	{0x0000, KBD_LED_AUTO_100_TOKEN, KBD_LED_AUTO_100_TOKEN},
	{0x0000, GLOBAL_MIC_MUTE_ENABLE, GLOBAL_MIC_MUTE_DISABLE},
};
/* list of registered SMBIOS calling backends */
static LIST_HEAD(smbios_device_list);
/**
 * dell_smbios_error() - map a SMBIOS calling-interface status to an errno
 * @value: raw status code returned by the firmware call
 *
 * Return: 0 on success, -EIO / -ENXIO for the documented failure codes,
 * -EINVAL for anything unrecognized.
 */
int dell_smbios_error(int value)
{
	if (value == 0)		/* completed successfully */
		return 0;
	if (value == -1)	/* completed with error */
		return -EIO;
	if (value == -2)	/* function not supported */
		return -ENXIO;
	return -EINVAL;		/* unknown error */
}
/* part of the exported dell-smbios API */
EXPORT_SYMBOL_GPL(dell_smbios_error);
/**
 * dell_smbios_register_device() - register a SMBIOS calling backend
 * @d:       the backend's device
 * @call_fn: function used to issue calling-interface requests
 *
 * Appends the backend to the global list under smbios_mutex and holds a
 * reference on @d until dell_smbios_unregister_device().
 *
 * Return: 0 on success, -ENOMEM if the list entry cannot be allocated.
 */
int dell_smbios_register_device(struct device *d, void *call_fn)
{
	struct smbios_device *entry;

	entry = devm_kzalloc(d, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;
	get_device(d);
	entry->device = d;
	entry->call_fn = call_fn;
	mutex_lock(&smbios_mutex);
	list_add_tail(&entry->list, &smbios_device_list);
	mutex_unlock(&smbios_mutex);
	dev_dbg(d, "Added device: %s\n", d->driver->name);
	return 0;
}
EXPORT_SYMBOL_GPL(dell_smbios_register_device);
/**
 * dell_smbios_unregister_device() - remove a registered backend
 * @d: device previously passed to dell_smbios_register_device()
 *
 * Drops the matching backend (if any) from the global list and releases
 * the device reference taken at registration time.
 */
void dell_smbios_unregister_device(struct device *d)
{
	struct smbios_device *entry;

	mutex_lock(&smbios_mutex);
	list_for_each_entry(entry, &smbios_device_list, list) {
		if (entry->device != d)
			continue;
		list_del(&entry->list);
		put_device(d);
		break;
	}
	mutex_unlock(&smbios_mutex);
	dev_dbg(d, "Remove device: %s\n", d->driver->name);
}
EXPORT_SYMBOL_GPL(dell_smbios_unregister_device);
/**
 * dell_smbios_call_filter() - policy check for a calling-interface request
 * @d:      device used for diagnostic logging
 * @buffer: request to vet (cmd_class, cmd_select and input[] filled in)
 *
 * Applied in order: basic class validity, the platform's supported-command
 * mask, the call blacklist, token black/whitelists (for token read/write
 * calls), the call whitelist, then a CAP_SYS_RAWIO fallback.
 *
 * Return: 0 if the call is permitted, -EINVAL for invalid or blacklisted
 * requests, -EACCES if the caller lacks a required capability.
 */
int dell_smbios_call_filter(struct device *d,
			    struct calling_interface_buffer *buffer)
{
	u16 t = 0;
	int i;
	/* can't make calls over 30 */
	if (buffer->cmd_class > 30) {
		dev_dbg(d, "class too big: %u\n", buffer->cmd_class);
		return -EINVAL;
	}
	/* supported calls on the particular system */
	if (!(da_supported_commands & (1 << buffer->cmd_class))) {
		dev_dbg(d, "invalid command, supported commands: 0x%8x\n",
			da_supported_commands);
		return -EINVAL;
	}
	/* match against call blacklist */
	for (i = 0; i < ARRAY_SIZE(call_blacklist); i++) {
		if (buffer->cmd_class != call_blacklist[i].cmd_class)
			continue;
		if (buffer->cmd_select != call_blacklist[i].cmd_select &&
		    call_blacklist[i].cmd_select != -1)
			continue;
		dev_dbg(d, "blacklisted command: %u/%u\n",
			buffer->cmd_class, buffer->cmd_select);
		return -EINVAL;
	}
	/* if a token call, find token ID */
	if ((buffer->cmd_class == CLASS_TOKEN_READ ||
	     buffer->cmd_class == CLASS_TOKEN_WRITE) &&
	    buffer->cmd_select < 3) {
		/* tokens enabled ? */
		if (!da_tokens) {
			dev_dbg(d, "no token support on this system\n");
			return -EINVAL;
		}
		/* find the matching token ID; input[0] holds the location */
		for (i = 0; i < da_num_tokens; i++) {
			if (da_tokens[i].location != buffer->input[0])
				continue;
			t = da_tokens[i].tokenID;
			break;
		}
		/* token call; but token didn't exist */
		if (!t) {
			dev_dbg(d, "token at location %04x doesn't exist\n",
				buffer->input[0]);
			return -EINVAL;
		}
		/* match against token blacklist */
		for (i = 0; i < ARRAY_SIZE(token_blacklist); i++) {
			if (!token_blacklist[i].min || !token_blacklist[i].max)
				continue;
			if (t >= token_blacklist[i].min &&
			    t <= token_blacklist[i].max)
				return -EINVAL;
		}
		/* match against token whitelist */
		for (i = 0; i < ARRAY_SIZE(token_whitelist); i++) {
			if (!token_whitelist[i].min || !token_whitelist[i].max)
				continue;
			if (t < token_whitelist[i].min ||
			    t > token_whitelist[i].max)
				continue;
			if (!token_whitelist[i].need_capability ||
			    capable(token_whitelist[i].need_capability)) {
				dev_dbg(d, "whitelisted token: %x\n", t);
				return 0;
			}
		}
		/* a token not on the whitelist falls through to the
		 * call whitelist / CAP_SYS_RAWIO checks below
		 */
	}
	/* match against call whitelist */
	for (i = 0; i < ARRAY_SIZE(call_whitelist); i++) {
		if (buffer->cmd_class != call_whitelist[i].cmd_class)
			continue;
		if (buffer->cmd_select != call_whitelist[i].cmd_select)
			continue;
		if (!call_whitelist[i].need_capability ||
		    capable(call_whitelist[i].need_capability)) {
			dev_dbg(d, "whitelisted capable command: %u/%u\n",
				buffer->cmd_class, buffer->cmd_select);
			return 0;
		}
		dev_dbg(d, "missing capability %d for %u/%u\n",
			call_whitelist[i].need_capability,
			buffer->cmd_class, buffer->cmd_select);
	}
	/* not in a whitelist, only allow processes with capabilities */
	if (capable(CAP_SYS_RAWIO)) {
		dev_dbg(d, "Allowing %u/%u due to CAP_SYS_RAWIO\n",
			buffer->cmd_class, buffer->cmd_select);
		return 0;
	}
	return -EACCES;
}
/**
 * dell_smbios_call() - dispatch a request to a registered backend
 * @buffer: calling-interface request/response buffer
 *
 * Scans the backend list and uses the backend with the highest device id
 * (presumably the preferred one — determined by registration order/ids).
 * The whole call runs under smbios_mutex.
 *
 * Return: the backend's result, or -ENODEV if no backend is registered.
 */
int dell_smbios_call(struct calling_interface_buffer *buffer)
{
	int (*call_fn)(struct calling_interface_buffer *) = NULL;
	struct device *selected_dev = NULL;
	struct smbios_device *priv;
	int ret;
	mutex_lock(&smbios_mutex);
	list_for_each_entry(priv, &smbios_device_list, list) {
		if (!selected_dev || priv->device->id >= selected_dev->id) {
			dev_dbg(priv->device, "Trying device ID: %d\n",
				priv->device->id);
			call_fn = priv->call_fn;
			selected_dev = priv->device;
		}
	}
	if (!selected_dev) {
		ret = -ENODEV;
		pr_err("No dell-smbios drivers are loaded\n");
		goto out_smbios_call;
	}
	ret = call_fn(buffer);
out_smbios_call:
	mutex_unlock(&smbios_mutex);
	return ret;
}
/**
 * dell_smbios_find_token() - look up a token descriptor by token ID
 * @tokenid: token ID from the DMI 0xDA calling-interface table
 *
 * Return: pointer to the matching token, or NULL if tokens are not
 * supported on this system or the ID is unknown.
 */
struct calling_interface_token *dell_smbios_find_token(int tokenid)
{
	int idx;

	if (!da_tokens)
		return NULL;
	for (idx = 0; idx < da_num_tokens; idx++)
		if (da_tokens[idx].tokenID == tokenid)
			return &da_tokens[idx];
	return NULL;
}
EXPORT_SYMBOL_GPL(dell_smbios_find_token);
/* notifier chain used to signal dell-laptop of platform events */
static BLOCKING_NOTIFIER_HEAD(dell_laptop_chain_head);
/* subscribe to dell-laptop notifications */
int dell_laptop_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&dell_laptop_chain_head, nb);
}
EXPORT_SYMBOL_GPL(dell_laptop_register_notifier);
/* unsubscribe from dell-laptop notifications */
int dell_laptop_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&dell_laptop_chain_head, nb);
}
EXPORT_SYMBOL_GPL(dell_laptop_unregister_notifier);
/* broadcast @action/@data to all registered listeners */
void dell_laptop_call_notifier(unsigned long action, void *data)
{
	blocking_notifier_call_chain(&dell_laptop_chain_head, action, data);
}
EXPORT_SYMBOL_GPL(dell_laptop_call_notifier);
/*
 * Parse one DMI 0xDA (calling interface) table: record the supported
 * command mask and append its tokens to da_tokens.  May be called more
 * than once (one call per 0xDA table found), hence the krealloc growth.
 */
static void __init parse_da_table(const struct dmi_header *dm)
{
	/* Final token is a terminator, so we don't want to copy it */
	int tokens = (dm->length-11)/sizeof(struct calling_interface_token)-1;
	struct calling_interface_token *new_da_tokens;
	struct calling_interface_structure *table =
		container_of(dm, struct calling_interface_structure, header);
	/*
	 * 4 bytes of table header, plus 7 bytes of Dell header
	 * plus at least 6 bytes of entry
	 */
	if (dm->length < 17)
		return;
	da_supported_commands = table->supportedCmds;
	new_da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) *
				 sizeof(struct calling_interface_token),
				 GFP_KERNEL);
	if (!new_da_tokens)
		return;
	da_tokens = new_da_tokens;
	memcpy(da_tokens+da_num_tokens, table->tokens,
	       sizeof(struct calling_interface_token) * tokens);
	da_num_tokens += tokens;
}
/*
 * Zero out the tokenID of any later duplicate entries in da_tokens;
 * duplicate IDs would otherwise produce colliding sysfs file names.
 */
static void zero_duplicates(struct device *dev)
{
	int a, b;

	for (a = 0; a < da_num_tokens; a++) {
		if (da_tokens[a].tokenID == 0)
			continue;
		for (b = a + 1; b < da_num_tokens; b++) {
			if (da_tokens[b].tokenID == 0)
				continue;
			if (da_tokens[a].tokenID != da_tokens[b].tokenID)
				continue;
			dev_dbg(dev, "Zeroing dup token ID %x(%x/%x)\n",
				da_tokens[b].tokenID,
				da_tokens[b].location,
				da_tokens[b].value);
			da_tokens[b].tokenID = 0;
		}
	}
}
/*
 * dmi_walk() callback: only the 0xDA calling-interface table is parsed;
 * the other Dell OEM types are recognized but currently ignored.
 */
static void __init find_tokens(const struct dmi_header *dm, void *dummy)
{
	switch (dm->type) {
	case 0xd4: /* Indexed IO */
	case 0xd5: /* Protected Area Type 1 */
	case 0xd6: /* Protected Area Type 2 */
		break;
	case 0xda: /* Calling interface */
		parse_da_table(dm);
		break;
	}
}
/*
 * Map a sysfs attribute back to its token index in da_tokens by name.
 * token_attrs stores a location/value attribute pair per token, so the
 * flat index is halved.  Returns the token index (>= 0) or -EINVAL.
 */
static int match_attribute(struct device *dev,
			   struct device_attribute *attr)
{
	int i;
	for (i = 0; i < da_num_tokens * 2; i++) {
		if (!token_attrs[i])
			continue;
		if (strcmp(token_attrs[i]->name, attr->attr.name) == 0)
			return i/2;
	}
	dev_dbg(dev, "couldn't match: %s\n", attr->attr.name);
	return -EINVAL;
}
/*
 * sysfs show: SMBIOS location of the token backing this attribute.
 * match_attribute() returns a token index >= 0 on success (index 0 is
 * valid!) or -EINVAL; the old "i > 0" test wrongly hid token slot 0.
 */
static ssize_t location_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	int i;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	i = match_attribute(dev, attr);
	if (i >= 0)
		return sysfs_emit(buf, "%08x", da_tokens[i].location);
	return 0;
}
/*
 * sysfs show: current value of the token backing this attribute.
 * As in location_show(), index 0 is a valid result of match_attribute(),
 * so the success test must be "i >= 0", not "i > 0".
 */
static ssize_t value_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	int i;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	i = match_attribute(dev, attr);
	if (i >= 0)
		return sysfs_emit(buf, "%08x", da_tokens[i].value);
	return 0;
}
/* per-token attributes live under the "tokens" sysfs directory;
 * .attrs is filled in by build_tokens_sysfs()
 */
static struct attribute_group smbios_attribute_group = {
	.name = "tokens"
};
static struct platform_driver platform_driver = {
	.driver = {
		.name = "dell-smbios",
	},
};
/*
 * Create one "<tokenID>_location" and one "<tokenID>_value" read-only
 * attribute per non-zero token and register them as the "tokens" sysfs
 * group.  On failure all allocated attribute names are freed.
 *
 * Returns 0 on success, -ENOMEM on any allocation or group failure.
 */
static int build_tokens_sysfs(struct platform_device *dev)
{
	char *location_name;
	char *value_name;
	size_t size;
	int ret;
	int i, j;
	/* (number of tokens  + 1 for null terminated */
	size = sizeof(struct device_attribute) * (da_num_tokens + 1);
	token_location_attrs = kzalloc(size, GFP_KERNEL);
	if (!token_location_attrs)
		return -ENOMEM;
	token_value_attrs = kzalloc(size, GFP_KERNEL);
	if (!token_value_attrs)
		goto out_allocate_value;
	/* need to store both location and value + terminator*/
	size = sizeof(struct attribute *) * ((2 * da_num_tokens) + 1);
	token_attrs = kzalloc(size, GFP_KERNEL);
	if (!token_attrs)
		goto out_allocate_attrs;
	/* i indexes the token arrays, j the flattened attribute list */
	for (i = 0, j = 0; i < da_num_tokens; i++) {
		/* skip empty */
		if (da_tokens[i].tokenID == 0)
			continue;
		/* add location */
		location_name = kasprintf(GFP_KERNEL, "%04x_location",
					  da_tokens[i].tokenID);
		if (location_name == NULL)
			goto out_unwind_strings;
		sysfs_attr_init(&token_location_attrs[i].attr);
		token_location_attrs[i].attr.name = location_name;
		token_location_attrs[i].attr.mode = 0444;
		token_location_attrs[i].show = location_show;
		token_attrs[j++] = &token_location_attrs[i].attr;
		/* add value */
		value_name = kasprintf(GFP_KERNEL, "%04x_value",
				       da_tokens[i].tokenID);
		if (value_name == NULL)
			goto loop_fail_create_value;
		sysfs_attr_init(&token_value_attrs[i].attr);
		token_value_attrs[i].attr.name = value_name;
		token_value_attrs[i].attr.mode = 0444;
		token_value_attrs[i].show = value_show;
		token_attrs[j++] = &token_value_attrs[i].attr;
		continue;
loop_fail_create_value:
		/* free the location name of the half-built pair, then unwind */
		kfree(location_name);
		goto out_unwind_strings;
	}
	smbios_attribute_group.attrs = token_attrs;
	ret = sysfs_create_group(&dev->dev.kobj, &smbios_attribute_group);
	if (ret)
		goto out_unwind_strings;
	return 0;
out_unwind_strings:
	/* frees names of all fully built pairs; kfree(NULL) ok for skipped */
	while (i--) {
		kfree(token_location_attrs[i].attr.name);
		kfree(token_value_attrs[i].attr.name);
	}
	kfree(token_attrs);
out_allocate_attrs:
	kfree(token_value_attrs);
out_allocate_value:
	kfree(token_location_attrs);
	return -ENOMEM;
}
/*
 * Tear down what build_tokens_sysfs() created: remove the sysfs group
 * and free every attribute name and the attribute arrays themselves.
 */
static void free_group(struct platform_device *pdev)
{
	int i;
	sysfs_remove_group(&pdev->dev.kobj,
				&smbios_attribute_group);
	for (i = 0; i < da_num_tokens; i++) {
		kfree(token_location_attrs[i].attr.name);
		kfree(token_value_attrs[i].attr.name);
	}
	kfree(token_attrs);
	kfree(token_value_attrs);
	kfree(token_location_attrs);
}
/*
 * Module init: verify this is a Dell system, parse DMI tokens, create the
 * platform device, start the WMI and SMM backends (at least one must
 * succeed), then expose the token sysfs interface.
 */
static int __init dell_smbios_init(void)
{
	int ret, wmi, smm;
	if (!dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "Dell System", NULL) &&
	    !dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "www.dell.com", NULL)) {
		pr_err("Unable to run on non-Dell system\n");
		return -ENODEV;
	}
	dmi_walk(find_tokens, NULL);
	ret = platform_driver_register(&platform_driver);
	if (ret)
		goto fail_platform_driver;
	platform_device = platform_device_alloc("dell-smbios", 0);
	if (!platform_device) {
		ret = -ENOMEM;
		goto fail_platform_device_alloc;
	}
	ret = platform_device_add(platform_device);
	if (ret)
		goto fail_platform_device_add;
	/* register backends; a single working backend is sufficient */
	wmi = init_dell_smbios_wmi();
	if (wmi)
		pr_debug("Failed to initialize WMI backend: %d\n", wmi);
	smm = init_dell_smbios_smm();
	if (smm)
		pr_debug("Failed to initialize SMM backend: %d\n", smm);
	if (wmi && smm) {
		pr_err("No SMBIOS backends available (wmi: %d, smm: %d)\n",
			wmi, smm);
		ret = -ENODEV;
		goto fail_create_group;
	}
	if (da_tokens)  {
		/* duplicate tokens will cause problems building sysfs files */
		zero_duplicates(&platform_device->dev);
		ret = build_tokens_sysfs(platform_device);
		if (ret)
			goto fail_sysfs;
	}
	return 0;
fail_sysfs:
	free_group(platform_device);
fail_create_group:
	platform_device_del(platform_device);
fail_platform_device_add:
	platform_device_put(platform_device);
fail_platform_device_alloc:
	platform_driver_unregister(&platform_driver);
fail_platform_driver:
	kfree(da_tokens);
	return ret;
}
/*
 * Module exit: stop both backends first, then tear down the token sysfs
 * group and platform device under smbios_mutex, and release the tokens.
 */
static void __exit dell_smbios_exit(void)
{
	exit_dell_smbios_wmi();
	exit_dell_smbios_smm();
	mutex_lock(&smbios_mutex);
	if (platform_device) {
		if (da_tokens)
			free_group(platform_device);
		platform_device_unregister(platform_device);
		platform_driver_unregister(&platform_driver);
	}
	kfree(da_tokens);
	mutex_unlock(&smbios_mutex);
}
module_init(dell_smbios_init);
module_exit(dell_smbios_exit);
MODULE_AUTHOR("Matthew Garrett <[email protected]>");
MODULE_AUTHOR("Gabriele Mazzotta <[email protected]>");
MODULE_AUTHOR("Pali Rohár <[email protected]>");
MODULE_AUTHOR("Mario Limonciello <[email protected]>");
MODULE_DESCRIPTION("Common functions for kernel modules using Dell SMBIOS");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/x86/dell/dell-smbios-base.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Dell privacy notification driver
*
* Copyright (C) 2021 Dell Inc. All Rights Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/list.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/wmi.h>
#include "dell-wmi-privacy.h"
#define DELL_PRIVACY_GUID "6932965F-1671-4CEB-B988-D3AB0A901919"
#define MICROPHONE_STATUS		BIT(0)
#define CAMERA_STATUS		        BIT(1)
#define DELL_PRIVACY_AUDIO_EVENT  0x1
#define DELL_PRIVACY_CAMERA_EVENT 0x2
#define led_to_priv(c)       container_of(c, struct privacy_wmi_data, cdev)
/*
 * The wmi_list is used to store the privacy_priv struct with mutex protecting
 */
static LIST_HEAD(wmi_list);
static DEFINE_MUTEX(list_mutex);
/* per-WMI-device driver state */
struct privacy_wmi_data {
	struct input_dev *input_dev;
	struct wmi_device *wdev;
	struct list_head list;
	struct led_classdev cdev;
	/* bitmap of enum dell_hardware_privacy_type features on this system */
	u32 features_present;
	/* last reported device status; a set bit means unmuted */
	u32 last_status;
};
/* DELL Privacy Type */
enum dell_hardware_privacy_type {
	DELL_PRIVACY_TYPE_AUDIO = 0,
	DELL_PRIVACY_TYPE_CAMERA,
	DELL_PRIVACY_TYPE_SCREEN,
	DELL_PRIVACY_TYPE_MAX,
};
/* human-readable names, indexed by dell_hardware_privacy_type */
static const char * const privacy_types[DELL_PRIVACY_TYPE_MAX] = {
	[DELL_PRIVACY_TYPE_AUDIO] = "Microphone",
	[DELL_PRIVACY_TYPE_CAMERA] = "Camera Shutter",
	[DELL_PRIVACY_TYPE_SCREEN] = "ePrivacy Screen",
};
/*
 * Keymap for WMI privacy events of type 0x0012
 */
static const struct key_entry dell_wmi_keymap_type_0012[] = {
	/* privacy mic mute */
	{ KE_KEY, 0x0001, { KEY_MICMUTE } },
	/* privacy camera mute */
	{ KE_VSW, 0x0002, { SW_CAMERA_LENS_COVER } },
	{ KE_END, 0},
};
/**
 * dell_privacy_has_mic_mute() - query for the hardware mic-mute feature
 *
 * Return: true if a privacy device has been probed and it advertises the
 * audio (microphone) privacy feature.
 */
bool dell_privacy_has_mic_mute(void)
{
	struct privacy_wmi_data *data;
	bool has_audio;

	mutex_lock(&list_mutex);
	data = list_first_entry_or_null(&wmi_list,
					struct privacy_wmi_data,
					list);
	has_audio = data && (data->features_present & BIT(DELL_PRIVACY_TYPE_AUDIO));
	mutex_unlock(&list_mutex);
	return has_audio;
}
EXPORT_SYMBOL_GPL(dell_privacy_has_mic_mute);
/*
 * The flow of privacy event:
 * 1) User presses key. HW does stuff with this key (timeout is started)
 * 2) WMI event is emitted from BIOS
 * 3) WMI event is received by dell-privacy
 * 4) KEY_MICMUTE emitted from dell-privacy
 * 5) Userland picks up key and modifies kcontrol for SW mute
 * 6) Codec kernel driver catches and calls ledtrig_audio_set which will call
 *    led_set_brightness() on the LED registered by dell_privacy_leds_setup()
 * 7) dell-privacy notifies EC, the timeout is cancelled and the HW mute activates.
 *    If the EC is not notified then the HW mic mute will activate when the timeout
 *    triggers, just a bit later than with the active ack.
 */
/*
 * dell_privacy_process_event() - forward a privacy WMI event to input
 * @type:   event type (combined with @code to form the keymap scancode)
 * @code:   DELL_PRIVACY_AUDIO_EVENT or DELL_PRIVACY_CAMERA_EVENT
 * @status: new device status bitmap (bit set = unmuted)
 *
 * Returns true if the event was recognized and reported, false otherwise.
 */
bool dell_privacy_process_event(int type, int code, int status)
{
	struct privacy_wmi_data *priv;
	const struct key_entry *key;
	bool ret = false;
	mutex_lock(&list_mutex);
	priv = list_first_entry_or_null(&wmi_list,
			struct privacy_wmi_data,
			list);
	if (!priv)
		goto error;
	key = sparse_keymap_entry_from_scancode(priv->input_dev, (type << 16) | code);
	if (!key) {
		dev_warn(&priv->wdev->dev, "Unknown key with type 0x%04x and code 0x%04x pressed\n",
			type, code);
		goto error;
	}
	dev_dbg(&priv->wdev->dev, "Key with type 0x%04x and code 0x%04x pressed\n", type, code);
	switch (code) {
	case DELL_PRIVACY_AUDIO_EVENT: /* Mic mute */
		priv->last_status = status;
		sparse_keymap_report_entry(priv->input_dev, key, 1, true);
		ret = true;
		break;
	case DELL_PRIVACY_CAMERA_EVENT: /* Camera mute */
		priv->last_status = status;
		/* switch state: cover closed when CAMERA_STATUS bit clear */
		sparse_keymap_report_entry(priv->input_dev, key, !(status & CAMERA_STATUS), false);
		ret = true;
		break;
	default:
		dev_dbg(&priv->wdev->dev, "unknown event type 0x%04x 0x%04x\n", type, code);
	}
error:
	mutex_unlock(&list_mutex);
	return ret;
}
/* sysfs: list every privacy feature and whether this platform has it */
static ssize_t dell_privacy_supported_type_show(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct privacy_wmi_data *priv = dev_get_drvdata(dev);
	enum dell_hardware_privacy_type type;
	int len = 0;

	for (type = DELL_PRIVACY_TYPE_AUDIO; type < DELL_PRIVACY_TYPE_MAX; type++) {
		if (priv->features_present & BIT(type))
			len += sysfs_emit_at(buf, len, "[%s] [supported]\n",
					     privacy_types[type]);
		else
			len += sysfs_emit_at(buf, len, "[%s] [unsupported]\n",
					     privacy_types[type]);
	}
	return len;
}
static ssize_t dell_privacy_current_state_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct privacy_wmi_data *priv = dev_get_drvdata(dev);
u32 privacy_supported = priv->features_present;
enum dell_hardware_privacy_type type;
u32 privacy_state = priv->last_status;
int len = 0;
for (type = DELL_PRIVACY_TYPE_AUDIO; type < DELL_PRIVACY_TYPE_MAX; type++) {
if (privacy_supported & BIT(type)) {
if (privacy_state & BIT(type))
len += sysfs_emit_at(buf, len, "[%s] [unmuted]\n", privacy_types[type]);
else
len += sysfs_emit_at(buf, len, "[%s] [muted]\n", privacy_types[type]);
}
}
return len;
}
/* read-only sysfs attributes exposing feature presence and state */
static DEVICE_ATTR_RO(dell_privacy_supported_type);
static DEVICE_ATTR_RO(dell_privacy_current_state);
static struct attribute *privacy_attrs[] = {
	&dev_attr_dell_privacy_supported_type.attr,
	&dev_attr_dell_privacy_current_state.attr,
	NULL,
};
ATTRIBUTE_GROUPS(privacy);
/*
 * Describes the Device State class exposed by BIOS which can be consumed by
 * various applications interested in knowing the Privacy feature capabilities.
 * class DeviceState
 * {
 *  [key, read] string InstanceName;
 *  [read] boolean ReadOnly;
 *
 *  [WmiDataId(1), read] uint32 DevicesSupported;
 *   0 - None; 0x1 - Microphone; 0x2 - Camera; 0x4 - ePrivacy  Screen
 *
 *  [WmiDataId(2), read] uint32 CurrentState;
 *   0 - Off; 1 - On; Bit0 - Microphone; Bit1 - Camera; Bit2 - ePrivacyScreen
 * };
 */
/*
 * Query the DeviceState WMI block and cache DevicesSupported (buffer[0])
 * and CurrentState (buffer[1]) in the driver data.  Returns 0 on success,
 * -EINVAL/-EIO on missing data or a malformed buffer.
 */
static int get_current_status(struct wmi_device *wdev)
{
	struct privacy_wmi_data *priv = dev_get_drvdata(&wdev->dev);
	union acpi_object *obj_present;
	u32 *buffer;
	int ret = 0;
	if (!priv) {
		dev_err(&wdev->dev, "dell privacy priv is NULL\n");
		return -EINVAL;
	}
	/* check privacy support features and device states */
	obj_present = wmidev_block_query(wdev, 0);
	if (!obj_present) {
		dev_err(&wdev->dev, "failed to read Binary MOF\n");
		return -EIO;
	}
	if (obj_present->type != ACPI_TYPE_BUFFER) {
		dev_err(&wdev->dev, "Binary MOF is not a buffer!\n");
		ret = -EIO;
		goto obj_free;
	}
	/* Although it's not technically a failure, this would lead to
	 * unexpected behavior
	 */
	if (obj_present->buffer.length != 8) {
		dev_err(&wdev->dev, "Dell privacy buffer has unexpected length (%d)!\n",
				obj_present->buffer.length);
		ret = -EINVAL;
		goto obj_free;
	}
	buffer = (u32 *)obj_present->buffer.pointer;
	priv->features_present = buffer[0];
	priv->last_status = buffer[1];
obj_free:
	kfree(obj_present);
	return ret;
}
/*
 * LED "set" hook: acknowledge the SW mute to the EC by evaluating the
 * ACPI ECAK method.  The brightness value itself is not used; evaluating
 * the method is the whole side effect.
 */
static int dell_privacy_micmute_led_set(struct led_classdev *led_cdev,
					enum led_brightness brightness)
{
	struct privacy_wmi_data *priv = led_to_priv(led_cdev);
	static char *acpi_method = (char *)"ECAK";
	acpi_handle handle = ec_get_handle();
	acpi_status status;

	if (!handle)
		return -EIO;
	if (!acpi_has_method(handle, acpi_method))
		return -EIO;
	status = acpi_evaluate_object(handle, acpi_method, NULL, NULL);
	if (ACPI_FAILURE(status)) {
		dev_err(&priv->wdev->dev, "Error setting privacy EC ack value: %s\n",
			acpi_format_exception(status));
		return -EIO;
	}
	return 0;
}
/*
 * Pressing the mute key activates a time delayed circuit to physically cut
 * off the mute. The LED is in the same circuit, so it reflects the true
 * state of the HW mute.  The reason for the EC "ack" is so that software
 * can first invoke a SW mute before the HW circuit is cut off.  Without SW
 * cutting this off first does not affect the time delayed muting or status
 * of the LED but there is a possibility of a "popping" noise.
 *
 * If the EC receives the SW ack, the circuit will be activated before the
 * delay completed.
 *
 * Exposing as an LED device allows the codec drivers notification path to
 * EC ACK to work
 */
/* Register the mic-mute ack LED; lifetime is managed by devm. */
static int dell_privacy_leds_setup(struct device *dev)
{
	struct privacy_wmi_data *priv = dev_get_drvdata(dev);
	priv->cdev.name = "dell-privacy::micmute";
	priv->cdev.max_brightness = 1;
	priv->cdev.brightness_set_blocking = dell_privacy_micmute_led_set;
	/* follow the audio mic-mute trigger so codec drivers drive the ack */
	priv->cdev.default_trigger = "audio-micmute";
	priv->cdev.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
	return devm_led_classdev_register(dev, &priv->cdev);
}
/*
 * Probe: read the feature/state WMI block, build a filtered copy of the
 * type-0x0012 keymap (scancodes prefixed with the event type), register
 * the input device, optionally the mic-mute LED, and add this instance
 * to the global wmi_list.
 */
static int dell_privacy_wmi_probe(struct wmi_device *wdev, const void *context)
{
	struct privacy_wmi_data *priv;
	struct key_entry *keymap;
	int ret, i, j;
	ret = wmi_has_guid(DELL_PRIVACY_GUID);
	if (!ret)
		pr_debug("Unable to detect available Dell privacy devices!\n");
	priv = devm_kzalloc(&wdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	dev_set_drvdata(&wdev->dev, priv);
	priv->wdev = wdev;
	ret = get_current_status(priv->wdev);
	if (ret)
		return ret;
	/* create evdev passing interface */
	priv->input_dev = devm_input_allocate_device(&wdev->dev);
	if (!priv->input_dev)
		return -ENOMEM;
	/* remap the wmi keymap event to new keymap */
	keymap = kcalloc(ARRAY_SIZE(dell_wmi_keymap_type_0012),
			 sizeof(struct key_entry), GFP_KERNEL);
	if (!keymap)
		return -ENOMEM;
	/* remap the keymap code with Dell privacy key type 0x12 as prefix
	 * KEY_MICMUTE scancode will be reported as 0x120001
	 */
	for (i = 0, j = 0; i < ARRAY_SIZE(dell_wmi_keymap_type_0012); i++) {
		/*
		 * Unlike keys where only presses matter, userspace may act
		 * on switches in both of their positions. Only register
		 * SW_CAMERA_LENS_COVER if it is actually there.
		 */
		if (dell_wmi_keymap_type_0012[i].type == KE_VSW &&
		    dell_wmi_keymap_type_0012[i].sw.code == SW_CAMERA_LENS_COVER &&
		    !(priv->features_present & BIT(DELL_PRIVACY_TYPE_CAMERA)))
			continue;
		keymap[j] = dell_wmi_keymap_type_0012[i];
		keymap[j].code |= (0x0012 << 16);
		j++;
	}
	ret = sparse_keymap_setup(priv->input_dev, keymap, NULL);
	/* sparse_keymap_setup() copies the keymap; the temp copy can go */
	kfree(keymap);
	if (ret)
		return ret;
	priv->input_dev->dev.parent = &wdev->dev;
	priv->input_dev->name = "Dell Privacy Driver";
	priv->input_dev->id.bustype = BUS_HOST;
	/* Report initial camera-cover status */
	if (priv->features_present & BIT(DELL_PRIVACY_TYPE_CAMERA))
		input_report_switch(priv->input_dev, SW_CAMERA_LENS_COVER,
				    !(priv->last_status & CAMERA_STATUS));
	ret = input_register_device(priv->input_dev);
	if (ret)
		return ret;
	if (priv->features_present & BIT(DELL_PRIVACY_TYPE_AUDIO)) {
		ret = dell_privacy_leds_setup(&priv->wdev->dev);
		if (ret)
			return ret;
	}
	mutex_lock(&list_mutex);
	list_add_tail(&priv->list, &wmi_list);
	mutex_unlock(&list_mutex);
	return 0;
}
/*
 * Remove: unlink from the global notification list. All other resources
 * were devm-allocated in probe and are released automatically.
 */
static void dell_privacy_wmi_remove(struct wmi_device *wdev)
{
        struct privacy_wmi_data *priv = dev_get_drvdata(&wdev->dev);

        mutex_lock(&list_mutex);
        list_del(&priv->list);
        mutex_unlock(&list_mutex);
}
static const struct wmi_device_id dell_wmi_privacy_wmi_id_table[] = {
        { .guid_string = DELL_PRIVACY_GUID },
        { },
};

static struct wmi_driver dell_privacy_wmi_driver = {
        .driver = {
                .name = "dell-privacy",
                .dev_groups = privacy_groups,
        },
        .probe = dell_privacy_wmi_probe,
        .remove = dell_privacy_wmi_remove,
        .id_table = dell_wmi_privacy_wmi_id_table,
};

/*
 * NOTE(review): exported entry points rather than module_init/exit -
 * presumably registered by the parent dell-wmi module; confirm callers.
 */
int dell_privacy_register_driver(void)
{
        return wmi_driver_register(&dell_privacy_wmi_driver);
}

void dell_privacy_unregister_driver(void)
{
        wmi_driver_unregister(&dell_privacy_wmi_driver);
}
| linux-master | drivers/platform/x86/dell/dell-wmi-privacy.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Alienware AlienFX control
*
* Copyright (C) 2014 Dell Inc <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dmi.h>
#include <linux/leds.h>
#define LEGACY_CONTROL_GUID "A90597CE-A997-11DA-B012-B622A1EF5492"
#define LEGACY_POWER_CONTROL_GUID "A80593CE-A997-11DA-B012-B622A1EF5492"
#define WMAX_CONTROL_GUID "A70591CE-A997-11DA-B012-B622A1EF5492"
#define WMAX_METHOD_HDMI_SOURCE 0x1
#define WMAX_METHOD_HDMI_STATUS 0x2
#define WMAX_METHOD_BRIGHTNESS 0x3
#define WMAX_METHOD_ZONE_CONTROL 0x4
#define WMAX_METHOD_HDMI_CABLE 0x5
#define WMAX_METHOD_AMPLIFIER_CABLE 0x6
#define WMAX_METHOD_DEEP_SLEEP_CONTROL 0x0B
#define WMAX_METHOD_DEEP_SLEEP_STATUS 0x0C
MODULE_AUTHOR("Mario Limonciello <[email protected]>");
MODULE_DESCRIPTION("Alienware special feature control");
MODULE_LICENSE("GPL");
MODULE_ALIAS("wmi:" LEGACY_CONTROL_GUID);
MODULE_ALIAS("wmi:" WMAX_CONTROL_GUID);
/* Which generation of the WMI control interface the machine exposes */
enum INTERFACE_FLAGS {
        LEGACY,
        WMAX,
};

/* Lighting-state values understood by the legacy interface */
enum LEGACY_CONTROL_STATES {
        LEGACY_RUNNING = 1,
        LEGACY_BOOTING = 0,
        LEGACY_SUSPEND = 3,
};

/* Lighting-state values understood by the WMAX interface */
enum WMAX_CONTROL_STATES {
        WMAX_RUNNING = 0xFF,
        WMAX_BOOTING = 0,
        WMAX_SUSPEND = 3,
};

/* Per-model capabilities, selected via DMI matching below */
struct quirk_entry {
        u8 num_zones;   /* number of RGB lighting zones */
        u8 hdmi_mux;    /* non-zero if the HDMI input mux exists */
        u8 amplifier;   /* non-zero if the graphics-amplifier port exists */
        u8 deepslp;     /* non-zero if deep-sleep control exists */
};

static struct quirk_entry *quirks;


static struct quirk_entry quirk_inspiron5675 = {
        .num_zones = 2,
        .hdmi_mux = 0,
        .amplifier = 0,
        .deepslp = 0,
};

/* Conservative default when no DMI entry matches */
static struct quirk_entry quirk_unknown = {
        .num_zones = 2,
        .hdmi_mux = 0,
        .amplifier = 0,
        .deepslp = 0,
};

static struct quirk_entry quirk_x51_r1_r2 = {
        .num_zones = 3,
        .hdmi_mux = 0,
        .amplifier = 0,
        .deepslp = 0,
};

static struct quirk_entry quirk_x51_r3 = {
        .num_zones = 4,
        .hdmi_mux = 0,
        .amplifier = 1,
        .deepslp = 0,
};

static struct quirk_entry quirk_asm100 = {
        .num_zones = 2,
        .hdmi_mux = 1,
        .amplifier = 0,
        .deepslp = 0,
};

static struct quirk_entry quirk_asm200 = {
        .num_zones = 2,
        .hdmi_mux = 1,
        .amplifier = 0,
        .deepslp = 1,
};

static struct quirk_entry quirk_asm201 = {
        .num_zones = 2,
        .hdmi_mux = 1,
        .amplifier = 1,
        .deepslp = 1,
};
/* DMI callback: remember the matched model's quirk table entry */
static int __init dmi_matched(const struct dmi_system_id *dmi)
{
        quirks = dmi->driver_data;

        return 1;
}
static const struct dmi_system_id alienware_quirks[] __initconst = {
{
.callback = dmi_matched,
.ident = "Alienware X51 R3",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51 R3"),
},
.driver_data = &quirk_x51_r3,
},
{
.callback = dmi_matched,
.ident = "Alienware X51 R2",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51 R2"),
},
.driver_data = &quirk_x51_r1_r2,
},
{
.callback = dmi_matched,
.ident = "Alienware X51 R1",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51"),
},
.driver_data = &quirk_x51_r1_r2,
},
{
.callback = dmi_matched,
.ident = "Alienware ASM100",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
DMI_MATCH(DMI_PRODUCT_NAME, "ASM100"),
},
.driver_data = &quirk_asm100,
},
{
.callback = dmi_matched,
.ident = "Alienware ASM200",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
DMI_MATCH(DMI_PRODUCT_NAME, "ASM200"),
},
.driver_data = &quirk_asm200,
},
{
.callback = dmi_matched,
.ident = "Alienware ASM201",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
DMI_MATCH(DMI_PRODUCT_NAME, "ASM201"),
},
.driver_data = &quirk_asm201,
},
{
.callback = dmi_matched,
.ident = "Dell Inc. Inspiron 5675",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5675"),
},
.driver_data = &quirk_inspiron5675,
},
{}
};
/* Byte layout is blue, green, red - see the union pun in parse_rgb() */
struct color_platform {
        u8 blue;
        u8 green;
        u8 red;
} __packed;

struct platform_zone {
        u8 location;                    /* zone index used in WMI calls */
        struct device_attribute *attr;  /* sysfs attribute for this zone */
        struct color_platform colors;   /* last colour written to the zone */
};

struct wmax_brightness_args {
        u32 led_mask;
        u32 percentage;
};

struct wmax_basic_args {
        u8 arg;
};

struct legacy_led_args {
        struct color_platform colors;
        u8 brightness;
        u8 state;
} __packed;

struct wmax_led_args {
        u32 led_mask;
        struct color_platform colors;
        u8 state;
} __packed;

static struct platform_device *platform_device;
static struct device_attribute *zone_dev_attrs;
static struct attribute **zone_attrs;
static struct platform_zone *zone_data;

static struct platform_driver platform_driver = {
        .driver = {
                .name = "alienware-wmi",
        }
};

static struct attribute_group zone_attribute_group = {
        .name = "rgb_zones",
};

static u8 interface;                    /* LEGACY or WMAX */
static u8 lighting_control_state;
static u8 global_brightness;
/*
* Helpers used for zone control
*/
/*
 * Parse a 24-bit hexadecimal RGB triplet ("RRGGBB") from sysfs input into
 * the zone's colour struct. Returns 0 on success or a negative errno.
 */
static int parse_rgb(const char *buf, struct platform_zone *zone)
{
        long unsigned int rgb;
        int ret;
        union color_union {
                struct color_platform cp;
                int package;
        } repackager;

        ret = kstrtoul(buf, 16, &rgb);
        if (ret)
                return ret;

        /* RGB triplet notation is 24-bit hexadecimal */
        if (rgb > 0xFFFFFF)
                return -EINVAL;

        /*
         * NOTE(review): the mask keeps only the low nibble of each byte,
         * clamping every channel to 0x0F - presumably the hardware takes
         * 4 bits per channel; confirm against the WMI spec. The union pun
         * also assumes little-endian order so the low byte of "package"
         * lands in .blue - TODO confirm.
         */
        repackager.package = rgb & 0x0f0f0f0f;
        pr_debug("alienware-wmi: r: %d g:%d b: %d\n",
                 repackager.cp.red, repackager.cp.green, repackager.cp.blue);
        zone->colors = repackager.cp;
        return 0;
}
static struct platform_zone *match_zone(struct device_attribute *attr)
{
u8 zone;
for (zone = 0; zone < quirks->num_zones; zone++) {
if ((struct device_attribute *)zone_data[zone].attr == attr) {
pr_debug("alienware-wmi: matched zone location: %d\n",
zone_data[zone].location);
return &zone_data[zone];
}
}
return NULL;
}
/*
* Individual RGB zone control
*/
/*
 * Push one zone's colour (plus the current lighting state) to firmware.
 * WMAX takes a LED bitmask in a single method; the legacy interface uses
 * one method ID per zone and a separate "power control" GUID while the
 * state is booting or suspending.
 * Returns 0 on success, 1 on ACPI failure (result of ACPI_FAILURE()).
 */
static int alienware_update_led(struct platform_zone *zone)
{
        int method_id;
        acpi_status status;
        char *guid;
        struct acpi_buffer input;
        struct legacy_led_args legacy_args;
        struct wmax_led_args wmax_basic_args;

        if (interface == WMAX) {
                wmax_basic_args.led_mask = 1 << zone->location;
                wmax_basic_args.colors = zone->colors;
                wmax_basic_args.state = lighting_control_state;
                guid = WMAX_CONTROL_GUID;
                method_id = WMAX_METHOD_ZONE_CONTROL;

                input.length = (acpi_size) sizeof(wmax_basic_args);
                input.pointer = &wmax_basic_args;
        } else {
                legacy_args.colors = zone->colors;
                legacy_args.brightness = global_brightness;
                legacy_args.state = 0;
                if (lighting_control_state == LEGACY_BOOTING ||
                    lighting_control_state == LEGACY_SUSPEND) {
                        guid = LEGACY_POWER_CONTROL_GUID;
                        legacy_args.state = lighting_control_state;
                } else
                        guid = LEGACY_CONTROL_GUID;
                /* legacy method IDs are 1-based, one per zone */
                method_id = zone->location + 1;

                input.length = (acpi_size) sizeof(legacy_args);
                input.pointer = &legacy_args;
        }
        pr_debug("alienware-wmi: guid %s method %d\n", guid, method_id);

        status = wmi_evaluate_method(guid, 0, method_id, &input, NULL);
        if (ACPI_FAILURE(status))
                pr_err("alienware-wmi: zone set failure: %u\n", status);
        return ACPI_FAILURE(status);
}
/* sysfs read: report the cached colour of the zone behind @attr */
static ssize_t zone_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct platform_zone *target_zone = match_zone(attr);

        if (!target_zone)
                return sprintf(buf, "red: -1, green: -1, blue: -1\n");

        return sprintf(buf, "red: %d, green: %d, blue: %d\n",
                       target_zone->colors.red,
                       target_zone->colors.green, target_zone->colors.blue);
}
/*
 * sysfs write: parse an "RRGGBB" triplet and push it to the zone.
 * Returns @count on success or a negative errno on failure.
 */
static ssize_t zone_set(struct device *dev, struct device_attribute *attr,
                        const char *buf, size_t count)
{
        struct platform_zone *target_zone;
        int ret;

        target_zone = match_zone(attr);
        if (target_zone == NULL) {
                pr_err("alienware-wmi: invalid target zone\n");
                /*
                 * Previously returned 1, which a sysfs store interprets as
                 * "one byte consumed", making userspace loop forever on a
                 * bad attribute. Report a real error instead.
                 */
                return -EINVAL;
        }
        ret = parse_rgb(buf, target_zone);
        if (ret)
                return ret;
        ret = alienware_update_led(target_zone);
        /* alienware_update_led() returns 1 on ACPI failure, not an errno */
        return ret ? -EIO : count;
}
/*
* LED Brightness (Global)
*/
static int wmax_brightness(int brightness)
{
acpi_status status;
struct acpi_buffer input;
struct wmax_brightness_args args = {
.led_mask = 0xFF,
.percentage = brightness,
};
input.length = (acpi_size) sizeof(args);
input.pointer = &args;
status = wmi_evaluate_method(WMAX_CONTROL_GUID, 0,
WMAX_METHOD_BRIGHTNESS, &input, NULL);
if (ACPI_FAILURE(status))
pr_err("alienware-wmi: brightness set failure: %u\n", status);
return ACPI_FAILURE(status);
}
/*
 * LED-class brightness setter: cache the value, then push it through
 * whichever interface this machine uses.
 */
static void global_led_set(struct led_classdev *led_cdev,
                           enum led_brightness brightness)
{
        int ret;

        global_brightness = brightness;
        ret = (interface == WMAX) ? wmax_brightness(brightness)
                                  : alienware_update_led(&zone_data[0]);
        if (ret)
                pr_err("LED brightness update failed\n");
}
/* LED-class getter: report the cached value (no WMI round-trip) */
static enum led_brightness global_led_get(struct led_classdev *led_cdev)
{
        return global_brightness;
}

static struct led_classdev global_led = {
        .brightness_set = global_led_set,
        .brightness_get = global_led_get,
        .name = "alienware::global_brightness",
};
/*
* Lighting control state device attribute (Global)
*/
/* sysfs read: show the lighting state with the active one bracketed */
static ssize_t show_control_state(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        switch (lighting_control_state) {
        case LEGACY_BOOTING:
                return sysfs_emit(buf, "[booting] running suspend\n");
        case LEGACY_SUSPEND:
                return sysfs_emit(buf, "booting running [suspend]\n");
        default:
                return sysfs_emit(buf, "booting [running] suspend\n");
        }
}
/*
 * sysfs write: select the lighting state. Anything other than "booting"
 * or "suspend" selects the interface-specific "running" value.
 */
static ssize_t store_control_state(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        long unsigned int val;

        if (strcmp(buf, "booting\n") == 0)
                val = LEGACY_BOOTING;
        else if (strcmp(buf, "suspend\n") == 0)
                val = LEGACY_SUSPEND;
        else
                val = (interface == LEGACY) ? LEGACY_RUNNING : WMAX_RUNNING;

        lighting_control_state = val;
        pr_debug("alienware-wmi: updated control state to %d\n",
                 lighting_control_state);
        return count;
}

static DEVICE_ATTR(lighting_control_state, 0644, show_control_state,
                   store_control_state);
/*
 * Allocate and register the per-zone sysfs attributes, the global LED
 * class device, and the "rgb_zones" attribute group.
 * Returns 0 on success or a negative errno; partially allocated state is
 * cleaned up by alienware_zone_exit() via the caller's error path.
 */
static int alienware_zone_init(struct platform_device *dev)
{
        u8 zone;
        char buffer[10];
        char *name;

        if (interface == WMAX) {
                lighting_control_state = WMAX_RUNNING;
        } else if (interface == LEGACY) {
                lighting_control_state = LEGACY_RUNNING;
        }
        global_led.max_brightness = 0x0F;
        global_brightness = global_led.max_brightness;

        /*
         * - zone_dev_attrs num_zones + 1 is for individual zones and then
         *   null terminated
         * - zone_attrs num_zones + 2 is for all attrs in zone_dev_attrs +
         *   the lighting control + null terminated
         * - zone_data num_zones is for the distinct zones
         */
        zone_dev_attrs =
            kcalloc(quirks->num_zones + 1, sizeof(struct device_attribute),
                    GFP_KERNEL);
        if (!zone_dev_attrs)
                return -ENOMEM;

        zone_attrs =
            kcalloc(quirks->num_zones + 2, sizeof(struct attribute *),
                    GFP_KERNEL);
        if (!zone_attrs)
                return -ENOMEM;

        zone_data =
            kcalloc(quirks->num_zones, sizeof(struct platform_zone),
                    GFP_KERNEL);
        if (!zone_data)
                return -ENOMEM;

        for (zone = 0; zone < quirks->num_zones; zone++) {
                sprintf(buffer, "zone%02hhX", zone);
                name = kstrdup(buffer, GFP_KERNEL);
                if (name == NULL)
                        return -ENOMEM; /* was "return 1": not a valid errno */
                sysfs_attr_init(&zone_dev_attrs[zone].attr);
                zone_dev_attrs[zone].attr.name = name;
                zone_dev_attrs[zone].attr.mode = 0644;
                zone_dev_attrs[zone].show = zone_show;
                zone_dev_attrs[zone].store = zone_set;
                zone_data[zone].location = zone;
                zone_attrs[zone] = &zone_dev_attrs[zone].attr;
                zone_data[zone].attr = &zone_dev_attrs[zone];
        }
        zone_attrs[quirks->num_zones] = &dev_attr_lighting_control_state.attr;
        zone_attribute_group.attrs = zone_attrs;

        led_classdev_register(&dev->dev, &global_led);

        return sysfs_create_group(&dev->dev.kobj, &zone_attribute_group);
}
/*
 * Tear down everything alienware_zone_init() created. Safe on partial
 * initialisation: kfree(NULL) is a no-op and the name loop is guarded.
 */
static void alienware_zone_exit(struct platform_device *dev)
{
        u8 i;

        sysfs_remove_group(&dev->dev.kobj, &zone_attribute_group);
        led_classdev_unregister(&global_led);

        if (zone_dev_attrs) {
                for (i = 0; i < quirks->num_zones; i++)
                        kfree(zone_dev_attrs[i].attr.name);
        }
        kfree(zone_dev_attrs);
        kfree(zone_data);
        kfree(zone_attrs);
}
/*
 * Evaluate a WMAX WMI method with a small argument struct.
 * If @out_data is non-NULL, the method's integer result is stored there;
 * otherwise no output buffer is requested.
 * Returns the raw acpi_status from wmi_evaluate_method().
 */
static acpi_status alienware_wmax_command(struct wmax_basic_args *in_args,
                                          u32 command, int *out_data)
{
        acpi_status status;
        union acpi_object *obj;
        struct acpi_buffer input;
        struct acpi_buffer output;

        input.length = (acpi_size) sizeof(*in_args);
        input.pointer = in_args;
        if (out_data) {
                output.length = ACPI_ALLOCATE_BUFFER;
                output.pointer = NULL;
                status = wmi_evaluate_method(WMAX_CONTROL_GUID, 0,
                                             command, &input, &output);
                if (ACPI_SUCCESS(status)) {
                        obj = (union acpi_object *)output.pointer;
                        if (obj && obj->type == ACPI_TYPE_INTEGER)
                                *out_data = (u32)obj->integer.value;
                }
                /* ACPICA allocated the output buffer; free unconditionally */
                kfree(output.pointer);
        } else {
                status = wmi_evaluate_method(WMAX_CONTROL_GUID, 0,
                                             command, &input, NULL);
        }
        return status;
}
/*
* The HDMI mux sysfs node indicates the status of the HDMI input mux.
* It can toggle between standard system GPU output and HDMI input.
*/
/*
 * sysfs read: report whether an HDMI cable is plugged into the input mux.
 * Falls back to "[unknown]" when the WMI call fails or returns an
 * unexpected value.
 */
static ssize_t show_hdmi_cable(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        acpi_status status;
        u32 out_data;
        struct wmax_basic_args in_args = {
                .arg = 0,
        };

        status =
            alienware_wmax_command(&in_args, WMAX_METHOD_HDMI_CABLE,
                                   (u32 *) &out_data);
        if (ACPI_SUCCESS(status)) {
                if (out_data == 0)
                        return sysfs_emit(buf, "[unconnected] connected unknown\n");
                else if (out_data == 1)
                        return sysfs_emit(buf, "unconnected [connected] unknown\n");
        }
        /* acpi_status is unsigned: use %u, consistent with the other handlers */
        pr_err("alienware-wmi: unknown HDMI cable status: %u\n", status);
        return sysfs_emit(buf, "unconnected connected [unknown]\n");
}
static ssize_t show_hdmi_source(struct device *dev,
struct device_attribute *attr, char *buf)
{
acpi_status status;
u32 out_data;
struct wmax_basic_args in_args = {
.arg = 0,
};
status =
alienware_wmax_command(&in_args, WMAX_METHOD_HDMI_STATUS,
(u32 *) &out_data);
if (ACPI_SUCCESS(status)) {
if (out_data == 1)
return sysfs_emit(buf, "[input] gpu unknown\n");
else if (out_data == 2)
return sysfs_emit(buf, "input [gpu] unknown\n");
}
pr_err("alienware-wmi: unknown HDMI source status: %u\n", status);
return sysfs_emit(buf, "input gpu [unknown]\n");
}
static ssize_t toggle_hdmi_source(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
acpi_status status;
struct wmax_basic_args args;
if (strcmp(buf, "gpu\n") == 0)
args.arg = 1;
else if (strcmp(buf, "input\n") == 0)
args.arg = 2;
else
args.arg = 3;
pr_debug("alienware-wmi: setting hdmi to %d : %s", args.arg, buf);
status = alienware_wmax_command(&args, WMAX_METHOD_HDMI_SOURCE, NULL);
if (ACPI_FAILURE(status))
pr_err("alienware-wmi: HDMI toggle failed: results: %u\n",
status);
return count;
}
static DEVICE_ATTR(cable, S_IRUGO, show_hdmi_cable, NULL);
static DEVICE_ATTR(source, S_IRUGO | S_IWUSR, show_hdmi_source,
                   toggle_hdmi_source);

static struct attribute *hdmi_attrs[] = {
        &dev_attr_cable.attr,
        &dev_attr_source.attr,
        NULL,
};

static const struct attribute_group hdmi_attribute_group = {
        .name = "hdmi",
        .attrs = hdmi_attrs,
};

/* Remove the "hdmi" sysfs group; no-op on models without the mux quirk */
static void remove_hdmi(struct platform_device *dev)
{
        if (quirks->hdmi_mux > 0)
                sysfs_remove_group(&dev->dev.kobj, &hdmi_attribute_group);
}

/* Create the "hdmi" sysfs group, tearing it down again on failure */
static int create_hdmi(struct platform_device *dev)
{
        int ret;

        ret = sysfs_create_group(&dev->dev.kobj, &hdmi_attribute_group);
        if (ret)
                remove_hdmi(dev);
        return ret;
}
/*
* Alienware GFX amplifier support
* - Currently supports reading cable status
* - Leaving expansion room to possibly support dock/undock events later
*/
/*
 * sysfs read: report whether the graphics-amplifier cable is connected.
 * Falls back to "[unknown]" when the WMI call fails or returns an
 * unexpected value.
 */
static ssize_t show_amplifier_status(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        acpi_status status;
        u32 out_data;
        struct wmax_basic_args in_args = {
                .arg = 0,
        };

        status =
            alienware_wmax_command(&in_args, WMAX_METHOD_AMPLIFIER_CABLE,
                                   (u32 *) &out_data);
        if (ACPI_SUCCESS(status)) {
                if (out_data == 0)
                        return sysfs_emit(buf, "[unconnected] connected unknown\n");
                else if (out_data == 1)
                        return sysfs_emit(buf, "unconnected [connected] unknown\n");
        }
        /* acpi_status is unsigned: use %u, consistent with the other handlers */
        pr_err("alienware-wmi: unknown amplifier cable status: %u\n", status);
        return sysfs_emit(buf, "unconnected connected [unknown]\n");
}
static DEVICE_ATTR(status, S_IRUGO, show_amplifier_status, NULL);
static struct attribute *amplifier_attrs[] = {
&dev_attr_status.attr,
NULL,
};
static const struct attribute_group amplifier_attribute_group = {
.name = "amplifier",
.attrs = amplifier_attrs,
};
static void remove_amplifier(struct platform_device *dev)
{
if (quirks->amplifier > 0)
sysfs_remove_group(&dev->dev.kobj, &lifier_attribute_group);
}
static int create_amplifier(struct platform_device *dev)
{
int ret;
ret = sysfs_create_group(&dev->dev.kobj, &lifier_attribute_group);
if (ret)
remove_amplifier(dev);
return ret;
}
/*
* Deep Sleep Control support
* - Modifies BIOS setting for deep sleep control allowing extra wakeup events
*/
static ssize_t show_deepsleep_status(struct device *dev,
struct device_attribute *attr, char *buf)
{
acpi_status status;
u32 out_data;
struct wmax_basic_args in_args = {
.arg = 0,
};
status = alienware_wmax_command(&in_args, WMAX_METHOD_DEEP_SLEEP_STATUS,
(u32 *) &out_data);
if (ACPI_SUCCESS(status)) {
if (out_data == 0)
return sysfs_emit(buf, "[disabled] s5 s5_s4\n");
else if (out_data == 1)
return sysfs_emit(buf, "disabled [s5] s5_s4\n");
else if (out_data == 2)
return sysfs_emit(buf, "disabled s5 [s5_s4]\n");
}
pr_err("alienware-wmi: unknown deep sleep status: %d\n", status);
return sysfs_emit(buf, "disabled s5 s5_s4 [unknown]\n");
}
static ssize_t toggle_deepsleep(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
acpi_status status;
struct wmax_basic_args args;
if (strcmp(buf, "disabled\n") == 0)
args.arg = 0;
else if (strcmp(buf, "s5\n") == 0)
args.arg = 1;
else
args.arg = 2;
pr_debug("alienware-wmi: setting deep sleep to %d : %s", args.arg, buf);
status = alienware_wmax_command(&args, WMAX_METHOD_DEEP_SLEEP_CONTROL,
NULL);
if (ACPI_FAILURE(status))
pr_err("alienware-wmi: deep sleep control failed: results: %u\n",
status);
return count;
}
static DEVICE_ATTR(deepsleep, S_IRUGO | S_IWUSR, show_deepsleep_status, toggle_deepsleep);
static struct attribute *deepsleep_attrs[] = {
&dev_attr_deepsleep.attr,
NULL,
};
static const struct attribute_group deepsleep_attribute_group = {
.name = "deepsleep",
.attrs = deepsleep_attrs,
};
static void remove_deepsleep(struct platform_device *dev)
{
if (quirks->deepslp > 0)
sysfs_remove_group(&dev->dev.kobj, &deepsleep_attribute_group);
}
static int create_deepsleep(struct platform_device *dev)
{
int ret;
ret = sysfs_create_group(&dev->dev.kobj, &deepsleep_attribute_group);
if (ret)
remove_deepsleep(dev);
return ret;
}
/*
 * Module init: pick the interface generation from the advertised WMI
 * GUIDs, apply DMI quirks, register the platform driver/device, then
 * create the optional feature groups and zone attributes. The error
 * labels unwind in strict reverse order of setup; the feature groups
 * need no explicit unwind here because each create_*() cleans up after
 * itself on failure.
 */
static int __init alienware_wmi_init(void)
{
        int ret;

        if (wmi_has_guid(LEGACY_CONTROL_GUID))
                interface = LEGACY;
        else if (wmi_has_guid(WMAX_CONTROL_GUID))
                interface = WMAX;
        else {
                pr_warn("alienware-wmi: No known WMI GUID found\n");
                return -ENODEV;
        }

        dmi_check_system(alienware_quirks);
        if (quirks == NULL)
                quirks = &quirk_unknown;

        ret = platform_driver_register(&platform_driver);
        if (ret)
                goto fail_platform_driver;
        platform_device = platform_device_alloc("alienware-wmi", PLATFORM_DEVID_NONE);
        if (!platform_device) {
                ret = -ENOMEM;
                goto fail_platform_device1;
        }
        ret = platform_device_add(platform_device);
        if (ret)
                goto fail_platform_device2;

        if (quirks->hdmi_mux > 0) {
                ret = create_hdmi(platform_device);
                if (ret)
                        goto fail_prep_hdmi;
        }

        if (quirks->amplifier > 0) {
                ret = create_amplifier(platform_device);
                if (ret)
                        goto fail_prep_amplifier;
        }

        if (quirks->deepslp > 0) {
                ret = create_deepsleep(platform_device);
                if (ret)
                        goto fail_prep_deepsleep;
        }

        ret = alienware_zone_init(platform_device);
        if (ret)
                goto fail_prep_zones;

        return 0;

fail_prep_zones:
        alienware_zone_exit(platform_device);
fail_prep_deepsleep:
fail_prep_amplifier:
fail_prep_hdmi:
        platform_device_del(platform_device);
fail_platform_device2:
        platform_device_put(platform_device);
fail_platform_device1:
        platform_driver_unregister(&platform_driver);
fail_platform_driver:
        return ret;
}

module_init(alienware_wmi_init);
/*
 * Module exit: undo init in reverse.
 * NOTE(review): the amplifier and deepsleep groups are not explicitly
 * removed here, only the hdmi group - presumably device unregistration
 * tears down the remaining sysfs entries; confirm this is intended.
 */
static void __exit alienware_wmi_exit(void)
{
        if (platform_device) {
                alienware_zone_exit(platform_device);
                remove_hdmi(platform_device);
                platform_device_unregister(platform_device);
                platform_driver_unregister(&platform_driver);
        }
}

module_exit(alienware_wmi_exit);
| linux-master | drivers/platform/x86/dell/alienware-wmi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for Dell laptop extras
*
* Copyright (c) Red Hat <[email protected]>
* Copyright (c) 2014 Gabriele Mazzotta <[email protected]>
* Copyright (c) 2014 Pali Rohár <[email protected]>
*
* Based on documentation in the libsmbios package:
* Copyright (C) 2005-2014 Dell Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/dmi.h>
#include <linux/io.h>
#include <linux/rfkill.h>
#include <linux/power_supply.h>
#include <linux/acpi.h>
#include <linux/mm.h>
#include <linux/i8042.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <acpi/video.h>
#include "dell-rbtn.h"
#include "dell-smbios.h"
#include "dell-wmi-privacy.h"
struct quirk_entry {
bool touchpad_led;
bool kbd_led_not_present;
bool kbd_led_levels_off_1;
bool kbd_missing_ac_tag;
bool needs_kbd_timeouts;
/*
* Ordered list of timeouts expressed in seconds.
* The list must end with -1
*/
int kbd_timeouts[];
};
static struct quirk_entry *quirks;
static struct quirk_entry quirk_dell_vostro_v130 = {
.touchpad_led = true,
};
static int __init dmi_matched(const struct dmi_system_id *dmi)
{
quirks = dmi->driver_data;
return 1;
}
/*
* These values come from Windows utility provided by Dell. If any other value
* is used then BIOS silently set timeout to 0 without any error message.
*/
static struct quirk_entry quirk_dell_xps13_9333 = {
.needs_kbd_timeouts = true,
.kbd_timeouts = { 0, 5, 15, 60, 5 * 60, 15 * 60, -1 },
};
static struct quirk_entry quirk_dell_xps13_9370 = {
.kbd_missing_ac_tag = true,
};
static struct quirk_entry quirk_dell_latitude_e6410 = {
.kbd_led_levels_off_1 = true,
};
static struct quirk_entry quirk_dell_inspiron_1012 = {
.kbd_led_not_present = true,
};
static struct quirk_entry quirk_dell_latitude_7520 = {
.kbd_missing_ac_tag = true,
};
static struct platform_driver platform_driver = {
.driver = {
.name = "dell-laptop",
}
};
static struct platform_device *platform_device;
static struct backlight_device *dell_backlight_device;
static struct rfkill *wifi_rfkill;
static struct rfkill *bluetooth_rfkill;
static struct rfkill *wwan_rfkill;
static bool force_rfkill;
static bool micmute_led_registered;
static bool mute_led_registered;
module_param(force_rfkill, bool, 0444);
MODULE_PARM_DESC(force_rfkill, "enable rfkill on non whitelisted models");
static const struct dmi_system_id dell_device_table[] __initconst = {
{
.ident = "Dell laptop",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_CHASSIS_TYPE, "8"),
},
},
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /*Laptop*/
},
},
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /*Notebook*/
},
},
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_CHASSIS_TYPE, "30"), /*Tablet*/
},
},
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /*Convertible*/
},
},
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_CHASSIS_TYPE, "32"), /*Detachable*/
},
},
{
.ident = "Dell Computer Corporation",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
DMI_MATCH(DMI_CHASSIS_TYPE, "8"),
},
},
{ }
};
MODULE_DEVICE_TABLE(dmi, dell_device_table);
static const struct dmi_system_id dell_quirks[] __initconst = {
{
.callback = dmi_matched,
.ident = "Dell Vostro V130",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V130"),
},
.driver_data = &quirk_dell_vostro_v130,
},
{
.callback = dmi_matched,
.ident = "Dell Vostro V131",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"),
},
.driver_data = &quirk_dell_vostro_v130,
},
{
.callback = dmi_matched,
.ident = "Dell Vostro 3350",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3350"),
},
.driver_data = &quirk_dell_vostro_v130,
},
{
.callback = dmi_matched,
.ident = "Dell Vostro 3555",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3555"),
},
.driver_data = &quirk_dell_vostro_v130,
},
{
.callback = dmi_matched,
.ident = "Dell Inspiron N311z",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron N311z"),
},
.driver_data = &quirk_dell_vostro_v130,
},
{
.callback = dmi_matched,
.ident = "Dell Inspiron M5110",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron M5110"),
},
.driver_data = &quirk_dell_vostro_v130,
},
{
.callback = dmi_matched,
.ident = "Dell Vostro 3360",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3360"),
},
.driver_data = &quirk_dell_vostro_v130,
},
{
.callback = dmi_matched,
.ident = "Dell Vostro 3460",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3460"),
},
.driver_data = &quirk_dell_vostro_v130,
},
{
.callback = dmi_matched,
.ident = "Dell Vostro 3560",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3560"),
},
.driver_data = &quirk_dell_vostro_v130,
},
{
.callback = dmi_matched,
.ident = "Dell Vostro 3450",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Dell System Vostro 3450"),
},
.driver_data = &quirk_dell_vostro_v130,
},
{
.callback = dmi_matched,
.ident = "Dell Inspiron 5420",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5420"),
},
.driver_data = &quirk_dell_vostro_v130,
},
{
.callback = dmi_matched,
.ident = "Dell Inspiron 5520",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5520"),
},
.driver_data = &quirk_dell_vostro_v130,
},
{
.callback = dmi_matched,
.ident = "Dell Inspiron 5720",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5720"),
},
.driver_data = &quirk_dell_vostro_v130,
},
{
.callback = dmi_matched,
.ident = "Dell Inspiron 7420",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7420"),
},
.driver_data = &quirk_dell_vostro_v130,
},
{
.callback = dmi_matched,
.ident = "Dell Inspiron 7520",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"),
},
.driver_data = &quirk_dell_vostro_v130,
},
{
.callback = dmi_matched,
.ident = "Dell Inspiron 7720",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7720"),
},
.driver_data = &quirk_dell_vostro_v130,
},
{
.callback = dmi_matched,
.ident = "Dell XPS13 9333",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "XPS13 9333"),
},
.driver_data = &quirk_dell_xps13_9333,
},
{
.callback = dmi_matched,
.ident = "Dell XPS 13 9370",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9370"),
},
.driver_data = &quirk_dell_xps13_9370,
},
{
.callback = dmi_matched,
.ident = "Dell Latitude E6410",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6410"),
},
.driver_data = &quirk_dell_latitude_e6410,
},
{
.callback = dmi_matched,
.ident = "Dell Inspiron 1012",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
},
.driver_data = &quirk_dell_inspiron_1012,
},
{
.callback = dmi_matched,
.ident = "Dell Inspiron 1018",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1018"),
},
.driver_data = &quirk_dell_inspiron_1012,
},
{
.callback = dmi_matched,
.ident = "Dell Latitude 7520",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 7520"),
},
.driver_data = &quirk_dell_latitude_7520,
},
{ }
};
/* Zero the SMBIOS calling buffer, then load the four input registers */
static void dell_fill_request(struct calling_interface_buffer *buffer,
                              u32 arg0, u32 arg1, u32 arg2, u32 arg3)
{
        memset(buffer, 0, sizeof(*buffer));
        buffer->input[0] = arg0;
        buffer->input[1] = arg1;
        buffer->input[2] = arg2;
        buffer->input[3] = arg3;
}
/*
 * Issue an SMBIOS call for the given class/select pair.
 * Returns the transport error if the call itself failed, otherwise the
 * BIOS status word translated into an errno by dell_smbios_error().
 */
static int dell_send_request(struct calling_interface_buffer *buffer,
                             u16 class, u16 select)
{
        int rc;

        buffer->cmd_class = class;
        buffer->cmd_select = select;

        rc = dell_smbios_call(buffer);
        return rc ? rc : dell_smbios_error(buffer->output[0]);
}
/*
* Derived from information in smbios-wireless-ctl:
*
* cbSelect 17, Value 11
*
* Return Wireless Info
* cbArg1, byte0 = 0x00
*
* cbRes1 Standard return codes (0, -1, -2)
* cbRes2 Info bit flags:
*
* 0 Hardware switch supported (1)
* 1 WiFi locator supported (1)
* 2 WLAN supported (1)
* 3 Bluetooth (BT) supported (1)
* 4 WWAN supported (1)
* 5 Wireless KBD supported (1)
 *     6 UWB supported (1)
* 7 WiGig supported (1)
* 8 WLAN installed (1)
* 9 BT installed (1)
* 10 WWAN installed (1)
 *    11 UWB installed (1)
* 12 WiGig installed (1)
* 13-15 Reserved (0)
* 16 Hardware (HW) switch is On (1)
* 17 WLAN disabled (1)
* 18 BT disabled (1)
* 19 WWAN disabled (1)
 *    20 UWB disabled (1)
* 21 WiGig disabled (1)
* 20-31 Reserved (0)
*
* cbRes3 NVRAM size in bytes
* cbRes4, byte 0 NVRAM format version number
*
*
* Set QuickSet Radio Disable Flag
* cbArg1, byte0 = 0x01
* cbArg1, byte1
* Radio ID value:
* 0 Radio Status
* 1 WLAN ID
* 2 BT ID
* 3 WWAN ID
* 4 UWB ID
* 5 WIGIG ID
* cbArg1, byte2 Flag bits:
* 0 QuickSet disables radio (1)
* 1-7 Reserved (0)
*
* cbRes1 Standard return codes (0, -1, -2)
* cbRes2 QuickSet (QS) radio disable bit map:
* 0 QS disables WLAN
* 1 QS disables BT
* 2 QS disables WWAN
* 3 QS disables UWB
* 4 QS disables WIGIG
* 5-31 Reserved (0)
*
* Wireless Switch Configuration
* cbArg1, byte0 = 0x02
*
* cbArg1, byte1
* Subcommand:
* 0 Get config
* 1 Set config
* 2 Set WiFi locator enable/disable
* cbArg1,byte2
* Switch settings (if byte 1==1):
 *     0 WLAN switch control (1)
 *     1 BT switch control (1)
 *     2 WWAN switch control (1)
 *     3 UWB switch control (1)
 *     4 WiGig switch control (1)
* 5-7 Reserved (0)
* cbArg1, byte2 Enable bits (if byte 1==2):
* 0 Enable WiFi locator (1)
*
* cbRes1 Standard return codes (0, -1, -2)
* cbRes2 QuickSet radio disable bit map:
 *     0 WLAN controlled by switch (1)
 *     1 BT controlled by switch (1)
 *     2 WWAN controlled by switch (1)
 *     3 UWB controlled by switch (1)
 *     4 WiGig controlled by switch (1)
 *     5-6 Reserved (0)
 *     7 Wireless switch config locked (1)
* 8 WiFi locator enabled (1)
* 9-14 Reserved (0)
* 15 WiFi locator setting locked (1)
* 16-31 Reserved (0)
*
* Read Local Config Data (LCD)
* cbArg1, byte0 = 0x10
* cbArg1, byte1 NVRAM index low byte
* cbArg1, byte2 NVRAM index high byte
* cbRes1 Standard return codes (0, -1, -2)
* cbRes2 4 bytes read from LCD[index]
* cbRes3 4 bytes read from LCD[index+4]
* cbRes4 4 bytes read from LCD[index+8]
*
* Write Local Config Data (LCD)
* cbArg1, byte0 = 0x11
* cbArg1, byte1 NVRAM index low byte
* cbArg1, byte2 NVRAM index high byte
 * cbArg2 4 bytes to write at LCD[index]
 * cbArg3 4 bytes to write at LCD[index+4]
 * cbArg4 4 bytes to write at LCD[index+8]
* cbRes1 Standard return codes (0, -1, -2)
*
* Populate Local Config Data from NVRAM
* cbArg1, byte0 = 0x12
* cbRes1 Standard return codes (0, -1, -2)
*
* Commit Local Config Data to NVRAM
* cbArg1, byte0 = 0x13
* cbRes1 Standard return codes (0, -1, -2)
*/
/*
 * dell_rfkill_set - rfkill set_block callback for the Dell radios
 * @data: 1-based radio ID (1 = WLAN, 2 = BT, 3 = WWAN), smuggled as a pointer
 * @blocked: true to soft-block the radio
 *
 * Reads the wireless status and the hardware-switch configuration via
 * SMBIOS, then writes the new soft-block state.  If the hardware switch
 * controls this radio and the switch is currently off, the radio is forced
 * to disabled regardless of @blocked.
 *
 * Returns 0 on success or a negative error code from dell_send_request().
 */
static int dell_rfkill_set(void *data, bool blocked)
{
	int disable = blocked ? 1 : 0;
	unsigned long radio = (unsigned long)data;
	int hwswitch_bit = (unsigned long)data - 1;
	struct calling_interface_buffer buffer;
	int hwswitch;
	int status;
	int ret;

	/* cbArg1 byte0 = 0: get wireless device status */
	dell_fill_request(&buffer, 0, 0, 0, 0);
	ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
	if (ret)
		return ret;
	status = buffer.output[1];

	/* cbArg1 byte0 = 0x2: get wireless switch configuration */
	dell_fill_request(&buffer, 0x2, 0, 0, 0);
	ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
	if (ret)
		return ret;
	hwswitch = buffer.output[1];

	/*
	 * If the hardware switch controls this radio, and the hardware
	 * switch is disabled, always disable the radio.  (ret is known to
	 * be 0 at this point, so the former "ret == 0 &&" test was dead.)
	 */
	if ((hwswitch & BIT(hwswitch_bit)) &&
	    (status & BIT(0)) && !(status & BIT(16)))
		disable = 1;

	/* cbArg1 byte0 = 1: set radio soft-block state */
	dell_fill_request(&buffer, 1 | (radio<<8) | (disable << 16), 0, 0, 0);
	ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);

	return ret;
}
/*
 * Keep the rfkill soft-block state and the BIOS state in sync for one
 * radio.  With a hardware switch present (status bit 0) the kernel state
 * is authoritative and pushed to the BIOS; without one, the BIOS
 * per-radio block bit (status bits 17+) is copied into the rfkill core.
 */
static void dell_rfkill_update_sw_state(struct rfkill *rfkill, int radio,
					int status)
{
	struct calling_interface_buffer buffer;

	if (!(status & BIT(0))) {
		/* No hw-switch, sync BIOS state to sw_state */
		rfkill_set_sw_state(rfkill, !!(status & BIT(radio + 16)));
		return;
	}

	/* Has hw-switch, sync sw_state to BIOS */
	dell_fill_request(&buffer,
			  1 | (radio << 8) | (rfkill_blocked(rfkill) << 16),
			  0, 0, 0);
	dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
}
/*
 * Propagate the global hardware kill-switch position (status bit 16) to
 * one rfkill device, but only when the hw-switch bitmap says the switch
 * actually controls this radio.
 */
static void dell_rfkill_update_hw_state(struct rfkill *rfkill, int radio,
					int status, int hwswitch)
{
	if (!(hwswitch & BIT(radio - 1)))
		return;

	rfkill_set_hw_state(rfkill, !(status & BIT(16)));
}
/*
 * rfkill .query callback: refresh the hardware block state of one radio
 * from SMBIOS.  Nothing to do when no hardware switch is present.
 */
static void dell_rfkill_query(struct rfkill *rfkill, void *data)
{
	int radio = ((unsigned long)data & 0xF);
	struct calling_interface_buffer buffer;
	int hwswitch;
	int status;
	int ret;

	/* Wireless status; bail out on error or when no hw switch exists. */
	dell_fill_request(&buffer, 0, 0, 0, 0);
	ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
	status = buffer.output[1];
	if (ret != 0 || !(status & BIT(0)))
		return;

	/* Hardware switch configuration bitmap. */
	dell_fill_request(&buffer, 0x2, 0, 0, 0);
	ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
	hwswitch = buffer.output[1];
	if (ret != 0)
		return;

	dell_rfkill_update_hw_state(rfkill, radio, status, hwswitch);
}
/* rfkill operations shared by the WLAN, Bluetooth and WWAN devices. */
static const struct rfkill_ops dell_rfkill_ops = {
	.set_block = dell_rfkill_set,
	.query = dell_rfkill_query,
};

/* debugfs directory holding the rfkill status file below. */
static struct dentry *dell_laptop_dir;
/*
 * debugfs seq_file show: dump the raw SMBIOS wireless status word and the
 * hardware-switch configuration word, followed by a human-readable
 * decoding of the individual bits.  Returns a negative error if either
 * SMBIOS call fails.
 */
static int dell_debugfs_show(struct seq_file *s, void *data)
{
	struct calling_interface_buffer buffer;
	int hwswitch_state;
	int hwswitch_ret;
	int status;
	int ret;

	/* Wireless status (cbArg1 byte0 = 0). */
	dell_fill_request(&buffer, 0, 0, 0, 0);
	ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
	if (ret)
		return ret;
	status = buffer.output[1];

	/* Wireless switch configuration (cbArg1 byte0 = 0x2). */
	dell_fill_request(&buffer, 0x2, 0, 0, 0);
	hwswitch_ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
	if (hwswitch_ret)
		return hwswitch_ret;
	hwswitch_state = buffer.output[1];

	/* ret is 0 here; printed for completeness of the raw dump. */
	seq_printf(s, "return:\t%d\n", ret);
	seq_printf(s, "status:\t0x%X\n", status);
	seq_printf(s, "Bit 0 : Hardware switch supported:   %lu\n",
		   status & BIT(0));
	seq_printf(s, "Bit 1 : Wifi locator supported:      %lu\n",
		   (status & BIT(1)) >> 1);
	seq_printf(s, "Bit 2 : Wifi is supported:           %lu\n",
		   (status & BIT(2)) >> 2);
	seq_printf(s, "Bit 3 : Bluetooth is supported:      %lu\n",
		   (status & BIT(3)) >> 3);
	seq_printf(s, "Bit 4 : WWAN is supported:           %lu\n",
		   (status & BIT(4)) >> 4);
	seq_printf(s, "Bit 5 : Wireless keyboard supported: %lu\n",
		   (status & BIT(5)) >> 5);
	seq_printf(s, "Bit 6 : UWB supported:               %lu\n",
		   (status & BIT(6)) >> 6);
	seq_printf(s, "Bit 7 : WiGig supported:             %lu\n",
		   (status & BIT(7)) >> 7);
	seq_printf(s, "Bit 8 : Wifi is installed:           %lu\n",
		   (status & BIT(8)) >> 8);
	seq_printf(s, "Bit 9 : Bluetooth is installed:      %lu\n",
		   (status & BIT(9)) >> 9);
	seq_printf(s, "Bit 10: WWAN is installed:           %lu\n",
		   (status & BIT(10)) >> 10);
	seq_printf(s, "Bit 11: UWB installed:               %lu\n",
		   (status & BIT(11)) >> 11);
	seq_printf(s, "Bit 12: WiGig installed:             %lu\n",
		   (status & BIT(12)) >> 12);
	seq_printf(s, "Bit 16: Hardware switch is on:       %lu\n",
		   (status & BIT(16)) >> 16);
	seq_printf(s, "Bit 17: Wifi is blocked:             %lu\n",
		   (status & BIT(17)) >> 17);
	seq_printf(s, "Bit 18: Bluetooth is blocked:        %lu\n",
		   (status & BIT(18)) >> 18);
	seq_printf(s, "Bit 19: WWAN is blocked:             %lu\n",
		   (status & BIT(19)) >> 19);
	seq_printf(s, "Bit 20: UWB is blocked:              %lu\n",
		   (status & BIT(20)) >> 20);
	seq_printf(s, "Bit 21: WiGig is blocked:            %lu\n",
		   (status & BIT(21)) >> 21);

	seq_printf(s, "\nhwswitch_return:\t%d\n", hwswitch_ret);
	seq_printf(s, "hwswitch_state:\t0x%X\n", hwswitch_state);
	seq_printf(s, "Bit 0 : Wifi controlled by switch:      %lu\n",
		   hwswitch_state & BIT(0));
	seq_printf(s, "Bit 1 : Bluetooth controlled by switch: %lu\n",
		   (hwswitch_state & BIT(1)) >> 1);
	seq_printf(s, "Bit 2 : WWAN controlled by switch:      %lu\n",
		   (hwswitch_state & BIT(2)) >> 2);
	seq_printf(s, "Bit 3 : UWB controlled by switch:       %lu\n",
		   (hwswitch_state & BIT(3)) >> 3);
	seq_printf(s, "Bit 4 : WiGig controlled by switch:     %lu\n",
		   (hwswitch_state & BIT(4)) >> 4);
	seq_printf(s, "Bit 7 : Wireless switch config locked:  %lu\n",
		   (hwswitch_state & BIT(7)) >> 7);
	seq_printf(s, "Bit 8 : Wifi locator enabled:           %lu\n",
		   (hwswitch_state & BIT(8)) >> 8);
	seq_printf(s, "Bit 15: Wifi locator setting locked:    %lu\n",
		   (hwswitch_state & BIT(15)) >> 15);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dell_debugfs);
/*
 * Workqueue handler: re-read the SMBIOS wireless status and hw-switch
 * configuration and push the result into every registered rfkill device.
 * Scheduled from the i8042 filter and from the dell-rbtn notifier.
 */
static void dell_update_rfkill(struct work_struct *ignored)
{
	struct calling_interface_buffer buffer;
	int hwswitch = 0;
	int status;
	int ret;

	dell_fill_request(&buffer, 0, 0, 0, 0);
	ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
	status = buffer.output[1];

	if (ret != 0)
		return;

	dell_fill_request(&buffer, 0x2, 0, 0, 0);
	ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);

	/* Only trust the hw-switch bitmap when a hw switch is present. */
	if (ret == 0 && (status & BIT(0)))
		hwswitch = buffer.output[1];

	if (wifi_rfkill) {
		dell_rfkill_update_hw_state(wifi_rfkill, 1, status, hwswitch);
		dell_rfkill_update_sw_state(wifi_rfkill, 1, status);
	}

	if (bluetooth_rfkill) {
		dell_rfkill_update_hw_state(bluetooth_rfkill, 2, status,
					    hwswitch);
		dell_rfkill_update_sw_state(bluetooth_rfkill, 2, status);
	}

	if (wwan_rfkill) {
		dell_rfkill_update_hw_state(wwan_rfkill, 3, status, hwswitch);
		dell_rfkill_update_sw_state(wwan_rfkill, 3, status);
	}
}
/* Delayed so that a burst of key events coalesces into one refresh. */
static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill);
/*
 * i8042 filter: watch the keyboard byte stream for the extended scancode
 * 0xe0 0x08 (the wireless hotkey) and schedule an rfkill state refresh.
 * Always returns false so no byte is ever swallowed.
 */
static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str,
				     struct serio *port)
{
	static bool extended;

	/* Ignore traffic from the AUX (mouse) port. */
	if (str & I8042_STR_AUXDATA)
		return false;

	if (unlikely(data == 0xe0)) {
		/* Prefix byte of an extended scancode. */
		extended = true;
	} else if (unlikely(extended)) {
		extended = false;
		if (data == 0x8)
			schedule_delayed_work(&dell_rfkill_work,
					      round_jiffies_relative(HZ / 4));
	}

	return false;
}
/*
 * Hooks into the optional dell-rbtn driver, resolved at runtime via
 * symbol_request() in dell_setup_rfkill().  NULL when dell-rbtn is not
 * available and the i8042 filter fallback is used instead.
 */
static int (*dell_rbtn_notifier_register_func)(struct notifier_block *);
static int (*dell_rbtn_notifier_unregister_func)(struct notifier_block *);

/* dell-rbtn slider event: re-sync all rfkill devices from a workqueue. */
static int dell_laptop_rbtn_notifier_call(struct notifier_block *nb,
					  unsigned long action, void *data)
{
	schedule_delayed_work(&dell_rfkill_work, 0);
	return NOTIFY_OK;
}

static struct notifier_block dell_laptop_rbtn_notifier = {
	.notifier_call = dell_laptop_rbtn_notifier_call,
};
/*
 * Probe-time rfkill setup: register an rfkill device for every radio the
 * BIOS reports as both supported and installed, then hook up event
 * delivery, preferring the dell-rbtn ACPI driver and falling back to the
 * i8042 key filter.  Returns 0 on success (including the "rfkill not
 * applicable on this model" cases) or a negative error code, unwinding
 * any devices already registered.
 */
static int __init dell_setup_rfkill(void)
{
	struct calling_interface_buffer buffer;
	int status, ret, whitelisted;
	const char *product;

	/*
	 * rfkill support causes trouble on various models, mostly Inspirons.
	 * So we whitelist certain series, and don't support rfkill on others.
	 */
	whitelisted = 0;
	product = dmi_get_system_info(DMI_PRODUCT_NAME);
	if (product && (strncmp(product, "Latitude", 8) == 0 ||
			strncmp(product, "Precision", 9) == 0))
		whitelisted = 1;
	if (!force_rfkill && !whitelisted)
		return 0;

	dell_fill_request(&buffer, 0, 0, 0, 0);
	ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
	status = buffer.output[1];

	/* dell wireless info smbios call is not supported */
	if (ret != 0)
		return 0;

	/* rfkill is only tested on laptops with a hwswitch */
	if (!(status & BIT(0)) && !force_rfkill)
		return 0;

	/* WLAN: supported (bit 2) and installed (bit 8). */
	if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) {
		wifi_rfkill = rfkill_alloc("dell-wifi", &platform_device->dev,
					   RFKILL_TYPE_WLAN,
					   &dell_rfkill_ops, (void *) 1);
		if (!wifi_rfkill) {
			ret = -ENOMEM;
			goto err_wifi;
		}
		ret = rfkill_register(wifi_rfkill);
		if (ret)
			goto err_wifi;
	}

	/* Bluetooth: supported (bit 3) and installed (bit 9). */
	if ((status & (1<<3|1<<9)) == (1<<3|1<<9)) {
		bluetooth_rfkill = rfkill_alloc("dell-bluetooth",
						&platform_device->dev,
						RFKILL_TYPE_BLUETOOTH,
						&dell_rfkill_ops, (void *) 2);
		if (!bluetooth_rfkill) {
			ret = -ENOMEM;
			goto err_bluetooth;
		}
		ret = rfkill_register(bluetooth_rfkill);
		if (ret)
			goto err_bluetooth;
	}

	/* WWAN: supported (bit 4) and installed (bit 10). */
	if ((status & (1<<4|1<<10)) == (1<<4|1<<10)) {
		wwan_rfkill = rfkill_alloc("dell-wwan",
					   &platform_device->dev,
					   RFKILL_TYPE_WWAN,
					   &dell_rfkill_ops, (void *) 3);
		if (!wwan_rfkill) {
			ret = -ENOMEM;
			goto err_wwan;
		}
		ret = rfkill_register(wwan_rfkill);
		if (ret)
			goto err_wwan;
	}

	/*
	 * Dell Airplane Mode Switch driver (dell-rbtn) supports ACPI devices
	 * which can receive events from HW slider switch.
	 *
	 * Dell SMBIOS on whitelisted models supports controlling radio devices
	 * but does not support receiving HW button switch events. We can use
	 * i8042 filter hook function to receive keyboard data and handle
	 * keycode for HW button.
	 *
	 * So if it is possible we will use Dell Airplane Mode Switch ACPI
	 * driver for receiving HW events and Dell SMBIOS for setting rfkill
	 * states. If ACPI driver or device is not available we will fallback to
	 * i8042 filter hook function.
	 *
	 * To prevent duplicate rfkill devices which control and do same thing,
	 * dell-rbtn driver will automatically remove its own rfkill devices
	 * once function dell_rbtn_notifier_register() is called.
	 */
	dell_rbtn_notifier_register_func =
		symbol_request(dell_rbtn_notifier_register);
	if (dell_rbtn_notifier_register_func) {
		dell_rbtn_notifier_unregister_func =
			symbol_request(dell_rbtn_notifier_unregister);
		if (!dell_rbtn_notifier_unregister_func) {
			/* Need both symbols or neither; drop register too. */
			symbol_put(dell_rbtn_notifier_register);
			dell_rbtn_notifier_register_func = NULL;
		}
	}

	if (dell_rbtn_notifier_register_func) {
		ret = dell_rbtn_notifier_register_func(
				&dell_laptop_rbtn_notifier);
		symbol_put(dell_rbtn_notifier_register);
		dell_rbtn_notifier_register_func = NULL;
		if (ret != 0) {
			symbol_put(dell_rbtn_notifier_unregister);
			dell_rbtn_notifier_unregister_func = NULL;
		}
	} else {
		pr_info("Symbols from dell-rbtn acpi driver are not available\n");
		ret = -ENODEV;
	}

	if (ret == 0) {
		pr_info("Using dell-rbtn acpi driver for receiving events\n");
	} else if (ret != -ENODEV) {
		pr_warn("Unable to register dell rbtn notifier\n");
		goto err_filter;
	} else {
		/* Fallback: watch the i8042 stream for the hotkey. */
		ret = i8042_install_filter(dell_laptop_i8042_filter);
		if (ret) {
			pr_warn("Unable to install key filter\n");
			goto err_filter;
		}
		pr_info("Using i8042 filter function for receiving events\n");
	}

	return 0;

	/* Unwind ladder: rfkill_destroy(NULL) is a safe no-op. */
err_filter:
	if (wwan_rfkill)
		rfkill_unregister(wwan_rfkill);
err_wwan:
	rfkill_destroy(wwan_rfkill);
	if (bluetooth_rfkill)
		rfkill_unregister(bluetooth_rfkill);
err_bluetooth:
	rfkill_destroy(bluetooth_rfkill);
	if (wifi_rfkill)
		rfkill_unregister(wifi_rfkill);
err_wifi:
	rfkill_destroy(wifi_rfkill);

	return ret;
}
/*
 * Tear down rfkill support: detach the event source (dell-rbtn notifier
 * or the i8042 filter) first so no new work can be queued, then cancel
 * pending work, then unregister and destroy each rfkill device.
 */
static void dell_cleanup_rfkill(void)
{
	if (dell_rbtn_notifier_unregister_func) {
		dell_rbtn_notifier_unregister_func(&dell_laptop_rbtn_notifier);
		symbol_put(dell_rbtn_notifier_unregister);
		dell_rbtn_notifier_unregister_func = NULL;
	} else {
		i8042_remove_filter(dell_laptop_i8042_filter);
	}
	cancel_delayed_work_sync(&dell_rfkill_work);
	if (wifi_rfkill) {
		rfkill_unregister(wifi_rfkill);
		rfkill_destroy(wifi_rfkill);
	}
	if (bluetooth_rfkill) {
		rfkill_unregister(bluetooth_rfkill);
		rfkill_destroy(bluetooth_rfkill);
	}
	if (wwan_rfkill) {
		rfkill_unregister(wwan_rfkill);
		rfkill_destroy(wwan_rfkill);
	}
}
/*
 * Backlight update_status callback: write the requested brightness to
 * the SMBIOS brightness token, using the AC token select when running
 * on external power and the battery select otherwise.
 */
static int dell_send_intensity(struct backlight_device *bd)
{
	struct calling_interface_buffer buffer;
	struct calling_interface_token *token;
	int select;

	token = dell_smbios_find_token(BRIGHTNESS_TOKEN);
	if (!token)
		return -ENODEV;

	select = power_supply_is_system_supplied() > 0 ? SELECT_TOKEN_AC
						       : SELECT_TOKEN_BAT;
	dell_fill_request(&buffer, token->location, bd->props.brightness,
			  0, 0);
	return dell_send_request(&buffer, CLASS_TOKEN_WRITE, select);
}
/*
 * Backlight get_brightness callback: read the current brightness from
 * the SMBIOS brightness token (AC or battery variant depending on the
 * current power source).  Returns the brightness or a negative error.
 */
static int dell_get_intensity(struct backlight_device *bd)
{
	struct calling_interface_buffer buffer;
	struct calling_interface_token *token;
	int select;
	int ret;

	token = dell_smbios_find_token(BRIGHTNESS_TOKEN);
	if (!token)
		return -ENODEV;

	select = power_supply_is_system_supplied() > 0 ? SELECT_TOKEN_AC
						       : SELECT_TOKEN_BAT;
	dell_fill_request(&buffer, token->location, 0, 0, 0);
	ret = dell_send_request(&buffer, CLASS_TOKEN_READ, select);
	if (ret == 0)
		ret = buffer.output[1];

	return ret;
}
/* Backlight class operations backed by the SMBIOS brightness token. */
static const struct backlight_ops dell_ops = {
	.get_brightness = dell_get_intensity,
	.update_status  = dell_send_intensity,
};
/*
 * The touchpad LED is driven through i8042 command 0x97; the 1 << 12 in
 * the command word encodes one parameter byte to send (i8042_command()
 * convention).  Data byte 1 turns the LED on, 2 turns it off.
 */
static void touchpad_led_send(char data)
{
	i8042_command(&data, 0x97 | 1 << 12);
}

/* Turn the dedicated touchpad LED on. */
static void touchpad_led_on(void)
{
	touchpad_led_send(1);
}

/* Turn the dedicated touchpad LED off. */
static void touchpad_led_off(void)
{
	touchpad_led_send(2);
}
static void touchpad_led_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
if (value > 0)
touchpad_led_on();
else
touchpad_led_off();
}
/* LED class device for the touchpad indicator; restored across suspend. */
static struct led_classdev touchpad_led = {
	.name = "dell-laptop::touchpad",
	.brightness_set = touchpad_led_set,
	.flags = LED_CORE_SUSPENDRESUME,
};

/* Register the touchpad LED under @dev; returns 0 or a negative errno. */
static int __init touchpad_led_init(struct device *dev)
{
	return led_classdev_register(dev, &touchpad_led);
}

/* Unregister the touchpad LED on module teardown. */
static void touchpad_led_exit(void)
{
	led_classdev_unregister(&touchpad_led);
}
/*
* Derived from information in smbios-keyboard-ctl:
*
* cbClass 4
* cbSelect 11
* Keyboard illumination
* cbArg1 determines the function to be performed
*
* cbArg1 0x0 = Get Feature Information
* cbRES1 Standard return codes (0, -1, -2)
* cbRES2, word0 Bitmap of user-selectable modes
* bit 0 Always off (All systems)
* bit 1 Always on (Travis ATG, Siberia)
* bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
* bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
* bit 4 Auto: Input-activity-based On; input-activity based Off
* bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
* bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
* bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
* bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
* bits 9-15 Reserved for future use
* cbRES2, byte2 Reserved for future use
* cbRES2, byte3 Keyboard illumination type
* 0 Reserved
* 1 Tasklight
* 2 Backlight
* 3-255 Reserved for future use
* cbRES3, byte0 Supported auto keyboard illumination trigger bitmap.
* bit 0 Any keystroke
* bit 1 Touchpad activity
* bit 2 Pointing stick
* bit 3 Any mouse
* bits 4-7 Reserved for future use
* cbRES3, byte1 Supported timeout unit bitmap
* bit 0 Seconds
* bit 1 Minutes
* bit 2 Hours
* bit 3 Days
* bits 4-7 Reserved for future use
* cbRES3, byte2 Number of keyboard light brightness levels
* cbRES4, byte0 Maximum acceptable seconds value (0 if seconds not supported).
* cbRES4, byte1 Maximum acceptable minutes value (0 if minutes not supported).
* cbRES4, byte2 Maximum acceptable hours value (0 if hours not supported).
* cbRES4, byte3 Maximum acceptable days value (0 if days not supported)
*
* cbArg1 0x1 = Get Current State
* cbRES1 Standard return codes (0, -1, -2)
* cbRES2, word0 Bitmap of current mode state
* bit 0 Always off (All systems)
* bit 1 Always on (Travis ATG, Siberia)
* bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
* bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
* bit 4 Auto: Input-activity-based On; input-activity based Off
* bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
* bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
* bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
* bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
* bits 9-15 Reserved for future use
* Note: Only One bit can be set
* cbRES2, byte2 Currently active auto keyboard illumination triggers.
* bit 0 Any keystroke
* bit 1 Touchpad activity
* bit 2 Pointing stick
* bit 3 Any mouse
* bits 4-7 Reserved for future use
* cbRES2, byte3 Current Timeout on battery
* bits 7:6 Timeout units indicator:
* 00b Seconds
* 01b Minutes
* 10b Hours
* 11b Days
* bits 5:0 Timeout value (0-63) in sec/min/hr/day
* NOTE: A value of 0 means always on (no timeout) if any bits of RES3 byte
* are set upon return from the [Get feature information] call.
* cbRES3, byte0 Current setting of ALS value that turns the light on or off.
* cbRES3, byte1 Current ALS reading
* cbRES3, byte2 Current keyboard light level.
* cbRES3, byte3 Current timeout on AC Power
* bits 7:6 Timeout units indicator:
* 00b Seconds
* 01b Minutes
* 10b Hours
* 11b Days
* Bits 5:0 Timeout value (0-63) in sec/min/hr/day
* NOTE: A value of 0 means always on (no timeout) if any bits of RES3 byte2
 *     are set upon return from the [Get Feature information] call.
*
* cbArg1 0x2 = Set New State
* cbRES1 Standard return codes (0, -1, -2)
* cbArg2, word0 Bitmap of current mode state
* bit 0 Always off (All systems)
* bit 1 Always on (Travis ATG, Siberia)
* bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
* bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
* bit 4 Auto: Input-activity-based On; input-activity based Off
* bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
* bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
* bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
* bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
* bits 9-15 Reserved for future use
* Note: Only One bit can be set
* cbArg2, byte2 Desired auto keyboard illumination triggers. Must remain inactive to allow
* keyboard to turn off automatically.
* bit 0 Any keystroke
* bit 1 Touchpad activity
* bit 2 Pointing stick
* bit 3 Any mouse
* bits 4-7 Reserved for future use
* cbArg2, byte3 Desired Timeout on battery
* bits 7:6 Timeout units indicator:
* 00b Seconds
* 01b Minutes
* 10b Hours
* 11b Days
* bits 5:0 Timeout value (0-63) in sec/min/hr/day
* cbArg3, byte0 Desired setting of ALS value that turns the light on or off.
* cbArg3, byte2 Desired keyboard light level.
* cbArg3, byte3 Desired Timeout on AC power
* bits 7:6 Timeout units indicator:
* 00b Seconds
* 01b Minutes
* 10b Hours
* 11b Days
* bits 5:0 Timeout value (0-63) in sec/min/hr/day
*/
/* Units for the 6-bit keyboard-backlight timeout (bits 7:6 select one). */
enum kbd_timeout_unit {
	KBD_TIMEOUT_SECONDS = 0,
	KBD_TIMEOUT_MINUTES,
	KBD_TIMEOUT_HOURS,
	KBD_TIMEOUT_DAYS,
};

/* Bit positions in the SMBIOS keyboard-illumination mode bitmap. */
enum kbd_mode_bit {
	KBD_MODE_BIT_OFF = 0,
	KBD_MODE_BIT_ON,
	KBD_MODE_BIT_ALS,
	KBD_MODE_BIT_TRIGGER_ALS,
	KBD_MODE_BIT_TRIGGER,
	KBD_MODE_BIT_TRIGGER_25,
	KBD_MODE_BIT_TRIGGER_50,
	KBD_MODE_BIT_TRIGGER_75,
	KBD_MODE_BIT_TRIGGER_100,
};

/* True if @bit is one of the ambient-light-sensor based modes. */
#define kbd_is_als_mode_bit(bit) \
	((bit) == KBD_MODE_BIT_ALS || (bit) == KBD_MODE_BIT_TRIGGER_ALS)
/* True if @bit is any input-activity triggered mode. */
#define kbd_is_trigger_mode_bit(bit) \
	((bit) >= KBD_MODE_BIT_TRIGGER_ALS && (bit) <= KBD_MODE_BIT_TRIGGER_100)
/* True if @bit is a triggered mode carrying a fixed illumination level. */
#define kbd_is_level_mode_bit(bit) \
	((bit) >= KBD_MODE_BIT_TRIGGER_25 && (bit) <= KBD_MODE_BIT_TRIGGER_100)
/* Feature data from the Get Feature Information SMBIOS call (cbArg1=0). */
struct kbd_info {
	u16 modes;	/* bitmap of supported modes (enum kbd_mode_bit) */
	u8 type;	/* illumination type: 1 = tasklight, 2 = backlight */
	u8 triggers;	/* supported auto-illumination trigger bitmap */
	u8 levels;	/* number of brightness levels; 0 = not supported */
	u8 seconds;	/* max timeout per unit; 0 = unit not supported */
	u8 minutes;
	u8 hours;
	u8 days;
};

/* Backlight state as exchanged via the Get/Set Current State calls. */
struct kbd_state {
	u8 mode_bit;		/* active mode (enum kbd_mode_bit index) */
	u8 triggers;		/* active trigger bitmap */
	u8 timeout_value;	/* battery timeout, 6-bit value */
	u8 timeout_unit;	/* battery timeout unit (kbd_timeout_unit) */
	u8 timeout_value_ac;	/* AC timeout, 6-bit value */
	u8 timeout_unit_ac;	/* AC timeout unit */
	u8 als_setting;		/* ALS threshold that toggles the light */
	u8 als_value;		/* current ALS reading */
	u8 level;		/* current brightness level */
};

/* SMBIOS tokens for the token-based (third) backlight control method. */
static const int kbd_tokens[] = {
	KBD_LED_OFF_TOKEN,
	KBD_LED_AUTO_25_TOKEN,
	KBD_LED_AUTO_50_TOKEN,
	KBD_LED_AUTO_75_TOKEN,
	KBD_LED_AUTO_100_TOKEN,
	KBD_LED_ON_TOKEN,
};

/* Bitmap of kbd_tokens[] entries actually present on this machine. */
static u16 kbd_token_bits;

static struct kbd_info kbd_info;	/* cached feature information */
static bool kbd_als_supported;		/* an ALS-based mode is available */
static bool kbd_triggers_supported;	/* an activity-triggered mode exists */
static bool kbd_timeout_ac_supported;	/* separate AC timeout supported */

/* Mode bits usable as brightness levels; [0] holds the "off" fallback. */
static u8 kbd_mode_levels[16];
static int kbd_mode_levels_count;

static u8 kbd_previous_level;		/* last non-zero level, for restore */
static u8 kbd_previous_mode_bit;	/* last non-off mode, for restore */
static bool kbd_led_present;		/* a usable keyboard backlight found */
static DEFINE_MUTEX(kbd_led_mutex);	/* serializes read/modify/write */
static enum led_brightness kbd_led_level;	/* cached LED brightness */
/*
* NOTE: there are three ways to set the keyboard backlight level.
* First, via kbd_state.mode_bit (assigning KBD_MODE_BIT_TRIGGER_* value).
* Second, via kbd_state.level (assigning numerical value <= kbd_info.levels).
* Third, via SMBIOS tokens (KBD_LED_* in kbd_tokens)
*
* There are laptops which support only one of these methods. If we want to
* support as many machines as possible we need to implement all three methods.
* The first two methods use the kbd_state structure. The third uses SMBIOS
* tokens. If kbd_info.levels == 0, the machine does not support setting the
* keyboard backlight level via kbd_state.level.
*/
/*
 * Issue the Get Feature Information call (cbArg1 byte0 = 0) and unpack
 * the result into @info.  Per-unit maximum timeout values are only read
 * for units the BIOS reports as supported; the rest stay zero.
 * Returns 0 on success or the SMBIOS error code.
 */
static int kbd_get_info(struct kbd_info *info)
{
	struct calling_interface_buffer buffer;
	u8 units;
	int ret;

	dell_fill_request(&buffer, 0, 0, 0, 0);
	ret = dell_send_request(&buffer,
				CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
	if (ret)
		return ret;

	info->modes = buffer.output[1] & 0xFFFF;
	info->type = (buffer.output[1] >> 24) & 0xFF;
	info->triggers = buffer.output[2] & 0xFF;
	units = (buffer.output[2] >> 8) & 0xFF;
	info->levels = (buffer.output[2] >> 16) & 0xFF;

	/* Some BIOSes over-report the level count by one (quirk). */
	if (quirks && quirks->kbd_led_levels_off_1 && info->levels)
		info->levels--;

	if (units & BIT(0))
		info->seconds = (buffer.output[3] >> 0) & 0xFF;
	if (units & BIT(1))
		info->minutes = (buffer.output[3] >> 8) & 0xFF;
	if (units & BIT(2))
		info->hours = (buffer.output[3] >> 16) & 0xFF;
	if (units & BIT(3))
		info->days = (buffer.output[3] >> 24) & 0xFF;

	return ret;
}
/*
 * Highest brightness level available: the hardware level count when the
 * BIOS reports one, otherwise the number of level-style mode bits (the
 * reserved entry 0 excluded), otherwise 0.
 */
static unsigned int kbd_get_max_level(void)
{
	if (kbd_info.levels != 0)
		return kbd_info.levels;

	return kbd_mode_levels_count > 0 ? kbd_mode_levels_count - 1 : 0;
}
/*
 * Map @state to a numeric brightness level.  With hardware levels the
 * state carries the level directly; with mode-bit levels the index of
 * the matching entry in kbd_mode_levels[] is returned (0 if no match).
 * Returns -EINVAL when neither method is available.
 */
static int kbd_get_level(struct kbd_state *state)
{
	int idx;

	if (kbd_info.levels != 0)
		return state->level;

	if (kbd_mode_levels_count <= 0)
		return -EINVAL;

	for (idx = 0; idx < kbd_mode_levels_count; ++idx)
		if (kbd_mode_levels[idx] == state->mode_bit)
			return idx;

	return 0;
}
/*
 * Set the brightness level in @state (not yet written to the BIOS).
 * With hardware levels, also toggles mode OFF/previous-mode so level 0
 * really switches the light off; with mode-bit levels, selects the
 * corresponding mode bit.  Remembers the last non-zero level so a later
 * "on" can restore it.  Returns 0 or -EINVAL for an unusable level.
 */
static int kbd_set_level(struct kbd_state *state, u8 level)
{
	if (kbd_info.levels != 0) {
		if (level != 0)
			kbd_previous_level = level;
		if (state->level == level)
			return 0;

		state->level = level;
		/* Leaving/entering level 0 swaps mode with the saved one. */
		if (level != 0 && state->mode_bit == KBD_MODE_BIT_OFF)
			state->mode_bit = kbd_previous_mode_bit;
		else if (level == 0 && state->mode_bit != KBD_MODE_BIT_OFF) {
			kbd_previous_mode_bit = state->mode_bit;
			state->mode_bit = KBD_MODE_BIT_OFF;
		}

		return 0;
	}

	if (kbd_mode_levels_count > 0 && level < kbd_mode_levels_count) {
		if (level != 0)
			kbd_previous_level = level;
		state->mode_bit = kbd_mode_levels[level];
		return 0;
	}

	return -EINVAL;
}
/*
 * Issue the Get Current State call (cbArg1 byte0 = 0x1) and unpack the
 * two result words into @state.  mode_bit is converted from the one-hot
 * bitmap to a bit index (0 when no bit is set).  Returns 0 or the
 * SMBIOS error code.
 */
static int kbd_get_state(struct kbd_state *state)
{
	struct calling_interface_buffer buffer;
	int ret;

	dell_fill_request(&buffer, 0x1, 0, 0, 0);
	ret = dell_send_request(&buffer,
				CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
	if (ret)
		return ret;

	/* ffs() is 1-based; decrement to get the bit index. */
	state->mode_bit = ffs(buffer.output[1] & 0xFFFF);
	if (state->mode_bit != 0)
		state->mode_bit--;

	state->triggers = (buffer.output[1] >> 16) & 0xFF;
	state->timeout_value = (buffer.output[1] >> 24) & 0x3F;
	state->timeout_unit = (buffer.output[1] >> 30) & 0x3;
	state->als_setting = buffer.output[2] & 0xFF;
	state->als_value = (buffer.output[2] >> 8) & 0xFF;
	state->level = (buffer.output[2] >> 16) & 0xFF;
	state->timeout_value_ac = (buffer.output[2] >> 24) & 0x3F;
	state->timeout_unit_ac = (buffer.output[2] >> 30) & 0x3;

	return ret;
}
/*
 * Pack @state into the two argument words of the Set New State call
 * (cbArg1 byte0 = 0x2) and send it.  Returns 0 or the SMBIOS error.
 */
static int kbd_set_state(struct kbd_state *state)
{
	struct calling_interface_buffer buffer;
	u32 arg1, arg2;

	/* cbArg2: one-hot mode bitmap, triggers, battery timeout. */
	arg1  = BIT(state->mode_bit) & 0xFFFF;
	arg1 |= (state->triggers & 0xFF) << 16;
	arg1 |= (state->timeout_value & 0x3F) << 24;
	arg1 |= (state->timeout_unit & 0x3) << 30;

	/* cbArg3: ALS setting, brightness level, AC timeout. */
	arg2  = state->als_setting & 0xFF;
	arg2 |= (state->level & 0xFF) << 16;
	arg2 |= (state->timeout_value_ac & 0x3F) << 24;
	arg2 |= (state->timeout_unit_ac & 0x3) << 30;

	dell_fill_request(&buffer, 0x2, arg1, arg2, 0);
	return dell_send_request(&buffer,
				 CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
}
/*
 * Write @state to the BIOS; on failure attempt to restore @old, since
 * some BIOSes fall back to a default (possibly all-off) state when a
 * set fails.  Returns the error from the first kbd_set_state() call.
 */
static int kbd_set_state_safe(struct kbd_state *state, struct kbd_state *old)
{
	int ret;

	ret = kbd_set_state(state);
	if (ret == 0)
		return 0;

	/*
	 * When setting the new state fails, try to restore the previous one.
	 * This is needed on some machines where BIOS sets a default state when
	 * setting a new state fails. This default state could be all off.
	 */
	if (kbd_set_state(old))
		pr_err("Setting old previous keyboard state failed\n");

	return ret;
}
/*
 * Activate token number @bit of kbd_tokens[] by writing its value to
 * its SMBIOS location.  Returns 0, -EINVAL for an unknown/absent token,
 * or the SMBIOS error code.
 */
static int kbd_set_token_bit(u8 bit)
{
	struct calling_interface_buffer buffer;
	struct calling_interface_token *token;

	if (bit >= ARRAY_SIZE(kbd_tokens))
		return -EINVAL;

	token = dell_smbios_find_token(kbd_tokens[bit]);
	if (!token)
		return -EINVAL;

	dell_fill_request(&buffer, token->location, token->value, 0, 0);
	return dell_send_request(&buffer, CLASS_TOKEN_WRITE, SELECT_TOKEN_STD);
}
/*
 * Test whether token number @bit of kbd_tokens[] is currently active.
 * Returns 1 if active, 0 if not, -EINVAL for an unknown/absent token,
 * or a negative SMBIOS error code.
 */
static int kbd_get_token_bit(u8 bit)
{
	struct calling_interface_buffer buffer;
	struct calling_interface_token *token;
	int ret;

	if (bit >= ARRAY_SIZE(kbd_tokens))
		return -EINVAL;

	token = dell_smbios_find_token(kbd_tokens[bit]);
	if (!token)
		return -EINVAL;

	dell_fill_request(&buffer, token->location, 0, 0, 0);
	ret = dell_send_request(&buffer, CLASS_TOKEN_READ, SELECT_TOKEN_STD);
	if (ret)
		return ret;

	return buffer.output[1] == token->value;
}
static int kbd_get_first_active_token_bit(void)
{
int i;
int ret;
for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i) {
ret = kbd_get_token_bit(i);
if (ret == 1)
return i;
}
return ret;
}
/* Number of backlight tokens present on this machine (popcount). */
static int kbd_get_valid_token_counts(void)
{
	return hweight16(kbd_token_bits);
}
/*
 * Query the BIOS for keyboard-backlight capabilities and current state,
 * then derive the module-level feature flags (ALS / trigger support,
 * AC timeout), clamp the timeout maxima to the 6-bit field, and build
 * the kbd_mode_levels[] table used for mode-bit based brightness.
 * Returns 0 on success or the kbd_get_info() error.
 */
static inline int kbd_init_info(void)
{
	struct kbd_state state;
	int ret;
	int i;

	ret = kbd_get_info(&kbd_info);
	if (ret)
		return ret;

	/* NOTE: Old models without KBD_LED_AC_TOKEN token supports only one
	 *       timeout value which is shared for both battery and AC power
	 *       settings. So do not try to set AC values on old models.
	 */
	if ((quirks && quirks->kbd_missing_ac_tag) ||
	    dell_smbios_find_token(KBD_LED_AC_TOKEN))
		kbd_timeout_ac_supported = true;

	kbd_get_state(&state);

	/* NOTE: timeout value is stored in 6 bits so max value is 63 */
	if (kbd_info.seconds > 63)
		kbd_info.seconds = 63;
	if (kbd_info.minutes > 63)
		kbd_info.minutes = 63;
	if (kbd_info.hours > 63)
		kbd_info.hours = 63;
	if (kbd_info.days > 63)
		kbd_info.days = 63;

	/* NOTE: On tested machines ON mode did not work and caused
	 *       problems (turned backlight off) so do not use it
	 */
	kbd_info.modes &= ~BIT(KBD_MODE_BIT_ON);

	/* Seed the "previous" level/mode used when toggling back on. */
	kbd_previous_level = kbd_get_level(&state);
	kbd_previous_mode_bit = state.mode_bit;

	if (kbd_previous_level == 0 && kbd_get_max_level() != 0)
		kbd_previous_level = 1;

	if (kbd_previous_mode_bit == KBD_MODE_BIT_OFF) {
		kbd_previous_mode_bit =
			ffs(kbd_info.modes & ~BIT(KBD_MODE_BIT_OFF));
		if (kbd_previous_mode_bit != 0)
			kbd_previous_mode_bit--;
	}

	if (kbd_info.modes & (BIT(KBD_MODE_BIT_ALS) |
			      BIT(KBD_MODE_BIT_TRIGGER_ALS)))
		kbd_als_supported = true;

	if (kbd_info.modes & (
	    BIT(KBD_MODE_BIT_TRIGGER_ALS) | BIT(KBD_MODE_BIT_TRIGGER) |
	    BIT(KBD_MODE_BIT_TRIGGER_25) | BIT(KBD_MODE_BIT_TRIGGER_50) |
	    BIT(KBD_MODE_BIT_TRIGGER_75) | BIT(KBD_MODE_BIT_TRIGGER_100)
	   ))
		kbd_triggers_supported = true;

	/* kbd_mode_levels[0] is reserved, see below */
	for (i = 0; i < 16; ++i)
		if (kbd_is_level_mode_bit(i) && (BIT(i) & kbd_info.modes))
			kbd_mode_levels[1 + kbd_mode_levels_count++] = i;

	/*
	 * Find the first supported mode and assign to kbd_mode_levels[0].
	 * This should be 0 (off), but we cannot depend on the BIOS to
	 * support 0.
	 */
	if (kbd_mode_levels_count > 0) {
		for (i = 0; i < 16; ++i) {
			if (BIT(i) & kbd_info.modes) {
				kbd_mode_levels[0] = i;
				break;
			}
		}
		kbd_mode_levels_count++;
	}

	return 0;
}
static inline void kbd_init_tokens(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i)
if (dell_smbios_find_token(kbd_tokens[i]))
kbd_token_bits |= BIT(i);
}
/*
 * Detect keyboard backlight support.  Sets kbd_led_present when either
 * the state-based interface offers at least two usable modes/levels or
 * at least two SMBIOS tokens are available; a quirk can disable it.
 */
static void kbd_init(void)
{
	int ret;

	if (quirks && quirks->kbd_led_not_present)
		return;

	ret = kbd_init_info();
	kbd_init_tokens();

	/*
	 * Only supports keyboard backlight when it has at least two modes.
	 */
	if ((ret == 0 && (kbd_info.levels != 0 || kbd_mode_levels_count >= 2))
	    || kbd_get_valid_token_counts() >= 2)
		kbd_led_present = true;
}
/*
 * sysfs store for "stop_timeout".  Accepts "<value>[smhd]" (unit defaults
 * to seconds).  If the value exceeds the BIOS maximum for its unit, or a
 * quirk requires it, the value is normalized to seconds, optionally
 * snapped to a quirk-provided table, and re-expressed in the smallest
 * unit that can hold it.  Writes the AC or battery timeout depending on
 * the current power source.  Returns @count or a negative error.
 */
static ssize_t kbd_led_timeout_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct kbd_state new_state;
	struct kbd_state state;
	bool convert;
	int value;
	int ret;
	char ch;
	u8 unit;
	int i;

	ret = sscanf(buf, "%d %c", &value, &ch);
	if (ret < 1)
		return -EINVAL;
	else if (ret == 1)
		ch = 's';	/* no unit given: assume seconds */

	if (value < 0)
		return -EINVAL;

	/* Convert to seconds when the value overflows its unit's maximum. */
	convert = false;

	switch (ch) {
	case 's':
		if (value > kbd_info.seconds)
			convert = true;
		unit = KBD_TIMEOUT_SECONDS;
		break;
	case 'm':
		if (value > kbd_info.minutes)
			convert = true;
		unit = KBD_TIMEOUT_MINUTES;
		break;
	case 'h':
		if (value > kbd_info.hours)
			convert = true;
		unit = KBD_TIMEOUT_HOURS;
		break;
	case 'd':
		if (value > kbd_info.days)
			convert = true;
		unit = KBD_TIMEOUT_DAYS;
		break;
	default:
		return -EINVAL;
	}

	if (quirks && quirks->needs_kbd_timeouts)
		convert = true;

	if (convert) {
		/* Convert value from current units to seconds */
		switch (unit) {
		case KBD_TIMEOUT_DAYS:
			value *= 24;
			fallthrough;
		case KBD_TIMEOUT_HOURS:
			value *= 60;
			fallthrough;
		case KBD_TIMEOUT_MINUTES:
			value *= 60;
			unit = KBD_TIMEOUT_SECONDS;
		}

		/* Snap up to the next supported timeout from the quirk table. */
		if (quirks && quirks->needs_kbd_timeouts) {
			for (i = 0; quirks->kbd_timeouts[i] != -1; i++) {
				if (value <= quirks->kbd_timeouts[i]) {
					value = quirks->kbd_timeouts[i];
					break;
				}
			}
		}

		/* Re-express in the smallest unit whose max can hold it. */
		if (value <= kbd_info.seconds && kbd_info.seconds) {
			unit = KBD_TIMEOUT_SECONDS;
		} else if (value / 60 <= kbd_info.minutes && kbd_info.minutes) {
			value /= 60;
			unit = KBD_TIMEOUT_MINUTES;
		} else if (value / (60 * 60) <= kbd_info.hours && kbd_info.hours) {
			value /= (60 * 60);
			unit = KBD_TIMEOUT_HOURS;
		} else if (value / (60 * 60 * 24) <= kbd_info.days && kbd_info.days) {
			value /= (60 * 60 * 24);
			unit = KBD_TIMEOUT_DAYS;
		} else {
			return -EINVAL;
		}
	}

	mutex_lock(&kbd_led_mutex);

	ret = kbd_get_state(&state);
	if (ret)
		goto out;

	new_state = state;

	if (kbd_timeout_ac_supported && power_supply_is_system_supplied() > 0) {
		new_state.timeout_value_ac = value;
		new_state.timeout_unit_ac = unit;
	} else {
		new_state.timeout_value = value;
		new_state.timeout_unit = unit;
	}

	ret = kbd_set_state_safe(&new_state, &state);
	if (ret)
		goto out;

	ret = count;
out:
	mutex_unlock(&kbd_led_mutex);
	return ret;
}
/*
 * sysfs show for "stop_timeout": prints "<value><unit>\n" with the unit
 * suffix s/m/h/d.  Uses the AC timeout when supported and the machine
 * runs on external power, otherwise the battery timeout.  Returns the
 * byte count or a negative error.
 */
static ssize_t kbd_led_timeout_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct kbd_state state;
	int value;
	int ret;
	int len;
	u8 unit;

	ret = kbd_get_state(&state);
	if (ret)
		return ret;

	if (kbd_timeout_ac_supported && power_supply_is_system_supplied() > 0) {
		value = state.timeout_value_ac;
		unit = state.timeout_unit_ac;
	} else {
		value = state.timeout_value;
		unit = state.timeout_unit;
	}

	len = sprintf(buf, "%d", value);

	/*
	 * Every case returns; the unreachable trailing "return len" that
	 * followed this switch has been removed.
	 */
	switch (unit) {
	case KBD_TIMEOUT_SECONDS:
		return len + sprintf(buf+len, "s\n");
	case KBD_TIMEOUT_MINUTES:
		return len + sprintf(buf+len, "m\n");
	case KBD_TIMEOUT_HOURS:
		return len + sprintf(buf+len, "h\n");
	case KBD_TIMEOUT_DAYS:
		return len + sprintf(buf+len, "d\n");
	default:
		return -EINVAL;
	}
}
/* sysfs "stop_timeout": idle time before the keyboard backlight stops. */
static DEVICE_ATTR(stop_timeout, S_IRUGO | S_IWUSR,
		   kbd_led_timeout_show, kbd_led_timeout_store);

/* Trigger names, indexed by their bit in kbd_info.triggers. */
static const char * const kbd_led_triggers[] = {
	"keyboard",
	"touchpad",
	/*"trackstick"*/ NULL, /* NOTE: trackstick is just alias for touchpad */
	"mouse",
};
/*
 * sysfs store for the trigger list.  Accepts "+name" / "-name" to enable
 * or disable one auto-illumination trigger.  Enabling the first trigger
 * switches into TRIGGER mode at the previously saved level; disabling
 * the last one switches the light off.  Returns @count, or -EINVAL for
 * an unknown trigger or an unsupported resulting mode.
 */
static ssize_t kbd_led_triggers_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct kbd_state new_state;
	struct kbd_state state;
	bool triggers_enabled = false;
	int trigger_bit = -1;
	char trigger[21];
	int i, ret;

	ret = sscanf(buf, "%20s", trigger);
	if (ret != 1)
		return -EINVAL;
	if (trigger[0] != '+' && trigger[0] != '-')
		return -EINVAL;

	mutex_lock(&kbd_led_mutex);

	ret = kbd_get_state(&state);
	if (ret)
		goto out;

	if (kbd_triggers_supported)
		triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit);

	/* Find the requested trigger; no-op requests succeed early. */
	if (kbd_triggers_supported) {
		for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); ++i) {
			if (!(kbd_info.triggers & BIT(i)))
				continue;
			if (!kbd_led_triggers[i])
				continue;
			if (strcmp(trigger+1, kbd_led_triggers[i]) != 0)
				continue;
			if (trigger[0] == '+' &&
			    triggers_enabled && (state.triggers & BIT(i))) {
				ret = count;
				goto out;
			}
			if (trigger[0] == '-' &&
			    (!triggers_enabled || !(state.triggers & BIT(i)))) {
				ret = count;
				goto out;
			}
			trigger_bit = i;
			break;
		}
	}

	if (trigger_bit == -1) {
		ret = -EINVAL;
		goto out;
	}

	new_state = state;
	if (trigger[0] == '+')
		new_state.triggers |= BIT(trigger_bit);
	else {
		new_state.triggers &= ~BIT(trigger_bit);
		/*
		 * NOTE: trackstick bit (2) must be disabled when
		 *       disabling touchpad bit (1), otherwise touchpad
		 *       bit (1) will not be disabled
		 */
		if (trigger_bit == 1)
			new_state.triggers &= ~BIT(2);
	}
	if ((kbd_info.triggers & new_state.triggers) !=
	    new_state.triggers) {
		ret = -EINVAL;
		goto out;
	}
	if (new_state.triggers && !triggers_enabled) {
		new_state.mode_bit = KBD_MODE_BIT_TRIGGER;
		kbd_set_level(&new_state, kbd_previous_level);
	} else if (new_state.triggers == 0) {
		kbd_set_level(&new_state, 0);
	}
	if (!(kbd_info.modes & BIT(new_state.mode_bit))) {
		ret = -EINVAL;
		goto out;
	}

	ret = kbd_set_state_safe(&new_state, &state);
	if (ret)
		goto out;

	if (new_state.mode_bit != KBD_MODE_BIT_OFF)
		kbd_previous_mode_bit = new_state.mode_bit;

	ret = count;
out:
	mutex_unlock(&kbd_led_mutex);
	return ret;
}
/*
 * List every supported keyboard backlight trigger, prefixed with '+'
 * (active) or '-' (inactive), e.g. "+keyboard -mouse".
 */
static ssize_t kbd_led_triggers_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct kbd_state state;
	bool triggers_enabled;
	int level, i, ret;
	int len = 0;	/* dropped the redundant second "len = 0" assignment */

	ret = kbd_get_state(&state);
	if (ret)
		return ret;

	if (kbd_triggers_supported) {
		triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit);
		level = kbd_get_level(&state);
		for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); ++i) {
			if (!(kbd_info.triggers & BIT(i)))
				continue;
			if (!kbd_led_triggers[i])
				continue;
			if ((triggers_enabled || level <= 0) &&
			    (state.triggers & BIT(i)))
				buf[len++] = '+';
			else
				buf[len++] = '-';
			len += sprintf(buf+len, "%s ", kbd_led_triggers[i]);
		}
	}

	/* replace the trailing space with a newline */
	if (len)
		buf[len - 1] = '\n';

	return len;
}
/* sysfs "start_triggers": "+name"/"-name" writes toggle a trigger. */
static DEVICE_ATTR(start_triggers, S_IRUGO | S_IWUSR,
		   kbd_led_triggers_show, kbd_led_triggers_store);
/*
 * Enable or disable ambient-light-sensor control of the keyboard
 * backlight.  Chooses the ALS or combined trigger+ALS mode bit so any
 * active triggers survive, and restores the previous manual level when
 * ALS is switched off.
 */
static ssize_t kbd_led_als_enabled_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct kbd_state new_state;
	struct kbd_state state;
	bool triggers_enabled = false;
	int enable;
	int ret;
	ret = kstrtoint(buf, 0, &enable);
	if (ret)
		return ret;
	mutex_lock(&kbd_led_mutex);
	ret = kbd_get_state(&state);
	if (ret)
		goto out;
	/* requested state already in effect: succeed without EC traffic */
	if (enable == kbd_is_als_mode_bit(state.mode_bit)) {
		ret = count;
		goto out;
	}
	new_state = state;
	if (kbd_triggers_supported)
		triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit);
	if (enable) {
		if (triggers_enabled)
			new_state.mode_bit = KBD_MODE_BIT_TRIGGER_ALS;
		else
			new_state.mode_bit = KBD_MODE_BIT_ALS;
	} else {
		if (triggers_enabled) {
			new_state.mode_bit = KBD_MODE_BIT_TRIGGER;
			kbd_set_level(&new_state, kbd_previous_level);
		} else {
			new_state.mode_bit = KBD_MODE_BIT_ON;
		}
	}
	/* the chosen mode must actually be supported by this machine */
	if (!(kbd_info.modes & BIT(new_state.mode_bit))) {
		ret = -EINVAL;
		goto out;
	}
	ret = kbd_set_state_safe(&new_state, &state);
	if (ret)
		goto out;
	kbd_previous_mode_bit = new_state.mode_bit;
	ret = count;
out:
	mutex_unlock(&kbd_led_mutex);
	return ret;
}
static ssize_t kbd_led_als_enabled_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct kbd_state state;
bool enabled = false;
int ret;
ret = kbd_get_state(&state);
if (ret)
return ret;
enabled = kbd_is_als_mode_bit(state.mode_bit);
return sprintf(buf, "%d\n", enabled ? 1 : 0);
}
/* sysfs "als_enabled": toggle ambient-light-sensor control. */
static DEVICE_ATTR(als_enabled, S_IRUGO | S_IWUSR,
		   kbd_led_als_enabled_show, kbd_led_als_enabled_store);
/* Store a new ambient-light-sensor setting value in the EC. */
static ssize_t kbd_led_als_setting_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct kbd_state state;
	struct kbd_state new_state;
	u8 setting;
	int ret;
	/* decimal only; the raw byte is handed to the EC unchanged */
	ret = kstrtou8(buf, 10, &setting);
	if (ret)
		return ret;
	mutex_lock(&kbd_led_mutex);
	ret = kbd_get_state(&state);
	if (ret)
		goto out;
	new_state = state;
	new_state.als_setting = setting;
	ret = kbd_set_state_safe(&new_state, &state);
	if (ret)
		goto out;
	ret = count;
out:
	mutex_unlock(&kbd_led_mutex);
	return ret;
}
static ssize_t kbd_led_als_setting_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct kbd_state state;
int ret;
ret = kbd_get_state(&state);
if (ret)
return ret;
return sprintf(buf, "%d\n", state.als_setting);
}
/* sysfs "als_setting": raw ALS setting byte. */
static DEVICE_ATTR(als_setting, S_IRUGO | S_IWUSR,
		   kbd_led_als_setting_show, kbd_led_als_setting_store);
/* Core keyboard-backlight attributes (always present). */
static struct attribute *kbd_led_attrs[] = {
	&dev_attr_stop_timeout.attr,
	&dev_attr_start_triggers.attr,
	NULL,
};
static const struct attribute_group kbd_led_group = {
	.attrs = kbd_led_attrs,
};
/* ALS attributes; this group is dropped in kbd_led_init() when the EC
 * lacks ALS support. */
static struct attribute *kbd_led_als_attrs[] = {
	&dev_attr_als_enabled.attr,
	&dev_attr_als_setting.attr,
	NULL,
};
static const struct attribute_group kbd_led_als_group = {
	.attrs = kbd_led_als_attrs,
};
static const struct attribute_group *kbd_led_groups[] = {
	&kbd_led_group,
	&kbd_led_als_group,
	NULL,
};
/*
 * LED class getter: report the current keyboard backlight level.
 * Uses the level API when the EC exposes discrete levels, otherwise
 * maps the first active brightness token to a 0-based index.  Returns
 * 0 on any failure (a brightness getter cannot propagate errors).
 */
static enum led_brightness kbd_led_level_get(struct led_classdev *led_cdev)
{
	int ret;
	u16 num;
	struct kbd_state state;
	if (kbd_get_max_level()) {
		ret = kbd_get_state(&state);
		if (ret)
			return 0;
		ret = kbd_get_level(&state);
		if (ret < 0)
			return 0;
		return ret;
	}
	if (kbd_get_valid_token_counts()) {
		ret = kbd_get_first_active_token_bit();
		if (ret < 0)
			return 0;
		/* skip the first 'ret' set bits of the token mask */
		for (num = kbd_token_bits; num != 0 && ret > 0; --ret)
			num &= num - 1; /* clear the first bit set */
		if (num == 0)
			return 0;
		/* index of the first remaining set bit */
		return ffs(num) - 1;
	}
	pr_warn("Keyboard brightness level control not supported\n");
	return 0;
}
/*
 * LED class setter: program a new keyboard backlight level, either via
 * the level interface or by activating the matching brightness token.
 * On success the value is cached in kbd_led_level so the notifier can
 * detect BIOS-initiated changes.
 */
static int kbd_led_level_set(struct led_classdev *led_cdev,
			     enum led_brightness value)
{
	enum led_brightness new_value = value;
	struct kbd_state state;
	struct kbd_state new_state;
	u16 num;
	int ret;
	mutex_lock(&kbd_led_mutex);
	if (kbd_get_max_level()) {
		ret = kbd_get_state(&state);
		if (ret)
			goto out;
		new_state = state;
		ret = kbd_set_level(&new_state, value);
		if (ret)
			goto out;
		ret = kbd_set_state_safe(&new_state, &state);
	} else if (kbd_get_valid_token_counts()) {
		/* walk to the value-th set bit of the token mask */
		for (num = kbd_token_bits; num != 0 && value > 0; --value)
			num &= num - 1; /* clear the first bit set */
		if (num == 0)
			ret = 0;
		else
			ret = kbd_set_token_bit(ffs(num) - 1);
	} else {
		pr_warn("Keyboard brightness level control not supported\n");
		ret = -ENXIO;
	}
out:
	if (ret == 0)
		kbd_led_level = new_value;
	mutex_unlock(&kbd_led_mutex);
	return ret;
}
/* Keyboard backlight LED device; firmware-initiated level changes are
 * reported via LED_BRIGHT_HW_CHANGED events. */
static struct led_classdev kbd_led = {
	.name           = "dell::kbd_backlight",
	.flags		= LED_BRIGHT_HW_CHANGED,
	.brightness_set_blocking = kbd_led_level_set,
	.brightness_get = kbd_led_level_get,
	.groups         = kbd_led_groups,
};
/*
 * Probe keyboard backlight support and register the LED class device.
 * Returns -ENODEV when no backlight hardware is present.
 */
static int __init kbd_led_init(struct device *dev)
{
	int ret;
	kbd_init();
	if (!kbd_led_present)
		return -ENODEV;
	/* hide the ALS attribute group when the EC lacks ALS support */
	if (!kbd_als_supported)
		kbd_led_groups[1] = NULL;
	kbd_led.max_brightness = kbd_get_max_level();
	if (!kbd_led.max_brightness) {
		/* token-based control: brightness is a 0-based token index */
		kbd_led.max_brightness = kbd_get_valid_token_counts();
		if (kbd_led.max_brightness)
			kbd_led.max_brightness--;
	}
	kbd_led_level = kbd_led_level_get(NULL);
	ret = led_classdev_register(dev, &kbd_led);
	if (ret)
		kbd_led_present = false;
	return ret;
}
/*
 * No-op brightness setter installed at module exit so the LED core
 * does not change the backlight level during unregistration.
 * (Removed the stray semicolon after the function body.)
 */
static void brightness_set_exit(struct led_classdev *led_cdev,
				enum led_brightness value)
{
	/* Don't change backlight level on exit */
}
/* Unregister the keyboard backlight LED without touching its level. */
static void kbd_led_exit(void)
{
	if (!kbd_led_present)
		return;
	/* swap in a no-op setter so unregister keeps the current level */
	kbd_led.brightness_set = brightness_set_exit;
	led_classdev_unregister(&kbd_led);
}
/*
 * dell-laptop event notifier: when the BIOS changes the keyboard
 * backlight brightness, refresh the cached level (under kbd_led_mutex)
 * and notify userspace via the LED hw-changed mechanism.
 */
static int dell_laptop_notifier_call(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	bool changed = false;
	enum led_brightness new_kbd_led_level;
	switch (action) {
	case DELL_LAPTOP_KBD_BACKLIGHT_BRIGHTNESS_CHANGED:
		if (!kbd_led_present)
			break;
		mutex_lock(&kbd_led_mutex);
		new_kbd_led_level = kbd_led_level_get(&kbd_led);
		if (kbd_led_level != new_kbd_led_level) {
			kbd_led_level = new_kbd_led_level;
			changed = true;
		}
		mutex_unlock(&kbd_led_mutex);
		/* notify outside the mutex once the cache is consistent */
		if (changed)
			led_classdev_notify_brightness_hw_changed(&kbd_led,
								  kbd_led_level);
		break;
	}
	return NOTIFY_OK;
}
static struct notifier_block dell_laptop_notifier = {
	.notifier_call = dell_laptop_notifier_call,
};
/* Drive the mic-mute LED by writing the matching SMBIOS token. */
static int micmute_led_set(struct led_classdev *led_cdev,
			   enum led_brightness brightness)
{
	struct calling_interface_buffer buffer;
	struct calling_interface_token *token;

	token = dell_smbios_find_token(brightness == LED_OFF ?
				       GLOBAL_MIC_MUTE_DISABLE :
				       GLOBAL_MIC_MUTE_ENABLE);
	if (!token)
		return -ENODEV;

	dell_fill_request(&buffer, token->location, token->value, 0, 0);
	dell_send_request(&buffer, CLASS_TOKEN_WRITE, SELECT_TOKEN_STD);

	return 0;
}
/* On/off mic-mute LED driven by the audio-micmute trigger. */
static struct led_classdev micmute_led_cdev = {
	.name = "platform::micmute",
	.max_brightness = 1,
	.brightness_set_blocking = micmute_led_set,
	.default_trigger = "audio-micmute",
};
/* Drive the speaker-mute LED by writing the matching SMBIOS token. */
static int mute_led_set(struct led_classdev *led_cdev,
			enum led_brightness brightness)
{
	struct calling_interface_buffer buffer;
	struct calling_interface_token *token;

	token = dell_smbios_find_token(brightness == LED_OFF ?
				       GLOBAL_MUTE_DISABLE :
				       GLOBAL_MUTE_ENABLE);
	if (!token)
		return -ENODEV;

	dell_fill_request(&buffer, token->location, token->value, 0, 0);
	dell_send_request(&buffer, CLASS_TOKEN_WRITE, SELECT_TOKEN_STD);

	return 0;
}
/* On/off speaker-mute LED driven by the audio-mute trigger. */
static struct led_classdev mute_led_cdev = {
	.name = "platform::mute",
	.max_brightness = 1,
	.brightness_set_blocking = mute_led_set,
	.default_trigger = "audio-mute",
};
/*
 * Module init: register the platform driver/device, set up rfkill,
 * keyboard backlight, debugfs, the event notifier and the audio mute
 * LEDs, and — when ACPI selects vendor backlight control — a backlight
 * device.  Error paths unwind in reverse order via the fail_* labels.
 */
static int __init dell_init(void)
{
	struct calling_interface_token *token;
	int max_intensity = 0;
	int ret;
	if (!dmi_check_system(dell_device_table))
		return -ENODEV;
	quirks = NULL;
	/* find if this machine support other functions */
	dmi_check_system(dell_quirks);
	ret = platform_driver_register(&platform_driver);
	if (ret)
		goto fail_platform_driver;
	platform_device = platform_device_alloc("dell-laptop", PLATFORM_DEVID_NONE);
	if (!platform_device) {
		ret = -ENOMEM;
		goto fail_platform_device1;
	}
	ret = platform_device_add(platform_device);
	if (ret)
		goto fail_platform_device2;
	ret = dell_setup_rfkill();
	if (ret) {
		pr_warn("Unable to setup rfkill\n");
		goto fail_rfkill;
	}
	if (quirks && quirks->touchpad_led)
		touchpad_led_init(&platform_device->dev);
	kbd_led_init(&platform_device->dev);
	dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL);
	debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL,
			    &dell_debugfs_fops);
	dell_laptop_register_notifier(&dell_laptop_notifier);
	/* mic-mute LED, unless the privacy driver already owns it */
	if (dell_smbios_find_token(GLOBAL_MIC_MUTE_DISABLE) &&
	    dell_smbios_find_token(GLOBAL_MIC_MUTE_ENABLE) &&
	    !dell_privacy_has_mic_mute()) {
		micmute_led_cdev.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
		ret = led_classdev_register(&platform_device->dev, &micmute_led_cdev);
		if (ret < 0)
			goto fail_led;
		micmute_led_registered = true;
	}
	if (dell_smbios_find_token(GLOBAL_MUTE_DISABLE) &&
	    dell_smbios_find_token(GLOBAL_MUTE_ENABLE)) {
		mute_led_cdev.brightness = ledtrig_audio_get(LED_AUDIO_MUTE);
		ret = led_classdev_register(&platform_device->dev, &mute_led_cdev);
		if (ret < 0)
			goto fail_backlight;
		mute_led_registered = true;
	}
	/* only provide a vendor backlight device when ACPI asks for one */
	if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
		return 0;
	token = dell_smbios_find_token(BRIGHTNESS_TOKEN);
	if (token) {
		struct calling_interface_buffer buffer;
		dell_fill_request(&buffer, token->location, 0, 0, 0);
		ret = dell_send_request(&buffer,
					CLASS_TOKEN_READ, SELECT_TOKEN_AC);
		if (ret == 0)
			max_intensity = buffer.output[3];
	}
	if (max_intensity) {
		struct backlight_properties props;
		memset(&props, 0, sizeof(struct backlight_properties));
		props.type = BACKLIGHT_PLATFORM;
		props.max_brightness = max_intensity;
		dell_backlight_device = backlight_device_register("dell_backlight",
								  &platform_device->dev,
								  NULL,
								  &dell_ops,
								  &props);
		if (IS_ERR(dell_backlight_device)) {
			ret = PTR_ERR(dell_backlight_device);
			dell_backlight_device = NULL;
			goto fail_backlight;
		}
		dell_backlight_device->props.brightness =
			dell_get_intensity(dell_backlight_device);
		if (dell_backlight_device->props.brightness < 0) {
			ret = dell_backlight_device->props.brightness;
			goto fail_get_brightness;
		}
		backlight_update_status(dell_backlight_device);
	}
	return 0;
fail_get_brightness:
	backlight_device_unregister(dell_backlight_device);
fail_backlight:
	if (micmute_led_registered)
		led_classdev_unregister(&micmute_led_cdev);
	if (mute_led_registered)
		led_classdev_unregister(&mute_led_cdev);
fail_led:
	dell_cleanup_rfkill();
fail_rfkill:
	platform_device_del(platform_device);
fail_platform_device2:
	platform_device_put(platform_device);
fail_platform_device1:
	platform_driver_unregister(&platform_driver);
fail_platform_driver:
	return ret;
}
/* Module exit: tear everything down in reverse of dell_init(). */
static void __exit dell_exit(void)
{
	dell_laptop_unregister_notifier(&dell_laptop_notifier);
	debugfs_remove_recursive(dell_laptop_dir);
	if (quirks && quirks->touchpad_led)
		touchpad_led_exit();
	kbd_led_exit();
	backlight_device_unregister(dell_backlight_device);
	if (micmute_led_registered)
		led_classdev_unregister(&micmute_led_cdev);
	if (mute_led_registered)
		led_classdev_unregister(&mute_led_cdev);
	dell_cleanup_rfkill();
	if (platform_device) {
		platform_device_unregister(platform_device);
		platform_driver_unregister(&platform_driver);
	}
}
/* dell-rbtn.c driver export functions which will not work correctly (and could
* cause kernel crash) if they are called before dell-rbtn.c init code. This is
* not problem when dell-rbtn.c is compiled as external module. When both files
* (dell-rbtn.c and dell-laptop.c) are compiled statically into kernel, then we
* need to ensure that dell_init() will be called after initializing dell-rbtn.
* This can be achieved by late_initcall() instead module_init().
*/
late_initcall(dell_init);
module_exit(dell_exit);
MODULE_AUTHOR("Matthew Garrett <[email protected]>");
MODULE_AUTHOR("Gabriele Mazzotta <[email protected]>");
MODULE_AUTHOR("Pali Rohár <[email protected]>");
MODULE_DESCRIPTION("Dell laptop driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/x86/dell/dell-laptop.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Functions corresponding to password object type attributes under BIOS Password Object GUID for
* use with dell-wmi-sysman
*
* Copyright (c) 2020 Dell Inc.
*/
#include "dell-wmi-sysman.h"
/* Indices of the password-object properties within the WMI package. */
enum po_properties {IS_PASS_SET = 1, MIN_PASS_LEN, MAX_PASS_LEN};
/* Expands to get_po_instance_id(kobj) (used below) — macro from dell-wmi-sysman.h. */
get_instance_id(po);
/* Report whether this BIOS password is currently set (0/1 from firmware). */
static ssize_t is_enabled_show(struct kobject *kobj, struct kobj_attribute *attr,
			       char *buf)
{
	int instance_id = get_po_instance_id(kobj);
	union acpi_object *obj;
	ssize_t ret;
	if (instance_id < 0)
		return instance_id;
	/* need to use specific instance_id and guid combination to get right data */
	obj = get_wmiobj_pointer(instance_id, DELL_WMI_BIOS_PASSOBJ_ATTRIBUTE_GUID);
	if (!obj)
		return -EIO;
	/* NOTE(review): assumes obj is a package with more than IS_PASS_SET
	 * elements — presumably guaranteed by get_wmiobj_pointer(); verify. */
	if (obj->package.elements[IS_PASS_SET].type != ACPI_TYPE_INTEGER) {
		kfree(obj);
		return -EINVAL;
	}
	ret = snprintf(buf, PAGE_SIZE, "%lld\n", obj->package.elements[IS_PASS_SET].integer.value);
	kfree(obj);
	return ret;
}
static struct kobj_attribute po_is_pass_set = __ATTR_RO(is_enabled);
/*
 * Cache the currently-set BIOS password ("Admin" or "System", selected
 * by the kobject name) so later attribute writes can authenticate with
 * the firmware.  A trailing newline is stripped.
 */
static ssize_t current_password_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	char *target = NULL;
	int length;

	length = strlen(buf);
	/* strip an optional trailing newline; guard the empty string so
	 * buf[-1] is never read */
	if (length > 0 && buf[length - 1] == '\n')
		length--;

	/* firmware does verifiation of min/max password length,
	 * hence only check for not exceeding MAX_BUFF here.
	 */
	if (length >= MAX_BUFF)
		return -EINVAL;

	if (strcmp(kobj->name, "Admin") == 0)
		target = wmi_priv.current_admin_password;
	else if (strcmp(kobj->name, "System") == 0)
		target = wmi_priv.current_system_password;
	if (!target)
		return -EIO;
	memcpy(target, buf, length);
	target[length] = '\0';

	return count;
}
static struct kobj_attribute po_current_password = __ATTR_WO(current_password);
/*
 * Stage and submit a new BIOS password via the WMI password interface.
 * The kobject name selects the password role; a trailing newline is
 * stripped before submission.
 */
static ssize_t new_password_store(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  const char *buf, size_t count)
{
	char *p, *buf_cp;
	int ret;

	buf_cp = kstrdup(buf, GFP_KERNEL);
	if (!buf_cp)
		return -ENOMEM;

	/*
	 * Search only within the duplicated C string: kstrdup() stops at
	 * the first NUL, so scanning 'count' bytes with memchr() could
	 * read past the end of the allocation if buf held an embedded NUL.
	 */
	p = strchr(buf_cp, '\n');
	if (p != NULL)
		*p = '\0';

	/*
	 * Reject passwords that would not fit (with their NUL terminator)
	 * in the MAX_BUFF-sized cache used for later authentication;
	 * matches the ">= MAX_BUFF" check in current_password_store().
	 */
	if (strlen(buf_cp) >= MAX_BUFF) {
		ret = -EINVAL;
		goto out;
	}

	ret = set_new_password(kobj->name, buf_cp);

out:
	kfree(buf_cp);
	return ret ? ret : count;
}
static struct kobj_attribute po_new_password = __ATTR_WO(new_password);
/* Generated numeric show() helpers for the min/max length properties. */
attribute_n_property_show(min_password_length, po);
static struct kobj_attribute po_min_pass_length = __ATTR_RO(min_password_length);
attribute_n_property_show(max_password_length, po);
static struct kobj_attribute po_max_pass_length = __ATTR_RO(max_password_length);
/* This object always authenticates by password. */
static ssize_t mechanism_show(struct kobject *kobj, struct kobj_attribute *attr,
			      char *buf)
{
	return sprintf(buf, "password\n");
}
static struct kobj_attribute po_mechanism = __ATTR_RO(mechanism);
static ssize_t role_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
if (strcmp(kobj->name, "Admin") == 0)
return sprintf(buf, "bios-admin\n");
else if (strcmp(kobj->name, "System") == 0)
return sprintf(buf, "power-on\n");
return -EIO;
}
static struct kobj_attribute po_role = __ATTR_RO(role);
/* All sysfs attributes exposed for each password-object instance. */
static struct attribute *po_attrs[] = {
	&po_is_pass_set.attr,
	&po_min_pass_length.attr,
	&po_max_pass_length.attr,
	&po_current_password.attr,
	&po_new_password.attr,
	&po_role.attr,
	&po_mechanism.attr,
	NULL,
};
static const struct attribute_group po_attr_group = {
	.attrs = po_attrs,
};
/* Allocate zeroed per-instance storage for all password-object attributes. */
int alloc_po_data(void)
{
	wmi_priv.po_instances_count = get_instance_count(DELL_WMI_BIOS_PASSOBJ_ATTRIBUTE_GUID);
	wmi_priv.po_data = kcalloc(wmi_priv.po_instances_count,
				   sizeof(struct po_data), GFP_KERNEL);
	if (wmi_priv.po_data)
		return 0;

	wmi_priv.po_instances_count = 0;
	return -ENOMEM;
}
/**
 * populate_po_data() - Populate all properties of an instance under password object attribute
 * @po_obj: ACPI object with password object data
 * @instance_id: The instance to enumerate
 * @attr_name_kobj: The parent kernel object
 *
 * Return: 0 on success (sysfs group created), negative errno otherwise.
 */
int populate_po_data(union acpi_object *po_obj, int instance_id, struct kobject *attr_name_kobj)
{
	wmi_priv.po_data[instance_id].attr_name_kobj = attr_name_kobj;
	if (check_property_type(po, ATTR_NAME, ACPI_TYPE_STRING))
		return -EINVAL;
	strlcpy_attr(wmi_priv.po_data[instance_id].attribute_name,
		     po_obj[ATTR_NAME].string.pointer);
	if (check_property_type(po, MIN_PASS_LEN, ACPI_TYPE_INTEGER))
		return -EINVAL;
	/* integer members are read through the string.pointer union alias
	 * and converted back via uintptr_t */
	wmi_priv.po_data[instance_id].min_password_length =
		(uintptr_t)po_obj[MIN_PASS_LEN].string.pointer;
	if (check_property_type(po, MAX_PASS_LEN, ACPI_TYPE_INTEGER))
		return -EINVAL;
	wmi_priv.po_data[instance_id].max_password_length =
		(uintptr_t) po_obj[MAX_PASS_LEN].string.pointer;
	return sysfs_create_group(attr_name_kobj, &po_attr_group);
}
/**
 * exit_po_attributes() - Clear all attribute data
 *
 * Removes the sysfs groups of every registered instance and frees the
 * per-instance storage allocated by alloc_po_data().
 */
void exit_po_attributes(void)
{
	int instance_id;
	for (instance_id = 0; instance_id < wmi_priv.po_instances_count; instance_id++) {
		if (wmi_priv.po_data[instance_id].attr_name_kobj)
			sysfs_remove_group(wmi_priv.po_data[instance_id].attr_name_kobj,
					   &po_attr_group);
	}
	wmi_priv.po_instances_count = 0;
	kfree(wmi_priv.po_data);
	wmi_priv.po_data = NULL;
}
| linux-master | drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Functions corresponding to integer type attributes under BIOS Integer GUID for use with
* dell-wmi-sysman
*
* Copyright (c) 2020 Dell Inc.
*/
#include "dell-wmi-sysman.h"
/* Indices of the integer-specific properties within the WMI package. */
enum int_properties {MIN_VALUE = 6, MAX_VALUE, SCALAR_INCR};
/* Expands to get_integer_instance_id(kobj) — macro from dell-wmi-sysman.h. */
get_instance_id(integer);
/* Show the integer attribute's current value as read from firmware. */
static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	int instance_id = get_integer_instance_id(kobj);
	union acpi_object *obj;
	ssize_t ret;
	if (instance_id < 0)
		return instance_id;
	/* need to use specific instance_id and guid combination to get right data */
	obj = get_wmiobj_pointer(instance_id, DELL_WMI_BIOS_INTEGER_ATTRIBUTE_GUID);
	if (!obj)
		return -EIO;
	/* NOTE(review): assumes a package with more than CURRENT_VAL elements —
	 * presumably guaranteed by get_wmiobj_pointer(); verify. */
	if (obj->package.elements[CURRENT_VAL].type != ACPI_TYPE_INTEGER) {
		kfree(obj);
		return -EINVAL;
	}
	ret = snprintf(buf, PAGE_SIZE, "%lld\n", obj->package.elements[CURRENT_VAL].integer.value);
	kfree(obj);
	return ret;
}
/**
 * validate_integer_input() - Validate input of current_value against lower and upper bound
 * @instance_id: The instance on which input is validated
 * @buf: Input value
 *
 * Return: 0 if @buf parses as an integer within [min_value, max_value],
 * a negative errno otherwise.  May rewrite @buf in place (see below).
 */
static int validate_integer_input(int instance_id, char *buf)
{
	int in_val;
	int ret;
	ret = kstrtoint(buf, 0, &in_val);
	if (ret)
		return ret;
	if (in_val < wmi_priv.integer_data[instance_id].min_value ||
	    in_val > wmi_priv.integer_data[instance_id].max_value)
		return -EINVAL;
	/* workaround for BIOS error.
	 * validate input to avoid setting 0 when integer input passed with + sign
	 */
	if (*buf == '+')
		memmove(buf, (buf + 1), strlen(buf + 1) + 1);
	return ret;
}
/* Generated show() helpers for each integer-attribute property. */
attribute_s_property_show(display_name_language_code, integer);
static struct kobj_attribute integer_displ_langcode =
	__ATTR_RO(display_name_language_code);
attribute_s_property_show(display_name, integer);
static struct kobj_attribute integer_displ_name =
	__ATTR_RO(display_name);
attribute_n_property_show(default_value, integer);
static struct kobj_attribute integer_default_val =
	__ATTR_RO(default_value);
/* current_value is writable by root only (validated + sent to firmware). */
attribute_property_store(current_value, integer);
static struct kobj_attribute integer_current_val =
	__ATTR_RW_MODE(current_value, 0600);
attribute_s_property_show(dell_modifier, integer);
static struct kobj_attribute integer_modifier =
	__ATTR_RO(dell_modifier);
attribute_n_property_show(min_value, integer);
static struct kobj_attribute integer_lower_bound =
	__ATTR_RO(min_value);
attribute_n_property_show(max_value, integer);
static struct kobj_attribute integer_upper_bound =
	__ATTR_RO(max_value);
attribute_n_property_show(scalar_increment, integer);
static struct kobj_attribute integer_scalar_increment =
	__ATTR_RO(scalar_increment);
/* Fixed attribute type identifier. */
static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "integer\n");
}
static struct kobj_attribute integer_type =
	__ATTR_RO(type);
static struct attribute *integer_attrs[] = {
	&integer_displ_langcode.attr,
	&integer_displ_name.attr,
	&integer_default_val.attr,
	&integer_current_val.attr,
	&integer_modifier.attr,
	&integer_lower_bound.attr,
	&integer_upper_bound.attr,
	&integer_scalar_increment.attr,
	&integer_type.attr,
	NULL,
};
static const struct attribute_group integer_attr_group = {
	.attrs = integer_attrs,
};
/* Allocate zeroed per-instance storage for all integer attributes. */
int alloc_int_data(void)
{
	wmi_priv.integer_instances_count = get_instance_count(DELL_WMI_BIOS_INTEGER_ATTRIBUTE_GUID);
	wmi_priv.integer_data = kcalloc(wmi_priv.integer_instances_count,
					sizeof(struct integer_data), GFP_KERNEL);
	if (wmi_priv.integer_data)
		return 0;

	wmi_priv.integer_instances_count = 0;
	return -ENOMEM;
}
/**
 * populate_int_data() - Populate all properties of an instance under integer attribute
 * @integer_obj: ACPI object with integer data
 * @instance_id: The instance to enumerate
 * @attr_name_kobj: The parent kernel object
 *
 * Return: 0 on success (sysfs group created), negative errno otherwise.
 */
int populate_int_data(union acpi_object *integer_obj, int instance_id,
		      struct kobject *attr_name_kobj)
{
	wmi_priv.integer_data[instance_id].attr_name_kobj = attr_name_kobj;
	if (check_property_type(integer, ATTR_NAME, ACPI_TYPE_STRING))
		return -EINVAL;
	strlcpy_attr(wmi_priv.integer_data[instance_id].attribute_name,
		     integer_obj[ATTR_NAME].string.pointer);
	if (check_property_type(integer, DISPL_NAME_LANG_CODE, ACPI_TYPE_STRING))
		return -EINVAL;
	strlcpy_attr(wmi_priv.integer_data[instance_id].display_name_language_code,
		     integer_obj[DISPL_NAME_LANG_CODE].string.pointer);
	if (check_property_type(integer, DISPLAY_NAME, ACPI_TYPE_STRING))
		return -EINVAL;
	strlcpy_attr(wmi_priv.integer_data[instance_id].display_name,
		     integer_obj[DISPLAY_NAME].string.pointer);
	/* integer members below are read through the string.pointer union
	 * alias and converted back via uintptr_t */
	if (check_property_type(integer, DEFAULT_VAL, ACPI_TYPE_INTEGER))
		return -EINVAL;
	wmi_priv.integer_data[instance_id].default_value =
		(uintptr_t)integer_obj[DEFAULT_VAL].string.pointer;
	if (check_property_type(integer, MODIFIER, ACPI_TYPE_STRING))
		return -EINVAL;
	strlcpy_attr(wmi_priv.integer_data[instance_id].dell_modifier,
		     integer_obj[MODIFIER].string.pointer);
	if (check_property_type(integer, MIN_VALUE, ACPI_TYPE_INTEGER))
		return -EINVAL;
	wmi_priv.integer_data[instance_id].min_value =
		(uintptr_t)integer_obj[MIN_VALUE].string.pointer;
	if (check_property_type(integer, MAX_VALUE, ACPI_TYPE_INTEGER))
		return -EINVAL;
	wmi_priv.integer_data[instance_id].max_value =
		(uintptr_t)integer_obj[MAX_VALUE].string.pointer;
	if (check_property_type(integer, SCALAR_INCR, ACPI_TYPE_INTEGER))
		return -EINVAL;
	wmi_priv.integer_data[instance_id].scalar_increment =
		(uintptr_t)integer_obj[SCALAR_INCR].string.pointer;
	return sysfs_create_group(attr_name_kobj, &integer_attr_group);
}
/**
 * exit_int_attributes() - Clear all attribute data
 *
 * Removes the sysfs groups of every registered instance and frees the
 * per-instance storage allocated by alloc_int_data().
 */
void exit_int_attributes(void)
{
	int instance_id;
	for (instance_id = 0; instance_id < wmi_priv.integer_instances_count; instance_id++) {
		if (wmi_priv.integer_data[instance_id].attr_name_kobj)
			sysfs_remove_group(wmi_priv.integer_data[instance_id].attr_name_kobj,
					   &integer_attr_group);
	}
	wmi_priv.integer_instances_count = 0;
	kfree(wmi_priv.integer_data);
	wmi_priv.integer_data = NULL;
}
| linux-master | drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Functions corresponding to SET password methods under BIOS attributes interface GUID
*
* Copyright (c) 2020 Dell Inc.
*/
#include <linux/wmi.h>
#include "dell-wmi-sysman.h"
/*
 * Invoke the WMI BIOS-password SET method with a pre-built argument
 * buffer and map the firmware's integer status to an errno.
 */
static int call_password_interface(struct wmi_device *wdev, char *in_args, size_t size)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer input;
	union acpi_object *obj;
	acpi_status status;
	int ret = -EIO;

	input.length = (acpi_size) size;
	input.pointer = in_args;
	status = wmidev_evaluate_method(wdev, 0, 1, &input, &output);
	if (ACPI_FAILURE(status))
		return -EIO;
	/* the method may return no object; don't dereference a NULL pointer */
	obj = (union acpi_object *)output.pointer;
	if (obj && obj->type == ACPI_TYPE_INTEGER)
		ret = obj->integer.value;

	kfree(output.pointer);
	/* let userland know it may need to check is_password_set again */
	kobject_uevent(&wmi_priv.class_dev->kobj, KOBJ_CHANGE);
	return map_wmi_error(ret);
}
/**
 * set_new_password() - Sets a system admin password
 * @password_type: The type of password to set ("Admin" or "System")
 * @new: The new password
 *
 * Sets the password using plaintext interface, authenticating with the
 * cached current password of the selected type plus the admin password
 * in the security area.  On success the cached password is updated.
 *
 * Return: 0 on success or a negative errno.
 */
int set_new_password(const char *password_type, const char *new)
{
	size_t password_type_size, current_password_size, new_size;
	size_t security_area_size, buffer_size;
	char *buffer = NULL, *start;
	char *current_password;
	int ret;
	mutex_lock(&wmi_priv.mutex);
	if (!wmi_priv.password_attr_wdev) {
		ret = -ENODEV;
		goto out;
	}
	if (strcmp(password_type, "Admin") == 0) {
		current_password = wmi_priv.current_admin_password;
	} else if (strcmp(password_type, "System") == 0) {
		current_password = wmi_priv.current_system_password;
	} else {
		ret = -EINVAL;
		dev_err(&wmi_priv.password_attr_wdev->dev, "unknown password type %s\n",
			password_type);
		goto out;
	}
	/* build/calculate buffer */
	security_area_size = calculate_security_buffer(wmi_priv.current_admin_password);
	password_type_size = calculate_string_buffer(password_type);
	current_password_size = calculate_string_buffer(current_password);
	new_size = calculate_string_buffer(new);
	buffer_size = security_area_size + password_type_size + current_password_size + new_size;
	buffer = kzalloc(buffer_size, GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		goto out;
	}
	/* build security area */
	populate_security_buffer(buffer, wmi_priv.current_admin_password);
	/* build variables to set */
	start = buffer + security_area_size;
	ret = populate_string_buffer(start, password_type_size, password_type);
	if (ret < 0)
		goto out;
	start += ret;
	ret = populate_string_buffer(start, current_password_size, current_password);
	if (ret < 0)
		goto out;
	start += ret;
	ret = populate_string_buffer(start, new_size, new);
	if (ret < 0)
		goto out;
	print_hex_dump_bytes("set new password data: ", DUMP_PREFIX_NONE, buffer, buffer_size);
	ret = call_password_interface(wmi_priv.password_attr_wdev, buffer, buffer_size);
	/* on success copy the new password to current password */
	if (!ret)
		strscpy(current_password, new, MAX_BUFF);
	/* explain to user the detailed failure reason */
	else if (ret == -EOPNOTSUPP)
		dev_err(&wmi_priv.password_attr_wdev->dev, "admin password must be configured\n");
	else if (ret == -EACCES)
		dev_err(&wmi_priv.password_attr_wdev->dev, "invalid password\n");
out:
	kfree(buffer);
	mutex_unlock(&wmi_priv.mutex);
	return ret;
}
/* Bind/unbind the WMI password-interface device; access to
 * wmi_priv.password_attr_wdev is serialized by wmi_priv.mutex. */
static int bios_attr_pass_interface_probe(struct wmi_device *wdev, const void *context)
{
	mutex_lock(&wmi_priv.mutex);
	wmi_priv.password_attr_wdev = wdev;
	mutex_unlock(&wmi_priv.mutex);
	return 0;
}
static void bios_attr_pass_interface_remove(struct wmi_device *wdev)
{
	mutex_lock(&wmi_priv.mutex);
	wmi_priv.password_attr_wdev = NULL;
	mutex_unlock(&wmi_priv.mutex);
}
/* WMI GUID this sub-driver binds to. */
static const struct wmi_device_id bios_attr_pass_interface_id_table[] = {
	{ .guid_string = DELL_WMI_BIOS_PASSWORD_INTERFACE_GUID },
	{ },
};
static struct wmi_driver bios_attr_pass_interface_driver = {
	.driver = {
		.name = DRIVER_NAME"-password"
	},
	.probe = bios_attr_pass_interface_probe,
	.remove = bios_attr_pass_interface_remove,
	.id_table = bios_attr_pass_interface_id_table,
};
/* Register/unregister the WMI driver handling the password SET interface. */
int init_bios_attr_pass_interface(void)
{
	return wmi_driver_register(&bios_attr_pass_interface_driver);
}
void exit_bios_attr_pass_interface(void)
{
	wmi_driver_unregister(&bios_attr_pass_interface_driver);
}
MODULE_DEVICE_TABLE(wmi, bios_attr_pass_interface_id_table);
| linux-master | drivers/platform/x86/dell/dell-wmi-sysman/passwordattr-interface.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Functions corresponding to string type attributes under BIOS String GUID for use with
* dell-wmi-sysman
*
* Copyright (c) 2020 Dell Inc.
*/
#include "dell-wmi-sysman.h"
/* Indices of the string-specific properties within the WMI package. */
enum string_properties {MIN_LEN = 6, MAX_LEN};
/* Expands to get_str_instance_id(kobj) — macro from dell-wmi-sysman.h. */
get_instance_id(str);
/* Show the string attribute's current value as read from firmware. */
static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	int instance_id = get_str_instance_id(kobj);
	union acpi_object *obj;
	ssize_t ret;

	if (instance_id < 0)
		return instance_id;	/* propagate the real errno, matching the integer/password variants */

	/* need to use specific instance_id and guid combination to get right data */
	obj = get_wmiobj_pointer(instance_id, DELL_WMI_BIOS_STRING_ATTRIBUTE_GUID);
	if (!obj)
		return -EIO;
	if (obj->package.elements[CURRENT_VAL].type != ACPI_TYPE_STRING) {
		kfree(obj);
		return -EINVAL;
	}
	ret = snprintf(buf, PAGE_SIZE, "%s\n", obj->package.elements[CURRENT_VAL].string.pointer);
	kfree(obj);
	return ret;
}
/**
 * validate_str_input() - Validate input of current_value against min and max lengths
 * @instance_id: The instance on which input is validated
 * @buf: Input value
 *
 * Return: 0 when strlen(@buf) lies within [min_length, max_length],
 * -EINVAL otherwise.
 */
static int validate_str_input(int instance_id, const char *buf)
{
	int in_len = strlen(buf);
	if ((in_len < wmi_priv.str_data[instance_id].min_length) ||
	    (in_len > wmi_priv.str_data[instance_id].max_length))
		return -EINVAL;
	return 0;
}
attribute_s_property_show(display_name_language_code, str);
static struct kobj_attribute str_displ_langcode =
__ATTR_RO(display_name_language_code);
attribute_s_property_show(display_name, str);
static struct kobj_attribute str_displ_name =
__ATTR_RO(display_name);
attribute_s_property_show(default_value, str);
static struct kobj_attribute str_default_val =
__ATTR_RO(default_value);
attribute_property_store(current_value, str);
static struct kobj_attribute str_current_val =
__ATTR_RW_MODE(current_value, 0600);
attribute_s_property_show(dell_modifier, str);
static struct kobj_attribute str_modifier =
__ATTR_RO(dell_modifier);
attribute_n_property_show(min_length, str);
static struct kobj_attribute str_min_length =
__ATTR_RO(min_length);
attribute_n_property_show(max_length, str);
static struct kobj_attribute str_max_length =
__ATTR_RO(max_length);
static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "string\n");
}
static struct kobj_attribute str_type =
	__ATTR_RO(type);

/* all files created under each string attribute's directory */
static struct attribute *str_attrs[] = {
	&str_displ_langcode.attr,
	&str_displ_name.attr,
	&str_default_val.attr,
	&str_current_val.attr,
	&str_modifier.attr,
	&str_min_length.attr,
	&str_max_length.attr,
	&str_type.attr,
	NULL,
};

static const struct attribute_group str_attr_group = {
	.attrs = str_attrs,
};
/**
 * alloc_str_data() - Allocate per-instance storage for string attributes
 *
 * Counts the instances exposed by the BIOS string GUID and allocates a
 * zeroed str_data array for them. On failure the count is reset to zero
 * so later teardown code sees a consistent empty state.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
int alloc_str_data(void)
{
	wmi_priv.str_instances_count = get_instance_count(DELL_WMI_BIOS_STRING_ATTRIBUTE_GUID);
	wmi_priv.str_data = kcalloc(wmi_priv.str_instances_count,
				    sizeof(struct str_data), GFP_KERNEL);
	if (wmi_priv.str_data)
		return 0;

	wmi_priv.str_instances_count = 0;
	return -ENOMEM;
}
/**
* populate_str_data() - Populate all properties of an instance under string attribute
* @str_obj: ACPI object with string data
* @instance_id: The instance to enumerate
* @attr_name_kobj: The parent kernel object
*/
/**
 * populate_str_data() - Populate all properties of an instance under string attribute
 * @str_obj: ACPI object with string data
 * @instance_id: The instance to enumerate
 * @attr_name_kobj: The parent kernel object
 *
 * Validates the ACPI type of every package element before copying it
 * into the driver-private str_data slot, then exposes the instance via
 * the string attribute group.
 *
 * Return: 0 on success, -EINVAL on an unexpected element type, or the
 * error from sysfs_create_group().
 */
int populate_str_data(union acpi_object *str_obj, int instance_id, struct kobject *attr_name_kobj)
{
	wmi_priv.str_data[instance_id].attr_name_kobj = attr_name_kobj;
	if (check_property_type(str, ATTR_NAME, ACPI_TYPE_STRING))
		return -EINVAL;
	strlcpy_attr(wmi_priv.str_data[instance_id].attribute_name,
		     str_obj[ATTR_NAME].string.pointer);
	if (check_property_type(str, DISPL_NAME_LANG_CODE, ACPI_TYPE_STRING))
		return -EINVAL;
	strlcpy_attr(wmi_priv.str_data[instance_id].display_name_language_code,
		     str_obj[DISPL_NAME_LANG_CODE].string.pointer);
	if (check_property_type(str, DISPLAY_NAME, ACPI_TYPE_STRING))
		return -EINVAL;
	strlcpy_attr(wmi_priv.str_data[instance_id].display_name,
		     str_obj[DISPLAY_NAME].string.pointer);
	if (check_property_type(str, DEFAULT_VAL, ACPI_TYPE_STRING))
		return -EINVAL;
	strlcpy_attr(wmi_priv.str_data[instance_id].default_value,
		     str_obj[DEFAULT_VAL].string.pointer);
	if (check_property_type(str, MODIFIER, ACPI_TYPE_STRING))
		return -EINVAL;
	strlcpy_attr(wmi_priv.str_data[instance_id].dell_modifier,
		     str_obj[MODIFIER].string.pointer);
	if (check_property_type(str, MIN_LEN, ACPI_TYPE_INTEGER))
		return -EINVAL;
	/*
	 * Integer elements are read back through the union's string.pointer
	 * member (same storage as integer.value); the uintptr_t cast
	 * recovers the numeric value.
	 */
	wmi_priv.str_data[instance_id].min_length = (uintptr_t)str_obj[MIN_LEN].string.pointer;
	if (check_property_type(str, MAX_LEN, ACPI_TYPE_INTEGER))
		return -EINVAL;
	wmi_priv.str_data[instance_id].max_length = (uintptr_t) str_obj[MAX_LEN].string.pointer;

	return sysfs_create_group(attr_name_kobj, &str_attr_group);
}
/**
* exit_str_attributes() - Clear all attribute data
*
* Clears all data allocated for this group of attributes
*/
/**
 * exit_str_attributes() - Clear all attribute data
 *
 * Removes the sysfs group from every populated string attribute
 * instance and releases the backing array.
 */
void exit_str_attributes(void)
{
	int i;

	for (i = 0; i < wmi_priv.str_instances_count; i++) {
		struct kobject *kobj = wmi_priv.str_data[i].attr_name_kobj;

		/* only instances that were fully populated have a kobject */
		if (kobj)
			sysfs_remove_group(kobj, &str_attr_group);
	}
	wmi_priv.str_instances_count = 0;

	kfree(wmi_priv.str_data);
	wmi_priv.str_data = NULL;
}
| linux-master | drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Functions corresponding to SET methods under BIOS attributes interface GUID for use
* with dell-wmi-sysman
*
* Copyright (c) 2020 Dell Inc.
*/
#include <linux/wmi.h>
#include "dell-wmi-sysman.h"
#define SETDEFAULTVALUES_METHOD_ID 0x02
#define SETBIOSDEFAULTS_METHOD_ID 0x03
#define SETATTRIBUTE_METHOD_ID 0x04
/**
 * call_biosattributes_interface() - Invoke a method on the BIOS attributes interface
 * @wdev: the setter WMI device
 * @in_args: pre-built argument buffer
 * @size: length of @in_args in bytes
 * @method_id: WMI method selector (SETATTRIBUTE/SETBIOSDEFAULTS/...)
 *
 * Evaluates the method and decodes the integer status the firmware
 * returns. Any completed call may change BIOS state, so the
 * pending-reboot flag is raised (once) regardless of the status value.
 *
 * Return: 0 on success or a negative errno mapped via map_wmi_error().
 */
static int call_biosattributes_interface(struct wmi_device *wdev, char *in_args, size_t size,
					 int method_id)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer input;
	union acpi_object *obj;
	acpi_status status;
	int ret = -EIO;

	input.length = (acpi_size) size;
	input.pointer = in_args;
	status = wmidev_evaluate_method(wdev, 0, method_id, &input, &output);
	if (ACPI_FAILURE(status))
		return -EIO;
	/* output.pointer may legitimately be NULL even on ACPI success */
	obj = (union acpi_object *)output.pointer;
	if (obj && obj->type == ACPI_TYPE_INTEGER)
		ret = obj->integer.value;

	if (wmi_priv.pending_changes == 0) {
		wmi_priv.pending_changes = 1;
		/* let userland know it may need to check reboot pending again */
		kobject_uevent(&wmi_priv.class_dev->kobj, KOBJ_CHANGE);
	}
	kfree(output.pointer);
	return map_wmi_error(ret);
}
/**
* set_attribute() - Update an attribute value
* @a_name: The attribute name
* @a_value: The attribute value
*
* Sets an attribute to new value
*/
/**
 * set_attribute() - Update an attribute value
 * @a_name: The attribute name
 * @a_value: The attribute value
 *
 * Sets an attribute to new value. Builds a single WMI argument buffer
 * laid out as [security area][attribute name][attribute value] and
 * passes it to the SETATTRIBUTE method. Serialized against WMI device
 * probe/remove by wmi_priv.mutex.
 *
 * Return: 0 on success or a negative errno.
 */
int set_attribute(const char *a_name, const char *a_value)
{
	size_t security_area_size, buffer_size;
	size_t a_name_size, a_value_size;
	char *buffer = NULL, *start;
	int ret;

	mutex_lock(&wmi_priv.mutex);
	/* the setter WMI device may not have probed, or was removed */
	if (!wmi_priv.bios_attr_wdev) {
		ret = -ENODEV;
		goto out;
	}

	/* build/calculate buffer */
	security_area_size = calculate_security_buffer(wmi_priv.current_admin_password);
	a_name_size = calculate_string_buffer(a_name);
	a_value_size = calculate_string_buffer(a_value);
	buffer_size = security_area_size + a_name_size + a_value_size;
	buffer = kzalloc(buffer_size, GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		goto out;
	}

	/* build security area */
	populate_security_buffer(buffer, wmi_priv.current_admin_password);

	/* build variables to set */
	start = buffer + security_area_size;
	ret = populate_string_buffer(start, a_name_size, a_name);
	if (ret < 0)
		goto out;
	/* advance past the bytes actually written for the name */
	start += ret;
	ret = populate_string_buffer(start, a_value_size, a_value);
	if (ret < 0)
		goto out;

	print_hex_dump_bytes("set attribute data: ", DUMP_PREFIX_NONE, buffer, buffer_size);
	ret = call_biosattributes_interface(wmi_priv.bios_attr_wdev,
					    buffer, buffer_size,
					    SETATTRIBUTE_METHOD_ID);
	/* decode the two common admin-password failures for the log */
	if (ret == -EOPNOTSUPP)
		dev_err(&wmi_priv.bios_attr_wdev->dev, "admin password must be configured\n");
	else if (ret == -EACCES)
		dev_err(&wmi_priv.bios_attr_wdev->dev, "invalid password\n");

out:
	kfree(buffer);
	mutex_unlock(&wmi_priv.mutex);
	return ret;
}
/**
* set_bios_defaults() - Resets BIOS defaults
* @deftype: the type of BIOS value reset to issue.
*
* Resets BIOS defaults
*/
/**
 * set_bios_defaults() - Resets BIOS defaults
 * @deftype: the type of BIOS value reset to issue.
 *
 * Resets BIOS defaults. The argument buffer is laid out as
 * [security area][one u8 default type] and handed to the
 * SETBIOSDEFAULTS method. Serialized by wmi_priv.mutex.
 *
 * Return: 0 on success or a negative errno.
 */
int set_bios_defaults(u8 deftype)
{
	size_t security_area_size, buffer_size;
	size_t integer_area_size = sizeof(u8);
	char *buffer = NULL;
	u8 *defaultType;
	int ret;

	mutex_lock(&wmi_priv.mutex);
	if (!wmi_priv.bios_attr_wdev) {
		ret = -ENODEV;
		goto out;
	}

	security_area_size = calculate_security_buffer(wmi_priv.current_admin_password);
	buffer_size = security_area_size + integer_area_size;
	buffer = kzalloc(buffer_size, GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		goto out;
	}

	/* build security area */
	populate_security_buffer(buffer, wmi_priv.current_admin_password);

	/* single-byte payload selecting which defaults to restore */
	defaultType = buffer + security_area_size;
	*defaultType = deftype;

	ret = call_biosattributes_interface(wmi_priv.bios_attr_wdev, buffer, buffer_size,
					    SETBIOSDEFAULTS_METHOD_ID);
	if (ret)
		dev_err(&wmi_priv.bios_attr_wdev->dev, "reset BIOS defaults failed: %d\n", ret);

	/* kfree precedes the label: the -ENOMEM path has nothing to free */
	kfree(buffer);
out:
	mutex_unlock(&wmi_priv.mutex);
	return ret;
}
/*
 * Publish the setter WMI device handle; the sysfs store paths read it
 * under the same mutex, so they see either a valid device or NULL.
 */
static int bios_attr_set_interface_probe(struct wmi_device *wdev, const void *context)
{
	mutex_lock(&wmi_priv.mutex);
	wmi_priv.bios_attr_wdev = wdev;
	mutex_unlock(&wmi_priv.mutex);
	return 0;
}
/* clear the device handle so in-flight store paths fail with -ENODEV */
static void bios_attr_set_interface_remove(struct wmi_device *wdev)
{
	mutex_lock(&wmi_priv.mutex);
	wmi_priv.bios_attr_wdev = NULL;
	mutex_unlock(&wmi_priv.mutex);
}
/* match only the Dell BIOS-attributes setter interface GUID */
static const struct wmi_device_id bios_attr_set_interface_id_table[] = {
	{ .guid_string = DELL_WMI_BIOS_ATTRIBUTES_INTERFACE_GUID },
	{ },
};

static struct wmi_driver bios_attr_set_interface_driver = {
	.driver = {
		.name = DRIVER_NAME
	},
	.probe = bios_attr_set_interface_probe,
	.remove = bios_attr_set_interface_remove,
	.id_table = bios_attr_set_interface_id_table,
};

/* called from sysman_init() to register the setter WMI driver */
int init_bios_attr_set_interface(void)
{
	return wmi_driver_register(&bios_attr_set_interface_driver);
}

/* counterpart teardown, called from sysman_exit() and init error paths */
void exit_bios_attr_set_interface(void)
{
	wmi_driver_unregister(&bios_attr_set_interface_driver);
}

MODULE_DEVICE_TABLE(wmi, bios_attr_set_interface_id_table);
| linux-master | drivers/platform/x86/dell/dell-wmi-sysman/biosattr-interface.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Functions corresponding to enumeration type attributes under
* BIOS Enumeration GUID for use with dell-wmi-sysman
*
* Copyright (c) 2020 Dell Inc.
*/
#include "dell-wmi-sysman.h"
/* generates get_enumeration_instance_id() from the kobject name */
get_instance_id(enumeration);

/**
 * current_value_show() - Show the current value of an enumeration attribute
 * @kobj: kobject for the attribute-name directory
 * @attr: the current_value attribute
 * @buf: PAGE_SIZE output buffer
 *
 * Return: bytes written, or a negative errno.
 */
static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	int instance_id = get_enumeration_instance_id(kobj);
	union acpi_object *obj;
	ssize_t ret = -EINVAL;

	if (instance_id < 0)
		return instance_id;

	/* need to use specific instance_id and guid combination to get right data */
	obj = get_wmiobj_pointer(instance_id, DELL_WMI_BIOS_ENUMERATION_ATTRIBUTE_GUID);
	if (!obj)
		return -EIO;

	if (obj->package.elements[CURRENT_VAL].type == ACPI_TYPE_STRING)
		ret = snprintf(buf, PAGE_SIZE, "%s\n",
			       obj->package.elements[CURRENT_VAL].string.pointer);

	kfree(obj);
	return ret;
}
/**
* validate_enumeration_input() - Validate input of current_value against possible values
* @instance_id: The instance on which input is validated
* @buf: Input value
*/
/**
 * validate_enumeration_input() - Validate input of current_value against possible values
 * @instance_id: The instance on which input is validated
 * @buf: Input value
 *
 * Walks the semicolon-separated possible_values list; a case-insensitive
 * match accepts the input.
 *
 * Return: 0 on a match, -EINVAL if no value matches, -ENOMEM on
 * allocation failure.
 */
static int validate_enumeration_input(int instance_id, const char *buf)
{
	char *dup, *walk, *token;
	int ret = -EINVAL;

	/* strsep() modifies its argument, so work on a copy */
	dup = kstrdup(wmi_priv.enumeration_data[instance_id].possible_values, GFP_KERNEL);
	if (!dup)
		return -ENOMEM;

	walk = dup;
	while ((token = strsep(&walk, ";")) != NULL) {
		if (*token == '\0')
			continue;
		if (strcasecmp(token, buf) == 0) {
			ret = 0;
			break;
		}
	}

	kfree(dup);
	return ret;
}
/*
 * Per-property sysfs files for each enumeration attribute instance;
 * show()/store() bodies come from the attribute_* helper macros.
 */
attribute_s_property_show(display_name_language_code, enumeration);
static struct kobj_attribute displ_langcode =
	__ATTR_RO(display_name_language_code);

attribute_s_property_show(display_name, enumeration);
static struct kobj_attribute displ_name =
	__ATTR_RO(display_name);

attribute_s_property_show(default_value, enumeration);
static struct kobj_attribute default_val =
	__ATTR_RO(default_value);

/* current_value is root-only read/write (0600) */
attribute_property_store(current_value, enumeration);
static struct kobj_attribute current_val =
	__ATTR_RW_MODE(current_value, 0600);

attribute_s_property_show(dell_modifier, enumeration);
static struct kobj_attribute modifier =
	__ATTR_RO(dell_modifier);

attribute_s_property_show(dell_value_modifier, enumeration);
static struct kobj_attribute value_modfr =
	__ATTR_RO(dell_value_modifier);

/* semicolon-separated list of accepted current_value inputs */
attribute_s_property_show(possible_values, enumeration);
static struct kobj_attribute poss_val =
	__ATTR_RO(possible_values);
/**
 * type_show() - Report the attribute class ("enumeration") to userspace
 *
 * Uses sysfs_emit() rather than raw sprintf() per current sysfs
 * guidance (PAGE_SIZE-bounded output).
 */
static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr,
			 char *buf)
{
	return sysfs_emit(buf, "enumeration\n");
}
static struct kobj_attribute type =
__ATTR_RO(type);
static struct attribute *enumeration_attrs[] = {
&displ_langcode.attr,
&displ_name.attr,
&default_val.attr,
¤t_val.attr,
&modifier.attr,
&value_modfr.attr,
&poss_val.attr,
&type.attr,
NULL,
};
static const struct attribute_group enumeration_attr_group = {
.attrs = enumeration_attrs,
};
/**
 * alloc_enum_data() - Allocate per-instance storage for enumeration attributes
 *
 * Counts the instances exposed by the BIOS enumeration GUID and
 * allocates a zeroed enumeration_data array for them; the count is
 * reset to zero on failure so teardown sees a consistent state.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
int alloc_enum_data(void)
{
	wmi_priv.enumeration_instances_count =
		get_instance_count(DELL_WMI_BIOS_ENUMERATION_ATTRIBUTE_GUID);
	wmi_priv.enumeration_data = kcalloc(wmi_priv.enumeration_instances_count,
					    sizeof(struct enumeration_data), GFP_KERNEL);
	if (wmi_priv.enumeration_data)
		return 0;

	wmi_priv.enumeration_instances_count = 0;
	return -ENOMEM;
}
/**
* populate_enum_data() - Populate all properties of an instance under enumeration attribute
* @enumeration_obj: ACPI object with enumeration data
* @instance_id: The instance to enumerate
* @attr_name_kobj: The parent kernel object
* @enum_property_count: Total properties count under enumeration type
*/
/**
 * populate_enum_data() - Populate all properties of an instance under enumeration attribute
 * @enumeration_obj: ACPI object with enumeration data
 * @instance_id: The instance to enumerate
 * @attr_name_kobj: The parent kernel object
 * @enum_property_count: Total properties count under enumeration type
 *
 * Fixed-position properties come first; they are followed by two
 * variable-length lists (value modifiers, then possible values), each
 * preceded by an integer element carrying its count. Every element's
 * ACPI type is validated and @enum_property_count bounds the walk.
 *
 * Return: 0 on success, -EINVAL on malformed data, or the
 * sysfs_create_group() error.
 */
int populate_enum_data(union acpi_object *enumeration_obj, int instance_id,
		       struct kobject *attr_name_kobj, u32 enum_property_count)
{
	int i, next_obj, value_modifier_count, possible_values_count;

	wmi_priv.enumeration_data[instance_id].attr_name_kobj = attr_name_kobj;
	if (check_property_type(enumeration, ATTR_NAME, ACPI_TYPE_STRING))
		return -EINVAL;
	strlcpy_attr(wmi_priv.enumeration_data[instance_id].attribute_name,
		     enumeration_obj[ATTR_NAME].string.pointer);
	if (check_property_type(enumeration, DISPL_NAME_LANG_CODE, ACPI_TYPE_STRING))
		return -EINVAL;
	strlcpy_attr(wmi_priv.enumeration_data[instance_id].display_name_language_code,
		     enumeration_obj[DISPL_NAME_LANG_CODE].string.pointer);
	if (check_property_type(enumeration, DISPLAY_NAME, ACPI_TYPE_STRING))
		return -EINVAL;
	strlcpy_attr(wmi_priv.enumeration_data[instance_id].display_name,
		     enumeration_obj[DISPLAY_NAME].string.pointer);
	if (check_property_type(enumeration, DEFAULT_VAL, ACPI_TYPE_STRING))
		return -EINVAL;
	strlcpy_attr(wmi_priv.enumeration_data[instance_id].default_value,
		     enumeration_obj[DEFAULT_VAL].string.pointer);
	if (check_property_type(enumeration, MODIFIER, ACPI_TYPE_STRING))
		return -EINVAL;
	strlcpy_attr(wmi_priv.enumeration_data[instance_id].dell_modifier,
		     enumeration_obj[MODIFIER].string.pointer);

	next_obj = MODIFIER + 1;

	if (next_obj >= enum_property_count)
		return -EINVAL;
	/* integer elements are read back through the union's string.pointer member */
	if (check_property_type(enumeration, next_obj, ACPI_TYPE_INTEGER))
		return -EINVAL;
	value_modifier_count = (uintptr_t)enumeration_obj[next_obj++].string.pointer;

	for (i = 0; i < value_modifier_count; i++) {
		if (next_obj >= enum_property_count)
			return -EINVAL;
		if (check_property_type(enumeration, next_obj, ACPI_TYPE_STRING))
			return -EINVAL;
		/*
		 * NOTE(review): dell_value_modifier is a fixed-size buffer and
		 * strcat() is unbounded here - this assumes the BIOS-provided
		 * list fits; confirm the buffer size covers worst-case data.
		 */
		strcat(wmi_priv.enumeration_data[instance_id].dell_value_modifier,
			enumeration_obj[next_obj++].string.pointer);
		strcat(wmi_priv.enumeration_data[instance_id].dell_value_modifier, ";");
	}

	if (next_obj >= enum_property_count)
		return -EINVAL;
	if (check_property_type(enumeration, next_obj, ACPI_TYPE_INTEGER))
		return -EINVAL;
	possible_values_count = (uintptr_t) enumeration_obj[next_obj++].string.pointer;

	for (i = 0; i < possible_values_count; i++) {
		if (next_obj >= enum_property_count)
			return -EINVAL;
		if (check_property_type(enumeration, next_obj, ACPI_TYPE_STRING))
			return -EINVAL;
		/* same unbounded-strcat caveat as dell_value_modifier above */
		strcat(wmi_priv.enumeration_data[instance_id].possible_values,
			enumeration_obj[next_obj++].string.pointer);
		strcat(wmi_priv.enumeration_data[instance_id].possible_values, ";");
	}

	return sysfs_create_group(attr_name_kobj, &enumeration_attr_group);
}
/**
* exit_enum_attributes() - Clear all attribute data
*
* Clears all data allocated for this group of attributes
*/
/**
 * exit_enum_attributes() - Clear all attribute data
 *
 * Removes the sysfs group from every populated enumeration attribute
 * instance and releases the backing array.
 */
void exit_enum_attributes(void)
{
	int i;

	for (i = 0; i < wmi_priv.enumeration_instances_count; i++) {
		struct kobject *kobj = wmi_priv.enumeration_data[i].attr_name_kobj;

		/* only fully populated instances have a kobject */
		if (kobj)
			sysfs_remove_group(kobj, &enumeration_attr_group);
	}
	wmi_priv.enumeration_instances_count = 0;

	kfree(wmi_priv.enumeration_data);
	wmi_priv.enumeration_data = NULL;
}
| linux-master | drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Common methods for use with dell-wmi-sysman
*
* Copyright (c) 2020 Dell Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/fs.h>
#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/wmi.h>
#include "dell-wmi-sysman.h"
#include "../../firmware_attributes_class.h"
#define MAX_TYPES 4
#include <linux/nls.h>
/* shared driver state; the mutex guards the WMI device handles and buffers */
struct wmi_sysman_priv wmi_priv = {
	.mutex = __MUTEX_INITIALIZER(wmi_priv.mutex),
};

/* reset bios to defaults */
static const char * const reset_types[] = {"builtinsafe", "lastknowngood", "factory", "custom"};
/* index into reset_types of the last successful reset; -1 = never reset */
static int reset_option = -1;
/* firmware-attributes class under which the device node is created */
static struct class *fw_attr_class;
/**
* populate_string_buffer() - populates a string buffer
* @buffer: the start of the destination buffer
* @buffer_len: length of the destination buffer
* @str: the string to insert into buffer
*/
/**
 * populate_string_buffer() - populates a string buffer
 * @buffer: the start of the destination buffer
 * @buffer_len: length of the destination buffer
 * @str: the string to insert into buffer
 *
 * Produces the layout the firmware expects: a leading u16 byte-length
 * field followed by the UTF-16 encoding of @str (no terminator).
 *
 * Return: total bytes used (length field + payload) or a negative errno.
 */
ssize_t populate_string_buffer(char *buffer, size_t buffer_len, const char *str)
{
	u16 *length = (u16 *)buffer;
	u16 *target = length + 1;	/* payload starts right after the length field */
	int ret;

	ret = utf8s_to_utf16s(str, strlen(str), UTF16_HOST_ENDIAN,
			      target, buffer_len - sizeof(u16));
	if (ret < 0) {
		dev_err(wmi_priv.class_dev, "UTF16 conversion failed\n");
		return ret;
	}

	/* the length field is only 16 bits wide; reject anything larger */
	if ((ret * sizeof(u16)) > U16_MAX) {
		dev_err(wmi_priv.class_dev, "Error string too long\n");
		return -ERANGE;
	}

	*length = ret * sizeof(u16);	/* byte count, not UTF-16 unit count */
	return sizeof(u16) + *length;
}
/**
* calculate_string_buffer() - determines size of string buffer for use with BIOS communication
* @str: the string to calculate based upon
*
*/
/**
 * calculate_string_buffer() - determines size of string buffer for use with BIOS communication
 * @str: the string to calculate based upon
 *
 * Return: bytes needed for a u16 length field plus one UTF-16 code unit
 * per input character.
 */
size_t calculate_string_buffer(const char *str)
{
	/* (length field) + (one u16 per input char) == u16 * (len + 1) */
	return sizeof(u16) * (strlen(str) + 1);
}
/**
* calculate_security_buffer() - determines size of security buffer for authentication scheme
* @authentication: the authentication content
*
* Currently only supported type is Admin password
*/
/**
 * calculate_security_buffer() - determines size of security buffer for authentication scheme
 * @authentication: the authentication content
 *
 * Currently only supported type is Admin password. The payload is
 * padded to an even byte count when a password is present.
 */
size_t calculate_security_buffer(char *authentication)
{
	size_t len = strlen(authentication);

	/* two u32 header words (security type + length) are always present */
	if (len == 0)
		return sizeof(u32) * 2;

	return sizeof(u32) * 2 + len + (len & 1);
}
/**
* populate_security_buffer() - builds a security buffer for authentication scheme
* @buffer: the buffer to populate
* @authentication: the authentication content
*
* Currently only supported type is PLAIN TEXT
*/
/**
 * populate_security_buffer() - builds a security buffer for authentication scheme
 * @buffer: the buffer to populate
 * @authentication: the authentication content
 *
 * Currently only supported type is PLAIN TEXT.
 *
 * Layout written: u32 security type (1 = plain-text password, 0 = none),
 * u32 password byte length, then the password bytes themselves.
 */
void populate_security_buffer(char *buffer, char *authentication)
{
	char *auth = buffer + sizeof(u32) * 2;	/* password bytes follow the two header words */
	u32 *sectype = (u32 *) buffer;
	u32 *seclen = sectype + 1;

	*sectype = strlen(authentication) > 0 ? 1 : 0;
	*seclen = strlen(authentication);

	/* plain text */
	if (strlen(authentication) > 0)
		memcpy(auth, authentication, *seclen);
}
/**
* map_wmi_error() - map errors from WMI methods to kernel error codes
* @error_code: integer error code returned from Dell's firmware
*/
/**
 * map_wmi_error() - map errors from WMI methods to kernel error codes
 * @error_code: integer error code returned from Dell's firmware
 *
 * Firmware codes 0..6 map to success/-EIO/-EINVAL/-EACCES/-EOPNOTSUPP/
 * -ENOMEM/-EPROTO; anything else is treated as an unspecified -EIO.
 */
int map_wmi_error(int error_code)
{
	static const int wmi_errno_map[] = {
		0,		/* success */
		-EIO,		/* failed */
		-EINVAL,	/* invalid parameter */
		-EACCES,	/* access denied */
		-EOPNOTSUPP,	/* not supported */
		-ENOMEM,	/* memory error */
		-EPROTO,	/* protocol error */
	};

	if (error_code >= 0 &&
	    error_code < (int)(sizeof(wmi_errno_map) / sizeof(wmi_errno_map[0])))
		return wmi_errno_map[error_code];

	/* unspecified error */
	return -EIO;
}
/**
* reset_bios_show() - sysfs implementaton for read reset_bios
* @kobj: Kernel object for this attribute
* @attr: Kernel object attribute
* @buf: The buffer to display to userspace
*/
/**
 * reset_bios_show() - sysfs implementaton for read reset_bios
 * @kobj: Kernel object for this attribute
 * @attr: Kernel object attribute
 * @buf: The buffer to display to userspace
 *
 * Prints all supported reset types on one line; the one selected by the
 * last successful reset (if any) is wrapped in brackets.
 */
static ssize_t reset_bios_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	char *start = buf;
	int i;

	for (i = 0; i < MAX_TYPES; i++) {
		if (i == reset_option)
			buf += sprintf(buf, "[%s] ", reset_types[i]);
		else
			buf += sprintf(buf, "%s ", reset_types[i]);
	}
	buf += sprintf(buf, "\n");
	/* bytes written == distance advanced from the original buffer start */
	return buf-start;
}
/**
* reset_bios_store() - sysfs implementaton for write reset_bios
* @kobj: Kernel object for this attribute
* @attr: Kernel object attribute
* @buf: The buffer from userspace
* @count: the size of the buffer from userspace
*/
/**
 * reset_bios_store() - sysfs implementaton for write reset_bios
 * @kobj: Kernel object for this attribute
 * @attr: Kernel object attribute
 * @buf: The buffer from userspace
 * @count: the size of the buffer from userspace
 *
 * Matches the input against reset_types and asks the firmware to
 * restore that set of defaults; on success the selection is remembered
 * for reset_bios_show().
 */
static ssize_t reset_bios_store(struct kobject *kobj,
				struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret;
	int type = sysfs_match_string(reset_types, buf);

	if (type < 0)
		return type;

	ret = set_bios_defaults(type);
	pr_debug("reset all attributes request type %d: %d\n", type, ret);
	if (ret)
		return ret;

	reset_option = type;
	return count;
}
/**
* pending_reboot_show() - sysfs implementaton for read pending_reboot
* @kobj: Kernel object for this attribute
* @attr: Kernel object attribute
* @buf: The buffer to display to userspace
*
* Stores default value as 0
* When current_value is changed this attribute is set to 1 to notify reboot may be required
*/
/**
 * pending_reboot_show() - sysfs implementaton for read pending_reboot
 * @kobj: Kernel object for this attribute
 * @attr: Kernel object attribute
 * @buf: The buffer to display to userspace
 *
 * Stores default value as 0.
 * When current_value is changed this attribute is set to 1 to notify
 * reboot may be required (flagged by call_biosattributes_interface()).
 */
static ssize_t pending_reboot_show(struct kobject *kobj, struct kobj_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%d\n", wmi_priv.pending_changes);
}

/* top-level files exposed under the attributes kset */
static struct kobj_attribute reset_bios = __ATTR_RW(reset_bios);
static struct kobj_attribute pending_reboot = __ATTR_RO(pending_reboot);
/**
* create_attributes_level_sysfs_files() - Creates reset_bios and
* pending_reboot attributes
*/
/**
 * create_attributes_level_sysfs_files() - Creates reset_bios and
 * pending_reboot attributes
 *
 * Return: 0 on success or the first sysfs_create_file() error.
 */
static int create_attributes_level_sysfs_files(void)
{
	int ret;

	ret = sysfs_create_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr);
	if (ret)
		return ret;

	return sysfs_create_file(&wmi_priv.main_dir_kset->kobj, &pending_reboot.attr);
}
/* dispatch a sysfs read to the embedded kobj_attribute's show() callback */
static ssize_t wmi_sysman_attr_show(struct kobject *kobj, struct attribute *attr,
				    char *buf)
{
	struct kobj_attribute *kattr = container_of(attr, struct kobj_attribute, attr);

	if (!kattr->show)
		return -EIO;

	return kattr->show(kobj, kattr, buf);
}
/* dispatch a sysfs write to the embedded kobj_attribute's store() callback */
static ssize_t wmi_sysman_attr_store(struct kobject *kobj, struct attribute *attr,
				     const char *buf, size_t count)
{
	struct kobj_attribute *kattr = container_of(attr, struct kobj_attribute, attr);

	if (!kattr->store)
		return -EIO;

	return kattr->store(kobj, kattr, buf, count);
}
/* route generic sysfs accesses through the kobj_attribute callbacks */
static const struct sysfs_ops wmi_sysman_kobj_sysfs_ops = {
	.show = wmi_sysman_attr_show,
	.store = wmi_sysman_attr_store,
};

/* attribute-name kobjects are plain kzalloc'd; just free on last put */
static void attr_name_release(struct kobject *kobj)
{
	kfree(kobj);
}

static const struct kobj_type attr_name_ktype = {
	.release = attr_name_release,
	.sysfs_ops = &wmi_sysman_kobj_sysfs_ops,
};
/**
* strlcpy_attr - Copy a length-limited, NULL-terminated string with bound checks
* @dest: Where to copy the string to
* @src: Where to copy the string from
*/
/**
 * strlcpy_attr - Copy a length-limited, NULL-terminated string with bound checks
 * @dest: Where to copy the string to
 * @src: Where to copy the string from
 *
 * Copies only when the source fits in MAX_BUFF; an empty source leaves
 * @dest untouched (properties not applicable to an attribute may be
 * empty) and an oversized source is dropped with an error logged.
 */
void strlcpy_attr(char *dest, char *src)
{
	size_t len = strlen(src) + 1;

	if (len > 1 && len <= MAX_BUFF)
		strscpy(dest, src, len);

	/*len can be zero because any property not-applicable to attribute can
	 * be empty so check only for too long buffers and log error
	 */
	if (len > MAX_BUFF)
		pr_err("Source string returned from BIOS is out of bound!\n");
}
/**
* get_wmiobj_pointer() - Get Content of WMI block for particular instance
* @instance_id: WMI instance ID
* @guid_string: WMI GUID (in str form)
*
* Fetches the content for WMI block (instance_id) under GUID (guid_string)
* Caller must kfree the return
*/
/**
 * get_wmiobj_pointer() - Get Content of WMI block for particular instance
 * @instance_id: WMI instance ID
 * @guid_string: WMI GUID (in str form)
 *
 * Fetches the content for WMI block (instance_id) under GUID (guid_string).
 * Caller must kfree the return.
 */
union acpi_object *get_wmiobj_pointer(int instance_id, const char *guid_string)
{
	struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };

	if (ACPI_FAILURE(wmi_query_block(guid_string, instance_id, &out)))
		return NULL;

	return (union acpi_object *)out.pointer;
}
/**
* get_instance_count() - Compute total number of instances under guid_string
* @guid_string: WMI GUID (in string form)
*/
/**
 * get_instance_count() - Compute total number of instances under guid_string
 * @guid_string: WMI GUID (in string form)
 *
 * Return: the instance count; WMI-layer errors are treated as zero
 * instances so callers can allocate nothing and skip the type.
 */
int get_instance_count(const char *guid_string)
{
	int count = wmi_instance_count(guid_string);

	return count < 0 ? 0 : count;
}
/**
* alloc_attributes_data() - Allocate attributes data for a particular type
* @attr_type: Attribute type to allocate
*/
/**
 * alloc_attributes_data() - Allocate attributes data for a particular type
 * @attr_type: Attribute type to allocate
 *
 * Return: the type-specific allocator's result; unknown types allocate
 * nothing and report success.
 */
static int alloc_attributes_data(int attr_type)
{
	switch (attr_type) {
	case ENUM:
		return alloc_enum_data();
	case INT:
		return alloc_int_data();
	case STR:
		return alloc_str_data();
	case PO:
		return alloc_po_data();
	default:
		return 0;
	}
}
/**
* destroy_attribute_objs() - Free a kset of kobjects
* @kset: The kset to destroy
*
* Fress kobjects created for each attribute_name under attribute type kset
*/
/**
 * destroy_attribute_objs() - Free a kset of kobjects
 * @kset: The kset to destroy
 *
 * Frees kobjects created for each attribute_name under attribute type kset.
 * The _safe iterator is required: kobject_put() may release an entry and
 * unlink it from the kset list while we walk it.
 */
static void destroy_attribute_objs(struct kset *kset)
{
	struct kobject *pos, *next;

	list_for_each_entry_safe(pos, next, &kset->list, entry) {
		kobject_put(pos);
	}
}
/**
* release_attributes_data() - Clean-up all sysfs directories and files created
*/
/**
 * release_attributes_data() - Clean-up all sysfs directories and files created
 *
 * Tears down every attribute type first, then the two ksets that parent
 * their kobjects. Safe to call on a partially initialized state: each
 * kset pointer is checked and NULLed after unregistering.
 */
static void release_attributes_data(void)
{
	mutex_lock(&wmi_priv.mutex);
	exit_enum_attributes();
	exit_int_attributes();
	exit_str_attributes();
	exit_po_attributes();
	if (wmi_priv.authentication_dir_kset) {
		destroy_attribute_objs(wmi_priv.authentication_dir_kset);
		kset_unregister(wmi_priv.authentication_dir_kset);
		wmi_priv.authentication_dir_kset = NULL;
	}
	if (wmi_priv.main_dir_kset) {
		/* top-level files must go before the kset they live in */
		sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr);
		sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &pending_reboot.attr);
		destroy_attribute_objs(wmi_priv.main_dir_kset);
		kset_unregister(wmi_priv.main_dir_kset);
		wmi_priv.main_dir_kset = NULL;
	}
	mutex_unlock(&wmi_priv.mutex);
}
/**
* init_bios_attributes() - Initialize all attributes for a type
* @attr_type: The attribute type to initialize
* @guid: The WMI GUID associated with this type to initialize
*
* Initialiaze all 4 types of attributes enumeration, integer, string and password object.
* Populates each attrbute typ's respective properties under sysfs files
*/
/**
 * init_bios_attributes() - Initialize all attributes for a type
 * @attr_type: The attribute type to initialize
 * @guid: The WMI GUID associated with this type to initialize
 *
 * Initialiaze all 4 types of attributes enumeration, integer, string and
 * password object. Populates each attribute type's respective properties
 * under sysfs files. Instances are enumerated by incrementing
 * instance_id until the WMI query returns nothing.
 *
 * Return: 0 on success, a negative errno on a fatal error; malformed
 * individual instances are skipped, not fatal.
 */
static int init_bios_attributes(int attr_type, const char *guid)
{
	struct kobject *attr_name_kobj; //individual attribute names
	union acpi_object *obj = NULL;
	union acpi_object *elements;
	struct kobject *duplicate;
	struct kset *tmp_set;
	int min_elements;
	/* instance_id needs to be reset for each type GUID
	 * also, instance IDs are unique within GUID but not across
	 */
	int instance_id = 0;
	int retval = 0;

	retval = alloc_attributes_data(attr_type);
	if (retval)
		return retval;

	/* minimum ACPI package element count expected per attribute type */
	switch (attr_type) {
	case ENUM:	min_elements = 8;	break;
	case INT:	min_elements = 9;	break;
	case STR:	min_elements = 8;	break;
	case PO:	min_elements = 4;	break;
	default:
		pr_err("Error: Unknown attr_type: %d\n", attr_type);
		return -EINVAL;
	}

	/* need to use specific instance_id and guid combination to get right data */
	obj = get_wmiobj_pointer(instance_id, guid);
	if (!obj)
		return -ENODEV;

	mutex_lock(&wmi_priv.mutex);
	while (obj) {
		if (obj->type != ACPI_TYPE_PACKAGE) {
			pr_err("Error: Expected ACPI-package type, got: %d\n", obj->type);
			retval = -EIO;
			goto err_attr_init;
		}

		/* a short package only skips this instance, not the whole type */
		if (obj->package.count < min_elements) {
			pr_err("Error: ACPI-package does not have enough elements: %d < %d\n",
			       obj->package.count, min_elements);
			goto nextobj;
		}

		elements = obj->package.elements;

		/* sanity checking */
		if (elements[ATTR_NAME].type != ACPI_TYPE_STRING) {
			pr_debug("incorrect element type\n");
			goto nextobj;
		}
		if (strlen(elements[ATTR_NAME].string.pointer) == 0) {
			pr_debug("empty attribute found\n");
			goto nextobj;
		}

		/* password objects live under "authentication", the rest under "attributes" */
		if (attr_type == PO)
			tmp_set = wmi_priv.authentication_dir_kset;
		else
			tmp_set = wmi_priv.main_dir_kset;

		/* kset_find_obj() takes a reference on a hit; drop it before skipping */
		duplicate = kset_find_obj(tmp_set, elements[ATTR_NAME].string.pointer);
		if (duplicate) {
			pr_debug("Duplicate attribute name found - %s\n",
				elements[ATTR_NAME].string.pointer);
			kobject_put(duplicate);
			goto nextobj;
		}

		/* build attribute */
		attr_name_kobj = kzalloc(sizeof(*attr_name_kobj), GFP_KERNEL);
		if (!attr_name_kobj) {
			retval = -ENOMEM;
			goto err_attr_init;
		}

		attr_name_kobj->kset = tmp_set;
		/* on failure kobject_put() runs attr_name_release() and frees it */
		retval = kobject_init_and_add(attr_name_kobj, &attr_name_ktype, NULL, "%s",
					      elements[ATTR_NAME].string.pointer);
		if (retval) {
			kobject_put(attr_name_kobj);
			goto err_attr_init;
		}

		/* enumerate all of this attribute */
		switch (attr_type) {
		case ENUM:
			retval = populate_enum_data(elements, instance_id, attr_name_kobj,
						    obj->package.count);
			break;
		case INT:
			retval = populate_int_data(elements, instance_id, attr_name_kobj);
			break;
		case STR:
			retval = populate_str_data(elements, instance_id, attr_name_kobj);
			break;
		case PO:
			retval = populate_po_data(elements, instance_id, attr_name_kobj);
			break;
		default:
			break;
		}

		if (retval) {
			pr_debug("failed to populate %s\n",
				 elements[ATTR_NAME].string.pointer);
			goto err_attr_init;
		}

nextobj:
		/* free the current package and fetch the next instance */
		kfree(obj);
		instance_id++;
		obj = get_wmiobj_pointer(instance_id, guid);
	}
	mutex_unlock(&wmi_priv.mutex);
	return 0;

err_attr_init:
	mutex_unlock(&wmi_priv.mutex);
	kfree(obj);
	return retval;
}
/*
 * Module init: requires both the setter and password WMI interfaces to
 * have probed, then builds the firmware-attributes class device and the
 * attributes/authentication sysfs trees, populating every attribute
 * type from its WMI GUID. Error paths unwind in strict reverse order.
 */
static int __init sysman_init(void)
{
	int ret = 0;

	/* this firmware interface is Dell-specific; refuse other vendors */
	if (!dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "Dell System", NULL) &&
	    !dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "www.dell.com", NULL)) {
		pr_err("Unable to run on non-Dell system\n");
		return -ENODEV;
	}

	/* registering the WMI drivers synchronously runs their probes */
	ret = init_bios_attr_set_interface();
	if (ret)
		return ret;

	ret = init_bios_attr_pass_interface();
	if (ret)
		goto err_exit_bios_attr_set_interface;

	/* both device handles must have been filled in by the probes */
	if (!wmi_priv.bios_attr_wdev || !wmi_priv.password_attr_wdev) {
		pr_debug("failed to find set or pass interface\n");
		ret = -ENODEV;
		goto err_exit_bios_attr_pass_interface;
	}

	ret = fw_attributes_class_get(&fw_attr_class);
	if (ret)
		goto err_exit_bios_attr_pass_interface;

	wmi_priv.class_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0),
					   NULL, "%s", DRIVER_NAME);
	if (IS_ERR(wmi_priv.class_dev)) {
		ret = PTR_ERR(wmi_priv.class_dev);
		goto err_unregister_class;
	}

	wmi_priv.main_dir_kset = kset_create_and_add("attributes", NULL,
						     &wmi_priv.class_dev->kobj);
	if (!wmi_priv.main_dir_kset) {
		ret = -ENOMEM;
		goto err_destroy_classdev;
	}

	wmi_priv.authentication_dir_kset = kset_create_and_add("authentication", NULL,
							       &wmi_priv.class_dev->kobj);
	if (!wmi_priv.authentication_dir_kset) {
		ret = -ENOMEM;
		goto err_release_attributes_data;
	}

	ret = create_attributes_level_sysfs_files();
	if (ret) {
		pr_debug("could not create reset BIOS attribute\n");
		goto err_release_attributes_data;
	}

	/* populate every attribute type from its dedicated WMI GUID */
	ret = init_bios_attributes(ENUM, DELL_WMI_BIOS_ENUMERATION_ATTRIBUTE_GUID);
	if (ret) {
		pr_debug("failed to populate enumeration type attributes\n");
		goto err_release_attributes_data;
	}

	ret = init_bios_attributes(INT, DELL_WMI_BIOS_INTEGER_ATTRIBUTE_GUID);
	if (ret) {
		pr_debug("failed to populate integer type attributes\n");
		goto err_release_attributes_data;
	}

	ret = init_bios_attributes(STR, DELL_WMI_BIOS_STRING_ATTRIBUTE_GUID);
	if (ret) {
		pr_debug("failed to populate string type attributes\n");
		goto err_release_attributes_data;
	}

	ret = init_bios_attributes(PO, DELL_WMI_BIOS_PASSOBJ_ATTRIBUTE_GUID);
	if (ret) {
		pr_debug("failed to populate pass object type attributes\n");
		goto err_release_attributes_data;
	}

	return 0;

	/* unwind in reverse order of construction */
err_release_attributes_data:
	release_attributes_data();

err_destroy_classdev:
	device_destroy(fw_attr_class, MKDEV(0, 0));

err_unregister_class:
	fw_attributes_class_put();

err_exit_bios_attr_pass_interface:
	exit_bios_attr_pass_interface();

err_exit_bios_attr_set_interface:
	exit_bios_attr_set_interface();

	return ret;
}
/* module teardown: mirror sysman_init() in reverse order */
static void __exit sysman_exit(void)
{
	release_attributes_data();
	device_destroy(fw_attr_class, MKDEV(0, 0));
	fw_attributes_class_put();
	exit_bios_attr_set_interface();
	exit_bios_attr_pass_interface();
}
module_init(sysman_init);
module_exit(sysman_exit);
MODULE_AUTHOR("Mario Limonciello <[email protected]>");
MODULE_AUTHOR("Prasanth Ksr <[email protected]>");
MODULE_AUTHOR("Divya Bharathi <[email protected]>");
MODULE_DESCRIPTION("Dell platform setting control interface");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/x86/dell/dell-wmi-sysman/sysman.c |
// SPDX-License-Identifier: GPL-2.0
/*
* AMD HSMP Platform Driver
* Copyright (c) 2022, AMD.
* All Rights Reserved.
*
* This file provides a device implementation for HSMP interface
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/amd_hsmp.h>
#include <asm/amd_nb.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/semaphore.h>
#define DRIVER_NAME "amd_hsmp"
#define DRIVER_VERSION "1.0"
/* HSMP Status / Error codes */
#define HSMP_STATUS_NOT_READY 0x00
#define HSMP_STATUS_OK 0x01
#define HSMP_ERR_INVALID_MSG 0xFE
#define HSMP_ERR_INVALID_INPUT 0xFF
/* Timeout in millsec */
#define HSMP_MSG_TIMEOUT 100
#define HSMP_SHORT_SLEEP 1
#define HSMP_WR true
#define HSMP_RD false
/*
* To access specific HSMP mailbox register, s/w writes the SMN address of HSMP mailbox
* register into the SMN_INDEX register, and reads/writes the SMN_DATA reg.
* Below are required SMN address for HSMP Mailbox register offsets in SMU address space
*/
#define SMN_HSMP_MSG_ID 0x3B10534
#define SMN_HSMP_MSG_RESP 0x3B10980
#define SMN_HSMP_MSG_DATA 0x3B109E0
#define HSMP_INDEX_REG 0xc4
#define HSMP_DATA_REG 0xc8
static struct semaphore *hsmp_sem;
static struct miscdevice hsmp_device;
/*
 * Indirect SMN access: latch the target SMN address into the index
 * register, then read or write the data register.
 *
 * Returns 0 on success or the PCI config-access error.
 */
static int amd_hsmp_rdwr(struct pci_dev *root, u32 address,
			 u32 *value, bool write)
{
	int ret;

	ret = pci_write_config_dword(root, HSMP_INDEX_REG, address);
	if (ret)
		return ret;

	if (write)
		return pci_write_config_dword(root, HSMP_DATA_REG, *value);

	return pci_read_config_dword(root, HSMP_DATA_REG, value);
}
/*
* Send a message to the HSMP port via PCI-e config space registers.
*
* The caller is expected to zero out any unused arguments.
* If a response is expected, the number of response words should be greater than 0.
*
* Returns 0 for success and populates the requested number of arguments.
* Returns a negative error code for failure.
*/
/*
 * Send a message to the HSMP port via PCI-e config space registers.
 *
 * Sequence: clear the response register, write the arguments, write the
 * message ID (which triggers the SMU), poll the response register until
 * it leaves HSMP_STATUS_NOT_READY or the timeout expires, then read back
 * any response words into msg->args[].
 *
 * The caller is expected to zero out any unused arguments.
 * If a response is expected, the number of response words should be greater than 0.
 *
 * Returns 0 for success and populates the requested number of arguments.
 * Returns a negative error code for failure.
 */
static int __hsmp_send_message(struct pci_dev *root, struct hsmp_message *msg)
{
	unsigned long timeout, short_sleep;
	u32 mbox_status;
	u32 index;
	int ret;

	/* Clear the status register */
	mbox_status = HSMP_STATUS_NOT_READY;
	ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_RESP, &mbox_status, HSMP_WR);
	if (ret) {
		pr_err("Error %d clearing mailbox status register\n", ret);
		return ret;
	}

	index = 0;
	/* Write any message arguments (one 32-bit register per argument) */
	while (index < msg->num_args) {
		ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_DATA + (index << 2),
				    &msg->args[index], HSMP_WR);
		if (ret) {
			pr_err("Error %d writing message argument %d\n", ret, index);
			return ret;
		}
		index++;
	}

	/* Write the message ID which starts the operation */
	ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_ID, &msg->msg_id, HSMP_WR);
	if (ret) {
		pr_err("Error %d writing message ID %u\n", ret, msg->msg_id);
		return ret;
	}

	/*
	 * Depending on when the trigger write completes relative to the SMU
	 * firmware 1 ms cycle, the operation may take from tens of us to 1 ms
	 * to complete. Some operations may take more. Therefore we will try
	 * a few short duration sleeps and switch to long sleeps if we don't
	 * succeed quickly.
	 */
	short_sleep = jiffies + msecs_to_jiffies(HSMP_SHORT_SLEEP);
	timeout	= jiffies + msecs_to_jiffies(HSMP_MSG_TIMEOUT);

	while (time_before(jiffies, timeout)) {
		ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_RESP, &mbox_status, HSMP_RD);
		if (ret) {
			pr_err("Error %d reading mailbox status\n", ret);
			return ret;
		}
		if (mbox_status != HSMP_STATUS_NOT_READY)
			break;
		if (time_before(jiffies, short_sleep))
			usleep_range(50, 100);
		else
			usleep_range(1000, 2000);
	}

	/*
	 * NOTE(review): if the final usleep_range() overruns the timeout, the
	 * loop exits without a last status read, so a response that arrived
	 * during that sleep would be reported as -ETIMEDOUT — consider one
	 * extra read after the loop; confirm against the HSMP spec.
	 */
	if (unlikely(mbox_status == HSMP_STATUS_NOT_READY)) {
		return -ETIMEDOUT;
	} else if (unlikely(mbox_status == HSMP_ERR_INVALID_MSG)) {
		return -ENOMSG;
	} else if (unlikely(mbox_status == HSMP_ERR_INVALID_INPUT)) {
		return -EINVAL;
	} else if (unlikely(mbox_status != HSMP_STATUS_OK)) {
		pr_err("Message ID %u unknown failure (status = 0x%X)\n",
		       msg->msg_id, mbox_status);
		return -EIO;
	}

	/*
	 * SMU has responded OK. Read response data.
	 * SMU reads the input arguments from eight 32 bit registers starting
	 * from SMN_HSMP_MSG_DATA and writes the response data to the same
	 * SMN_HSMP_MSG_DATA address.
	 * We copy the response data if any, back to the args[].
	 */
	index = 0;
	while (index < msg->response_sz) {
		ret = amd_hsmp_rdwr(root, SMN_HSMP_MSG_DATA + (index << 2),
				    &msg->args[index], HSMP_RD);
		if (ret) {
			pr_err("Error %d reading response %u for message ID:%u\n",
			       ret, index, msg->msg_id);
			break;
		}
		index++;
	}

	return ret;
}
/*
 * Sanity-check a user supplied HSMP message against the message
 * descriptor table.
 *
 * Return: 0 when valid, -ENOMSG for out-of-range or reserved IDs,
 * -EINVAL when the argument/response counts disagree with the spec.
 */
static int validate_message(struct hsmp_message *msg)
{
	/* Reject IDs outside the descriptor table bounds. */
	if (msg->msg_id < HSMP_TEST || msg->msg_id >= HSMP_MSG_ID_MAX)
		return -ENOMSG;

	/* Reserved slots are not valid messages. */
	if (hsmp_msg_desc_table[msg->msg_id].type == HSMP_RSVD)
		return -ENOMSG;

	/* Argument and response word counts must match the HSMP spec. */
	if (msg->num_args != hsmp_msg_desc_table[msg->msg_id].num_args)
		return -EINVAL;
	if (msg->response_sz != hsmp_msg_desc_table[msg->msg_id].response_sz)
		return -EINVAL;

	return 0;
}
/**
 * hsmp_send_message() - Validate and send an HSMP message to one socket
 * @msg: message to send; sock_ind selects the socket, args[] carries the
 *       payload and receives any response words
 *
 * Serializes access to the selected socket's mailbox with a per-socket
 * semaphore before handing the message to __hsmp_send_message().
 *
 * Return: 0 on success, negative error code on validation, contention or
 * mailbox failure.
 */
int hsmp_send_message(struct hsmp_message *msg)
{
	struct amd_northbridge *nb;
	int ret;

	if (!msg)
		return -EINVAL;

	nb = node_to_amd_nb(msg->sock_ind);
	if (!nb || !nb->root)
		return -ENODEV;

	ret = validate_message(msg);
	if (ret)
		return ret;

	/*
	 * The time taken by smu operation to complete is between
	 * 10us to 1ms. Sometime it may take more time.
	 * In SMP system timeout of 100 millisecs should
	 * be enough for the previous thread to finish the operation
	 */
	ret = down_timeout(&hsmp_sem[msg->sock_ind],
			   msecs_to_jiffies(HSMP_MSG_TIMEOUT));
	if (ret < 0)
		return ret;

	ret = __hsmp_send_message(nb->root, msg);

	up(&hsmp_sem[msg->sock_ind]);

	return ret;
}
EXPORT_SYMBOL_GPL(hsmp_send_message);
/*
 * Probe the HSMP mailbox on one socket with the HSMP_TEST command.
 * The firmware echoes the supplied argument incremented by one.
 *
 * Return: 0 on success, -ENODEV when the socket's northbridge is absent,
 * -EBADE on an unexpected response, or the mailbox error code.
 */
static int hsmp_test(u16 sock_ind, u32 value)
{
	struct hsmp_message msg = { 0 };
	struct amd_northbridge *nb;
	int ret;

	nb = node_to_amd_nb(sock_ind);
	if (!nb || !nb->root)
		return -ENODEV;

	/*
	 * Test the hsmp port by performing TEST command. The test message
	 * takes one argument and returns the value of that argument + 1.
	 */
	msg.msg_id	= HSMP_TEST;
	msg.num_args	= 1;
	msg.response_sz	= 1;
	msg.args[0]	= value;
	msg.sock_ind	= sock_ind;

	ret = __hsmp_send_message(nb->root, &msg);
	if (ret)
		return ret;

	/* Check the response value */
	if (msg.args[0] == value + 1)
		return 0;

	pr_err("Socket %d test message failed, Expected 0x%08X, received 0x%08X\n",
	       sock_ind, (value + 1), msg.args[0]);
	return -EBADE;
}
/*
 * ioctl entry point for /dev/hsmp.
 *
 * Copies a struct hsmp_message from userspace, enforces that the file's
 * open mode (read-only / write-only / read-write) matches the message
 * type (get vs set), forwards it via hsmp_send_message() and copies any
 * response back to userspace.
 *
 * Returns 0 on success or a negative error code.
 */
static long hsmp_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
	int __user *arguser = (int  __user *)arg;
	struct hsmp_message msg = { 0 };
	int ret;

	if (copy_struct_from_user(&msg, sizeof(msg), arguser, sizeof(struct hsmp_message)))
		return -EFAULT;

	/*
	 * Check msg_id is within the range of supported msg ids
	 * i.e within the array bounds of hsmp_msg_desc_table
	 */
	if (msg.msg_id < HSMP_TEST || msg.msg_id >= HSMP_MSG_ID_MAX)
		return -ENOMSG;

	switch (fp->f_mode & (FMODE_WRITE | FMODE_READ)) {
	case FMODE_WRITE:
		/*
		 * Device is opened in O_WRONLY mode
		 * Execute only set/configure commands
		 */
		if (hsmp_msg_desc_table[msg.msg_id].type != HSMP_SET)
			return -EINVAL;
		break;
	case FMODE_READ:
		/*
		 * Device is opened in O_RDONLY mode
		 * Execute only get/monitor commands
		 */
		if (hsmp_msg_desc_table[msg.msg_id].type != HSMP_GET)
			return -EINVAL;
		break;
	case FMODE_READ | FMODE_WRITE:
		/*
		 * Device is opened in O_RDWR mode
		 * Execute both get/monitor and set/configure commands
		 */
		break;
	default:
		return -EINVAL;
	}

	ret = hsmp_send_message(&msg);
	if (ret)
		return ret;

	if (hsmp_msg_desc_table[msg.msg_id].response_sz > 0) {
		/* Copy results back to user for get/monitor commands */
		if (copy_to_user(arguser, &msg, sizeof(struct hsmp_message)))
			return -EFAULT;
	}

	return 0;
}
/* Character device ops: all commands are issued through ioctl(). */
static const struct file_operations hsmp_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= hsmp_ioctl,
	.compat_ioctl	= hsmp_ioctl,
};
/*
 * Platform driver probe: allocate one mailbox semaphore per socket and
 * register the /dev/hsmp misc device.
 *
 * Returns 0 on success or a negative error code.
 */
static int hsmp_pltdrv_probe(struct platform_device *pdev)
{
	int i;

	/*
	 * devm_kcalloc() checks the count * size multiplication for
	 * overflow, unlike the open-coded multiply inside devm_kzalloc(),
	 * and is the idiomatic way to allocate a zeroed array.
	 */
	hsmp_sem = devm_kcalloc(&pdev->dev, amd_nb_num(),
				sizeof(struct semaphore), GFP_KERNEL);
	if (!hsmp_sem)
		return -ENOMEM;

	/* One semaphore serializes access to each socket's mailbox. */
	for (i = 0; i < amd_nb_num(); i++)
		sema_init(&hsmp_sem[i], 1);

	hsmp_device.name	= "hsmp_cdev";
	hsmp_device.minor	= MISC_DYNAMIC_MINOR;
	hsmp_device.fops	= &hsmp_fops;
	hsmp_device.parent	= &pdev->dev;
	hsmp_device.nodename	= "hsmp";
	hsmp_device.mode	= 0644;

	return misc_register(&hsmp_device);
}
/* Platform driver remove: tear down the misc device (semaphores are devm-managed). */
static void hsmp_pltdrv_remove(struct platform_device *pdev)
{
	misc_deregister(&hsmp_device);
}
/* Platform driver bound to the device created in hsmp_plt_init(). */
static struct platform_driver amd_hsmp_driver = {
	.probe		= hsmp_pltdrv_probe,
	.remove_new	= hsmp_pltdrv_remove,
	.driver		= {
		.name	= DRIVER_NAME,
	},
};
static struct platform_device *amd_hsmp_platdev;
/*
 * Module init: verify HSMP support (AMD family 0x19+), exercise the TEST
 * command on every socket, then register the platform driver and a
 * matching platform device so probe runs.
 *
 * Returns 0 on success or a negative error code.
 */
static int __init hsmp_plt_init(void)
{
	int ret = -ENODEV;
	u16 num_sockets;
	int i;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD || boot_cpu_data.x86 < 0x19) {
		pr_err("HSMP is not supported on Family:%x model:%x\n",
		       boot_cpu_data.x86, boot_cpu_data.x86_model);
		return ret;
	}

	/*
	 * amd_nb_num() returns number of SMN/DF interfaces present in the system
	 * if we have N SMN/DF interfaces that ideally means N sockets
	 */
	num_sockets = amd_nb_num();
	if (num_sockets == 0)
		return ret;

	/* Test the hsmp interface on each socket */
	for (i = 0; i < num_sockets; i++) {
		ret = hsmp_test(i, 0xDEADBEEF);
		if (ret) {
			pr_err("HSMP is not supported on Fam:%x model:%x\n",
			       boot_cpu_data.x86, boot_cpu_data.x86_model);
			pr_err("Or Is HSMP disabled in BIOS ?\n");
			return -EOPNOTSUPP;
		}
	}

	ret = platform_driver_register(&amd_hsmp_driver);
	if (ret)
		return ret;

	amd_hsmp_platdev = platform_device_alloc(DRIVER_NAME, PLATFORM_DEVID_NONE);
	if (!amd_hsmp_platdev) {
		ret = -ENOMEM;
		goto drv_unregister;
	}

	ret = platform_device_add(amd_hsmp_platdev);
	if (ret) {
		/* platform_device_put() frees the not-yet-added device */
		platform_device_put(amd_hsmp_platdev);
		goto drv_unregister;
	}

	return 0;

drv_unregister:
	platform_driver_unregister(&amd_hsmp_driver);
	return ret;
}
/* Module exit: unregister the platform device and driver in reverse order. */
static void __exit hsmp_plt_exit(void)
{
	platform_device_unregister(amd_hsmp_platdev);
	platform_driver_unregister(&amd_hsmp_driver);
}
device_initcall(hsmp_plt_init);
module_exit(hsmp_plt_exit);
MODULE_DESCRIPTION("AMD HSMP Platform Interface Driver");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/platform/x86/amd/hsmp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* AMD Platform Management Framework Driver
*
* Copyright (c) 2022, Advanced Micro Devices, Inc.
* All Rights Reserved.
*
* Author: Shyam Sundar S K <[email protected]>
*/
#include <linux/string_choices.h>
#include <linux/workqueue.h>
#include "pmf.h"
static struct cnqf_config config_store;
#ifdef CONFIG_AMD_PMF_DEBUG
/* Map an APMF_CNQF_* index to a printable name (debug builds only). */
static const char *state_as_str_cnqf(unsigned int state)
{
	switch (state) {
	case APMF_CNQF_TURBO:
		return "turbo";
	case APMF_CNQF_PERFORMANCE:
		return "performance";
	case APMF_CNQF_BALANCE:
		return "balance";
	case APMF_CNQF_QUIET:
		return "quiet";
	default:
		return "Unknown CnQF State";
	}
}

/* Dump the BIOS-provided dynamic slider defaults; idx 0 = AC, 1 = DC. */
static void amd_pmf_cnqf_dump_defaults(struct apmf_dyn_slider_output *data, int idx)
{
	int i;

	pr_debug("Dynamic Slider %s Defaults - BEGIN\n", idx ? "DC" : "AC");
	pr_debug("size: %u\n", data->size);
	pr_debug("flags: 0x%x\n", data->flags);

	/* Time constants */
	pr_debug("t_perf_to_turbo: %u ms\n", data->t_perf_to_turbo);
	pr_debug("t_balanced_to_perf: %u ms\n", data->t_balanced_to_perf);
	pr_debug("t_quiet_to_balanced: %u ms\n", data->t_quiet_to_balanced);
	pr_debug("t_balanced_to_quiet: %u ms\n", data->t_balanced_to_quiet);
	pr_debug("t_perf_to_balanced: %u ms\n", data->t_perf_to_balanced);
	pr_debug("t_turbo_to_perf: %u ms\n", data->t_turbo_to_perf);

	/* Per-mode power limits, skin temperature limits and fan index */
	for (i = 0 ; i < CNQF_MODE_MAX ; i++) {
		pr_debug("pfloor_%s: %u mW\n", state_as_str_cnqf(i), data->ps[i].pfloor);
		pr_debug("fppt_%s: %u mW\n", state_as_str_cnqf(i), data->ps[i].fppt);
		pr_debug("sppt_%s: %u mW\n", state_as_str_cnqf(i), data->ps[i].sppt);
		pr_debug("sppt_apuonly_%s: %u mW\n",
			 state_as_str_cnqf(i), data->ps[i].sppt_apu_only);
		pr_debug("spl_%s: %u mW\n", state_as_str_cnqf(i), data->ps[i].spl);
		pr_debug("stt_minlimit_%s: %u mW\n",
			 state_as_str_cnqf(i), data->ps[i].stt_min_limit);
		pr_debug("stt_skintemp_apu_%s: %u C\n", state_as_str_cnqf(i),
			 data->ps[i].stt_skintemp[STT_TEMP_APU]);
		pr_debug("stt_skintemp_hs2_%s: %u C\n", state_as_str_cnqf(i),
			 data->ps[i].stt_skintemp[STT_TEMP_HS2]);
		pr_debug("fan_id_%s: %u\n", state_as_str_cnqf(i), data->ps[i].fan_id);
	}

	pr_debug("Dynamic Slider %s Defaults - END\n", idx ? "DC" : "AC");
}
#else
/* No-op stub when PMF debug output is compiled out. */
static void amd_pmf_cnqf_dump_defaults(struct apmf_dyn_slider_output *data, int idx) {}
#endif
/*
 * Push the power limits and fan settings of CnQF mode @idx (for power
 * source @src) down to the SMU, and update the fan index via APMF when
 * supported.
 *
 * @table is unused by all callers, which pass NULL; the settings come
 * from the file-scope config_store. Always returns 0.
 */
static int amd_pmf_set_cnqf(struct amd_pmf_dev *dev, int src, int idx,
			    struct cnqf_config *table)
{
	struct power_table_control *pc;

	pc = &config_store.mode_set[src][idx].power_control;

	amd_pmf_send_cmd(dev, SET_SPL, false, pc->spl, NULL);
	amd_pmf_send_cmd(dev, SET_FPPT, false, pc->fppt, NULL);
	amd_pmf_send_cmd(dev, SET_SPPT, false, pc->sppt, NULL);
	amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, pc->sppt_apu_only, NULL);
	amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, pc->stt_min, NULL);
	amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, pc->stt_skin_temp[STT_TEMP_APU],
			 NULL);
	amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, pc->stt_skin_temp[STT_TEMP_HS2],
			 NULL);

	if (is_apmf_func_supported(dev, APMF_FUNC_SET_FAN_IDX))
		apmf_update_fan_idx(dev,
				    config_store.mode_set[src][idx].fan_control.manual,
				    config_store.mode_set[src][idx].fan_control.fan_id);

	return 0;
}
/*
 * Derive each transition's trigger threshold from the power floor of the
 * mode the transition departs from, for power source @src.
 */
static void amd_pmf_update_power_threshold(int src)
{
	static const struct {
		int transition;	/* CNQF_TRANSITION_* index */
		int mode;	/* CNQF_MODE_* whose floor becomes the threshold */
	} map[] = {
		{ CNQF_TRANSITION_TO_QUIET,			CNQF_MODE_BALANCE },
		{ CNQF_TRANSITION_TO_TURBO,			CNQF_MODE_PERFORMANCE },
		{ CNQF_TRANSITION_FROM_BALANCE_TO_PERFORMANCE,	CNQF_MODE_BALANCE },
		{ CNQF_TRANSITION_FROM_PERFORMANCE_TO_BALANCE,	CNQF_MODE_PERFORMANCE },
		{ CNQF_TRANSITION_FROM_QUIET_TO_BALANCE,	CNQF_MODE_QUIET },
		{ CNQF_TRANSITION_FROM_TURBO_TO_PERFORMANCE,	CNQF_MODE_TURBO },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		config_store.trans_param[src][map[i].transition].power_threshold =
			config_store.mode_set[src][map[i].mode].power_floor;
}
/* Human-readable name for a CNQF_MODE_* value, for log messages. */
static const char *state_as_str(unsigned int state)
{
	switch (state) {
	case CNQF_MODE_TURBO:
		return "TURBO";
	case CNQF_MODE_PERFORMANCE:
		return "PERFORMANCE";
	case CNQF_MODE_BALANCE:
		return "BALANCED";
	case CNQF_MODE_QUIET:
		return "QUIET";
	default:
		return "Unknown CnQF mode";
	}
}
static int amd_pmf_cnqf_get_power_source(struct amd_pmf_dev *dev)
{
if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) &&
is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC))
return amd_pmf_get_power_source();
else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC))
return POWER_SOURCE_DC;
else
return POWER_SOURCE_AC;
}
/*
 * CnQF state-machine tick, called periodically with the latest socket
 * power sample and the time since the previous tick.
 *
 * Accumulates power/time per transition; once a transition's window
 * (time_constant) elapses, compares the window's average power against
 * its threshold and marks it eligible ("priority"). The first eligible
 * transition in array order wins and its target mode is applied.
 *
 * Returns 0, or -EINVAL when the platform profile is not "balanced"
 * (user-selected modes take precedence over CnQF).
 */
int amd_pmf_trans_cnqf(struct amd_pmf_dev *dev, int socket_power, ktime_t time_lapsed_ms)
{
	struct cnqf_tran_params *tp;
	int src, i, j;
	u32 avg_power = 0;

	src = amd_pmf_cnqf_get_power_source(dev);

	if (is_pprof_balanced(dev)) {
		/* Re-assert the current mode's limits each tick. */
		amd_pmf_set_cnqf(dev, src, config_store.current_mode, NULL);
	} else {
		/*
		 * Return from here if the platform_profile is not balanced
		 * so that preference is given to user mode selection, rather
		 * than enforcing CnQF to run all the time (if enabled)
		 */
		return -EINVAL;
	}

	for (i = 0; i < CNQF_TRANSITION_MAX; i++) {
		config_store.trans_param[src][i].timer += time_lapsed_ms;
		config_store.trans_param[src][i].total_power += socket_power;
		config_store.trans_param[src][i].count++;

		tp = &config_store.trans_param[src][i];

#ifdef CONFIG_AMD_PMF_DEBUG
		/* avg_power is only non-zero once some window has completed */
		dev_dbg(dev->dev, "avg_power: %u mW total_power: %u mW count: %u timer: %u ms\n",
			avg_power, config_store.trans_param[src][i].total_power,
			config_store.trans_param[src][i].count,
			config_store.trans_param[src][i].timer);
#endif
		if (tp->timer >= tp->time_constant && tp->count) {
			avg_power = tp->total_power / tp->count;

			/* Reset the indices */
			tp->timer = 0;
			tp->total_power = 0;
			tp->count = 0;

			/* shifting_up compares against the threshold from above */
			if ((tp->shifting_up && avg_power >= tp->power_threshold) ||
			    (!tp->shifting_up && avg_power <= tp->power_threshold)) {
				tp->priority = true;
			} else {
				tp->priority = false;
			}
		}
	}

	dev_dbg(dev->dev, "[CNQF] Avg power: %u mW socket power: %u mW mode:%s\n",
		avg_power, socket_power, state_as_str(config_store.current_mode));

#ifdef CONFIG_AMD_PMF_DEBUG
	dev_dbg(dev->dev, "[CNQF] priority1: %u priority2: %u priority3: %u\n",
		config_store.trans_param[src][0].priority,
		config_store.trans_param[src][1].priority,
		config_store.trans_param[src][2].priority);

	dev_dbg(dev->dev, "[CNQF] priority4: %u priority5: %u priority6: %u\n",
		config_store.trans_param[src][3].priority,
		config_store.trans_param[src][4].priority,
		config_store.trans_param[src][5].priority);
#endif

	for (j = 0; j < CNQF_TRANSITION_MAX; j++) {
		/* apply the highest priority */
		if (config_store.trans_param[src][j].priority) {
			if (config_store.current_mode !=
			    config_store.trans_param[src][j].target_mode) {
				config_store.current_mode =
						config_store.trans_param[src][j].target_mode;
				dev_dbg(dev->dev, "Moving to Mode :%s\n",
					state_as_str(config_store.current_mode));
				amd_pmf_set_cnqf(dev, src,
						 config_store.current_mode, NULL);
			}
			break;
		}
	}

	return 0;
}
static void amd_pmf_update_trans_data(int idx, struct apmf_dyn_slider_output *out)
{
struct cnqf_tran_params *tp;
tp = &config_store.trans_param[idx][CNQF_TRANSITION_TO_QUIET];
tp->time_constant = out->t_balanced_to_quiet;
tp->target_mode = CNQF_MODE_QUIET;
tp->shifting_up = false;
tp = &config_store.trans_param[idx][CNQF_TRANSITION_FROM_BALANCE_TO_PERFORMANCE];
tp->time_constant = out->t_balanced_to_perf;
tp->target_mode = CNQF_MODE_PERFORMANCE;
tp->shifting_up = true;
tp = &config_store.trans_param[idx][CNQF_TRANSITION_FROM_QUIET_TO_BALANCE];
tp->time_constant = out->t_quiet_to_balanced;
tp->target_mode = CNQF_MODE_BALANCE;
tp->shifting_up = true;
tp = &config_store.trans_param[idx][CNQF_TRANSITION_FROM_PERFORMANCE_TO_BALANCE];
tp->time_constant = out->t_perf_to_balanced;
tp->target_mode = CNQF_MODE_BALANCE;
tp->shifting_up = false;
tp = &config_store.trans_param[idx][CNQF_TRANSITION_FROM_TURBO_TO_PERFORMANCE];
tp->time_constant = out->t_turbo_to_perf;
tp->target_mode = CNQF_MODE_PERFORMANCE;
tp->shifting_up = false;
tp = &config_store.trans_param[idx][CNQF_TRANSITION_TO_TURBO];
tp->time_constant = out->t_perf_to_turbo;
tp->target_mode = CNQF_MODE_TURBO;
tp->shifting_up = true;
}
static void amd_pmf_update_mode_set(int idx, struct apmf_dyn_slider_output *out)
{
struct cnqf_mode_settings *ms;
/* Quiet Mode */
ms = &config_store.mode_set[idx][CNQF_MODE_QUIET];
ms->power_floor = out->ps[APMF_CNQF_QUIET].pfloor;
ms->power_control.fppt = out->ps[APMF_CNQF_QUIET].fppt;
ms->power_control.sppt = out->ps[APMF_CNQF_QUIET].sppt;
ms->power_control.sppt_apu_only = out->ps[APMF_CNQF_QUIET].sppt_apu_only;
ms->power_control.spl = out->ps[APMF_CNQF_QUIET].spl;
ms->power_control.stt_min = out->ps[APMF_CNQF_QUIET].stt_min_limit;
ms->power_control.stt_skin_temp[STT_TEMP_APU] =
out->ps[APMF_CNQF_QUIET].stt_skintemp[STT_TEMP_APU];
ms->power_control.stt_skin_temp[STT_TEMP_HS2] =
out->ps[APMF_CNQF_QUIET].stt_skintemp[STT_TEMP_HS2];
ms->fan_control.fan_id = out->ps[APMF_CNQF_QUIET].fan_id;
/* Balance Mode */
ms = &config_store.mode_set[idx][CNQF_MODE_BALANCE];
ms->power_floor = out->ps[APMF_CNQF_BALANCE].pfloor;
ms->power_control.fppt = out->ps[APMF_CNQF_BALANCE].fppt;
ms->power_control.sppt = out->ps[APMF_CNQF_BALANCE].sppt;
ms->power_control.sppt_apu_only = out->ps[APMF_CNQF_BALANCE].sppt_apu_only;
ms->power_control.spl = out->ps[APMF_CNQF_BALANCE].spl;
ms->power_control.stt_min = out->ps[APMF_CNQF_BALANCE].stt_min_limit;
ms->power_control.stt_skin_temp[STT_TEMP_APU] =
out->ps[APMF_CNQF_BALANCE].stt_skintemp[STT_TEMP_APU];
ms->power_control.stt_skin_temp[STT_TEMP_HS2] =
out->ps[APMF_CNQF_BALANCE].stt_skintemp[STT_TEMP_HS2];
ms->fan_control.fan_id = out->ps[APMF_CNQF_BALANCE].fan_id;
/* Performance Mode */
ms = &config_store.mode_set[idx][CNQF_MODE_PERFORMANCE];
ms->power_floor = out->ps[APMF_CNQF_PERFORMANCE].pfloor;
ms->power_control.fppt = out->ps[APMF_CNQF_PERFORMANCE].fppt;
ms->power_control.sppt = out->ps[APMF_CNQF_PERFORMANCE].sppt;
ms->power_control.sppt_apu_only = out->ps[APMF_CNQF_PERFORMANCE].sppt_apu_only;
ms->power_control.spl = out->ps[APMF_CNQF_PERFORMANCE].spl;
ms->power_control.stt_min = out->ps[APMF_CNQF_PERFORMANCE].stt_min_limit;
ms->power_control.stt_skin_temp[STT_TEMP_APU] =
out->ps[APMF_CNQF_PERFORMANCE].stt_skintemp[STT_TEMP_APU];
ms->power_control.stt_skin_temp[STT_TEMP_HS2] =
out->ps[APMF_CNQF_PERFORMANCE].stt_skintemp[STT_TEMP_HS2];
ms->fan_control.fan_id = out->ps[APMF_CNQF_PERFORMANCE].fan_id;
/* Turbo Mode */
ms = &config_store.mode_set[idx][CNQF_MODE_TURBO];
ms->power_floor = out->ps[APMF_CNQF_TURBO].pfloor;
ms->power_control.fppt = out->ps[APMF_CNQF_TURBO].fppt;
ms->power_control.sppt = out->ps[APMF_CNQF_TURBO].sppt;
ms->power_control.sppt_apu_only = out->ps[APMF_CNQF_TURBO].sppt_apu_only;
ms->power_control.spl = out->ps[APMF_CNQF_TURBO].spl;
ms->power_control.stt_min = out->ps[APMF_CNQF_TURBO].stt_min_limit;
ms->power_control.stt_skin_temp[STT_TEMP_APU] =
out->ps[APMF_CNQF_TURBO].stt_skintemp[STT_TEMP_APU];
ms->power_control.stt_skin_temp[STT_TEMP_HS2] =
out->ps[APMF_CNQF_TURBO].stt_skintemp[STT_TEMP_HS2];
ms->fan_control.fan_id = out->ps[APMF_CNQF_TURBO].fan_id;
}
/*
 * Read the flags word from the BIOS dynamic slider defaults (AC table
 * preferred, DC as fallback); used to decide whether CnQF starts enabled.
 * Returns 0 when neither table is supported or the call fails silently.
 */
static int amd_pmf_check_flags(struct amd_pmf_dev *dev)
{
	struct apmf_dyn_slider_output out = {};

	if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC))
		apmf_get_dyn_slider_def_ac(dev, &out);
	else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC))
		apmf_get_dyn_slider_def_dc(dev, &out);

	return out.flags;
}
/*
 * Load CnQF defaults from BIOS for every supported power source (AC/DC):
 * per-mode power limits, transition parameters and thresholds, plus the
 * manual-fan flag derived from each mode's fan index.
 *
 * Returns 0 on success or the APMF error code.
 */
static int amd_pmf_load_defaults_cnqf(struct amd_pmf_dev *dev)
{
	struct apmf_dyn_slider_output out;
	int i, j, ret;

	for (i = 0; i < POWER_SOURCE_MAX; i++) {
		if (!is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC + i))
			continue;

		if (i == POWER_SOURCE_AC)
			ret = apmf_get_dyn_slider_def_ac(dev, &out);
		else
			ret = apmf_get_dyn_slider_def_dc(dev, &out);
		if (ret) {
			/*
			 * Fix: the original message always blamed the DC call,
			 * even when the AC fetch was the one that failed.
			 */
			dev_err(dev->dev, "APMF get dyn slider default %s failed: %d\n",
				i == POWER_SOURCE_AC ? "AC" : "DC", ret);
			return ret;
		}

		amd_pmf_cnqf_dump_defaults(&out, i);
		amd_pmf_update_mode_set(i, &out);
		amd_pmf_update_trans_data(i, &out);
		amd_pmf_update_power_threshold(i);

		/* FAN_INDEX_AUTO means firmware-controlled fan; anything else is manual */
		for (j = 0; j < CNQF_MODE_MAX; j++) {
			if (config_store.mode_set[i][j].fan_control.fan_id == FAN_INDEX_AUTO)
				config_store.mode_set[i][j].fan_control.manual = false;
			else
				config_store.mode_set[i][j].fan_control.manual = true;
		}
	}

	/* set to initial default values */
	config_store.current_mode = CNQF_MODE_BALANCE;

	return 0;
}
/*
 * sysfs store for "cnqf_enable": parse a boolean, toggle CnQF, and apply
 * either the current CnQF mode limits (when enabling under the balanced
 * profile) or the static slider limits (when disabling).
 */
static ssize_t cnqf_enable_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
	int result, src;
	bool input;

	result = kstrtobool(buf, &input);
	if (result)
		return result;

	src = amd_pmf_cnqf_get_power_source(pdev);
	pdev->cnqf_enabled = input;

	if (pdev->cnqf_enabled && is_pprof_balanced(pdev)) {
		amd_pmf_set_cnqf(pdev, src, config_store.current_mode, NULL);
	} else {
		if (is_apmf_func_supported(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
			amd_pmf_set_sps_power_limits(pdev);
	}

	dev_dbg(pdev->dev, "Received CnQF %s\n", str_on_off(input));
	return count;
}
/* sysfs show for "cnqf_enable": prints "on" or "off". */
static ssize_t cnqf_enable_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct amd_pmf_dev *pdev = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", str_on_off(pdev->cnqf_enabled));
}
static DEVICE_ATTR_RW(cnqf_enable);
/* Hide the cnqf_enable attribute entirely when CnQF is unsupported. */
static umode_t cnqf_feature_is_visible(struct kobject *kobj,
				       struct attribute *attr, int n)
{
	struct amd_pmf_dev *pdev = dev_get_drvdata(kobj_to_dev(kobj));

	if (!pdev->cnqf_supported)
		return 0;

	return attr->mode;
}
/* sysfs attributes exposed when CnQF is supported (see cnqf_feature_is_visible). */
static struct attribute *cnqf_feature_attrs[] = {
	&dev_attr_cnqf_enable.attr,
	NULL
};

const struct attribute_group cnqf_feature_attribute_group = {
	.is_visible = cnqf_feature_is_visible,
	.attrs = cnqf_feature_attrs,
};
/* Stop the periodic metrics/CnQF worker started by amd_pmf_init_metrics_table(). */
void amd_pmf_deinit_cnqf(struct amd_pmf_dev *dev)
{
	cancel_delayed_work_sync(&dev->work_buffer);
}
/*
 * Initialize CnQF: load BIOS defaults, start the metrics table worker,
 * latch the BIOS enable flag, and (when enabled under the balanced
 * profile) immediately apply the current mode's thermal limits.
 *
 * Returns 0 on success or the error from loading defaults.
 */
int amd_pmf_init_cnqf(struct amd_pmf_dev *dev)
{
	int ret, src;

	/*
	 * Note the caller of this function has already checked that both
	 * APMF_FUNC_DYN_SLIDER_AC and APMF_FUNC_DYN_SLIDER_DC are supported.
	 */

	ret = amd_pmf_load_defaults_cnqf(dev);
	if (ret < 0)
		return ret;

	amd_pmf_init_metrics_table(dev);

	dev->cnqf_supported = true;
	dev->cnqf_enabled = amd_pmf_check_flags(dev);

	/* update the thermal for CnQF */
	if (dev->cnqf_enabled && is_pprof_balanced(dev)) {
		src = amd_pmf_cnqf_get_power_source(dev);
		amd_pmf_set_cnqf(dev, src, config_store.current_mode, NULL);
	}

	return 0;
}
| linux-master | drivers/platform/x86/amd/pmf/cnqf.c |
// SPDX-License-Identifier: GPL-2.0
/*
* AMD Platform Management Framework Driver
*
* Copyright (c) 2022, Advanced Micro Devices, Inc.
* All Rights Reserved.
*
* Author: Shyam Sundar S K <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/workqueue.h>
#include "pmf.h"
static struct auto_mode_mode_config config_store;
static const char *state_as_str(unsigned int state);
#ifdef CONFIG_AMD_PMF_DEBUG
/* Dump the auto-mode defaults loaded from BIOS (debug builds only). */
static void amd_pmf_dump_auto_mode_defaults(struct auto_mode_mode_config *data)
{
	struct auto_mode_mode_settings *its_mode;

	pr_debug("Auto Mode Data - BEGIN\n");

	/* time constant */
	pr_debug("balanced_to_perf: %u ms\n",
		 data->transition[AUTO_TRANSITION_TO_PERFORMANCE].time_constant);
	pr_debug("perf_to_balanced: %u ms\n",
		 data->transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].time_constant);
	pr_debug("quiet_to_balanced: %u ms\n",
		 data->transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].time_constant);
	pr_debug("balanced_to_quiet: %u ms\n",
		 data->transition[AUTO_TRANSITION_TO_QUIET].time_constant);

	/* power floor */
	pr_debug("pfloor_perf: %u mW\n", data->mode_set[AUTO_PERFORMANCE].power_floor);
	pr_debug("pfloor_balanced: %u mW\n", data->mode_set[AUTO_BALANCE].power_floor);
	pr_debug("pfloor_quiet: %u mW\n", data->mode_set[AUTO_QUIET].power_floor);

	/* Power delta for mode change */
	pr_debug("pd_balanced_to_perf: %u mW\n",
		 data->transition[AUTO_TRANSITION_TO_PERFORMANCE].power_delta);
	pr_debug("pd_perf_to_balanced: %u mW\n",
		 data->transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_delta);
	pr_debug("pd_quiet_to_balanced: %u mW\n",
		 data->transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].power_delta);
	pr_debug("pd_balanced_to_quiet: %u mW\n",
		 data->transition[AUTO_TRANSITION_TO_QUIET].power_delta);

	/* skin temperature limits */
	its_mode = &data->mode_set[AUTO_PERFORMANCE_ON_LAP];
	pr_debug("stt_apu_perf_on_lap: %u C\n",
		 its_mode->power_control.stt_skin_temp[STT_TEMP_APU]);
	pr_debug("stt_hs2_perf_on_lap: %u C\n",
		 its_mode->power_control.stt_skin_temp[STT_TEMP_HS2]);
	pr_debug("stt_min_limit_perf_on_lap: %u mW\n", its_mode->power_control.stt_min);

	its_mode = &data->mode_set[AUTO_PERFORMANCE];
	pr_debug("stt_apu_perf: %u C\n", its_mode->power_control.stt_skin_temp[STT_TEMP_APU]);
	pr_debug("stt_hs2_perf: %u C\n", its_mode->power_control.stt_skin_temp[STT_TEMP_HS2]);
	pr_debug("stt_min_limit_perf: %u mW\n", its_mode->power_control.stt_min);

	its_mode = &data->mode_set[AUTO_BALANCE];
	pr_debug("stt_apu_balanced: %u C\n", its_mode->power_control.stt_skin_temp[STT_TEMP_APU]);
	pr_debug("stt_hs2_balanced: %u C\n", its_mode->power_control.stt_skin_temp[STT_TEMP_HS2]);
	pr_debug("stt_min_limit_balanced: %u mW\n", its_mode->power_control.stt_min);

	its_mode = &data->mode_set[AUTO_QUIET];
	pr_debug("stt_apu_quiet: %u C\n", its_mode->power_control.stt_skin_temp[STT_TEMP_APU]);
	pr_debug("stt_hs2_quiet: %u C\n", its_mode->power_control.stt_skin_temp[STT_TEMP_HS2]);
	pr_debug("stt_min_limit_quiet: %u mW\n", its_mode->power_control.stt_min);

	/* SPL based power limits */
	its_mode = &data->mode_set[AUTO_PERFORMANCE_ON_LAP];
	pr_debug("fppt_perf_on_lap: %u mW\n", its_mode->power_control.fppt);
	pr_debug("sppt_perf_on_lap: %u mW\n", its_mode->power_control.sppt);
	pr_debug("spl_perf_on_lap: %u mW\n", its_mode->power_control.spl);
	pr_debug("sppt_apu_only_perf_on_lap: %u mW\n", its_mode->power_control.sppt_apu_only);

	its_mode = &data->mode_set[AUTO_PERFORMANCE];
	pr_debug("fppt_perf: %u mW\n", its_mode->power_control.fppt);
	pr_debug("sppt_perf: %u mW\n", its_mode->power_control.sppt);
	pr_debug("spl_perf: %u mW\n", its_mode->power_control.spl);
	pr_debug("sppt_apu_only_perf: %u mW\n", its_mode->power_control.sppt_apu_only);

	its_mode = &data->mode_set[AUTO_BALANCE];
	pr_debug("fppt_balanced: %u mW\n", its_mode->power_control.fppt);
	pr_debug("sppt_balanced: %u mW\n", its_mode->power_control.sppt);
	pr_debug("spl_balanced: %u mW\n", its_mode->power_control.spl);
	pr_debug("sppt_apu_only_balanced: %u mW\n", its_mode->power_control.sppt_apu_only);

	its_mode = &data->mode_set[AUTO_QUIET];
	pr_debug("fppt_quiet: %u mW\n", its_mode->power_control.fppt);
	pr_debug("sppt_quiet: %u mW\n", its_mode->power_control.sppt);
	pr_debug("spl_quiet: %u mW\n", its_mode->power_control.spl);
	pr_debug("sppt_apu_only_quiet: %u mW\n", its_mode->power_control.sppt_apu_only);

	/* Fan ID */
	pr_debug("fan_id_perf: %lu\n",
		 data->mode_set[AUTO_PERFORMANCE].fan_control.fan_id);
	pr_debug("fan_id_balanced: %lu\n",
		 data->mode_set[AUTO_BALANCE].fan_control.fan_id);
	pr_debug("fan_id_quiet: %lu\n",
		 data->mode_set[AUTO_QUIET].fan_control.fan_id);

	pr_debug("Auto Mode Data - END\n");
}
#else
/* No-op stub when PMF debug output is compiled out. */
static void amd_pmf_dump_auto_mode_defaults(struct auto_mode_mode_config *data) {}
#endif
/*
 * Push the power limits and fan settings of auto mode @idx down to the
 * SMU, and update the fan index via APMF when supported.
 *
 * @table is unused by all callers, which pass NULL; the settings come
 * from the file-scope config_store.
 */
static void amd_pmf_set_automode(struct amd_pmf_dev *dev, int idx,
				 struct auto_mode_mode_config *table)
{
	struct power_table_control *pwr_ctrl = &config_store.mode_set[idx].power_control;

	amd_pmf_send_cmd(dev, SET_SPL, false, pwr_ctrl->spl, NULL);
	amd_pmf_send_cmd(dev, SET_FPPT, false, pwr_ctrl->fppt, NULL);
	amd_pmf_send_cmd(dev, SET_SPPT, false, pwr_ctrl->sppt, NULL);
	amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, pwr_ctrl->sppt_apu_only, NULL);
	amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, pwr_ctrl->stt_min, NULL);
	amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
			 pwr_ctrl->stt_skin_temp[STT_TEMP_APU], NULL);
	amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
			 pwr_ctrl->stt_skin_temp[STT_TEMP_HS2], NULL);

	if (is_apmf_func_supported(dev, APMF_FUNC_SET_FAN_IDX))
		apmf_update_fan_idx(dev, config_store.mode_set[idx].fan_control.manual,
				    config_store.mode_set[idx].fan_control.fan_id);
}
/*
 * Fold one socket power sample into a circular window of
 * AVG_SAMPLE_SIZE entries and return the window's integer mean.
 * On the very first sample (history_idx == -1) the whole window is
 * seeded with that sample so the average starts meaningful.
 */
static int amd_pmf_get_moving_avg(struct amd_pmf_dev *pdev, int socket_power)
{
	int sum = 0, i;

	if (pdev->socket_power_history_idx == -1) {
		for (i = 0; i < AVG_SAMPLE_SIZE; i++)
			pdev->socket_power_history[i] = socket_power;
	}

	pdev->socket_power_history_idx =
		(pdev->socket_power_history_idx + 1) % AVG_SAMPLE_SIZE;
	pdev->socket_power_history[pdev->socket_power_history_idx] = socket_power;

	for (i = 0; i < AVG_SAMPLE_SIZE; i++)
		sum += pdev->socket_power_history[i];

	return sum / AVG_SAMPLE_SIZE;
}
/*
 * Auto-mode state-machine tick, called periodically with the latest
 * socket power sample and the time since the previous tick.
 *
 * Each transition accumulates time while the moving-average power stays
 * on its trigger side of power_threshold; once the accumulated time
 * reaches the transition's time_constant it becomes "applied". The first
 * applied transition in array order decides the next mode.
 */
void amd_pmf_trans_automode(struct amd_pmf_dev *dev, int socket_power, ktime_t time_elapsed_ms)
{
	int avg_power = 0;
	bool update = false;
	int i, j;

	/* Get the average moving average computed by auto mode algorithm */
	avg_power = amd_pmf_get_moving_avg(dev, socket_power);

	for (i = 0; i < AUTO_TRANSITION_MAX; i++) {
		/* Accumulate time while the condition holds; reset otherwise */
		if ((config_store.transition[i].shifting_up && avg_power >=
		     config_store.transition[i].power_threshold) ||
		    (!config_store.transition[i].shifting_up && avg_power <=
		     config_store.transition[i].power_threshold)) {
			if (config_store.transition[i].timer <
			    config_store.transition[i].time_constant)
				config_store.transition[i].timer += time_elapsed_ms;
		} else {
			config_store.transition[i].timer = 0;
		}

		if (config_store.transition[i].timer >=
		    config_store.transition[i].time_constant &&
		    !config_store.transition[i].applied) {
			config_store.transition[i].applied = true;
			update = true;
		} else if (config_store.transition[i].timer <=
			   config_store.transition[i].time_constant &&
			   config_store.transition[i].applied) {
			config_store.transition[i].applied = false;
			update = true;
		}

#ifdef CONFIG_AMD_PMF_DEBUG
		dev_dbg(dev->dev, "[AUTO MODE] average_power : %d mW mode: %s\n", avg_power,
			state_as_str(config_store.current_mode));

		dev_dbg(dev->dev, "[AUTO MODE] time: %lld ms timer: %u ms tc: %u ms\n",
			time_elapsed_ms, config_store.transition[i].timer,
			config_store.transition[i].time_constant);

		dev_dbg(dev->dev, "[AUTO MODE] shiftup: %u pt: %u mW pf: %u mW pd: %u mW\n",
			config_store.transition[i].shifting_up,
			config_store.transition[i].power_threshold,
			config_store.mode_set[i].power_floor,
			config_store.transition[i].power_delta);
#endif
	}

	dev_dbg(dev->dev, "[AUTO_MODE] avg power: %u mW mode: %s\n", avg_power,
		state_as_str(config_store.current_mode));

#ifdef CONFIG_AMD_PMF_DEBUG
	dev_dbg(dev->dev, "[AUTO MODE] priority1: %u priority2: %u priority3: %u priority4: %u\n",
		config_store.transition[0].applied,
		config_store.transition[1].applied,
		config_store.transition[2].applied,
		config_store.transition[3].applied);
#endif

	if (update) {
		for (j = 0; j < AUTO_TRANSITION_MAX; j++) {
			/* Apply the mode with highest priority indentified */
			if (config_store.transition[j].applied) {
				if (config_store.current_mode !=
				    config_store.transition[j].target_mode) {
					config_store.current_mode =
							config_store.transition[j].target_mode;
					dev_dbg(dev->dev, "[AUTO_MODE] moving to mode:%s\n",
						state_as_str(config_store.current_mode));
					amd_pmf_set_automode(dev, config_store.current_mode, NULL);
				}
				break;
			}
		}
	}
}
/*
 * React to a "CQL" (on-lap detection) event: retarget the performance
 * transition to the on-lap variant, and if the system is currently in a
 * performance mode, switch immediately to the matching variant so the
 * stricter on-lap thermal limits take effect.
 */
void amd_pmf_update_2_cql(struct amd_pmf_dev *dev, bool is_cql_event)
{
	int mode = config_store.current_mode;

	config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].target_mode =
				   is_cql_event ? AUTO_PERFORMANCE_ON_LAP : AUTO_PERFORMANCE;

	if ((mode == AUTO_PERFORMANCE || mode == AUTO_PERFORMANCE_ON_LAP) &&
	    mode != config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].target_mode) {
		mode = config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].target_mode;
		amd_pmf_set_automode(dev, mode, NULL);
	}
	dev_dbg(dev->dev, "updated CQL thermals\n");
}
/*
 * amd_pmf_get_power_threshold - derive the transition power thresholds.
 *
 * Each auto-mode transition fires when average SoC power crosses
 * (power_floor of the source mode - power_delta of the transition).
 * Both to-quiet and to-performance transitions are referenced to the
 * BALANCE mode floor; the back-to-balance transitions use their own
 * source-mode floors.
 */
static void amd_pmf_get_power_threshold(void)
{
	config_store.transition[AUTO_TRANSITION_TO_QUIET].power_threshold =
				config_store.mode_set[AUTO_BALANCE].power_floor -
				config_store.transition[AUTO_TRANSITION_TO_QUIET].power_delta;

	config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].power_threshold =
				config_store.mode_set[AUTO_BALANCE].power_floor -
				config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].power_delta;

	config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].power_threshold =
			config_store.mode_set[AUTO_QUIET].power_floor -
			config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].power_delta;

	config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_threshold =
		config_store.mode_set[AUTO_PERFORMANCE].power_floor -
		config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_delta;

#ifdef CONFIG_AMD_PMF_DEBUG
	pr_debug("[AUTO MODE TO_QUIET] pt: %u mW pf: %u mW pd: %u mW\n",
		 config_store.transition[AUTO_TRANSITION_TO_QUIET].power_threshold,
		 config_store.mode_set[AUTO_BALANCE].power_floor,
		 config_store.transition[AUTO_TRANSITION_TO_QUIET].power_delta);

	pr_debug("[AUTO MODE TO_PERFORMANCE] pt: %u mW pf: %u mW pd: %u mW\n",
		 config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].power_threshold,
		 config_store.mode_set[AUTO_BALANCE].power_floor,
		 config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].power_delta);

	pr_debug("[AUTO MODE QUIET_TO_BALANCE] pt: %u mW pf: %u mW pd: %u mW\n",
		 config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE]
		 .power_threshold,
		 config_store.mode_set[AUTO_QUIET].power_floor,
		 config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].power_delta);

	pr_debug("[AUTO MODE PERFORMANCE_TO_BALANCE] pt: %u mW pf: %u mW pd: %u mW\n",
		 config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE]
		 .power_threshold,
		 config_store.mode_set[AUTO_PERFORMANCE].power_floor,
		 config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_delta);
#endif
}
/*
 * state_as_str - map an auto-mode state to a human-readable name for
 * debug logging. Unknown values get a sentinel string.
 */
static const char *state_as_str(unsigned int state)
{
	if (state == AUTO_QUIET)
		return "QUIET";
	if (state == AUTO_BALANCE)
		return "BALANCED";
	if (state == AUTO_PERFORMANCE_ON_LAP)
		return "ON_LAP";
	if (state == AUTO_PERFORMANCE)
		return "PERFORMANCE";

	return "Unknown Auto Mode State";
}
/*
 * amd_pmf_load_defaults_auto_mode - populate config_store from the BIOS
 * APMF auto-mode defaults.
 * @dev: PMF driver context
 *
 * Reads the OEM-provided auto-mode table (time constants, power floors,
 * power deltas, skin-temperature limits, fan IDs) into the module-level
 * config_store, derives the transition thresholds, and sets the initial
 * state to AUTO_BALANCE.
 */
static void amd_pmf_load_defaults_auto_mode(struct amd_pmf_dev *dev)
{
	struct apmf_auto_mode output;
	struct power_table_control *pwr_ctrl;
	int i;

	apmf_get_auto_mode_def(dev, &output);
	/* time constant */
	config_store.transition[AUTO_TRANSITION_TO_QUIET].time_constant =
		output.balanced_to_quiet;
	config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].time_constant =
		output.balanced_to_perf;
	config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].time_constant =
		output.quiet_to_balanced;
	config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].time_constant =
		output.perf_to_balanced;

	/* power floor */
	config_store.mode_set[AUTO_QUIET].power_floor = output.pfloor_quiet;
	config_store.mode_set[AUTO_BALANCE].power_floor = output.pfloor_balanced;
	config_store.mode_set[AUTO_PERFORMANCE].power_floor = output.pfloor_perf;
	/* on-lap shares the performance power floor */
	config_store.mode_set[AUTO_PERFORMANCE_ON_LAP].power_floor = output.pfloor_perf;

	/* Power delta for mode change */
	config_store.transition[AUTO_TRANSITION_TO_QUIET].power_delta =
		output.pd_balanced_to_quiet;
	config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].power_delta =
		output.pd_balanced_to_perf;
	config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].power_delta =
		output.pd_quiet_to_balanced;
	config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_delta =
		output.pd_perf_to_balanced;

	/* Power threshold */
	amd_pmf_get_power_threshold();

	/* skin temperature limits */
	pwr_ctrl = &config_store.mode_set[AUTO_QUIET].power_control;
	pwr_ctrl->spl = output.spl_quiet;
	pwr_ctrl->sppt = output.sppt_quiet;
	pwr_ctrl->fppt = output.fppt_quiet;
	pwr_ctrl->sppt_apu_only = output.sppt_apu_only_quiet;
	pwr_ctrl->stt_min = output.stt_min_limit_quiet;
	pwr_ctrl->stt_skin_temp[STT_TEMP_APU] = output.stt_apu_quiet;
	pwr_ctrl->stt_skin_temp[STT_TEMP_HS2] = output.stt_hs2_quiet;

	pwr_ctrl = &config_store.mode_set[AUTO_BALANCE].power_control;
	pwr_ctrl->spl = output.spl_balanced;
	pwr_ctrl->sppt = output.sppt_balanced;
	pwr_ctrl->fppt = output.fppt_balanced;
	pwr_ctrl->sppt_apu_only = output.sppt_apu_only_balanced;
	pwr_ctrl->stt_min = output.stt_min_limit_balanced;
	pwr_ctrl->stt_skin_temp[STT_TEMP_APU] = output.stt_apu_balanced;
	pwr_ctrl->stt_skin_temp[STT_TEMP_HS2] = output.stt_hs2_balanced;

	pwr_ctrl = &config_store.mode_set[AUTO_PERFORMANCE].power_control;
	pwr_ctrl->spl = output.spl_perf;
	pwr_ctrl->sppt = output.sppt_perf;
	pwr_ctrl->fppt = output.fppt_perf;
	pwr_ctrl->sppt_apu_only = output.sppt_apu_only_perf;
	pwr_ctrl->stt_min = output.stt_min_limit_perf;
	pwr_ctrl->stt_skin_temp[STT_TEMP_APU] = output.stt_apu_perf;
	pwr_ctrl->stt_skin_temp[STT_TEMP_HS2] = output.stt_hs2_perf;

	pwr_ctrl = &config_store.mode_set[AUTO_PERFORMANCE_ON_LAP].power_control;
	pwr_ctrl->spl = output.spl_perf_on_lap;
	pwr_ctrl->sppt = output.sppt_perf_on_lap;
	pwr_ctrl->fppt = output.fppt_perf_on_lap;
	pwr_ctrl->sppt_apu_only = output.sppt_apu_only_perf_on_lap;
	pwr_ctrl->stt_min = output.stt_min_limit_perf_on_lap;
	pwr_ctrl->stt_skin_temp[STT_TEMP_APU] = output.stt_apu_perf_on_lap;
	pwr_ctrl->stt_skin_temp[STT_TEMP_HS2] = output.stt_hs2_perf_on_lap;

	/* Fan ID */
	config_store.mode_set[AUTO_QUIET].fan_control.fan_id = output.fan_id_quiet;
	config_store.mode_set[AUTO_BALANCE].fan_control.fan_id = output.fan_id_balanced;
	config_store.mode_set[AUTO_PERFORMANCE].fan_control.fan_id = output.fan_id_perf;
	config_store.mode_set[AUTO_PERFORMANCE_ON_LAP].fan_control.fan_id =
		output.fan_id_perf;

	/* Transition targets and direction (shifting_up = toward more power) */
	config_store.transition[AUTO_TRANSITION_TO_QUIET].target_mode = AUTO_QUIET;
	config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].target_mode =
		AUTO_PERFORMANCE;
	config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].target_mode =
		AUTO_BALANCE;
	config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].target_mode =
		AUTO_BALANCE;

	config_store.transition[AUTO_TRANSITION_TO_QUIET].shifting_up = false;
	config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].shifting_up = true;
	config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].shifting_up = true;
	config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].shifting_up =
		false;

	/* FAN_INDEX_AUTO means firmware-controlled fan, anything else manual */
	for (i = 0 ; i < AUTO_MODE_MAX ; i++) {
		if (config_store.mode_set[i].fan_control.fan_id == FAN_INDEX_AUTO)
			config_store.mode_set[i].fan_control.manual = false;
		else
			config_store.mode_set[i].fan_control.manual = true;
	}

	/* set to initial default values */
	config_store.current_mode = AUTO_BALANCE;
	dev->socket_power_history_idx = -1;

	amd_pmf_dump_auto_mode_defaults(&config_store);
}
/*
 * amd_pmf_reset_amt - restore static-slider thermals when AMT is disabled.
 * @dev: PMF driver context
 *
 * The OEM BIOS implementation guide states that with auto mode enabled the
 * platform_profile registration is owned by the OEM driver. Both the static
 * slider and auto-mode BIOS functions can be enabled at the same time; only
 * re-apply the static slider limits when that function is advertised.
 */
int amd_pmf_reset_amt(struct amd_pmf_dev *dev)
{
	if (!is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
		return 0;

	dev_dbg(dev->dev, "resetting AMT thermals\n");
	amd_pmf_set_sps_power_limits(dev);
	return 0;
}
/* Re-apply the current auto-mode thermal limits when AMT is (re)enabled. */
void amd_pmf_handle_amt(struct amd_pmf_dev *dev)
{
	amd_pmf_set_automode(dev, config_store.current_mode, NULL);
}
/* Stop the periodic metrics worker used by auto-mode transitions. */
void amd_pmf_deinit_auto_mode(struct amd_pmf_dev *dev)
{
	cancel_delayed_work_sync(&dev->work_buffer);
}
/* Load BIOS auto-mode defaults and start periodic metrics collection. */
void amd_pmf_init_auto_mode(struct amd_pmf_dev *dev)
{
	amd_pmf_load_defaults_auto_mode(dev);
	amd_pmf_init_metrics_table(dev);
}
| linux-master | drivers/platform/x86/amd/pmf/auto-mode.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* AMD Platform Management Framework Driver
*
* Copyright (c) 2022, Advanced Micro Devices, Inc.
* All Rights Reserved.
*
* Author: Shyam Sundar S K <[email protected]>
*/
#include <asm/amd_nb.h>
#include <linux/debugfs.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include "pmf.h"
/* PMF-SMU communication registers (mailbox: message, response, argument) */
#define AMD_PMF_REGISTER_MESSAGE	0xA18
#define AMD_PMF_REGISTER_RESPONSE	0xA78
#define AMD_PMF_REGISTER_ARGUMENT	0xA58

/* Base address of SMU for mapping physical address to virtual address */
#define AMD_PMF_MAPPING_SIZE		0x01000
#define AMD_PMF_BASE_ADDR_OFFSET	0x10000
#define AMD_PMF_BASE_ADDR_LO		0x13B102E8
#define AMD_PMF_BASE_ADDR_HI		0x13B102EC
#define AMD_PMF_BASE_ADDR_LO_MASK	GENMASK(15, 0)
#define AMD_PMF_BASE_ADDR_HI_MASK	GENMASK(31, 20)

/* SMU Response Codes */
#define AMD_PMF_RESULT_OK			0x01
#define AMD_PMF_RESULT_CMD_REJECT_BUSY		0xFC
#define AMD_PMF_RESULT_CMD_REJECT_PREREQ	0xFD
#define AMD_PMF_RESULT_CMD_UNKNOWN		0xFE
#define AMD_PMF_RESULT_FAILED			0xFF

/* List of supported CPU ids */
#define AMD_CPU_ID_RMB			0x14b5
#define AMD_CPU_ID_PS			0x14e8
#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT	0x1507

/* SMU mailbox polling parameters (microseconds / iterations) */
#define PMF_MSG_DELAY_MIN_US		50
#define RESPONSE_REGISTER_LOOP_MAX	20000

#define DELAY_MIN_US	2000
#define DELAY_MAX_US	3000

/* override Metrics Table sample size time (in ms) */
static int metrics_table_loop_ms = 1000;
module_param(metrics_table_loop_ms, int, 0644);
MODULE_PARM_DESC(metrics_table_loop_ms, "Metrics Table sample size time (default = 1000ms)");

/* Force load on supported older platforms */
static bool force_load;
module_param(force_load, bool, 0444);
MODULE_PARM_DESC(force_load, "Force load this driver on supported older platforms (experimental)");
/*
 * amd_pmf_pwr_src_notify_call - power-supply notifier: re-apply limits on
 * AC/DC change.
 *
 * When AMT or CnQF is active and the profile is balanced, the metrics
 * worker owns the limits, so skip the update (NOTIFY_DONE). Otherwise push
 * the static-slider limits and/or notify the EC of the slider position.
 */
static int amd_pmf_pwr_src_notify_call(struct notifier_block *nb, unsigned long event, void *data)
{
	struct amd_pmf_dev *pmf = container_of(nb, struct amd_pmf_dev, pwr_src_notifier);

	if (event != PSY_EVENT_PROP_CHANGED)
		return NOTIFY_OK;

	if (is_apmf_func_supported(pmf, APMF_FUNC_AUTO_MODE) ||
	    is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_DC) ||
	    is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_AC)) {
		if ((pmf->amt_enabled || pmf->cnqf_enabled) && is_pprof_balanced(pmf))
			return NOTIFY_DONE;
	}

	if (is_apmf_func_supported(pmf, APMF_FUNC_STATIC_SLIDER_GRANULAR))
		amd_pmf_set_sps_power_limits(pmf);

	if (is_apmf_func_supported(pmf, APMF_FUNC_OS_POWER_SLIDER_UPDATE))
		amd_pmf_power_slider_update_event(pmf);

	return NOTIFY_OK;
}
/*
 * current_power_limits_show - debugfs read: dump the active power limits
 * for the current power source and platform profile.
 */
static int current_power_limits_show(struct seq_file *seq, void *unused)
{
	struct amd_pmf_dev *dev = seq->private;
	struct amd_pmf_static_slider_granular table;
	int mode, src = 0;

	mode = amd_pmf_get_pprof_modes(dev);
	if (mode < 0)
		return mode;

	src = amd_pmf_get_power_source();
	/* Fetch the live limits from the SMU rather than cached values */
	amd_pmf_update_slider(dev, SLIDER_OP_GET, mode, &table);
	seq_printf(seq, "spl:%u fppt:%u sppt:%u sppt_apu_only:%u stt_min:%u stt[APU]:%u stt[HS2]: %u\n",
		   table.prop[src][mode].spl,
		   table.prop[src][mode].fppt,
		   table.prop[src][mode].sppt,
		   table.prop[src][mode].sppt_apu_only,
		   table.prop[src][mode].stt_min,
		   table.prop[src][mode].stt_skin_temp[STT_TEMP_APU],
		   table.prop[src][mode].stt_skin_temp[STT_TEMP_HS2]);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(current_power_limits);
/* Tear down the amd_pmf debugfs directory and its files. */
static void amd_pmf_dbgfs_unregister(struct amd_pmf_dev *dev)
{
	debugfs_remove_recursive(dev->dbgfs_dir);
}
static void amd_pmf_dbgfs_register(struct amd_pmf_dev *dev)
{
dev->dbgfs_dir = debugfs_create_dir("amd_pmf", NULL);
debugfs_create_file("current_power_limits", 0644, dev->dbgfs_dir, dev,
¤t_power_limits_fops);
}
/* Report AC when any mains supply is online, DC otherwise. */
int amd_pmf_get_power_source(void)
{
	return power_supply_is_system_supplied() > 0 ? POWER_SOURCE_AC :
						       POWER_SOURCE_DC;
}
/*
 * amd_pmf_get_metrics - periodic worker: pull the metrics table from PMFW
 * and drive the AMT / CnQF state machines.
 *
 * Runs every metrics_table_loop_ms; reschedules itself. Serialized with
 * the APMF event handler via update_mutex.
 */
static void amd_pmf_get_metrics(struct work_struct *work)
{
	struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, work_buffer.work);
	ktime_t time_elapsed_ms;
	int socket_power;

	mutex_lock(&dev->update_mutex);
	/* Transfer table contents */
	memset(dev->buf, 0, sizeof(dev->m_table));
	amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, 0, 7, NULL);
	memcpy(&dev->m_table, dev->buf, sizeof(dev->m_table));

	time_elapsed_ms = ktime_to_ms(ktime_get()) - dev->start_time;

	/* Calculate the avg SoC power consumption */
	socket_power = dev->m_table.apu_power + dev->m_table.dgpu_power;

	if (dev->amt_enabled) {
		/* Apply the Auto Mode transition */
		amd_pmf_trans_automode(dev, socket_power, time_elapsed_ms);
	}

	if (dev->cnqf_enabled) {
		/* Apply the CnQF transition */
		amd_pmf_trans_cnqf(dev, socket_power, time_elapsed_ms);
	}

	dev->start_time = ktime_to_ms(ktime_get());
	schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms));
	mutex_unlock(&dev->update_mutex);
}
/* Read a 32-bit SMU mailbox register at the given offset. */
static inline u32 amd_pmf_reg_read(struct amd_pmf_dev *dev, int reg_offset)
{
	return ioread32(dev->regbase + reg_offset);
}
/* Write a 32-bit SMU mailbox register at the given offset. */
static inline void amd_pmf_reg_write(struct amd_pmf_dev *dev, int reg_offset, u32 val)
{
	iowrite32(val, dev->regbase + reg_offset);
}
/* Debug helper: log the three SMU mailbox registers. */
static void __maybe_unused amd_pmf_dump_registers(struct amd_pmf_dev *dev)
{
	u32 value;

	value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_RESPONSE);
	dev_dbg(dev->dev, "AMD_PMF_REGISTER_RESPONSE:%x\n", value);

	value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_ARGUMENT);
	dev_dbg(dev->dev, "AMD_PMF_REGISTER_ARGUMENT:%d\n", value);

	value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_MESSAGE);
	dev_dbg(dev->dev, "AMD_PMF_REGISTER_MESSAGE:%x\n", value);
}
/*
 * amd_pmf_send_cmd - issue one command to the SMU mailbox.
 * @dev: PMF driver context
 * @message: command ID
 * @get: true to read a result back from the argument register
 * @arg: argument value written before the command
 * @data: out parameter for the result when @get is true
 *
 * Mailbox protocol: wait for a non-zero response (idle), clear the
 * response register, write argument then message, then poll for the
 * completion code. Serialized by dev->lock. Returns 0 or a negative
 * errno mapped from the SMU response code.
 */
int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32 *data)
{
	int rc;
	u32 val;

	mutex_lock(&dev->lock);

	/* Wait until we get a valid response */
	rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
				val, val != 0, PMF_MSG_DELAY_MIN_US,
				PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
	if (rc) {
		dev_err(dev->dev, "failed to talk to SMU\n");
		goto out_unlock;
	}

	/* Write zero to response register */
	amd_pmf_reg_write(dev, AMD_PMF_REGISTER_RESPONSE, 0);

	/* Write argument into argument register */
	amd_pmf_reg_write(dev, AMD_PMF_REGISTER_ARGUMENT, arg);

	/* Write message ID to message ID register */
	amd_pmf_reg_write(dev, AMD_PMF_REGISTER_MESSAGE, message);

	/* Wait until we get a valid response */
	rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
				val, val != 0, PMF_MSG_DELAY_MIN_US,
				PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
	if (rc) {
		dev_err(dev->dev, "SMU response timed out\n");
		goto out_unlock;
	}

	switch (val) {
	case AMD_PMF_RESULT_OK:
		if (get) {
			/* PMFW may take longer time to return back the data */
			usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US);
			*data = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_ARGUMENT);
		}
		break;
	case AMD_PMF_RESULT_CMD_REJECT_BUSY:
		dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
		rc = -EBUSY;
		goto out_unlock;
	case AMD_PMF_RESULT_CMD_UNKNOWN:
		dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
		rc = -EINVAL;
		goto out_unlock;
	case AMD_PMF_RESULT_CMD_REJECT_PREREQ:
	case AMD_PMF_RESULT_FAILED:
	default:
		dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
		rc = -EIO;
		goto out_unlock;
	}

out_unlock:
	mutex_unlock(&dev->lock);
	amd_pmf_dump_registers(dev);
	return rc;
}
/* Root-complex PCI IDs of SoCs this driver supports. */
static const struct pci_device_id pmf_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RMB) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
	{ }
};
/*
 * amd_pmf_set_dram_addr - tell PMFW the physical address of the metrics
 * buffer, split into high/low 32-bit halves.
 */
static void amd_pmf_set_dram_addr(struct amd_pmf_dev *dev)
{
	u64 phys_addr;
	u32 hi, low;

	phys_addr = virt_to_phys(dev->buf);
	hi = phys_addr >> 32;
	low = phys_addr & GENMASK(31, 0);

	amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL);
	amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL);
}
/*
 * amd_pmf_init_metrics_table - allocate the shared metrics buffer, point
 * PMFW at it and schedule the first metrics poll.
 *
 * Returns 0 or -ENOMEM.
 */
int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev)
{
	/* Get Metrics Table Address */
	dev->buf = kzalloc(sizeof(dev->m_table), GFP_KERNEL);
	if (!dev->buf)
		return -ENOMEM;

	INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics);

	amd_pmf_set_dram_addr(dev);

	/*
	 * Start collecting the metrics data after a small delay
	 * or else, we might end up getting stale values from PMFW.
	 */
	schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms * 3));

	return 0;
}
/*
 * amd_pmf_resume_handler - PM resume: re-program the DRAM address in PMFW,
 * which loses it across suspend, but only if the buffer was allocated.
 */
static int amd_pmf_resume_handler(struct device *dev)
{
	struct amd_pmf_dev *pdev = dev_get_drvdata(dev);

	if (pdev->buf)
		amd_pmf_set_dram_addr(pdev);

	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(amd_pmf_pm, NULL, amd_pmf_resume_handler);
/*
 * amd_pmf_init_features - enable the PMF features advertised by the BIOS:
 * static slider / OS power-slider updates, then either Auto Mode or CnQF
 * (they are mutually exclusive; Auto Mode wins when both are advertised).
 */
static void amd_pmf_init_features(struct amd_pmf_dev *dev)
{
	int ret;

	/* Enable Static Slider */
	if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) ||
	    is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
		amd_pmf_init_sps(dev);
		dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call;
		power_supply_reg_notifier(&dev->pwr_src_notifier);
		dev_dbg(dev->dev, "SPS enabled and Platform Profiles registered\n");
	}

	/* Enable Auto Mode */
	if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
		amd_pmf_init_auto_mode(dev);
		dev_dbg(dev->dev, "Auto Mode Init done\n");
	} else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
		   is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) {
		/* Enable Cool n Quiet Framework (CnQF) */
		ret = amd_pmf_init_cnqf(dev);
		if (ret)
			dev_warn(dev->dev, "CnQF Init failed\n");
	}
}
/* Mirror of amd_pmf_init_features: tear down whatever was enabled. */
static void amd_pmf_deinit_features(struct amd_pmf_dev *dev)
{
	if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) ||
	    is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
		power_supply_unreg_notifier(&dev->pwr_src_notifier);
		amd_pmf_deinit_sps(dev);
	}

	if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
		amd_pmf_deinit_auto_mode(dev);
	} else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
		   is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) {
		amd_pmf_deinit_cnqf(dev);
	}
}
/* ACPI IDs; driver_data 0x100 marks older platforms gated by force_load. */
static const struct acpi_device_id amd_pmf_acpi_ids[] = {
	{"AMDI0100", 0x100},
	{"AMDI0102", 0},
	{"AMDI0103", 0},
	{ }
};
MODULE_DEVICE_TABLE(acpi, amd_pmf_acpi_ids);
/*
 * amd_pmf_probe - platform driver probe.
 *
 * Validates the ACPI/PCI identity, reads the SMU base address over SMN,
 * maps the mailbox region, then initializes the APMF interface, features
 * and debugfs.
 */
static int amd_pmf_probe(struct platform_device *pdev)
{
	const struct acpi_device_id *id;
	struct amd_pmf_dev *dev;
	struct pci_dev *rdev;
	u32 base_addr_lo;
	u32 base_addr_hi;
	u64 base_addr;
	u32 val;
	int err;

	id = acpi_match_device(amd_pmf_acpi_ids, &pdev->dev);
	if (!id)
		return -ENODEV;

	/* Older (0x100) platforms only load with the force_load parameter */
	if (id->driver_data == 0x100 && !force_load)
		return -ENODEV;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->dev = &pdev->dev;

	/* The root complex at 0000:00:00.0 identifies the SoC */
	rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
	if (!rdev || !pci_match_id(pmf_pci_ids, rdev)) {
		pci_dev_put(rdev);
		return -ENODEV;
	}

	dev->cpu_id = rdev->device;

	err = amd_smn_read(0, AMD_PMF_BASE_ADDR_LO, &val);
	if (err) {
		dev_err(dev->dev, "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_LO);
		pci_dev_put(rdev);
		return pcibios_err_to_errno(err);
	}

	/* NOTE(review): lo value masked with the _HI_ mask (and vice versa
	 * below) looks inverted but mirrors the sibling amd_pmc driver --
	 * confirm against the register layout before "fixing". */
	base_addr_lo = val & AMD_PMF_BASE_ADDR_HI_MASK;

	err = amd_smn_read(0, AMD_PMF_BASE_ADDR_HI, &val);
	if (err) {
		dev_err(dev->dev, "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_HI);
		pci_dev_put(rdev);
		return pcibios_err_to_errno(err);
	}

	base_addr_hi = val & AMD_PMF_BASE_ADDR_LO_MASK;
	pci_dev_put(rdev);
	base_addr = ((u64)base_addr_hi << 32 | base_addr_lo);

	dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMF_BASE_ADDR_OFFSET,
				    AMD_PMF_MAPPING_SIZE);
	if (!dev->regbase)
		return -ENOMEM;

	mutex_init(&dev->lock);
	mutex_init(&dev->update_mutex);

	apmf_acpi_init(dev);
	platform_set_drvdata(pdev, dev);
	amd_pmf_init_features(dev);
	apmf_install_handler(dev);
	amd_pmf_dbgfs_register(dev);

	dev_info(dev->dev, "registered PMF device successfully\n");

	return 0;
}
/* Platform driver remove: undo probe in reverse order, free metrics buf. */
static void amd_pmf_remove(struct platform_device *pdev)
{
	struct amd_pmf_dev *dev = platform_get_drvdata(pdev);

	amd_pmf_deinit_features(dev);
	apmf_acpi_deinit(dev);
	amd_pmf_dbgfs_unregister(dev);
	mutex_destroy(&dev->lock);
	mutex_destroy(&dev->update_mutex);
	kfree(dev->buf);
}
/* sysfs groups attached to every bound device (CnQF enable knob). */
static const struct attribute_group *amd_pmf_driver_groups[] = {
	&cnqf_feature_attribute_group,
	NULL,
};

static struct platform_driver amd_pmf_driver = {
	.driver = {
		.name = "amd-pmf",
		.acpi_match_table = amd_pmf_acpi_ids,
		.dev_groups = amd_pmf_driver_groups,
		.pm = pm_sleep_ptr(&amd_pmf_pm),
	},
	.probe = amd_pmf_probe,
	.remove_new = amd_pmf_remove,
};
module_platform_driver(amd_pmf_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AMD Platform Management Framework Driver");
| linux-master | drivers/platform/x86/amd/pmf/core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* AMD Platform Management Framework (PMF) Driver
*
* Copyright (c) 2022, Advanced Micro Devices, Inc.
* All Rights Reserved.
*
* Author: Shyam Sundar S K <[email protected]>
*/
#include "pmf.h"
/* Cached static-slider limits, indexed by [power source][power mode]. */
static struct amd_pmf_static_slider_granular config_store;

#ifdef CONFIG_AMD_PMF_DEBUG
/* Human-readable power-mode name for debug output. */
static const char *slider_as_str(unsigned int state)
{
	switch (state) {
	case POWER_MODE_PERFORMANCE:
		return "PERFORMANCE";
	case POWER_MODE_BALANCED_POWER:
		return "BALANCED_POWER";
	case POWER_MODE_POWER_SAVER:
		return "POWER_SAVER";
	default:
		return "Unknown Slider State";
	}
}

/* Human-readable power-source name for debug output. */
static const char *source_as_str(unsigned int state)
{
	switch (state) {
	case POWER_SOURCE_AC:
		return "AC";
	case POWER_SOURCE_DC:
		return "DC";
	default:
		return "Unknown Power State";
	}
}

/* Dump the full static-slider table (all sources x all modes). */
static void amd_pmf_dump_sps_defaults(struct amd_pmf_static_slider_granular *data)
{
	int i, j;

	pr_debug("Static Slider Data - BEGIN\n");

	for (i = 0; i < POWER_SOURCE_MAX; i++) {
		for (j = 0; j < POWER_MODE_MAX; j++) {
			pr_debug("--- Source:%s Mode:%s ---\n", source_as_str(i), slider_as_str(j));
			pr_debug("SPL: %u mW\n", data->prop[i][j].spl);
			pr_debug("SPPT: %u mW\n", data->prop[i][j].sppt);
			pr_debug("SPPT_ApuOnly: %u mW\n", data->prop[i][j].sppt_apu_only);
			pr_debug("FPPT: %u mW\n", data->prop[i][j].fppt);
			pr_debug("STTMinLimit: %u mW\n", data->prop[i][j].stt_min);
			pr_debug("STT_SkinTempLimit_APU: %u C\n",
				 data->prop[i][j].stt_skin_temp[STT_TEMP_APU]);
			pr_debug("STT_SkinTempLimit_HS2: %u C\n",
				 data->prop[i][j].stt_skin_temp[STT_TEMP_HS2]);
		}
	}

	pr_debug("Static Slider Data - END\n");
}
#else
static void amd_pmf_dump_sps_defaults(struct amd_pmf_static_slider_granular *data) {}
#endif
/*
 * amd_pmf_load_defaults_sps - fill config_store from the BIOS static
 * slider table.
 *
 * The APMF output is a flat array; idx walks it in (source, mode) order
 * matching the nested loops.
 */
static void amd_pmf_load_defaults_sps(struct amd_pmf_dev *dev)
{
	struct apmf_static_slider_granular_output output;
	int i, j, idx = 0;

	memset(&config_store, 0, sizeof(config_store));
	apmf_get_static_slider_granular(dev, &output);

	for (i = 0; i < POWER_SOURCE_MAX; i++) {
		for (j = 0; j < POWER_MODE_MAX; j++) {
			config_store.prop[i][j].spl = output.prop[idx].spl;
			config_store.prop[i][j].sppt = output.prop[idx].sppt;
			config_store.prop[i][j].sppt_apu_only =
						output.prop[idx].sppt_apu_only;
			config_store.prop[i][j].fppt = output.prop[idx].fppt;
			config_store.prop[i][j].stt_min = output.prop[idx].stt_min;
			config_store.prop[i][j].stt_skin_temp[STT_TEMP_APU] =
					output.prop[idx].stt_skin_temp[STT_TEMP_APU];
			config_store.prop[i][j].stt_skin_temp[STT_TEMP_HS2] =
					output.prop[idx].stt_skin_temp[STT_TEMP_HS2];
			config_store.prop[i][j].fan_id = output.prop[idx].fan_id;
			idx++;
		}
	}
	amd_pmf_dump_sps_defaults(&config_store);
}
/*
 * amd_pmf_update_slider - push (SET) or fetch (GET) the power limits for
 * one slider position.
 * @dev: PMF driver context
 * @op: SLIDER_OP_SET writes config_store values to the SMU;
 *      SLIDER_OP_GET reads live values into @table
 * @idx: power mode index
 * @table: destination for GET; unused (may be NULL) for SET
 */
void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
			   struct amd_pmf_static_slider_granular *table)
{
	int src = amd_pmf_get_power_source();

	if (op == SLIDER_OP_SET) {
		amd_pmf_send_cmd(dev, SET_SPL, false, config_store.prop[src][idx].spl, NULL);
		amd_pmf_send_cmd(dev, SET_FPPT, false, config_store.prop[src][idx].fppt, NULL);
		amd_pmf_send_cmd(dev, SET_SPPT, false, config_store.prop[src][idx].sppt, NULL);
		amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false,
				 config_store.prop[src][idx].sppt_apu_only, NULL);
		amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false,
				 config_store.prop[src][idx].stt_min, NULL);
		amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
				 config_store.prop[src][idx].stt_skin_temp[STT_TEMP_APU], NULL);
		amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
				 config_store.prop[src][idx].stt_skin_temp[STT_TEMP_HS2], NULL);
	} else if (op == SLIDER_OP_GET) {
		amd_pmf_send_cmd(dev, GET_SPL, true, ARG_NONE, &table->prop[src][idx].spl);
		amd_pmf_send_cmd(dev, GET_FPPT, true, ARG_NONE, &table->prop[src][idx].fppt);
		amd_pmf_send_cmd(dev, GET_SPPT, true, ARG_NONE, &table->prop[src][idx].sppt);
		amd_pmf_send_cmd(dev, GET_SPPT_APU_ONLY, true, ARG_NONE,
				 &table->prop[src][idx].sppt_apu_only);
		amd_pmf_send_cmd(dev, GET_STT_MIN_LIMIT, true, ARG_NONE,
				 &table->prop[src][idx].stt_min);
		amd_pmf_send_cmd(dev, GET_STT_LIMIT_APU, true, ARG_NONE,
				 (u32 *)&table->prop[src][idx].stt_skin_temp[STT_TEMP_APU]);
		amd_pmf_send_cmd(dev, GET_STT_LIMIT_HS2, true, ARG_NONE,
				 (u32 *)&table->prop[src][idx].stt_skin_temp[STT_TEMP_HS2]);
	}
}
/*
 * amd_pmf_set_sps_power_limits - apply the limits for the current platform
 * profile. Returns 0 or a negative errno from the profile lookup.
 */
int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf)
{
	int mode;

	mode = amd_pmf_get_pprof_modes(pmf);
	if (mode < 0)
		return mode;

	amd_pmf_update_slider(pmf, SLIDER_OP_SET, mode, NULL);

	return 0;
}
/*
 * is_pprof_balanced - true when the active platform profile is "balanced".
 * (The boolean comparison already yields bool; the redundant
 * "? true : false" was dropped.)
 */
bool is_pprof_balanced(struct amd_pmf_dev *pmf)
{
	return pmf->current_profile == PLATFORM_PROFILE_BALANCED;
}
/* platform_profile .profile_get callback: report the cached profile. */
static int amd_pmf_profile_get(struct platform_profile_handler *pprof,
			       enum platform_profile_option *profile)
{
	struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);

	*profile = pmf->current_profile;
	return 0;
}
/*
 * amd_pmf_get_pprof_modes - map the active platform profile to a PMF
 * power-mode index, or -EOPNOTSUPP for unsupported profiles.
 */
int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf)
{
	int mode;

	switch (pmf->current_profile) {
	case PLATFORM_PROFILE_PERFORMANCE:
		mode = POWER_MODE_PERFORMANCE;
		break;
	case PLATFORM_PROFILE_BALANCED:
		mode = POWER_MODE_BALANCED_POWER;
		break;
	case PLATFORM_PROFILE_LOW_POWER:
		mode = POWER_MODE_POWER_SAVER;
		break;
	default:
		dev_err(pmf->dev, "Unknown Platform Profile.\n");
		return -EOPNOTSUPP;
	}

	return mode;
}
/*
 * amd_pmf_power_slider_update_event - notify the EC (via APMF) of the
 * current slider position, encoded as a per-source flag bit.
 *
 * Returns 0, or -EOPNOTSUPP for an unmapped profile.
 */
int amd_pmf_power_slider_update_event(struct amd_pmf_dev *dev)
{
	u8 flag = 0;
	int mode;
	int src;

	mode = amd_pmf_get_pprof_modes(dev);
	if (mode < 0)
		return mode;

	src = amd_pmf_get_power_source();

	if (src == POWER_SOURCE_AC) {
		switch (mode) {
		case POWER_MODE_PERFORMANCE:
			flag |= BIT(AC_BEST_PERF);
			break;
		case POWER_MODE_BALANCED_POWER:
			flag |= BIT(AC_BETTER_PERF);
			break;
		case POWER_MODE_POWER_SAVER:
			flag |= BIT(AC_BETTER_BATTERY);
			break;
		default:
			dev_err(dev->dev, "unsupported platform profile\n");
			return -EOPNOTSUPP;
		}

	} else if (src == POWER_SOURCE_DC) {
		switch (mode) {
		case POWER_MODE_PERFORMANCE:
			flag |= BIT(DC_BEST_PERF);
			break;
		case POWER_MODE_BALANCED_POWER:
			flag |= BIT(DC_BETTER_PERF);
			break;
		case POWER_MODE_POWER_SAVER:
			flag |= BIT(DC_BATTERY_SAVER);
			break;
		default:
			dev_err(dev->dev, "unsupported platform profile\n");
			return -EOPNOTSUPP;
		}
	}

	apmf_os_power_slider_update(dev, flag);

	return 0;
}
/*
 * amd_pmf_profile_set - platform_profile .profile_set callback: cache the
 * new profile, then notify the EC and/or push static-slider limits
 * depending on which APMF functions the BIOS advertises.
 */
static int amd_pmf_profile_set(struct platform_profile_handler *pprof,
			       enum platform_profile_option profile)
{
	struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);
	int ret = 0;

	pmf->current_profile = profile;

	/* Notify EC about the slider position change */
	if (is_apmf_func_supported(pmf, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
		ret = amd_pmf_power_slider_update_event(pmf);
		if (ret)
			return ret;
	}

	if (is_apmf_func_supported(pmf, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
		ret = amd_pmf_set_sps_power_limits(pmf);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * amd_pmf_init_sps - initialize the static power slider: load BIOS
 * defaults, apply the balanced limits, and register the platform_profile
 * handler with low-power/balanced/performance choices.
 *
 * Returns 0 or the platform_profile_register() error.
 */
int amd_pmf_init_sps(struct amd_pmf_dev *dev)
{
	int err;

	dev->current_profile = PLATFORM_PROFILE_BALANCED;

	if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
		amd_pmf_load_defaults_sps(dev);

		/* update SPS balanced power mode thermals */
		amd_pmf_set_sps_power_limits(dev);
	}

	dev->pprof.profile_get = amd_pmf_profile_get;
	dev->pprof.profile_set = amd_pmf_profile_set;

	/* Setup supported modes */
	set_bit(PLATFORM_PROFILE_LOW_POWER, dev->pprof.choices);
	set_bit(PLATFORM_PROFILE_BALANCED, dev->pprof.choices);
	set_bit(PLATFORM_PROFILE_PERFORMANCE, dev->pprof.choices);

	/* Create platform_profile structure and register */
	err = platform_profile_register(&dev->pprof);
	if (err)
		dev_err(dev->dev, "Failed to register SPS support, this is most likely an SBIOS bug: %d\n",
			err);

	return err;
}
/* Unregister the platform_profile handler registered in amd_pmf_init_sps. */
void amd_pmf_deinit_sps(struct amd_pmf_dev *dev)
{
	platform_profile_remove();
}
| linux-master | drivers/platform/x86/amd/pmf/sps.c |
// SPDX-License-Identifier: GPL-2.0
/*
* AMD Platform Management Framework Driver
*
* Copyright (c) 2022, Advanced Micro Devices, Inc.
* All Rights Reserved.
*
* Author: Shyam Sundar S K <[email protected]>
*/
#include <linux/acpi.h>
#include "pmf.h"
/* Bit positions in apmf_sbios_req.pending_req for SBIOS notifications */
#define APMF_CQL_NOTIFICATION  2
#define APMF_AMT_NOTIFICATION  3
/*
 * apmf_if_call - evaluate the APMF ACPI method with function index @fn.
 * @pdev: PMF driver context
 * @fn: APMF function number
 * @param: optional input buffer; when NULL an integer 0 is passed instead
 *
 * Returns the ACPI result object (caller must kfree()) or NULL on failure.
 */
static union acpi_object *apmf_if_call(struct amd_pmf_dev *pdev, int fn, struct acpi_buffer *param)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	acpi_handle ahandle = ACPI_HANDLE(pdev->dev);
	struct acpi_object_list apmf_if_arg_list;
	union acpi_object apmf_if_args[2];
	acpi_status status;

	apmf_if_arg_list.count = 2;
	apmf_if_arg_list.pointer = &apmf_if_args[0];

	apmf_if_args[0].type = ACPI_TYPE_INTEGER;
	apmf_if_args[0].integer.value = fn;

	if (param) {
		apmf_if_args[1].type = ACPI_TYPE_BUFFER;
		apmf_if_args[1].buffer.length = param->length;
		apmf_if_args[1].buffer.pointer = param->pointer;
	} else {
		apmf_if_args[1].type = ACPI_TYPE_INTEGER;
		apmf_if_args[1].integer.value = 0;
	}

	status = acpi_evaluate_object(ahandle, "APMF", &apmf_if_arg_list, &buffer);
	if (ACPI_FAILURE(status)) {
		dev_err(pdev->dev, "APMF method:%d call failed\n", fn);
		kfree(buffer.pointer);
		return NULL;
	}

	return buffer.pointer;
}
/*
 * apmf_if_call_store_buffer - call APMF function @fn and copy its buffer
 * result into @dest.
 * @pdev: PMF driver context
 * @fn: APMF function number
 * @dest: destination for the first @out_sz bytes of the result
 * @out_sz: number of bytes the caller expects
 *
 * The first u16 of the buffer is a self-describing size; it must fit
 * inside the ACPI buffer and must cover at least @out_sz bytes.
 * Returns 0, -EIO on a failed call, or -EINVAL on a malformed buffer.
 * (Fixed "smaller then" typo in the error message.)
 */
static int apmf_if_call_store_buffer(struct amd_pmf_dev *pdev, int fn, void *dest, size_t out_sz)
{
	union acpi_object *info;
	size_t size;
	int err = 0;

	info = apmf_if_call(pdev, fn, NULL);
	if (!info)
		return -EIO;

	if (info->type != ACPI_TYPE_BUFFER) {
		dev_err(pdev->dev, "object is not a buffer\n");
		err = -EINVAL;
		goto out;
	}

	if (info->buffer.length < 2) {
		dev_err(pdev->dev, "buffer too small\n");
		err = -EINVAL;
		goto out;
	}

	size = *(u16 *)info->buffer.pointer;
	if (info->buffer.length < size) {
		dev_err(pdev->dev, "buffer smaller than headersize %u < %zu\n",
			info->buffer.length, size);
		err = -EINVAL;
		goto out;
	}

	if (size < out_sz) {
		dev_err(pdev->dev, "buffer too small %zu\n", size);
		err = -EINVAL;
		goto out;
	}

	memcpy(dest, info->buffer.pointer, out_sz);
out:
	kfree(info);
	return err;
}
/* Test whether BIOS advertises APMF function @index (1-based). */
int is_apmf_func_supported(struct amd_pmf_dev *pdev, unsigned long index)
{
	/* If bit-n is set, that indicates function n+1 is supported */
	return !!(pdev->supported_func & BIT(index - 1));
}
/* Fetch the BIOS static-slider table; -EINVAL if the function is absent. */
int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev,
				    struct apmf_static_slider_granular_output *data)
{
	if (!is_apmf_func_supported(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
		return -EINVAL;

	return apmf_if_call_store_buffer(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR,
					 data, sizeof(*data));
}
int apmf_os_power_slider_update(struct amd_pmf_dev *pdev, u8 event)
{
struct os_power_slider args;
struct acpi_buffer params;
union acpi_object *info;
int err = 0;
args.size = sizeof(args);
args.slider_event = event;
params.length = sizeof(args);
params.pointer = (void *)&args;
info = apmf_if_call(pdev, APMF_FUNC_OS_POWER_SLIDER_UPDATE, ¶ms);
if (!info)
err = -EIO;
kfree(info);
return err;
}
/*
 * apmf_sbios_heartbeat_notify - periodic worker: ping the SBIOS heartbeat
 * and reschedule at hb_interval seconds; stops rescheduling on failure.
 */
static void apmf_sbios_heartbeat_notify(struct work_struct *work)
{
	struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, heart_beat.work);
	union acpi_object *info;

	dev_dbg(dev->dev, "Sending heartbeat to SBIOS\n");
	info = apmf_if_call(dev, APMF_FUNC_SBIOS_HEARTBEAT, NULL);
	if (!info)
		goto out;

	schedule_delayed_work(&dev->heart_beat, msecs_to_jiffies(dev->hb_interval * 1000));

out:
	kfree(info);
}
/*
 * apmf_update_fan_idx - tell the BIOS the fan control mode and index.
 * @pdev: PMF driver context
 * @manual: true for manual fan control, false for automatic
 * @idx: fan table index
 *
 * Returns 0 or -EIO when the APMF call fails. (The call previously passed
 * a mangled "&para" HTML entity, restored to &params; the redundant
 * goto-to-next-line was also dropped.)
 */
int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx)
{
	union acpi_object *info;
	struct apmf_fan_idx args;
	struct acpi_buffer params;
	int err = 0;

	args.size = sizeof(args);
	args.fan_ctl_mode = manual;
	args.fan_ctl_idx = idx;

	params.length = sizeof(args);
	params.pointer = (void *)&args;

	info = apmf_if_call(pdev, APMF_FUNC_SET_FAN_IDX, &params);
	if (!info)
		err = -EIO;

	kfree(info);
	return err;
}
/* Fetch the BIOS auto-mode defaults table. */
int apmf_get_auto_mode_def(struct amd_pmf_dev *pdev, struct apmf_auto_mode *data)
{
	return apmf_if_call_store_buffer(pdev, APMF_FUNC_AUTO_MODE, data, sizeof(*data));
}
/* Read the pending SBIOS requests (AMT/CQL notification state). */
int apmf_get_sbios_requests(struct amd_pmf_dev *pdev, struct apmf_sbios_req *req)
{
	return apmf_if_call_store_buffer(pdev, APMF_FUNC_SBIOS_REQUESTS,
					 req, sizeof(*req));
}
/*
 * apmf_event_handler - ACPI notify handler: process AMT and CQL requests
 * from the SBIOS. Serialized with the metrics worker via update_mutex.
 */
static void apmf_event_handler(acpi_handle handle, u32 event, void *data)
{
	struct amd_pmf_dev *pmf_dev = data;
	struct apmf_sbios_req req;
	int ret;

	mutex_lock(&pmf_dev->update_mutex);
	ret = apmf_get_sbios_requests(pmf_dev, &req);
	if (ret) {
		dev_err(pmf_dev->dev, "Failed to get SBIOS requests:%d\n", ret);
		goto out;
	}

	if (req.pending_req & BIT(APMF_AMT_NOTIFICATION)) {
		dev_dbg(pmf_dev->dev, "AMT is supported and notifications %s\n",
			req.amt_event ? "Enabled" : "Disabled");
		pmf_dev->amt_enabled = !!req.amt_event;

		if (pmf_dev->amt_enabled)
			amd_pmf_handle_amt(pmf_dev);
		else
			amd_pmf_reset_amt(pmf_dev);
	}

	if (req.pending_req & BIT(APMF_CQL_NOTIFICATION)) {
		dev_dbg(pmf_dev->dev, "CQL is supported and notifications %s\n",
			req.cql_event ? "Enabled" : "Disabled");

		/* update the target mode information */
		if (pmf_dev->amt_enabled)
			amd_pmf_update_2_cql(pmf_dev, req.cql_event);
	}
out:
	mutex_unlock(&pmf_dev->update_mutex);
}
/*
 * Query the APMF interface capabilities and cache the supported-function
 * bitmask on the device.  Returns 0 on success or a negative errno.
 */
static int apmf_if_verify_interface(struct amd_pmf_dev *pdev)
{
	struct apmf_verify_interface output;
	int ret;

	ret = apmf_if_call_store_buffer(pdev, APMF_FUNC_VERIFY_INTERFACE, &output,
					sizeof(output));
	if (ret)
		return ret;

	pdev->supported_func = output.supported_functions;

	dev_dbg(pdev->dev, "supported functions:0x%x notifications:0x%x\n",
		output.supported_functions, output.notification_mask);

	return 0;
}
/*
 * Read the APMF system parameters and cache the heartbeat interval.
 * Returns -EINVAL when the platform does not advertise the function.
 *
 * Fix: removed the dead store `params.flags = params.flags & params.valid_mask;`
 * — `params` is a local and nothing read `flags` afterwards, so the masking
 * had no effect.
 */
static int apmf_get_system_params(struct amd_pmf_dev *dev)
{
	struct apmf_system_params params;
	int err;

	if (!is_apmf_func_supported(dev, APMF_FUNC_GET_SYS_PARAMS))
		return -EINVAL;

	err = apmf_if_call_store_buffer(dev, APMF_FUNC_GET_SYS_PARAMS, &params, sizeof(params));
	if (err)
		return err;

	dev_dbg(dev->dev, "system params mask:0x%x flags:0x%x cmd_code:0x%x heartbeat:%d\n",
		params.valid_mask,
		params.flags,
		params.command_code,
		params.heartbeat_int);

	/* Non-zero interval arms the SBIOS heartbeat work in apmf_acpi_init(). */
	dev->hb_interval = params.heartbeat_int;

	return 0;
}
/* Read the dynamic slider defaults for AC (mains) power into @data. */
int apmf_get_dyn_slider_def_ac(struct amd_pmf_dev *pdev, struct apmf_dyn_slider_output *data)
{
	int ret;

	ret = apmf_if_call_store_buffer(pdev, APMF_FUNC_DYN_SLIDER_AC, data, sizeof(*data));

	return ret;
}
/* Read the dynamic slider defaults for DC (battery) power into @data. */
int apmf_get_dyn_slider_def_dc(struct amd_pmf_dev *pdev, struct apmf_dyn_slider_output *data)
{
	int ret;

	ret = apmf_if_call_store_buffer(pdev, APMF_FUNC_DYN_SLIDER_DC, data, sizeof(*data));

	return ret;
}
/*
 * Install the APMF ACPI notify handler when both auto-mode and SBIOS
 * request functions are supported, then run it once to pick up any
 * notification that fired before installation.
 */
int apmf_install_handler(struct amd_pmf_dev *pmf_dev)
{
	acpi_handle ahandle = ACPI_HANDLE(pmf_dev->dev);
	acpi_status status;

	if (!is_apmf_func_supported(pmf_dev, APMF_FUNC_AUTO_MODE) ||
	    !is_apmf_func_supported(pmf_dev, APMF_FUNC_SBIOS_REQUESTS))
		return 0;

	status = acpi_install_notify_handler(ahandle, ACPI_ALL_NOTIFY,
					     apmf_event_handler, pmf_dev);
	if (ACPI_FAILURE(status)) {
		dev_err(pmf_dev->dev, "failed to install notify handler\n");
		return -ENODEV;
	}

	/* Call the handler once manually to catch up with possibly missed notifies. */
	apmf_event_handler(ahandle, 0, pmf_dev);

	return 0;
}
void apmf_acpi_deinit(struct amd_pmf_dev *pmf_dev)
{
acpi_handle ahandle = ACPI_HANDLE(pmf_dev->dev);
if (pmf_dev->hb_interval)
cancel_delayed_work_sync(&pmf_dev->heart_beat);
if (is_apmf_func_supported(pmf_dev, APMF_FUNC_AUTO_MODE) &&
is_apmf_func_supported(pmf_dev, APMF_FUNC_SBIOS_REQUESTS))
acpi_remove_notify_handler(ahandle, ACPI_ALL_NOTIFY, apmf_event_handler);
}
/*
 * Initialize the APMF ACPI interface: verify it, read system parameters,
 * and arm the SBIOS heartbeat work when the BIOS requested a non-zero
 * interval.  Returns 0 on success or a negative errno.
 */
int apmf_acpi_init(struct amd_pmf_dev *pmf_dev)
{
	int ret;

	ret = apmf_if_verify_interface(pmf_dev);
	if (ret) {
		dev_err(pmf_dev->dev, "APMF verify interface failed :%d\n", ret);
		return ret;
	}

	ret = apmf_get_system_params(pmf_dev);
	if (ret) {
		dev_dbg(pmf_dev->dev, "APMF apmf_get_system_params failed :%d\n", ret);
		return ret;
	}

	if (pmf_dev->hb_interval) {
		/* send heartbeats only if the interval is not zero */
		INIT_DELAYED_WORK(&pmf_dev->heart_beat, apmf_sbios_heartbeat_notify);
		schedule_delayed_work(&pmf_dev->heart_beat, 0);
	}

	return 0;
}
| linux-master | drivers/platform/x86/amd/pmf/acpi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* AMD SoC Power Management Controller Driver
*
* Copyright (c) 2020, Advanced Micro Devices, Inc.
* All Rights Reserved.
*
* Author: Shyam Sundar S K <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/amd_nb.h>
#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/limits.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/serio.h>
#include <linux/suspend.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include "pmc.h"
/* SMU communication registers */
#define AMD_PMC_REGISTER_MESSAGE 0x538
#define AMD_PMC_REGISTER_RESPONSE 0x980
#define AMD_PMC_REGISTER_ARGUMENT 0x9BC
/* PMC Scratch Registers */
#define AMD_PMC_SCRATCH_REG_CZN 0x94
#define AMD_PMC_SCRATCH_REG_YC 0xD14
/* STB Registers */
#define AMD_PMC_STB_PMI_0 0x03E30600
#define AMD_PMC_STB_S2IDLE_PREPARE 0xC6000001
#define AMD_PMC_STB_S2IDLE_RESTORE 0xC6000002
#define AMD_PMC_STB_S2IDLE_CHECK 0xC6000003
#define AMD_PMC_STB_DUMMY_PC 0xC6000007
/* STB S2D(Spill to DRAM) has different message port offset */
#define AMD_S2D_REGISTER_MESSAGE 0xA20
#define AMD_S2D_REGISTER_RESPONSE 0xA80
#define AMD_S2D_REGISTER_ARGUMENT 0xA88
/* STB Spill to DRAM Parameters */
#define S2D_TELEMETRY_BYTES_MAX 0x100000
#define S2D_TELEMETRY_DRAMBYTES_MAX 0x1000000
/* Base address of SMU for mapping physical address to virtual address */
#define AMD_PMC_MAPPING_SIZE 0x01000
#define AMD_PMC_BASE_ADDR_OFFSET 0x10000
#define AMD_PMC_BASE_ADDR_LO 0x13B102E8
#define AMD_PMC_BASE_ADDR_HI 0x13B102EC
#define AMD_PMC_BASE_ADDR_LO_MASK GENMASK(15, 0)
#define AMD_PMC_BASE_ADDR_HI_MASK GENMASK(31, 20)
/* SMU Response Codes */
#define AMD_PMC_RESULT_OK 0x01
#define AMD_PMC_RESULT_CMD_REJECT_BUSY 0xFC
#define AMD_PMC_RESULT_CMD_REJECT_PREREQ 0xFD
#define AMD_PMC_RESULT_CMD_UNKNOWN 0xFE
#define AMD_PMC_RESULT_FAILED 0xFF
/* FCH SSC Registers */
#define FCH_S0I3_ENTRY_TIME_L_OFFSET 0x30
#define FCH_S0I3_ENTRY_TIME_H_OFFSET 0x34
#define FCH_S0I3_EXIT_TIME_L_OFFSET 0x38
#define FCH_S0I3_EXIT_TIME_H_OFFSET 0x3C
#define FCH_SSC_MAPPING_SIZE 0x800
#define FCH_BASE_PHY_ADDR_LOW 0xFED81100
#define FCH_BASE_PHY_ADDR_HIGH 0x00000000
/* SMU Message Definations */
#define SMU_MSG_GETSMUVERSION 0x02
#define SMU_MSG_LOG_GETDRAM_ADDR_HI 0x04
#define SMU_MSG_LOG_GETDRAM_ADDR_LO 0x05
#define SMU_MSG_LOG_START 0x06
#define SMU_MSG_LOG_RESET 0x07
#define SMU_MSG_LOG_DUMP_DATA 0x08
#define SMU_MSG_GET_SUP_CONSTRAINTS 0x09
/* List of supported CPU ids */
#define AMD_CPU_ID_RV 0x15D0
#define AMD_CPU_ID_RN 0x1630
#define AMD_CPU_ID_PCO AMD_CPU_ID_RV
#define AMD_CPU_ID_CZN AMD_CPU_ID_RN
#define AMD_CPU_ID_YC 0x14B5
#define AMD_CPU_ID_CB 0x14D8
#define AMD_CPU_ID_PS 0x14E8
#define AMD_CPU_ID_SP 0x14A4
#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT 0x1507
#define PMC_MSG_DELAY_MIN_US 50
#define RESPONSE_REGISTER_LOOP_MAX 20000
#define DELAY_MIN_US 2000
#define DELAY_MAX_US 3000
#define FIFO_SIZE 4096
/* SMU mailbox message IDs used for the OS-hint (s2idle entry/exit) flow. */
enum amd_pmc_def {
	MSG_TEST = 0x01,
	MSG_OS_HINT_PCO,	/* OS hint message for Picasso-class parts */
	MSG_OS_HINT_RN,		/* OS hint message for Renoir and newer */
};
/* Argument IDs for the Spill-to-DRAM (S2D) mailbox commands. */
enum s2d_arg {
	S2D_TELEMETRY_SIZE = 0x01,	/* size of the telemetry region */
	S2D_PHYS_ADDR_LOW,		/* low 32 bits of the STB DRAM address */
	S2D_PHYS_ADDR_HIGH,		/* high 32 bits of the STB DRAM address */
	S2D_NUM_SAMPLES,		/* bytes pushed so far (read pointer) */
	S2D_DRAM_SIZE,			/* total DRAM reserved for the STB */
};
/* Maps a human-readable IP-block name to its bit in the SMU active-IPs mask. */
struct amd_pmc_bit_map {
	const char *name;
	u32 bit_mask;
};
/*
 * SoC15 IP-block table; index order matches the per-IP entries of the SMU
 * metrics table (timecondition_notmet_*), see smu_fw_info_show().
 */
static const struct amd_pmc_bit_map soc15_ip_blk[] = {
	{"DISPLAY",	BIT(0)},
	{"CPU",		BIT(1)},
	{"GFX",		BIT(2)},
	{"VDD",		BIT(3)},
	{"ACP",		BIT(4)},
	{"VCN",		BIT(5)},
	{"ISP",		BIT(6)},
	{"NBIO",	BIT(7)},
	{"DF",		BIT(8)},
	{"USB3_0",	BIT(9)},
	{"USB3_1",	BIT(10)},
	{"LAPIC",	BIT(11)},
	{"USB3_2",	BIT(12)},
	{"USB3_3",	BIT(13)},
	{"USB3_4",	BIT(14)},
	{"USB4_0",	BIT(15)},
	{"USB4_1",	BIT(16)},
	{"MPM",		BIT(17)},
	{"JPEG",	BIT(18)},
	{"IPU",		BIT(19)},
	{"UMSCH",	BIT(20)},
	{}
};
static bool enable_stb;
module_param(enable_stb, bool, 0644);
MODULE_PARM_DESC(enable_stb, "Enable the STB debug mechanism");
static bool disable_workarounds;
module_param(disable_workarounds, bool, 0644);
MODULE_PARM_DESC(disable_workarounds, "Disable workarounds for platform bugs");
static struct amd_pmc_dev pmc;
static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret);
static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf);
static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data);
static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset)
{
return ioread32(dev->regbase + reg_offset);
}
/* MMIO write of @val to the 32-bit PMC register at @reg_offset. */
static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u32 val)
{
	void __iomem *addr = dev->regbase + reg_offset;

	iowrite32(val, addr);
}
/*
 * SMU metrics table, copied verbatim out of SMU-owned DRAM (see
 * get_metrics_table()).  The layout mirrors the PMFW definition — it is
 * __packed and must not be reordered.  Times are in microseconds.
 */
struct smu_metrics {
	u32 table_version;
	u32 hint_count;
	u32 s0i3_last_entry_status;	/* non-zero: last suspend reached s0i3 */
	u32 timein_s0i2;
	u64 timeentering_s0i3_lastcapture;
	u64 timeentering_s0i3_totaltime;
	u64 timeto_resume_to_os_lastcapture;
	u64 timeto_resume_to_os_totaltime;
	u64 timein_s0i3_lastcapture;
	u64 timein_s0i3_totaltime;
	u64 timein_swdrips_lastcapture;
	u64 timein_swdrips_totaltime;
	/* per-IP "condition not met" counters, indexed like soc15_ip_blk[] */
	u64 timecondition_notmet_lastcapture[32];
	u64 timecondition_notmet_totaltime[32];
} __packed;
static int amd_pmc_stb_debugfs_open(struct inode *inode, struct file *filp)
{
struct amd_pmc_dev *dev = filp->f_inode->i_private;
u32 size = FIFO_SIZE * sizeof(u32);
u32 *buf;
int rc;
buf = kzalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
rc = amd_pmc_read_stb(dev, buf);
if (rc) {
kfree(buf);
return rc;
}
filp->private_data = buf;
return rc;
}
/* debugfs read: serve from the snapshot taken at open time. */
static ssize_t amd_pmc_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
					loff_t *pos)
{
	void *snapshot = filp->private_data;

	if (!snapshot)
		return -EINVAL;

	return simple_read_from_buffer(buf, size, pos, snapshot, FIFO_SIZE * sizeof(u32));
}
/* debugfs release: drop the snapshot buffer (kfree(NULL) is a no-op). */
static int amd_pmc_stb_debugfs_release(struct inode *inode, struct file *filp)
{
	void *snapshot = filp->private_data;

	kfree(snapshot);
	return 0;
}
/* File operations for the legacy (SMN-FIFO based) stb_read debugfs file. */
static const struct file_operations amd_pmc_stb_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = amd_pmc_stb_debugfs_open,
	.read = amd_pmc_stb_debugfs_read,
	.release = amd_pmc_stb_debugfs_release,
};
/*
 * debugfs open for Spill-to-DRAM capable parts: query the current push
 * location over the S2D mailbox, then snapshot the most recent telemetry
 * window from the mapped STB DRAM into filp->private_data.
 *
 * NOTE: msg_port is toggled to 1 only around the S2D command and must be
 * restored to 0 before any other SMU traffic.
 */
static int amd_pmc_stb_debugfs_open_v2(struct inode *inode, struct file *filp)
{
	struct amd_pmc_dev *dev = filp->f_inode->i_private;
	u32 *buf, fsize, num_samples, stb_rdptr_offset = 0;
	int ret;

	/* Write dummy postcode while reading the STB buffer */
	ret = amd_pmc_write_stb(dev, AMD_PMC_STB_DUMMY_PC);
	if (ret)
		dev_err(dev->dev, "error writing to STB: %d\n", ret);

	buf = kzalloc(S2D_TELEMETRY_BYTES_MAX, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Spill to DRAM num_samples uses separate SMU message port */
	dev->msg_port = 1;

	/* Get the num_samples to calculate the last push location */
	ret = amd_pmc_send_cmd(dev, S2D_NUM_SAMPLES, &num_samples, dev->s2d_msg_id, true);
	/* Clear msg_port for other SMU operation */
	dev->msg_port = 0;
	if (ret) {
		dev_err(dev->dev, "error: S2D_NUM_SAMPLES not supported : %d\n", ret);
		kfree(buf);
		return ret;
	}

	/* Start capturing data from the last push location */
	if (num_samples > S2D_TELEMETRY_BYTES_MAX) {
		fsize  = S2D_TELEMETRY_BYTES_MAX;
		stb_rdptr_offset = num_samples - fsize;
	} else {
		fsize = num_samples;
		stb_rdptr_offset = 0;
	}
	memcpy_fromio(buf, dev->stb_virt_addr + stb_rdptr_offset, fsize);
	filp->private_data = buf;

	return 0;
}
/* debugfs read (S2D variant): serve from the snapshot taken at open time. */
static ssize_t amd_pmc_stb_debugfs_read_v2(struct file *filp, char __user *buf, size_t size,
					   loff_t *pos)
{
	void *snapshot = filp->private_data;

	if (!snapshot)
		return -EINVAL;

	return simple_read_from_buffer(buf, size, pos, snapshot, S2D_TELEMETRY_BYTES_MAX);
}
/* debugfs release (S2D variant): drop the snapshot buffer. */
static int amd_pmc_stb_debugfs_release_v2(struct inode *inode, struct file *filp)
{
	void *snapshot = filp->private_data;

	kfree(snapshot);
	return 0;
}
/* File operations for the Spill-to-DRAM stb_read debugfs file. */
static const struct file_operations amd_pmc_stb_debugfs_fops_v2 = {
	.owner = THIS_MODULE,
	.open = amd_pmc_stb_debugfs_open_v2,
	.read = amd_pmc_stb_debugfs_read_v2,
	.release = amd_pmc_stb_debugfs_release_v2,
};
/*
 * Set the per-SoC IP-block count and the S2D mailbox message ID.
 * CPU IDs not listed here keep num_ips/s2d_msg_id at their zero defaults.
 */
static void amd_pmc_get_ip_info(struct amd_pmc_dev *dev)
{
	switch (dev->cpu_id) {
	case AMD_CPU_ID_PCO:
	case AMD_CPU_ID_RN:
	case AMD_CPU_ID_YC:
	case AMD_CPU_ID_CB:
		dev->num_ips = 12;
		dev->s2d_msg_id = 0xBE;
		break;
	case AMD_CPU_ID_PS:
		dev->num_ips = 21;
		dev->s2d_msg_id = 0x85;
		break;
	}
}
/*
 * One-time SMU logging setup: fetch the active-IPs mask, map the SMU
 * metrics DRAM region, then reset and start logging.  Safe to call
 * repeatedly — the queries are only issued while the cached values are
 * still unset.  Returns 0 on success or a negative errno.
 */
static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev)
{
	if (dev->cpu_id == AMD_CPU_ID_PCO) {
		dev_warn_once(dev->dev, "SMU debugging info not supported on this platform\n");
		return -EINVAL;
	}

	/* Get Active devices list from SMU */
	if (!dev->active_ips)
		amd_pmc_send_cmd(dev, 0, &dev->active_ips, SMU_MSG_GET_SUP_CONSTRAINTS, true);

	/* Get dram address */
	if (!dev->smu_virt_addr) {
		u32 phys_addr_low, phys_addr_hi;
		u64 smu_phys_addr;

		amd_pmc_send_cmd(dev, 0, &phys_addr_low, SMU_MSG_LOG_GETDRAM_ADDR_LO, true);
		amd_pmc_send_cmd(dev, 0, &phys_addr_hi, SMU_MSG_LOG_GETDRAM_ADDR_HI, true);
		smu_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low);

		dev->smu_virt_addr = devm_ioremap(dev->dev, smu_phys_addr,
						  sizeof(struct smu_metrics));
		if (!dev->smu_virt_addr)
			return -ENOMEM;
	}

	/* Start the logging */
	amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_RESET, false);
	amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_START, false);

	return 0;
}
/*
 * Copy the SMU metrics table out of SMU DRAM into @table, lazily setting
 * up the logging/mapping on first use.  Picasso parts have no metrics
 * table, so they report -ENODEV.
 */
static int get_metrics_table(struct amd_pmc_dev *pdev, struct smu_metrics *table)
{
	int rc;

	if (!pdev->smu_virt_addr) {
		rc = amd_pmc_setup_smu_logging(pdev);
		if (rc)
			return rc;
	}

	if (pdev->cpu_id == AMD_CPU_ID_PCO)
		return -ENODEV;

	memcpy_fromio(table, pdev->smu_virt_addr, sizeof(*table));
	return 0;
}
static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev)
{
struct smu_metrics table;
if (get_metrics_table(pdev, &table))
return;
if (!table.s0i3_last_entry_status)
dev_warn(pdev->dev, "Last suspend didn't reach deepest state\n");
pm_report_hw_sleep_time(table.s0i3_last_entry_status ?
table.timein_s0i3_lastcapture : 0);
}
/*
 * Query the SMU firmware version and cache the program/major/minor/rev
 * fields on the device.  Not available on Picasso (-ENODEV).
 */
static int amd_pmc_get_smu_version(struct amd_pmc_dev *dev)
{
	u32 version;
	int err;

	if (dev->cpu_id == AMD_CPU_ID_PCO)
		return -ENODEV;

	err = amd_pmc_send_cmd(dev, 0, &version, SMU_MSG_GETSMUVERSION, true);
	if (err)
		return err;

	/* Version word layout: [program:8][major:8][minor:8][rev:8] */
	dev->smu_program = (version >> 24) & GENMASK(7, 0);
	dev->major = (version >> 16) & GENMASK(7, 0);
	dev->minor = (version >> 8) & GENMASK(7, 0);
	dev->rev = (version >> 0) & GENMASK(7, 0);

	dev_dbg(dev->dev, "SMU program %u version is %u.%u.%u\n",
		dev->smu_program, dev->major, dev->minor, dev->rev);

	return 0;
}
/* sysfs: report the SMU firmware version, fetching it lazily on first read. */
static ssize_t smu_fw_version_show(struct device *d, struct device_attribute *attr,
				   char *buf)
{
	struct amd_pmc_dev *dev = dev_get_drvdata(d);
	int rc;

	if (!dev->major) {
		rc = amd_pmc_get_smu_version(dev);
		if (rc)
			return rc;
	}

	return sysfs_emit(buf, "%u.%u.%u\n", dev->major, dev->minor, dev->rev);
}
/* sysfs: report the SMU program ID, fetching the version lazily on first read. */
static ssize_t smu_program_show(struct device *d, struct device_attribute *attr,
				char *buf)
{
	struct amd_pmc_dev *dev = dev_get_drvdata(d);
	int rc;

	if (!dev->major) {
		rc = amd_pmc_get_smu_version(dev);
		if (rc)
			return rc;
	}

	return sysfs_emit(buf, "%u\n", dev->smu_program);
}
static DEVICE_ATTR_RO(smu_fw_version);
static DEVICE_ATTR_RO(smu_program);
/* Hide the SMU sysfs attributes on Picasso, which has no SMU version API. */
static umode_t pmc_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
	struct amd_pmc_dev *pdev = dev_get_drvdata(kobj_to_dev(kobj));

	return pdev->cpu_id == AMD_CPU_ID_PCO ? 0 : 0444;
}
/* sysfs attribute wiring: both SMU attributes share the visibility hook. */
static struct attribute *pmc_attrs[] = {
	&dev_attr_smu_fw_version.attr,
	&dev_attr_smu_program.attr,
	NULL,
};

static struct attribute_group pmc_attr_group = {
	.attrs = pmc_attrs,
	.is_visible = pmc_attr_is_visible,
};

/* Registered via the platform driver's .dev_groups. */
static const struct attribute_group *pmc_groups[] = {
	&pmc_attr_group,
	NULL,
};
/*
 * debugfs: dump the SMU statistics table plus the per-IP "active time"
 * counters for every IP present in the SMU's active-IPs mask.
 */
static int smu_fw_info_show(struct seq_file *s, void *unused)
{
	struct amd_pmc_dev *dev = s->private;
	struct smu_metrics table;
	int idx;

	if (get_metrics_table(dev, &table))
		return -EINVAL;

	seq_puts(s, "\n=== SMU Statistics ===\n");
	seq_printf(s, "Table Version: %d\n", table.table_version);
	seq_printf(s, "Hint Count: %d\n", table.hint_count);
	seq_printf(s, "Last S0i3 Status: %s\n", table.s0i3_last_entry_status ? "Success" :
		   "Unknown/Fail");
	seq_printf(s, "Time (in us) to S0i3: %lld\n", table.timeentering_s0i3_lastcapture);
	seq_printf(s, "Time (in us) in S0i3: %lld\n", table.timein_s0i3_lastcapture);
	seq_printf(s, "Time (in us) to resume from S0i3: %lld\n",
		   table.timeto_resume_to_os_lastcapture);

	seq_puts(s, "\n=== Active time (in us) ===\n");
	/* soc15_ip_blk[] index order matches the metrics-table entries. */
	for (idx = 0 ; idx < dev->num_ips ; idx++) {
		if (soc15_ip_blk[idx].bit_mask & dev->active_ips)
			seq_printf(s, "%-8s : %lld\n", soc15_ip_blk[idx].name,
				   table.timecondition_notmet_lastcapture[idx]);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(smu_fw_info);
/*
 * debugfs: compute S0ix residency from the FCH SSC entry/exit timestamp
 * registers, mapping them lazily on first use.  The counters tick at
 * 48 MHz, so the delta is divided by 48 to get microseconds.
 */
static int s0ix_stats_show(struct seq_file *s, void *unused)
{
	struct amd_pmc_dev *dev = s->private;
	u64 entry_time, exit_time, residency;

	/* Use FCH registers to get the S0ix stats */
	if (!dev->fch_virt_addr) {
		u32 base_addr_lo = FCH_BASE_PHY_ADDR_LOW;
		u32 base_addr_hi = FCH_BASE_PHY_ADDR_HIGH;
		u64 fch_phys_addr = ((u64)base_addr_hi << 32 | base_addr_lo);

		dev->fch_virt_addr = devm_ioremap(dev->dev, fch_phys_addr, FCH_SSC_MAPPING_SIZE);
		if (!dev->fch_virt_addr)
			return -ENOMEM;
	}

	/* 64-bit timestamps are read high word first, then low word. */
	entry_time = ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_H_OFFSET);
	entry_time = entry_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_ENTRY_TIME_L_OFFSET);

	exit_time = ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_H_OFFSET);
	exit_time = exit_time << 32 | ioread32(dev->fch_virt_addr + FCH_S0I3_EXIT_TIME_L_OFFSET);

	/* It's in 48MHz. We need to convert it */
	residency = exit_time - entry_time;
	do_div(residency, 48);

	seq_puts(s, "=== S0ix statistics ===\n");
	seq_printf(s, "S0ix Entry Time: %lld\n", entry_time);
	seq_printf(s, "S0ix Exit Time: %lld\n", exit_time);
	seq_printf(s, "Residency Time: %lld\n", residency);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(s0ix_stats);
/*
 * Read the SMU idlemask scratch register for the current SoC and report it
 * either to the PM debug log (@dev non-NULL) or a seq_file (@s non-NULL).
 * Returns -EINVAL for SoCs/firmware without an exposed idlemask.
 */
static int amd_pmc_idlemask_read(struct amd_pmc_dev *pdev, struct device *dev,
				 struct seq_file *s)
{
	u32 val;
	int rc;

	switch (pdev->cpu_id) {
	case AMD_CPU_ID_CZN:
		/* we haven't yet read SMU version */
		if (!pdev->major) {
			rc = amd_pmc_get_smu_version(pdev);
			if (rc)
				return rc;
		}
		/*
		 * NOTE(review): this gate accepts e.g. 55.37 and anything >56
		 * but rejects 56.x below minor 37 — presumably matches the
		 * PMFW release train that introduced the scratch register;
		 * confirm against PMFW release notes.
		 */
		if (pdev->major > 56 || (pdev->major >= 55 && pdev->minor >= 37))
			val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_CZN);
		else
			return -EINVAL;
		break;
	case AMD_CPU_ID_YC:
	case AMD_CPU_ID_CB:
	case AMD_CPU_ID_PS:
		val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_YC);
		break;
	default:
		return -EINVAL;
	}

	if (dev)
		pm_pr_dbg("SMU idlemask s0i3: 0x%x\n", val);

	if (s)
		seq_printf(s, "SMU idlemask : 0x%x\n", val);

	return 0;
}
/* debugfs wrapper: print the idlemask into the seq_file. */
static int amd_pmc_idlemask_show(struct seq_file *s, void *unused)
{
	struct amd_pmc_dev *pdev = s->private;

	return amd_pmc_idlemask_read(pdev, NULL, s);
}
DEFINE_SHOW_ATTRIBUTE(amd_pmc_idlemask);
/* Remove the whole amd_pmc debugfs directory and all files under it. */
static void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
{
	debugfs_remove_recursive(dev->dbgfs_dir);
}
/* Spill-to-DRAM STB is available on Yellow Carp, Cezanne-B (CB) and Phoenix. */
static bool amd_pmc_is_stb_supported(struct amd_pmc_dev *dev)
{
	return dev->cpu_id == AMD_CPU_ID_YC ||
	       dev->cpu_id == AMD_CPU_ID_CB ||
	       dev->cpu_id == AMD_CPU_ID_PS;
}
/*
 * Create the amd_pmc debugfs directory and its files.  The stb_read file
 * is only created when the enable_stb module parameter is set, picking the
 * S2D or legacy FIFO backend based on SoC support.
 */
static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
{
	dev->dbgfs_dir = debugfs_create_dir("amd_pmc", NULL);
	debugfs_create_file("smu_fw_info", 0644, dev->dbgfs_dir, dev,
			    &smu_fw_info_fops);
	debugfs_create_file("s0ix_stats", 0644, dev->dbgfs_dir, dev,
			    &s0ix_stats_fops);
	debugfs_create_file("amd_pmc_idlemask", 0644, dev->dbgfs_dir, dev,
			    &amd_pmc_idlemask_fops);

	/* Enable STB only when the module_param is set */
	if (enable_stb) {
		const struct file_operations *stb_fops;

		stb_fops = amd_pmc_is_stb_supported(dev) ? &amd_pmc_stb_debugfs_fops_v2 :
							   &amd_pmc_stb_debugfs_fops;
		debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev, stb_fops);
	}
}
/*
 * Dump the three mailbox registers (response, argument, message) of the
 * currently selected port (PMC or S2D) to the debug log.
 */
static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
{
	const char *port = dev->msg_port ? "S2D" : "PMC";
	u32 msg_off, arg_off, rsp_off;
	u32 val;

	if (dev->msg_port) {
		msg_off = AMD_S2D_REGISTER_MESSAGE;
		arg_off = AMD_S2D_REGISTER_ARGUMENT;
		rsp_off = AMD_S2D_REGISTER_RESPONSE;
	} else {
		msg_off = AMD_PMC_REGISTER_MESSAGE;
		arg_off = AMD_PMC_REGISTER_ARGUMENT;
		rsp_off = AMD_PMC_REGISTER_RESPONSE;
	}

	val = amd_pmc_reg_read(dev, rsp_off);
	dev_dbg(dev->dev, "AMD_%s_REGISTER_RESPONSE:%x\n", port, val);

	val = amd_pmc_reg_read(dev, arg_off);
	dev_dbg(dev->dev, "AMD_%s_REGISTER_ARGUMENT:%x\n", port, val);

	val = amd_pmc_reg_read(dev, msg_off);
	dev_dbg(dev->dev, "AMD_%s_REGISTER_MESSAGE:%x\n", port, val);
}
/*
 * Send one mailbox command to the SMU and optionally read back a 32-bit
 * result into @data (when @ret is true).  dev->msg_port selects between
 * the PMC and S2D mailbox register sets; the whole exchange is serialized
 * by dev->lock.
 *
 * Returns 0 on success or a negative errno mapped from the SMU response
 * code (-EBUSY / -EINVAL / -EIO) or the poll timeout.
 */
static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret)
{
	int rc;
	u32 val, message, argument, response;

	mutex_lock(&dev->lock);

	/* Select the register set for the active mailbox port. */
	if (dev->msg_port) {
		message = AMD_S2D_REGISTER_MESSAGE;
		argument = AMD_S2D_REGISTER_ARGUMENT;
		response = AMD_S2D_REGISTER_RESPONSE;
	} else {
		message = AMD_PMC_REGISTER_MESSAGE;
		argument = AMD_PMC_REGISTER_ARGUMENT;
		response = AMD_PMC_REGISTER_RESPONSE;
	}

	/* Wait until we get a valid response */
	rc = readx_poll_timeout(ioread32, dev->regbase + response,
				val, val != 0, PMC_MSG_DELAY_MIN_US,
				PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
	if (rc) {
		dev_err(dev->dev, "failed to talk to SMU\n");
		goto out_unlock;
	}

	/* Write zero to response register */
	amd_pmc_reg_write(dev, response, 0);

	/* Write argument into response register */
	amd_pmc_reg_write(dev, argument, arg);

	/* Write message ID to message ID register */
	amd_pmc_reg_write(dev, message, msg);

	/* Wait until we get a valid response */
	rc = readx_poll_timeout(ioread32, dev->regbase + response,
				val, val != 0, PMC_MSG_DELAY_MIN_US,
				PMC_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
	if (rc) {
		dev_err(dev->dev, "SMU response timed out\n");
		goto out_unlock;
	}

	/* Map the SMU response code to an errno. */
	switch (val) {
	case AMD_PMC_RESULT_OK:
		if (ret) {
			/* PMFW may take longer time to return back the data */
			usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US);
			*data = amd_pmc_reg_read(dev, argument);
		}
		break;
	case AMD_PMC_RESULT_CMD_REJECT_BUSY:
		dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
		rc = -EBUSY;
		goto out_unlock;
	case AMD_PMC_RESULT_CMD_UNKNOWN:
		dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
		rc = -EINVAL;
		goto out_unlock;
	case AMD_PMC_RESULT_CMD_REJECT_PREREQ:
	case AMD_PMC_RESULT_FAILED:
	default:
		dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
		rc = -EIO;
		goto out_unlock;
	}

out_unlock:
	mutex_unlock(&dev->lock);
	/* Register dump happens outside the lock; purely diagnostic. */
	amd_pmc_dump_registers(dev);
	return rc;
}
/* Pick the OS-hint mailbox message for this SoC; -EINVAL if unsupported. */
static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
{
	if (dev->cpu_id == AMD_CPU_ID_PCO)
		return MSG_OS_HINT_PCO;

	if (dev->cpu_id == AMD_CPU_ID_RN || dev->cpu_id == AMD_CPU_ID_YC ||
	    dev->cpu_id == AMD_CPU_ID_CB || dev->cpu_id == AMD_CPU_ID_PS)
		return MSG_OS_HINT_RN;

	return -EINVAL;
}
/*
 * Cezanne workaround: on affected firmware, an IRQ1 (PS/2 keyboard) wake
 * source breaks s2idle, so disable keyboard wakeup before suspend.
 * Firmware newer than 64.65 is assumed fixed and left untouched
 * (NOTE(review): presumably that is the PMFW release carrying the fix —
 * confirm against the platform firmware errata).
 */
static int amd_pmc_czn_wa_irq1(struct amd_pmc_dev *pdev)
{
	struct device *d;
	int rc;

	/* Lazily fetch the SMU version so the gate below can be evaluated. */
	if (!pdev->major) {
		rc = amd_pmc_get_smu_version(pdev);
		if (rc)
			return rc;
	}

	if (pdev->major > 64 || (pdev->major == 64 && pdev->minor > 65))
		return 0;

	d = bus_find_device_by_name(&serio_bus, NULL, "serio0");
	if (!d)
		return 0;
	if (device_may_wakeup(d)) {
		dev_info_once(d, "Disabling IRQ1 wakeup source to avoid platform firmware bug\n");
		disable_irq_wake(1);
		device_set_wakeup_enable(d, false);
	}
	put_device(d);

	return 0;
}
/*
 * Cezanne RTC workaround: when an RTC alarm is armed, encode the seconds
 * until the alarm in the upper 16 bits of the s0i3 hint argument and let
 * the SMU fire the wake instead of the RTC IRQ.  Only applies to PMFW
 * 64.53 and newer.
 *
 * Fix: rtc_class_open() takes a reference on the RTC device; the original
 * returned on every path without dropping it.  All exits now funnel
 * through rtc_class_close().
 */
static int amd_pmc_verify_czn_rtc(struct amd_pmc_dev *pdev, u32 *arg)
{
	struct rtc_device *rtc_device;
	time64_t then, now, duration;
	struct rtc_wkalrm alarm;
	struct rtc_time tm;
	int rc;

	/* we haven't yet read SMU version */
	if (!pdev->major) {
		rc = amd_pmc_get_smu_version(pdev);
		if (rc)
			return rc;
	}

	if (pdev->major < 64 || (pdev->major == 64 && pdev->minor < 53))
		return 0;

	rtc_device = rtc_class_open("rtc0");
	if (!rtc_device)
		return 0;

	rc = rtc_read_alarm(rtc_device, &alarm);
	if (rc)
		goto close_rtc;
	if (!alarm.enabled) {
		dev_dbg(pdev->dev, "alarm not enabled\n");
		rc = 0;
		goto close_rtc;
	}
	rc = rtc_read_time(rtc_device, &tm);
	if (rc)
		goto close_rtc;

	then = rtc_tm_to_time64(&alarm.time);
	now = rtc_tm_to_time64(&tm);

	/* in the past */
	if (then < now) {
		rc = 0;
		goto close_rtc;
	}
	duration = then - now;

	/* will be stored in upper 16 bits of s0i3 hint argument,
	 * so timer wakeup from s0i3 is limited to ~18 hours or less
	 */
	if (duration <= 4 || duration > U16_MAX) {
		rc = -EINVAL;
		goto close_rtc;
	}

	*arg |= (duration << 16);
	/* Hand the wake over to the SMU; mask the RTC alarm interrupt. */
	rc = rtc_alarm_irq_enable(rtc_device, 0);
	pm_pr_dbg("wakeup timer programmed for %lld seconds\n", duration);

close_rtc:
	rtc_class_close(rtc_device);
	return rc;
}
/*
 * LPS0 .prepare callback: restart SMU logging, apply Cezanne-specific
 * workarounds, send the OS hint (arg=1 means "entering s0i3") and leave a
 * marker in the STB.
 */
static void amd_pmc_s2idle_prepare(void)
{
	struct amd_pmc_dev *pdev = &pmc;
	int rc;
	u8 msg;
	u32 arg = 1;

	/* Reset and Start SMU logging - to monitor the s0i3 stats */
	amd_pmc_setup_smu_logging(pdev);

	/* Activate CZN specific platform bug workarounds */
	if (pdev->cpu_id == AMD_CPU_ID_CZN && !disable_workarounds) {
		rc = amd_pmc_verify_czn_rtc(pdev, &arg);
		if (rc) {
			dev_err(pdev->dev, "failed to set RTC: %d\n", rc);
			return;
		}
	}

	msg = amd_pmc_get_os_hint(pdev);
	rc = amd_pmc_send_cmd(pdev, arg, NULL, msg, false);
	if (rc) {
		dev_err(pdev->dev, "suspend failed: %d\n", rc);
		return;
	}

	rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_PREPARE);
	if (rc)
		dev_err(pdev->dev, "error writing to STB: %d\n", rc);
}
/*
 * LPS0 .check callback, run between wakeups while still in s2idle:
 * throttle Cezanne re-entry, log the idlemask, and mark the STB.
 */
static void amd_pmc_s2idle_check(void)
{
	struct amd_pmc_dev *pdev = &pmc;
	struct smu_metrics table;
	int rc;

	/* CZN: Ensure that future s0i3 entry attempts at least 10ms passed */
	if (pdev->cpu_id == AMD_CPU_ID_CZN && !get_metrics_table(pdev, &table) &&
	    table.s0i3_last_entry_status)
		usleep_range(10000, 20000);

	/* Dump the IdleMask before we add to the STB */
	amd_pmc_idlemask_read(pdev, pdev->dev, NULL);

	rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_CHECK);
	if (rc)
		dev_err(pdev->dev, "error writing to STB: %d\n", rc);
}
/* Ask the SMU to flush its statistics; unsupported on Picasso (-ENODEV). */
static int amd_pmc_dump_data(struct amd_pmc_dev *pdev)
{
	if (pdev->cpu_id == AMD_CPU_ID_PCO)
		return -ENODEV;

	return amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, false);
}
/*
 * LPS0 .restore callback on resume: send the OS hint with arg=0 ("leaving
 * s0i3"), flush SMU stats, mark the STB, validate that the deepest state
 * was actually reached, and run resume-side quirks.
 */
static void amd_pmc_s2idle_restore(void)
{
	struct amd_pmc_dev *pdev = &pmc;
	int rc;
	u8 msg;

	msg = amd_pmc_get_os_hint(pdev);
	rc = amd_pmc_send_cmd(pdev, 0, NULL, msg, false);
	if (rc)
		dev_err(pdev->dev, "resume failed: %d\n", rc);

	/* Let SMU know that we are looking for stats */
	amd_pmc_dump_data(pdev);

	rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_RESTORE);
	if (rc)
		dev_err(pdev->dev, "error writing to STB: %d\n", rc);

	/* Notify on failed entry */
	amd_pmc_validate_deepest(pdev);

	amd_pmc_process_restore_quirks(pdev);
}
/* Hooks registered with the ACPI LPS0 (s2idle) core in amd_pmc_probe(). */
static struct acpi_s2idle_dev_ops amd_pmc_s2idle_dev_ops = {
	.prepare = amd_pmc_s2idle_prepare,
	.check = amd_pmc_s2idle_check,
	.restore = amd_pmc_s2idle_restore,
};
/*
 * PM suspend hook: apply the Cezanne IRQ1 keyboard-wakeup workaround
 * unless workarounds are disabled.  Other SoCs pass straight through.
 */
static int amd_pmc_suspend_handler(struct device *dev)
{
	struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
	int rc;

	if (pdev->cpu_id != AMD_CPU_ID_CZN || disable_workarounds)
		return 0;

	rc = amd_pmc_czn_wa_irq1(pdev);
	if (rc)
		dev_err(pdev->dev, "failed to adjust keyboard wakeup: %d\n", rc);

	return rc;
}
static DEFINE_SIMPLE_DEV_PM_OPS(amd_pmc_pm, amd_pmc_suspend_handler, NULL);
/*
 * Root-complex PCI IDs of supported SoCs; matched against bus 0 devfn 0:0
 * in amd_pmc_probe() to identify the platform.
 */
static const struct pci_device_id pmc_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CB) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CZN) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RN) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PCO) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RV) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_SP) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
	{ }
};
/*
 * Query the STB DRAM size over the S2D mailbox.  Only supported on
 * Yellow Carp with PMFW newer than 90.39; all other cases report -EINVAL
 * so the caller falls back to S2D_TELEMETRY_DRAMBYTES_MAX.
 *
 * Fix: the original could return 0 when the command succeeded but
 * reported a zero size — the caller then skipped the fallback and mapped
 * a zero-length region.  A zero size now yields -EINVAL.
 */
static int amd_pmc_get_dram_size(struct amd_pmc_dev *dev)
{
	int ret;

	switch (dev->cpu_id) {
	case AMD_CPU_ID_YC:
		if (!(dev->major > 90 || (dev->major == 90 && dev->minor > 39))) {
			ret = -EINVAL;
			goto err_dram_size;
		}
		break;
	default:
		ret = -EINVAL;
		goto err_dram_size;
	}

	ret = amd_pmc_send_cmd(dev, S2D_DRAM_SIZE, &dev->dram_size, dev->s2d_msg_id, true);
	if (ret || !dev->dram_size) {
		/* A successful command with a zero size is still unusable. */
		if (!ret)
			ret = -EINVAL;
		goto err_dram_size;
	}

	return 0;

err_dram_size:
	dev_err(dev->dev, "DRAM size command not supported for this platform\n");
	return ret;
}
/*
 * Initialize Spill-to-DRAM: verify the telemetry size, determine the DRAM
 * budget (with a fallback), fetch the STB DRAM address over the S2D
 * mailbox and map it.  msg_port is forced to 1 for the S2D commands and
 * restored to 0 before the ioremap.
 */
static int amd_pmc_s2d_init(struct amd_pmc_dev *dev)
{
	u32 phys_addr_low, phys_addr_hi;
	u64 stb_phys_addr;
	u32 size = 0;
	int ret;

	/* Spill to DRAM feature uses separate SMU message port */
	dev->msg_port = 1;

	/* Get num of IP blocks within the SoC */
	amd_pmc_get_ip_info(dev);

	amd_pmc_send_cmd(dev, S2D_TELEMETRY_SIZE, &size, dev->s2d_msg_id, true);
	if (size != S2D_TELEMETRY_BYTES_MAX)
		return -EIO;

	/* Get DRAM size */
	ret = amd_pmc_get_dram_size(dev);
	if (ret)
		dev->dram_size = S2D_TELEMETRY_DRAMBYTES_MAX;

	/* Get STB DRAM address */
	amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_LOW, &phys_addr_low, dev->s2d_msg_id, true);
	amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_HIGH, &phys_addr_hi, dev->s2d_msg_id, true);

	stb_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low);

	/* Clear msg_port for other SMU operation */
	dev->msg_port = 0;

	dev->stb_virt_addr = devm_ioremap(dev->dev, stb_phys_addr, dev->dram_size);
	if (!dev->stb_virt_addr)
		return -ENOMEM;

	return 0;
}
/* Push one 32-bit marker into the STB via the SMN interface. */
static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data)
{
	int rc = amd_smn_write(0, AMD_PMC_STB_PMI_0, data);

	if (!rc)
		return 0;

	dev_err(dev->dev, "failed to write data in stb: 0x%X\n", AMD_PMC_STB_PMI_0);
	return pcibios_err_to_errno(rc);
}
/* Drain FIFO_SIZE words out of the STB via the SMN interface into @buf. */
static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf)
{
	int word, rc;

	for (word = 0; word < FIFO_SIZE; word++, buf++) {
		rc = amd_smn_read(0, AMD_PMC_STB_PMI_0, buf);
		if (rc) {
			dev_err(dev->dev, "error reading data from stb: 0x%X\n",
				AMD_PMC_STB_PMI_0);
			return pcibios_err_to_errno(rc);
		}
	}

	return 0;
}
/*
 * Platform probe: identify the SoC from the PCI root complex, map the PMC
 * MMIO window via SMN base-address registers, optionally set up Spill to
 * DRAM, register the LPS0 s2idle hooks and debugfs.
 */
static int amd_pmc_probe(struct platform_device *pdev)
{
	struct amd_pmc_dev *dev = &pmc;
	struct pci_dev *rdev;
	u32 base_addr_lo, base_addr_hi;
	u64 base_addr;
	int err;
	u32 val;

	dev->dev = &pdev->dev;

	rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
	if (!rdev || !pci_match_id(pmc_pci_ids, rdev)) {
		err = -ENODEV;
		goto err_pci_dev_put;
	}

	dev->cpu_id = rdev->device;

	if (dev->cpu_id == AMD_CPU_ID_SP) {
		dev_warn_once(dev->dev, "S0i3 is not supported on this hardware\n");
		err = -ENODEV;
		goto err_pci_dev_put;
	}

	dev->rdev = rdev;
	err = amd_smn_read(0, AMD_PMC_BASE_ADDR_LO, &val);
	if (err) {
		dev_err(dev->dev, "error reading 0x%x\n", AMD_PMC_BASE_ADDR_LO);
		err = pcibios_err_to_errno(err);
		goto err_pci_dev_put;
	}

	/*
	 * NOTE(review): the LO register is masked with ..._HI_MASK and vice
	 * versa — looks swapped but presumably matches how the SMN splits
	 * the base address across the two registers; confirm before changing.
	 */
	base_addr_lo = val & AMD_PMC_BASE_ADDR_HI_MASK;

	err = amd_smn_read(0, AMD_PMC_BASE_ADDR_HI, &val);
	if (err) {
		dev_err(dev->dev, "error reading 0x%x\n", AMD_PMC_BASE_ADDR_HI);
		err = pcibios_err_to_errno(err);
		goto err_pci_dev_put;
	}

	base_addr_hi = val & AMD_PMC_BASE_ADDR_LO_MASK;
	base_addr = ((u64)base_addr_hi << 32 | base_addr_lo);

	dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMC_BASE_ADDR_OFFSET,
				    AMD_PMC_MAPPING_SIZE);
	if (!dev->regbase) {
		err = -ENOMEM;
		goto err_pci_dev_put;
	}

	mutex_init(&dev->lock);

	if (enable_stb && amd_pmc_is_stb_supported(dev)) {
		err = amd_pmc_s2d_init(dev);
		if (err)
			goto err_pci_dev_put;
	}

	platform_set_drvdata(pdev, dev);
	if (IS_ENABLED(CONFIG_SUSPEND)) {
		err = acpi_register_lps0_dev(&amd_pmc_s2idle_dev_ops);
		if (err)
			dev_warn(dev->dev, "failed to register LPS0 sleep handler, expect increased power consumption\n");
		if (!disable_workarounds)
			amd_pmc_quirks_init(dev);
	}

	amd_pmc_dbgfs_register(dev);
	pm_report_max_hw_sleep(U64_MAX);
	return 0;

err_pci_dev_put:
	pci_dev_put(rdev);
	return err;
}
/*
 * Platform remove: unwind probe in reverse order — LPS0 hooks, debugfs,
 * the PCI root-complex reference, then the mailbox lock.
 */
static void amd_pmc_remove(struct platform_device *pdev)
{
	struct amd_pmc_dev *dev = platform_get_drvdata(pdev);

	if (IS_ENABLED(CONFIG_SUSPEND))
		acpi_unregister_lps0_dev(&amd_pmc_s2idle_dev_ops);
	amd_pmc_dbgfs_unregister(dev);
	pci_dev_put(dev->rdev);
	mutex_destroy(&dev->lock);
}
/* ACPI HIDs exposing the PMC device on supported platforms. */
static const struct acpi_device_id amd_pmc_acpi_ids[] = {
	{"AMDI0005", 0},
	{"AMDI0006", 0},
	{"AMDI0007", 0},
	{"AMDI0008", 0},
	{"AMDI0009", 0},
	{"AMDI000A", 0},
	{"AMD0004", 0},
	{"AMD0005", 0},
	{ }
};
MODULE_DEVICE_TABLE(acpi, amd_pmc_acpi_ids);
/* Platform driver glue; PM ops only apply on sleep-capable kernels. */
static struct platform_driver amd_pmc_driver = {
	.driver = {
		.name = "amd_pmc",
		.acpi_match_table = amd_pmc_acpi_ids,
		.dev_groups = pmc_groups,
		.pm = pm_sleep_ptr(&amd_pmc_pm),
	},
	.probe = amd_pmc_probe,
	.remove_new = amd_pmc_remove,
};
module_platform_driver(amd_pmc_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("AMD PMC Driver");
| linux-master | drivers/platform/x86/amd/pmc/pmc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* AMD SoC Power Management Controller Driver Quirks
*
* Copyright (c) 2023, Advanced Micro Devices, Inc.
* All Rights Reserved.
*
* Author: Mario Limonciello <[email protected]>
*/
#include <linux/dmi.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include "pmc.h"
/* Per-machine quirk data attached to a DMI match. */
struct quirk_entry {
	u32 s2idle_bug_mmio;	/* MMIO address poked to work around an s2idle bug */
};
/* Shared quirk payload for machines with the FCH s2idle firmware bug. */
static struct quirk_entry quirk_s2idle_bug = {
	.s2idle_bug_mmio = 0xfed80380,	/* FCH PM register region */
};
static const struct dmi_system_id fwbug_list[] = {
{
.ident = "L14 Gen2 AMD",
.driver_data = &quirk_s2idle_bug,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "20X5"),
}
},
{
.ident = "T14s Gen2 AMD",
.driver_data = &quirk_s2idle_bug,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "20XF"),
}
},
{
.ident = "X13 Gen2 AMD",
.driver_data = &quirk_s2idle_bug,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "20XH"),
}
},
{
.ident = "T14 Gen2 AMD",
.driver_data = &quirk_s2idle_bug,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "20XK"),
}
},
{
.ident = "T14 Gen1 AMD",
.driver_data = &quirk_s2idle_bug,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "20UD"),
}
},
{
.ident = "T14 Gen1 AMD",
.driver_data = &quirk_s2idle_bug,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "20UE"),
}
},
{
.ident = "T14s Gen1 AMD",
.driver_data = &quirk_s2idle_bug,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "20UH"),
}
},
{
.ident = "T14s Gen1 AMD",
.driver_data = &quirk_s2idle_bug,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "20UJ"),
}
},
{
.ident = "P14s Gen1 AMD",
.driver_data = &quirk_s2idle_bug,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "20Y1"),
}
},
{
.ident = "P14s Gen2 AMD",
.driver_data = &quirk_s2idle_bug,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21A0"),
}
},
{
.ident = "P14s Gen2 AMD",
.driver_data = &quirk_s2idle_bug,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21A1"),
}
},
/* https://gitlab.freedesktop.org/drm/amd/-/issues/2684 */
{
.ident = "HP Laptop 15s-eq2xxx",
.driver_data = &quirk_s2idle_bug,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Laptop 15s-eq2xxx"),
}
},
{}
};
/*
* Laptops that run a SMI handler during the D3->D0 transition that occurs
* specifically when exiting suspend to idle which can cause
* large delays during resume when the IOMMU translation layer is enabled (the default
* behavior) for NVME devices:
*
* To avoid this firmware problem, skip the SMI handler on these machines before the
* D0 transition occurs.
*/
/*
 * Clear bit 0 of the PM byte at @s2idle_bug_mmio so the problematic SMI
 * handler is skipped before the D3->D0 transition on resume.
 */
static void amd_pmc_skip_nvme_smi_handler(u32 s2idle_bug_mmio)
{
	void __iomem *reg;
	u8 pm_val;

	/* Claim the single MMIO byte before touching it. */
	if (!request_mem_region_muxed(s2idle_bug_mmio, 1, "amd_pmc_pm80"))
		return;

	reg = ioremap(s2idle_bug_mmio, 1);
	if (reg) {
		/* Read-modify-write: clear only bit 0, keep the rest. */
		pm_val = ioread8(reg);
		iowrite8(pm_val & ~BIT(0), reg);
		iounmap(reg);
	}

	release_mem_region(s2idle_bug_mmio, 1);
}
void amd_pmc_process_restore_quirks(struct amd_pmc_dev *dev)
{
if (dev->quirks && dev->quirks->s2idle_bug_mmio)
amd_pmc_skip_nvme_smi_handler(dev->quirks->s2idle_bug_mmio);
}
/*
 * Match the running system against the DMI quirk table and attach the
 * first matching quirk entry (if any) to the device.
 */
void amd_pmc_quirks_init(struct amd_pmc_dev *dev)
{
	const struct dmi_system_id *dmi_id;

	dmi_id = dmi_first_match(fwbug_list);
	if (!dmi_id)
		return;		/* no known firmware bug on this machine */
	dev->quirks = dmi_id->driver_data;
	if (dev->quirks->s2idle_bug_mmio)
		pr_info("Using s2idle quirk to avoid %s platform firmware bug\n",
			dmi_id->ident);
}
| linux-master | drivers/platform/x86/amd/pmc/pmc-quirks.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ChromeOS Privacy Screen support
*
* Copyright (C) 2022 Google LLC
*
* This is the Chromeos privacy screen provider, present on certain chromebooks,
* represented by a GOOG0010 device in the ACPI. This ACPI device, if present,
* will cause the i915 drm driver to probe defer until this driver registers
* the privacy-screen.
*/
#include <linux/acpi.h>
#include <drm/drm_privacy_screen_driver.h>
/*
* The DSM (Device Specific Method) constants below are the agreed API with
* the firmware team, on how to control privacy screen using ACPI methods.
*/
#define PRIV_SCRN_DSM_REVID 1 /* DSM version */
#define PRIV_SCRN_DSM_FN_GET_STATUS 1 /* Get privacy screen status */
#define PRIV_SCRN_DSM_FN_ENABLE 2 /* Enable privacy screen */
#define PRIV_SCRN_DSM_FN_DISABLE 3 /* Disable privacy screen */
static const guid_t chromeos_privacy_screen_dsm_guid =
GUID_INIT(0xc7033113, 0x8720, 0x4ceb,
0x90, 0x90, 0x9d, 0x52, 0xb3, 0xe5, 0x2d, 0x73);
/*
 * Query the privacy-screen state via the GET_STATUS _DSM function and
 * mirror the result into both hw_state and sw_state of the DRM object.
 * On _DSM failure the states are left unchanged.
 */
static void
chromeos_privacy_screen_get_hw_state(struct drm_privacy_screen
				     *drm_privacy_screen)
{
	union acpi_object *obj;
	acpi_handle handle;
	struct device *privacy_screen =
		drm_privacy_screen_get_drvdata(drm_privacy_screen);

	handle = acpi_device_handle(to_acpi_device(privacy_screen));
	obj = acpi_evaluate_dsm(handle, &chromeos_privacy_screen_dsm_guid,
				PRIV_SCRN_DSM_REVID,
				PRIV_SCRN_DSM_FN_GET_STATUS, NULL);
	if (!obj) {
		dev_err(privacy_screen,
			"_DSM failed to get privacy-screen state\n");
		return;
	}

	/* Firmware returns an integer: 1 means the screen is enabled. */
	if (obj->type != ACPI_TYPE_INTEGER)
		dev_err(privacy_screen,
			"Bad _DSM to get privacy-screen state\n");
	else if (obj->integer.value == 1)
		drm_privacy_screen->hw_state = drm_privacy_screen->sw_state =
			PRIVACY_SCREEN_ENABLED;
	else
		drm_privacy_screen->hw_state = drm_privacy_screen->sw_state =
			PRIVACY_SCREEN_DISABLED;

	ACPI_FREE(obj);
}
static int
chromeos_privacy_screen_set_sw_state(struct drm_privacy_screen
*drm_privacy_screen,
enum drm_privacy_screen_status state)
{
union acpi_object *obj = NULL;
acpi_handle handle;
struct device *privacy_screen =
drm_privacy_screen_get_drvdata(drm_privacy_screen);
handle = acpi_device_handle(to_acpi_device(privacy_screen));
if (state == PRIVACY_SCREEN_DISABLED) {
obj = acpi_evaluate_dsm(handle,
&chromeos_privacy_screen_dsm_guid,
PRIV_SCRN_DSM_REVID,
PRIV_SCRN_DSM_FN_DISABLE, NULL);
} else if (state == PRIVACY_SCREEN_ENABLED) {
obj = acpi_evaluate_dsm(handle,
&chromeos_privacy_screen_dsm_guid,
PRIV_SCRN_DSM_REVID,
PRIV_SCRN_DSM_FN_ENABLE, NULL);
} else {
dev_err(privacy_screen,
"Bad attempt to set privacy-screen status to %u\n",
state);
return -EINVAL;
}
if (!obj) {
dev_err(privacy_screen,
"_DSM failed to set privacy-screen state\n");
return -EIO;
}
drm_privacy_screen->hw_state = drm_privacy_screen->sw_state = state;
ACPI_FREE(obj);
return 0;
}
static const struct drm_privacy_screen_ops chromeos_privacy_screen_ops = {
.get_hw_state = chromeos_privacy_screen_get_hw_state,
.set_sw_state = chromeos_privacy_screen_set_sw_state,
};
/*
 * ACPI .add callback: register a DRM privacy-screen provider for the
 * GOOG0010 device and stash it in the ACPI device's driver_data so the
 * .remove callback can unregister it.
 */
static int chromeos_privacy_screen_add(struct acpi_device *adev)
{
	struct drm_privacy_screen *drm_privacy_screen =
		drm_privacy_screen_register(&adev->dev,
					    &chromeos_privacy_screen_ops,
					    &adev->dev);

	if (IS_ERR(drm_privacy_screen)) {
		dev_err(&adev->dev, "Error registering privacy-screen\n");
		return PTR_ERR(drm_privacy_screen);
	}

	adev->driver_data = drm_privacy_screen;
	dev_info(&adev->dev, "registered privacy-screen '%s'\n",
		 dev_name(&drm_privacy_screen->dev));

	return 0;
}
/* ACPI .remove callback: unregister the provider created in .add. */
static void chromeos_privacy_screen_remove(struct acpi_device *adev)
{
	struct drm_privacy_screen *drm_privacy_screen =	acpi_driver_data(adev);

	drm_privacy_screen_unregister(drm_privacy_screen);
}
static const struct acpi_device_id chromeos_privacy_screen_device_ids[] = {
{"GOOG0010", 0}, /* Google's electronic privacy screen for eDP-1 */
{}
};
MODULE_DEVICE_TABLE(acpi, chromeos_privacy_screen_device_ids);
static struct acpi_driver chromeos_privacy_screen_driver = {
.name = "chromeos_privacy_screen_driver",
.class = "ChromeOS",
.ids = chromeos_privacy_screen_device_ids,
.ops = {
.add = chromeos_privacy_screen_add,
.remove = chromeos_privacy_screen_remove,
},
};
module_acpi_driver(chromeos_privacy_screen_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("ChromeOS ACPI Privacy Screen driver");
MODULE_AUTHOR("Rajat Jain <[email protected]>");
| linux-master | drivers/platform/chrome/chromeos_privacy_screen.c |
// SPDX-License-Identifier: GPL-2.0
// Driver to detect Tablet Mode for ChromeOS convertible.
//
// Copyright (C) 2017 Google, Inc.
// Author: Gwendal Grignou <[email protected]>
//
// On Chromebook using ACPI, this device listens for notification
// from GOOG0006 and issue method TBMC to retrieve the status.
//
// GOOG0006 issues the notification when it receives EC_HOST_EVENT_MODE_CHANGE
// from the EC.
// Method TBMC reads EC_ACPI_MEM_DEVICE_ORIENTATION byte from the shared
// memory region.
#include <linux/acpi.h>
#include <linux/input.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/printk.h>
#define DRV_NAME "chromeos_tbmc"
#define ACPI_DRV_NAME "GOOG0006"
/*
 * Evaluate the TBMC ACPI method and forward the tablet-mode state to the
 * input device. Returns 0 on success, -ENODEV if TBMC evaluation fails.
 */
static int chromeos_tbmc_query_switch(struct acpi_device *adev,
				      struct input_dev *idev)
{
	unsigned long long state;
	acpi_status status;

	status = acpi_evaluate_integer(adev->handle, "TBMC", NULL, &state);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	/* input layer checks if event is redundant */
	input_report_switch(idev, SW_TABLET_MODE, state);
	input_sync(idev);

	return 0;
}
/* PM resume: re-query the switch so a mode change during sleep is seen. */
static __maybe_unused int chromeos_tbmc_resume(struct device *dev)
{
	struct acpi_device *adev = to_acpi_device(dev);

	/* driver_data holds the input_dev set up in chromeos_tbmc_add(). */
	return chromeos_tbmc_query_switch(adev, adev->driver_data);
}
/*
 * ACPI notify handler: event 0x80 signals a mode change, so re-query the
 * switch; anything else is unexpected and only logged.
 */
static void chromeos_tbmc_notify(struct acpi_device *adev, u32 event)
{
	/* Report a possible wakeup source to the PM core first. */
	acpi_pm_wakeup_event(&adev->dev);

	if (event == 0x80)
		chromeos_tbmc_query_switch(adev, adev->driver_data);
	else
		dev_err(&adev->dev, "Unexpected event: 0x%08X\n", event);
}
/* input_dev .open: report the current state when userspace first opens. */
static int chromeos_tbmc_open(struct input_dev *idev)
{
	struct acpi_device *adev = input_get_drvdata(idev);

	return chromeos_tbmc_query_switch(adev, idev);
}
/*
 * ACPI .add callback: allocate and register an input device exposing
 * SW_TABLET_MODE, wire up the cross-references between the ACPI device
 * and the input device, and enable wakeup.
 */
static int chromeos_tbmc_add(struct acpi_device *adev)
{
	struct input_dev *idev;
	struct device *dev = &adev->dev;
	int ret;

	idev = devm_input_allocate_device(dev);
	if (!idev)
		return -ENOMEM;

	idev->name = "Tablet Mode Switch";
	idev->phys = acpi_device_hid(adev);

	idev->id.bustype = BUS_HOST;
	idev->id.version = 1;
	idev->id.product = 0;
	idev->open = chromeos_tbmc_open;

	/* Each side can reach the other from its callbacks. */
	input_set_drvdata(idev, adev);
	adev->driver_data = idev;

	input_set_capability(idev, EV_SW, SW_TABLET_MODE);
	ret = input_register_device(idev);
	if (ret) {
		dev_err(dev, "cannot register input device\n");
		return ret;
	}
	device_init_wakeup(dev, true);
	return 0;
}
static const struct acpi_device_id chromeos_tbmc_acpi_device_ids[] = {
{ ACPI_DRV_NAME, 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, chromeos_tbmc_acpi_device_ids);
static SIMPLE_DEV_PM_OPS(chromeos_tbmc_pm_ops, NULL,
chromeos_tbmc_resume);
static struct acpi_driver chromeos_tbmc_driver = {
.name = DRV_NAME,
.class = DRV_NAME,
.ids = chromeos_tbmc_acpi_device_ids,
.ops = {
.add = chromeos_tbmc_add,
.notify = chromeos_tbmc_notify,
},
.drv.pm = &chromeos_tbmc_pm_ops,
};
module_acpi_driver(chromeos_tbmc_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("ChromeOS ACPI tablet switch driver");
| linux-master | drivers/platform/chrome/chromeos_tbmc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* CrOS Kunit tests utilities.
*/
#include <kunit/test.h>
#include <linux/list.h>
#include <linux/minmax.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include "cros_ec.h"
#include "cros_kunit_util.h"
int cros_kunit_ec_xfer_mock_default_result;
int cros_kunit_ec_xfer_mock_default_ret;
int cros_kunit_ec_cmd_xfer_mock_called;
int cros_kunit_ec_pkt_xfer_mock_called;
static struct list_head cros_kunit_ec_xfer_mock_in;
static struct list_head cros_kunit_ec_xfer_mock_out;
/*
 * Generic EC transfer mock. Pops the next queued expectation from the
 * "in" list, records the request for later inspection, injects the canned
 * response, and moves the expectation to the "out" list. With nothing
 * queued, the default result/return values are used instead.
 */
int cros_kunit_ec_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
{
	struct ec_xfer_mock *mock;

	mock = list_first_entry_or_null(&cros_kunit_ec_xfer_mock_in, struct ec_xfer_mock, list);
	if (!mock) {
		msg->result = cros_kunit_ec_xfer_mock_default_result;
		return cros_kunit_ec_xfer_mock_default_ret;
	}

	list_del(&mock->list);

	/* Snapshot the request; any outgoing payload is copied into i_data. */
	memcpy(&mock->msg, msg, sizeof(*msg));
	if (msg->outsize) {
		mock->i_data = kunit_kzalloc(mock->test, msg->outsize, GFP_KERNEL);
		if (mock->i_data)
			memcpy(mock->i_data, msg->data, msg->outsize);
	}

	/* Inject the canned result and (bounded) response payload. */
	msg->result = mock->result;
	if (msg->insize)
		memcpy(msg->data, mock->o_data, min(msg->insize, mock->o_data_len));

	list_add_tail(&mock->list, &cros_kunit_ec_xfer_mock_out);

	return mock->ret;
}
/* cmd_xfer flavor of the mock: counts calls, then delegates. */
int cros_kunit_ec_cmd_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
{
	++cros_kunit_ec_cmd_xfer_mock_called;

	return cros_kunit_ec_xfer_mock(ec_dev, msg);
}
/* pkt_xfer flavor of the mock: counts calls, then delegates. */
int cros_kunit_ec_pkt_xfer_mock(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
{
	++cros_kunit_ec_pkt_xfer_mock_called;

	return cros_kunit_ec_xfer_mock(ec_dev, msg);
}
/*
 * Queue a success expectation: EC_RES_SUCCESS with a @size-byte response
 * buffer and a return value equal to @size.
 */
struct ec_xfer_mock *cros_kunit_ec_xfer_mock_add(struct kunit *test, size_t size)
{
	return cros_kunit_ec_xfer_mock_addx(test, size, EC_RES_SUCCESS, size);
}
/*
 * Queue a mock expectation with an explicit return value (@ret), EC result
 * code (@result) and response-buffer size (@size). All allocations are
 * kunit-managed; returns NULL on allocation failure.
 */
struct ec_xfer_mock *cros_kunit_ec_xfer_mock_addx(struct kunit *test,
						  int ret, int result, size_t size)
{
	struct ec_xfer_mock *mock;

	mock = kunit_kzalloc(test, sizeof(*mock), GFP_KERNEL);
	if (!mock)
		return NULL;

	/* Added to "in" now; consumed and moved to "out" by the mock xfer. */
	list_add_tail(&mock->list, &cros_kunit_ec_xfer_mock_in);
	mock->test = test;
	mock->ret = ret;
	mock->result = result;
	mock->o_data = kunit_kzalloc(test, size, GFP_KERNEL);
	if (!mock->o_data)
		return NULL;
	mock->o_data_len = size;

	return mock;
}
struct ec_xfer_mock *cros_kunit_ec_xfer_mock_next(void)
{
struct ec_xfer_mock *mock;
mock = list_first_entry_or_null(&cros_kunit_ec_xfer_mock_out, struct ec_xfer_mock, list);
if (mock)
list_del(&mock->list);
return mock;
}
int cros_kunit_readmem_mock_offset;
u8 *cros_kunit_readmem_mock_data;
int cros_kunit_readmem_mock_ret;
/*
 * Mock for EC memory-map reads: records the requested @offset and copies
 * @bytes of canned data into @dest. Returns the preset mock return value.
 */
int cros_kunit_readmem_mock(struct cros_ec_device *ec_dev, unsigned int offset,
			    unsigned int bytes, void *dest)
{
	cros_kunit_readmem_mock_offset = offset;

	memcpy(dest, cros_kunit_readmem_mock_data, bytes);

	return cros_kunit_readmem_mock_ret;
}
/* Reset all mock state and (re)initialize the expectation lists. */
void cros_kunit_mock_reset(void)
{
	cros_kunit_ec_xfer_mock_default_result = 0;
	cros_kunit_ec_xfer_mock_default_ret = 0;
	cros_kunit_ec_cmd_xfer_mock_called = 0;
	cros_kunit_ec_pkt_xfer_mock_called = 0;
	INIT_LIST_HEAD(&cros_kunit_ec_xfer_mock_in);
	INIT_LIST_HEAD(&cros_kunit_ec_xfer_mock_out);

	cros_kunit_readmem_mock_offset = 0;
	cros_kunit_readmem_mock_data = NULL;
	cros_kunit_readmem_mock_ret = 0;
}
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/chrome/cros_kunit_util.c |
// SPDX-License-Identifier: GPL-2.0
// SPI interface for ChromeOS Embedded Controller
//
// Copyright (C) 2012 Google, Inc
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <uapi/linux/sched/types.h>
#include "cros_ec.h"
/* The header byte, which follows the preamble */
#define EC_MSG_HEADER 0xec
/*
* Number of EC preamble bytes we read at a time. Since it takes
* about 400-500us for the EC to respond there is not a lot of
* point in tuning this. If the EC could respond faster then
* we could increase this so that might expect the preamble and
* message to occur in a single transaction. However, the maximum
* SPI transfer size is 256 bytes, so at 5MHz we need a response
* time of perhaps <320us (200 bytes / 1600 bits).
*/
#define EC_MSG_PREAMBLE_COUNT 32
/*
* Allow for a long time for the EC to respond. We support i2c
* tunneling and support fairly long messages for the tunnel (249
* bytes long at the moment). If we're talking to a 100 kHz device
* on the other end and need to transfer ~256 bytes, then we need:
* 10 us/bit * ~10 bits/byte * ~256 bytes = ~25ms
*
* We'll wait 8 times that to handle clock stretching and other
* paranoia. Note that some battery gas gauge ICs claim to have a
* clock stretch of 144ms in rare situations. That's incentive for
* not directly passing i2c through, but it's too late for that for
* existing hardware.
*
* It's pretty unlikely that we'll really see a 249 byte tunnel in
* anything other than testing. If this was more common we might
* consider having slow commands like this require a GET_STATUS
* wait loop. The 'flash write' command would be another candidate
* for this, clocking in at 2-3ms.
*/
#define EC_MSG_DEADLINE_MS 200
/*
* Time between raising the SPI chip select (for the end of a
* transaction) and dropping it again (for the next transaction).
* If we go too fast, the EC will miss the transaction. We know that we
* need at least 70 us with the 16 MHz STM32 EC, so go with 200 us to be
* safe.
*/
#define EC_SPI_RECOVERY_TIME_NS (200 * 1000)
/**
* struct cros_ec_spi - information about a SPI-connected EC
*
* @spi: SPI device we are connected to
* @last_transfer_ns: time that we last finished a transfer.
* @start_of_msg_delay: used to set the delay_usecs on the spi_transfer that
* is sent when we want to turn on CS at the start of a transaction.
* @end_of_msg_delay: used to set the delay_usecs on the spi_transfer that
* is sent when we want to turn off CS at the end of a transaction.
* @high_pri_worker: Used to schedule high priority work.
*/
struct cros_ec_spi {
struct spi_device *spi;
s64 last_transfer_ns;
unsigned int start_of_msg_delay;
unsigned int end_of_msg_delay;
struct kthread_worker *high_pri_worker;
};
typedef int (*cros_ec_xfer_fn_t) (struct cros_ec_device *ec_dev,
struct cros_ec_command *ec_msg);
/**
* struct cros_ec_xfer_work_params - params for our high priority workers
*
* @work: The work_struct needed to queue work
* @fn: The function to use to transfer
* @ec_dev: ChromeOS EC device
* @ec_msg: Message to transfer
* @ret: The return value of the function
*/
struct cros_ec_xfer_work_params {
struct kthread_work work;
cros_ec_xfer_fn_t fn;
struct cros_ec_device *ec_dev;
struct cros_ec_command *ec_msg;
int ret;
};
/* Hex-dump a packet at dev_dbg level; compiled out unless DEBUG is set. */
static void debug_packet(struct device *dev, const char *name, u8 *ptr,
			 int len)
{
#ifdef DEBUG
	dev_dbg(dev, "%s: %*ph\n", name, len, ptr);
#endif
}
/*
 * End the current EC transaction: send an empty transfer whose only effect
 * is the optional end-of-message delay followed by CS deassertion, then
 * restart the inter-transfer recovery timer.
 */
static int terminate_request(struct cros_ec_device *ec_dev)
{
	struct cros_ec_spi *ec_spi = ec_dev->priv;
	struct spi_message msg;
	struct spi_transfer trans;
	int ret;

	/*
	 * Turn off CS, possibly adding a delay to ensure the rising edge
	 * doesn't come too soon after the end of the data.
	 */
	spi_message_init(&msg);
	memset(&trans, 0, sizeof(trans));
	trans.delay.value = ec_spi->end_of_msg_delay;
	trans.delay.unit = SPI_DELAY_UNIT_USECS;
	spi_message_add_tail(&trans, &msg);
	ret = spi_sync_locked(ec_spi->spi, &msg);

	/* Reset end-of-response timer */
	ec_spi->last_transfer_ns = ktime_get_ns();
	if (ret < 0) {
		dev_err(ec_dev->dev,
			"cs-deassert spi transfer failed: %d\n",
			ret);
	}

	return ret;
}
/**
 * receive_n_bytes - receive n bytes from the EC.
 *
 * Assumes buf is a pointer into the ec_dev->din buffer
 *
 * @ec_dev: ChromeOS EC device.
 * @buf: Pointer to the buffer receiving the data.
 * @n: Number of bytes received.
 *
 * Return: 0 on success, -EINVAL if the read would overrun din, or a
 * negative error code from the SPI transfer.
 */
static int receive_n_bytes(struct cros_ec_device *ec_dev, u8 *buf, int n)
{
	struct cros_ec_spi *ec_spi = ec_dev->priv;
	struct spi_transfer trans;
	struct spi_message msg;
	int ret;

	/* Bounds-check against the receive buffer before issuing the read. */
	if (buf - ec_dev->din + n > ec_dev->din_size)
		return -EINVAL;

	memset(&trans, 0, sizeof(trans));
	trans.cs_change = 1;
	trans.rx_buf = buf;
	trans.len = n;

	spi_message_init(&msg);
	spi_message_add_tail(&trans, &msg);
	ret = spi_sync_locked(ec_spi->spi, &msg);
	if (ret < 0)
		dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);

	return ret;
}
/**
 * cros_ec_spi_receive_packet - Receive a packet from the EC.
 *
 * This function has two phases: reading the preamble bytes (since if we read
 * data from the EC before it is ready to send, we just get preamble) and
 * reading the actual message.
 *
 * The received data is placed into ec_dev->din.
 *
 * @ec_dev: ChromeOS EC device
 * @need_len: Number of message bytes we need to read
 *
 * Return: 0 on success or a negative error code.
 */
static int cros_ec_spi_receive_packet(struct cros_ec_device *ec_dev,
				      int need_len)
{
	struct ec_host_response *response;
	u8 *ptr, *end;
	int ret;
	unsigned long deadline;
	int todo;

	if (ec_dev->din_size < EC_MSG_PREAMBLE_COUNT)
		return -EINVAL;

	/* Receive data until we see the header byte */
	deadline = jiffies + msecs_to_jiffies(EC_MSG_DEADLINE_MS);
	while (true) {
		unsigned long start_jiffies = jiffies;

		ret = receive_n_bytes(ec_dev,
				      ec_dev->din,
				      EC_MSG_PREAMBLE_COUNT);
		if (ret < 0)
			return ret;

		ptr = ec_dev->din;
		for (end = ptr + EC_MSG_PREAMBLE_COUNT; ptr != end; ptr++) {
			if (*ptr == EC_SPI_FRAME_START) {
				dev_dbg(ec_dev->dev, "msg found at %zd\n",
					ptr - ec_dev->din);
				break;
			}
		}
		if (ptr != end)
			break;

		/*
		 * Use the time at the start of the loop as a timeout.  This
		 * gives us one last shot at getting the transfer and is useful
		 * in case we got context switched out for a while.
		 */
		if (time_after(start_jiffies, deadline)) {
			dev_warn(ec_dev->dev, "EC failed to respond in time\n");
			return -ETIMEDOUT;
		}
	}

	/*
	 * ptr now points to the header byte. Copy any valid data to the
	 * start of our buffer
	 */
	todo = end - ++ptr;
	todo = min(todo, need_len);
	memmove(ec_dev->din, ptr, todo);
	ptr = ec_dev->din + todo;
	dev_dbg(ec_dev->dev, "need %d, got %d bytes from preamble\n",
		need_len, todo);
	need_len -= todo;

	/* If the entire response struct wasn't read, get the rest of it. */
	if (todo < sizeof(*response)) {
		ret = receive_n_bytes(ec_dev, ptr, sizeof(*response) - todo);
		if (ret < 0)
			return -EBADMSG;
		ptr += (sizeof(*response) - todo);
		todo = sizeof(*response);
	}

	response = (struct ec_host_response *)ec_dev->din;

	/* Abort if data_len is too large. */
	if (response->data_len > ec_dev->din_size)
		return -EMSGSIZE;

	/* Receive data until we have it all */
	while (need_len > 0) {
		/*
		 * We can't support transfers larger than the SPI FIFO size
		 * unless we have DMA. We don't have DMA on the ISP SPI ports
		 * for Exynos. We need a way of asking SPI driver for
		 * maximum-supported transfer size.
		 */
		todo = min(need_len, 256);
		dev_dbg(ec_dev->dev, "loop, todo=%d, need_len=%d, ptr=%zd\n",
			todo, need_len, ptr - ec_dev->din);

		ret = receive_n_bytes(ec_dev, ptr, todo);
		if (ret < 0)
			return ret;

		ptr += todo;
		need_len -= todo;
	}

	dev_dbg(ec_dev->dev, "loop done, ptr=%zd\n", ptr - ec_dev->din);

	return 0;
}
/**
 * cros_ec_spi_receive_response - Receive a response from the EC.
 *
 * This function has two phases: reading the preamble bytes (since if we read
 * data from the EC before it is ready to send, we just get preamble) and
 * reading the actual message.
 *
 * The received data is placed into ec_dev->din.
 *
 * @ec_dev: ChromeOS EC device
 * @need_len: Number of message bytes we need to read
 *
 * Return: 0 on success or a negative error code.
 */
static int cros_ec_spi_receive_response(struct cros_ec_device *ec_dev,
					int need_len)
{
	u8 *ptr, *end;
	int ret;
	unsigned long deadline;
	int todo;

	if (ec_dev->din_size < EC_MSG_PREAMBLE_COUNT)
		return -EINVAL;

	/* Receive data until we see the header byte */
	deadline = jiffies + msecs_to_jiffies(EC_MSG_DEADLINE_MS);
	while (true) {
		unsigned long start_jiffies = jiffies;

		ret = receive_n_bytes(ec_dev,
				      ec_dev->din,
				      EC_MSG_PREAMBLE_COUNT);
		if (ret < 0)
			return ret;

		ptr = ec_dev->din;
		for (end = ptr + EC_MSG_PREAMBLE_COUNT; ptr != end; ptr++) {
			if (*ptr == EC_SPI_FRAME_START) {
				dev_dbg(ec_dev->dev, "msg found at %zd\n",
					ptr - ec_dev->din);
				break;
			}
		}
		if (ptr != end)
			break;

		/*
		 * Use the time at the start of the loop as a timeout.  This
		 * gives us one last shot at getting the transfer and is useful
		 * in case we got context switched out for a while.
		 */
		if (time_after(start_jiffies, deadline)) {
			dev_warn(ec_dev->dev, "EC failed to respond in time\n");
			return -ETIMEDOUT;
		}
	}

	/*
	 * ptr now points to the header byte. Copy any valid data to the
	 * start of our buffer
	 */
	todo = end - ++ptr;
	todo = min(todo, need_len);
	memmove(ec_dev->din, ptr, todo);
	ptr = ec_dev->din + todo;
	dev_dbg(ec_dev->dev, "need %d, got %d bytes from preamble\n",
		need_len, todo);
	need_len -= todo;

	/* Receive data until we have it all */
	while (need_len > 0) {
		/*
		 * We can't support transfers larger than the SPI FIFO size
		 * unless we have DMA. We don't have DMA on the ISP SPI ports
		 * for Exynos. We need a way of asking SPI driver for
		 * maximum-supported transfer size.
		 */
		todo = min(need_len, 256);
		dev_dbg(ec_dev->dev, "loop, todo=%d, need_len=%d, ptr=%zd\n",
			todo, need_len, ptr - ec_dev->din);

		ret = receive_n_bytes(ec_dev, ptr, todo);
		if (ret < 0)
			return ret;

		debug_packet(ec_dev->dev, "interim", ptr, todo);
		ptr += todo;
		need_len -= todo;
	}

	dev_dbg(ec_dev->dev, "loop done, ptr=%zd\n", ptr - ec_dev->din);

	return 0;
}
/**
 * do_cros_ec_pkt_xfer_spi - Transfer a packet over SPI and receive the reply
 *
 * @ec_dev: ChromeOS EC device
 * @ec_msg: Message to transfer
 *
 * Return: number of payload bytes received on success, or a negative error
 * code (-EAGAIN when the EC was not ready and the transfer may be retried).
 */
static int do_cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
				   struct cros_ec_command *ec_msg)
{
	struct ec_host_response *response;
	struct cros_ec_spi *ec_spi = ec_dev->priv;
	struct spi_transfer trans, trans_delay;
	struct spi_message msg;
	int i, len;
	u8 *ptr;
	u8 *rx_buf;
	u8 sum;
	u8 rx_byte;
	int ret = 0, final_ret;
	unsigned long delay;

	len = cros_ec_prepare_tx(ec_dev, ec_msg);
	if (len < 0)
		return len;
	dev_dbg(ec_dev->dev, "prepared, len=%d\n", len);

	/* If it's too soon to do another transaction, wait */
	delay = ktime_get_ns() - ec_spi->last_transfer_ns;
	if (delay < EC_SPI_RECOVERY_TIME_NS)
		ndelay(EC_SPI_RECOVERY_TIME_NS - delay);

	rx_buf = kzalloc(len, GFP_KERNEL);
	if (!rx_buf)
		return -ENOMEM;

	/* Hold the bus lock across the whole send/receive/terminate cycle. */
	spi_bus_lock(ec_spi->spi->master);

	/*
	 * Leave a gap between CS assertion and clocking of data to allow the
	 * EC time to wakeup.
	 */
	spi_message_init(&msg);
	if (ec_spi->start_of_msg_delay) {
		memset(&trans_delay, 0, sizeof(trans_delay));
		trans_delay.delay.value = ec_spi->start_of_msg_delay;
		trans_delay.delay.unit = SPI_DELAY_UNIT_USECS;
		spi_message_add_tail(&trans_delay, &msg);
	}

	/* Transmit phase - send our message */
	memset(&trans, 0, sizeof(trans));
	trans.tx_buf = ec_dev->dout;
	trans.rx_buf = rx_buf;
	trans.len = len;
	trans.cs_change = 1;
	spi_message_add_tail(&trans, &msg);
	ret = spi_sync_locked(ec_spi->spi, &msg);

	/* Get the response */
	if (!ret) {
		/* Verify that EC can process command */
		for (i = 0; i < len; i++) {
			rx_byte = rx_buf[i];
			/*
			 * Seeing the PAST_END, RX_BAD_DATA, or NOT_READY
			 * markers are all signs that the EC didn't fully
			 * receive our command. e.g., if the EC is flashing
			 * itself, it can't respond to any commands and instead
			 * clocks out EC_SPI_PAST_END from its SPI hardware
			 * buffer. Similar occurrences can happen if the AP is
			 * too slow to clock out data after asserting CS -- the
			 * EC will abort and fill its buffer with
			 * EC_SPI_RX_BAD_DATA.
			 *
			 * In all cases, these errors should be safe to retry.
			 * Report -EAGAIN and let the caller decide what to do
			 * about that.
			 */
			if (rx_byte == EC_SPI_PAST_END  ||
			    rx_byte == EC_SPI_RX_BAD_DATA ||
			    rx_byte == EC_SPI_NOT_READY) {
				ret = -EAGAIN;
				break;
			}
		}
	}

	if (!ret)
		ret = cros_ec_spi_receive_packet(ec_dev,
				ec_msg->insize + sizeof(*response));
	else if (ret != -EAGAIN)
		dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);

	/* Always deassert CS, but preserve the first error seen. */
	final_ret = terminate_request(ec_dev);

	spi_bus_unlock(ec_spi->spi->master);

	if (!ret)
		ret = final_ret;
	if (ret < 0)
		goto exit;

	ptr = ec_dev->din;

	/* check response error code */
	response = (struct ec_host_response *)ptr;
	ec_msg->result = response->result;

	ret = cros_ec_check_result(ec_dev, ec_msg);
	if (ret)
		goto exit;

	len = response->data_len;
	sum = 0;
	if (len > ec_msg->insize) {
		dev_err(ec_dev->dev, "packet too long (%d bytes, expected %d)",
			len, ec_msg->insize);
		ret = -EMSGSIZE;
		goto exit;
	}

	for (i = 0; i < sizeof(*response); i++)
		sum += ptr[i];

	/* copy response packet payload and compute checksum */
	memcpy(ec_msg->data, ptr + sizeof(*response), len);
	for (i = 0; i < len; i++)
		sum += ec_msg->data[i];

	/* Header + payload bytes must sum to zero (mod 256). */
	if (sum) {
		dev_err(ec_dev->dev,
			"bad packet checksum, calculated %x\n",
			sum);
		ret = -EBADMSG;
		goto exit;
	}

	ret = len;
exit:
	kfree(rx_buf);
	if (ec_msg->command == EC_CMD_REBOOT_EC)
		msleep(EC_REBOOT_DELAY_MS);

	return ret;
}
/**
 * do_cros_ec_cmd_xfer_spi - Transfer a message over SPI and receive the reply
 *
 * @ec_dev: ChromeOS EC device
 * @ec_msg: Message to transfer
 *
 * Return: number of payload bytes received on success, or a negative error
 * code (-EAGAIN when the EC was not ready and the transfer may be retried).
 */
static int do_cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
				   struct cros_ec_command *ec_msg)
{
	struct cros_ec_spi *ec_spi = ec_dev->priv;
	struct spi_transfer trans;
	struct spi_message msg;
	int i, len;
	u8 *ptr;
	u8 *rx_buf;
	u8 rx_byte;
	int sum;
	int ret = 0, final_ret;
	unsigned long delay;

	len = cros_ec_prepare_tx(ec_dev, ec_msg);
	if (len < 0)
		return len;
	dev_dbg(ec_dev->dev, "prepared, len=%d\n", len);

	/* If it's too soon to do another transaction, wait */
	delay = ktime_get_ns() - ec_spi->last_transfer_ns;
	if (delay < EC_SPI_RECOVERY_TIME_NS)
		ndelay(EC_SPI_RECOVERY_TIME_NS - delay);

	rx_buf = kzalloc(len, GFP_KERNEL);
	if (!rx_buf)
		return -ENOMEM;

	/* Hold the bus lock across the whole send/receive/terminate cycle. */
	spi_bus_lock(ec_spi->spi->master);

	/* Transmit phase - send our message */
	debug_packet(ec_dev->dev, "out", ec_dev->dout, len);
	memset(&trans, 0, sizeof(trans));
	trans.tx_buf = ec_dev->dout;
	trans.rx_buf = rx_buf;
	trans.len = len;
	trans.cs_change = 1;
	spi_message_init(&msg);
	spi_message_add_tail(&trans, &msg);

	ret = spi_sync_locked(ec_spi->spi, &msg);

	/* Get the response */
	if (!ret) {
		/* Verify that EC can process command */
		for (i = 0; i < len; i++) {
			rx_byte = rx_buf[i];
			/* See comments in cros_ec_pkt_xfer_spi() */
			if (rx_byte == EC_SPI_PAST_END  ||
			    rx_byte == EC_SPI_RX_BAD_DATA ||
			    rx_byte == EC_SPI_NOT_READY) {
				ret = -EAGAIN;
				break;
			}
		}
	}

	if (!ret)
		ret = cros_ec_spi_receive_response(ec_dev,
				ec_msg->insize + EC_MSG_TX_PROTO_BYTES);
	else if (ret != -EAGAIN)
		dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);

	/* Always deassert CS, but preserve the first error seen. */
	final_ret = terminate_request(ec_dev);

	spi_bus_unlock(ec_spi->spi->master);

	if (!ret)
		ret = final_ret;
	if (ret < 0)
		goto exit;

	ptr = ec_dev->din;

	/* check response error code */
	ec_msg->result = ptr[0];
	ret = cros_ec_check_result(ec_dev, ec_msg);
	if (ret)
		goto exit;

	len = ptr[1];
	sum = ptr[0] + ptr[1];
	if (len > ec_msg->insize) {
		dev_err(ec_dev->dev, "packet too long (%d bytes, expected %d)",
			len, ec_msg->insize);
		ret = -ENOSPC;
		goto exit;
	}

	/* copy response packet payload and compute checksum */
	for (i = 0; i < len; i++) {
		sum += ptr[i + 2];
		if (ec_msg->insize)
			ec_msg->data[i] = ptr[i + 2];
	}
	sum &= 0xff;

	debug_packet(ec_dev->dev, "in", ptr, len + 3);

	/* The trailing byte must match the running 8-bit checksum. */
	if (sum != ptr[len + 2]) {
		dev_err(ec_dev->dev,
			"bad packet checksum, expected %02x, got %02x\n",
			sum, ptr[len + 2]);
		ret = -EBADMSG;
		goto exit;
	}

	ret = len;
exit:
	kfree(rx_buf);
	if (ec_msg->command == EC_CMD_REBOOT_EC)
		msleep(EC_REBOOT_DELAY_MS);

	return ret;
}
/* Runs on the high-priority kthread worker; performs the actual transfer. */
static void cros_ec_xfer_high_pri_work(struct kthread_work *work)
{
	struct cros_ec_xfer_work_params *params;

	params = container_of(work, struct cros_ec_xfer_work_params, work);
	params->ret = params->fn(params->ec_dev, params->ec_msg);
}
/*
 * Run @fn on the high-priority worker and wait for it to complete,
 * returning its result. The params live on this stack frame, which is
 * safe because kthread_flush_work() blocks until the work has run.
 */
static int cros_ec_xfer_high_pri(struct cros_ec_device *ec_dev,
				 struct cros_ec_command *ec_msg,
				 cros_ec_xfer_fn_t fn)
{
	struct cros_ec_spi *ec_spi = ec_dev->priv;
	struct cros_ec_xfer_work_params params = {
		.work = KTHREAD_WORK_INIT(params.work,
					  cros_ec_xfer_high_pri_work),
		.ec_dev = ec_dev,
		.ec_msg = ec_msg,
		.fn = fn,
	};

	/*
	 * This looks a bit ridiculous. Why do the work on a
	 * different thread if we're just going to block waiting for
	 * the thread to finish? The key here is that the thread is
	 * running at high priority but the calling context might not
	 * be. We need to be at high priority to avoid getting
	 * context switched out for too long and the EC giving up on
	 * the transfer.
	 */
	kthread_queue_work(ec_spi->high_pri_worker, &params.work);
	kthread_flush_work(&params.work);

	return params.ret;
}
/* pkt_xfer entry point: run the packet transfer at high priority. */
static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
				struct cros_ec_command *ec_msg)
{
	return cros_ec_xfer_high_pri(ec_dev, ec_msg, do_cros_ec_pkt_xfer_spi);
}
/* cmd_xfer entry point: run the command transfer at high priority. */
static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
				struct cros_ec_command *ec_msg)
{
	return cros_ec_xfer_high_pri(ec_dev, ec_msg, do_cros_ec_cmd_xfer_spi);
}
/*
 * Read the optional start/end-of-message delays from the device tree.
 * Missing properties leave the (zeroed) defaults untouched.
 */
static void cros_ec_spi_dt_probe(struct cros_ec_spi *ec_spi, struct device *dev)
{
	struct device_node *np = dev->of_node;
	u32 delay;

	if (!of_property_read_u32(np, "google,cros-ec-spi-pre-delay", &delay))
		ec_spi->start_of_msg_delay = delay;

	if (!of_property_read_u32(np, "google,cros-ec-spi-msg-delay", &delay))
		ec_spi->end_of_msg_delay = delay;
}
/* devm release action: tear down the high-priority kthread worker. */
static void cros_ec_spi_high_pri_release(void *worker)
{
	kthread_destroy_worker(worker);
}
/*
 * Create the high-priority worker used for EC transfers, tie its lifetime
 * to @dev via devm, and bump it to SCHED_FIFO so transfers aren't starved.
 */
static int cros_ec_spi_devm_high_pri_alloc(struct device *dev,
					   struct cros_ec_spi *ec_spi)
{
	int err;

	ec_spi->high_pri_worker =
		kthread_create_worker(0, "cros_ec_spi_high_pri");

	if (IS_ERR(ec_spi->high_pri_worker)) {
		err = PTR_ERR(ec_spi->high_pri_worker);
		dev_err(dev, "Can't create cros_ec high pri worker: %d\n", err);
		return err;
	}

	err = devm_add_action_or_reset(dev, cros_ec_spi_high_pri_release,
				       ec_spi->high_pri_worker);
	if (err)
		return err;

	sched_set_fifo(ec_spi->high_pri_worker->task);

	return 0;
}
/*
 * Bind a SPI device as a ChromeOS EC: configure the bus, allocate device
 * state and transfer buffers, create the high-priority worker, then
 * register with the cros_ec core. All allocations are devm-managed.
 */
static int cros_ec_spi_probe(struct spi_device *spi)
{
	struct device *dev = &spi->dev;
	struct cros_ec_device *ec_dev;
	struct cros_ec_spi *ec_spi;
	int err;

	/* Ask the SPI core for realtime message pumping as well. */
	spi->rt = true;
	err = spi_setup(spi);
	if (err < 0)
		return err;

	ec_spi = devm_kzalloc(dev, sizeof(*ec_spi), GFP_KERNEL);
	if (ec_spi == NULL)
		return -ENOMEM;
	ec_spi->spi = spi;
	ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL);
	if (!ec_dev)
		return -ENOMEM;

	/* Check for any DT properties */
	cros_ec_spi_dt_probe(ec_spi, dev);

	spi_set_drvdata(spi, ec_dev);
	ec_dev->dev = dev;
	ec_dev->priv = ec_spi;
	ec_dev->irq = spi->irq;
	ec_dev->cmd_xfer = cros_ec_cmd_xfer_spi;
	ec_dev->pkt_xfer = cros_ec_pkt_xfer_spi;
	ec_dev->phys_name = dev_name(&ec_spi->spi->dev);
	/* din must fit preamble + response header + protocol-info payload. */
	ec_dev->din_size = EC_MSG_PREAMBLE_COUNT +
			   sizeof(struct ec_host_response) +
			   sizeof(struct ec_response_get_protocol_info);
	ec_dev->dout_size = sizeof(struct ec_host_request);

	ec_spi->last_transfer_ns = ktime_get_ns();

	err = cros_ec_spi_devm_high_pri_alloc(dev, ec_spi);
	if (err)
		return err;

	err = cros_ec_register(ec_dev);
	if (err) {
		dev_err(dev, "cannot register EC\n");
		return err;
	}

	device_init_wakeup(&spi->dev, true);

	return 0;
}
/* Unbind: deregister from the cros_ec core (devm frees everything else). */
static void cros_ec_spi_remove(struct spi_device *spi)
{
	struct cros_ec_device *ec_dev = spi_get_drvdata(spi);

	cros_ec_unregister(ec_dev);
}
#ifdef CONFIG_PM_SLEEP
/* Forward system suspend to the cros_ec core. */
static int cros_ec_spi_suspend(struct device *dev)
{
	struct cros_ec_device *ec_dev = dev_get_drvdata(dev);

	return cros_ec_suspend(ec_dev);
}

/* Forward system resume to the cros_ec core. */
static int cros_ec_spi_resume(struct device *dev)
{
	struct cros_ec_device *ec_dev = dev_get_drvdata(dev);

	return cros_ec_resume(ec_dev);
}
#endif
static SIMPLE_DEV_PM_OPS(cros_ec_spi_pm_ops, cros_ec_spi_suspend,
cros_ec_spi_resume);
static const struct of_device_id cros_ec_spi_of_match[] = {
{ .compatible = "google,cros-ec-spi", },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, cros_ec_spi_of_match);
static const struct spi_device_id cros_ec_spi_id[] = {
{ "cros-ec-spi", 0 },
{ }
};
MODULE_DEVICE_TABLE(spi, cros_ec_spi_id);
static struct spi_driver cros_ec_driver_spi = {
.driver = {
.name = "cros-ec-spi",
.of_match_table = cros_ec_spi_of_match,
.pm = &cros_ec_spi_pm_ops,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = cros_ec_spi_probe,
.remove = cros_ec_spi_remove,
.id_table = cros_ec_spi_id,
};
module_spi_driver(cros_ec_driver_spi);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SPI interface for ChromeOS Embedded Controller");
| linux-master | drivers/platform/chrome/cros_ec_spi.c |
// SPDX-License-Identifier: GPL-2.0
// Driver to instantiate Chromebook ramoops device.
//
// Copyright (C) 2013 Google, Inc.
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pstore_ram.h>
/* Systems known to preserve the legacy VGA memory window across reboots. */
static const struct dmi_system_id chromeos_pstore_dmi_table[] __initconst = {
	{
		/*
		 * Today all Chromebooks/boxes ship with Google_* as version and
		 * coreboot as bios vendor. No other systems with this
		 * combination are known to date.
		 */
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
			DMI_MATCH(DMI_BIOS_VERSION, "Google_"),
		},
	},
	{
		/* x86-alex, the first Samsung Chromebook. */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Alex"),
		},
	},
	{
		/* x86-mario, the Cr-48 pilot device from Google. */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IEC"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Mario"),
		},
	},
	{
		/* x86-zgb, the first Acer Chromebook. */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
		},
	},
	{ }
};
MODULE_DEVICE_TABLE(dmi, chromeos_pstore_dmi_table);

/*
 * On x86 chromebooks/boxes, the firmware will keep the legacy VGA memory
 * range untouched across reboots, so we use that to store our pstore
 * contents for panic logs, etc.
 */
static struct ramoops_platform_data chromeos_ramoops_data = {
	.mem_size	= 0x100000,
	.mem_address	= 0xf00000,
	.record_size	= 0x40000,
	.console_size	= 0x20000,
	.ftrace_size	= 0x20000,
	.pmsg_size	= 0x20000,
	.max_reason	= KMSG_DUMP_OOPS,
};

/* The ramoops core binds against this device once registered. */
static struct platform_device chromeos_ramoops = {
	.name = "ramoops",
	.dev = {
		.platform_data = &chromeos_ramoops_data,
	},
};
#ifdef CONFIG_ACPI
static const struct acpi_device_id cros_ramoops_acpi_match[] = {
	{ "GOOG9999", 0 },
	{ }
};
MODULE_DEVICE_TABLE(acpi, cros_ramoops_acpi_match);

/* Stub driver used only to locate the GOOG9999 device via probe(). */
static struct platform_driver chromeos_ramoops_acpi = {
	.driver		= {
		.name = "chromeos_pstore",
		.acpi_match_table = ACPI_PTR(cros_ramoops_acpi_match),
	},
};
/*
 * Pull the firmware-described pstore memory window from the GOOG9999 ACPI
 * device and use it to override the hardcoded ramoops defaults.
 */
static int __init chromeos_probe_acpi(struct platform_device *pdev)
{
	struct resource *mem;
	resource_size_t size;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -ENOMEM;

	size = resource_size(mem);
	if (!mem->start || !size)
		return -ENOMEM;

	pr_info("chromeos ramoops using acpi device.\n");

	chromeos_ramoops_data.mem_size = size;
	chromeos_ramoops_data.mem_address = mem->start;

	return 0;
}
/* True iff an ACPI-described ramoops region was found and probed. */
static bool __init chromeos_check_acpi(void)
{
	return platform_driver_probe(&chromeos_ramoops_acpi,
				     chromeos_probe_acpi) == 0;
}
#else
static inline bool chromeos_check_acpi(void) { return false; }
#endif
/*
 * Register the ramoops device when either firmware (ACPI) describes a
 * region or the machine matches the known DMI table; -ENODEV otherwise.
 */
static int __init chromeos_pstore_init(void)
{
	bool have_acpi_dev;

	/* Prefer firmware-provided (ACPI) values over the DMI fallbacks. */
	have_acpi_dev = chromeos_check_acpi();
	if (!have_acpi_dev && !dmi_check_system(chromeos_pstore_dmi_table))
		return -ENODEV;

	return platform_device_register(&chromeos_ramoops);
}
/* Module unload: remove the ramoops device registered at init. */
static void __exit chromeos_pstore_exit(void)
{
	platform_device_unregister(&chromeos_ramoops);
}

module_init(chromeos_pstore_init);
module_exit(chromeos_pstore_exit);

MODULE_DESCRIPTION("ChromeOS pstore module");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/platform/chrome/chromeos_pstore.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ChromeOS specific ACPI extensions
*
* Copyright 2022 Google LLC
*
* This driver attaches to the ChromeOS ACPI device and then exports the
* values reported by the ACPI in a sysfs directory. All values are
* presented in the string form (numbers as decimal values) and can be
* accessed as the contents of the appropriate read only files in the
* sysfs directory tree.
*/
#include <linux/acpi.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#define ACPI_ATTR_NAME_LEN 4
#define DEV_ATTR(_var, _name) \
static struct device_attribute dev_attr_##_var = \
__ATTR(_name, 0444, chromeos_first_level_attr_show, NULL);
#define GPIO_ATTR_GROUP(_group, _name, _num) \
static umode_t attr_is_visible_gpio_##_num(struct kobject *kobj, \
struct attribute *attr, int n) \
{ \
if (_num < chromeos_acpi_gpio_groups) \
return attr->mode; \
return 0; \
} \
static ssize_t chromeos_attr_show_gpio_##_num(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
char name[ACPI_ATTR_NAME_LEN + 1]; \
int ret, num; \
\
ret = parse_attr_name(attr->attr.name, name, &num); \
if (ret) \
return ret; \
return chromeos_acpi_evaluate_method(dev, _num, num, name, buf); \
} \
static struct device_attribute dev_attr_0_##_group = \
__ATTR(GPIO.0, 0444, chromeos_attr_show_gpio_##_num, NULL); \
static struct device_attribute dev_attr_1_##_group = \
__ATTR(GPIO.1, 0444, chromeos_attr_show_gpio_##_num, NULL); \
static struct device_attribute dev_attr_2_##_group = \
__ATTR(GPIO.2, 0444, chromeos_attr_show_gpio_##_num, NULL); \
static struct device_attribute dev_attr_3_##_group = \
__ATTR(GPIO.3, 0444, chromeos_attr_show_gpio_##_num, NULL); \
\
static struct attribute *attrs_##_group[] = { \
&dev_attr_0_##_group.attr, \
&dev_attr_1_##_group.attr, \
&dev_attr_2_##_group.attr, \
&dev_attr_3_##_group.attr, \
NULL \
}; \
static const struct attribute_group attr_group_##_group = { \
.name = _name, \
.is_visible = attr_is_visible_gpio_##_num, \
.attrs = attrs_##_group, \
};
static unsigned int chromeos_acpi_gpio_groups;
/* Parse the ACPI package and return the data related to that attribute */
/*
 * Render one element of an ACPI package into @buf.
 * @pkg_num selects the element; if that element is itself a package,
 * @sub_pkg_num selects within it. Integers and strings are emitted as
 * text; buffers are hex-dumped (16 bytes per line) and truncated with
 * ".." to fit a PAGE_SIZE sysfs buffer.
 * Returns the number of bytes written or -EINVAL.
 */
static int chromeos_acpi_handle_package(struct device *dev, union acpi_object *obj,
					int pkg_num, int sub_pkg_num, char *name, char *buf)
{
	union acpi_object *element = obj->package.elements;

	if (pkg_num >= obj->package.count)
		return -EINVAL;
	element += pkg_num;

	if (element->type == ACPI_TYPE_PACKAGE) {
		if (sub_pkg_num >= element->package.count)
			return -EINVAL;
		/* select sub element inside this package */
		element = element->package.elements;
		element += sub_pkg_num;
	}

	switch (element->type) {
	case ACPI_TYPE_INTEGER:
		return sysfs_emit(buf, "%d\n", (int)element->integer.value);
	case ACPI_TYPE_STRING:
		return sysfs_emit(buf, "%s\n", element->string.pointer);
	case ACPI_TYPE_BUFFER:
		{
			int i, r, at, room_left;
			const int byte_per_line = 16;

			at = 0;
			room_left = PAGE_SIZE - 1;
			for (i = 0; i < element->buffer.length && room_left; i += byte_per_line) {
				r = hex_dump_to_buffer(element->buffer.pointer + i,
						       element->buffer.length - i,
						       byte_per_line, 1, buf + at, room_left,
						       false);
				if (r > room_left)
					goto truncating;
				at += r;
				room_left -= r;

				/* one line of hex per iteration */
				r = sysfs_emit_at(buf, at, "\n");
				if (!r)
					goto truncating;
				at += r;
				room_left -= r;
			}

			buf[at] = 0;
			return at;
truncating:
			dev_info_once(dev, "truncating sysfs content for %s\n", name);
			sysfs_emit_at(buf, PAGE_SIZE - 4, "..\n");
			return PAGE_SIZE - 1;
		}
	default:
		dev_err(dev, "element type %d not supported\n", element->type);
		return -EINVAL;
	}
}
static int chromeos_acpi_evaluate_method(struct device *dev, int pkg_num, int sub_pkg_num,
char *name, char *buf)
{
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_status status;
int ret = -EINVAL;
status = acpi_evaluate_object(ACPI_HANDLE(dev), name, NULL, &output);
if (ACPI_FAILURE(status)) {
dev_err(dev, "failed to retrieve %s. %s\n", name, acpi_format_exception(status));
return ret;
}
if (((union acpi_object *)output.pointer)->type == ACPI_TYPE_PACKAGE)
ret = chromeos_acpi_handle_package(dev, output.pointer, pkg_num, sub_pkg_num,
name, buf);
kfree(output.pointer);
return ret;
}
/*
 * Split a sysfs attribute name like "VBNV.0" into the 4-character ACPI
 * method name and an optional trailing index. Names that fit entirely in
 * attr_name leave *attr_num untouched and return 0.
 */
static int parse_attr_name(const char *name, char *attr_name, int *attr_num)
{
	if (strscpy(attr_name, name, ACPI_ATTR_NAME_LEN + 1) != -E2BIG)
		return 0;

	/* Name was truncated: everything past "XXXX." is the index. */
	return kstrtoint(&name[ACPI_ATTR_NAME_LEN + 1], 0, attr_num);
}
/* sysfs show for first-level attributes: evaluate the matching ACPI method. */
static ssize_t chromeos_first_level_attr_show(struct device *dev, struct device_attribute *attr,
					      char *buf)
{
	char attr_name[ACPI_ATTR_NAME_LEN + 1];
	int ret, attr_num = 0;

	ret = parse_attr_name(attr->attr.name, attr_name, &attr_num);
	if (ret)
		return ret;

	return chromeos_acpi_evaluate_method(dev, attr_num, 0, attr_name, buf);
}
/*
 * Evaluate the device's GPIO ACPI method and return its top-level package
 * count; 0 on any failure.
 */
static unsigned int get_gpio_pkg_num(struct device *dev)
{
	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *pkg;
	acpi_status status;
	unsigned int num_groups = 0;
	char *name = "GPIO";

	status = acpi_evaluate_object(ACPI_HANDLE(dev), name, NULL, &output);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to retrieve %s. %s\n", name, acpi_format_exception(status));
		return num_groups;
	}

	pkg = output.pointer;
	if (pkg->type == ACPI_TYPE_PACKAGE)
		num_groups = pkg->package.count;

	kfree(output.pointer);
	return num_groups;
}
/* First-level attributes, each backed by the same-named ACPI method. */
DEV_ATTR(binf2, BINF.2)
DEV_ATTR(binf3, BINF.3)
DEV_ATTR(chsw, CHSW)
DEV_ATTR(fmap, FMAP)
DEV_ATTR(frid, FRID)
DEV_ATTR(fwid, FWID)
DEV_ATTR(hwid, HWID)
DEV_ATTR(meck, MECK)
DEV_ATTR(vbnv0, VBNV.0)
DEV_ATTR(vbnv1, VBNV.1)
DEV_ATTR(vdat, VDAT)

static struct attribute *first_level_attrs[] = {
	&dev_attr_binf2.attr,
	&dev_attr_binf3.attr,
	&dev_attr_chsw.attr,
	&dev_attr_fmap.attr,
	&dev_attr_frid.attr,
	&dev_attr_fwid.attr,
	&dev_attr_hwid.attr,
	&dev_attr_meck.attr,
	&dev_attr_vbnv0.attr,
	&dev_attr_vbnv1.attr,
	&dev_attr_vdat.attr,
	NULL
};

static const struct attribute_group first_level_attr_group = {
	.attrs = first_level_attrs,
};

/*
 * Every platform can have a different number of GPIO attribute groups.
 * Define upper limit groups. At run time, the platform decides to show
 * the present number of groups only, others are hidden.
 */
GPIO_ATTR_GROUP(gpio0, "GPIO.0", 0)
GPIO_ATTR_GROUP(gpio1, "GPIO.1", 1)
GPIO_ATTR_GROUP(gpio2, "GPIO.2", 2)
GPIO_ATTR_GROUP(gpio3, "GPIO.3", 3)
GPIO_ATTR_GROUP(gpio4, "GPIO.4", 4)
GPIO_ATTR_GROUP(gpio5, "GPIO.5", 5)
GPIO_ATTR_GROUP(gpio6, "GPIO.6", 6)
GPIO_ATTR_GROUP(gpio7, "GPIO.7", 7)

/* All groups; unused GPIO groups are hidden via their is_visible hooks. */
static const struct attribute_group *chromeos_acpi_all_groups[] = {
	&first_level_attr_group,
	&attr_group_gpio0,
	&attr_group_gpio1,
	&attr_group_gpio2,
	&attr_group_gpio3,
	&attr_group_gpio4,
	&attr_group_gpio5,
	&attr_group_gpio6,
	&attr_group_gpio7,
	NULL
};
static int chromeos_acpi_device_probe(struct platform_device *pdev)
{
chromeos_acpi_gpio_groups = get_gpio_pkg_num(&pdev->dev);
/*
* If the platform has more GPIO attribute groups than the number of
* groups this driver supports, give out a warning message.
*/
if (chromeos_acpi_gpio_groups > ARRAY_SIZE(chromeos_acpi_all_groups) - 2)
dev_warn(&pdev->dev, "Only %zu GPIO attr groups supported by the driver out of total %u.\n",
ARRAY_SIZE(chromeos_acpi_all_groups) - 2, chromeos_acpi_gpio_groups);
return 0;
}
static const struct acpi_device_id chromeos_device_ids[] = {
	{ "GGL0001", 0 },
	{ "GOOG0016", 0 },
	{}
};
MODULE_DEVICE_TABLE(acpi, chromeos_device_ids);

/* dev_groups is installed automatically by the driver core at bind time. */
static struct platform_driver chromeos_acpi_device_driver = {
	.probe = chromeos_acpi_device_probe,
	.driver = {
		.name = KBUILD_MODNAME,
		.dev_groups = chromeos_acpi_all_groups,
		.acpi_match_table = chromeos_device_ids,
	}
};
module_platform_driver(chromeos_acpi_device_driver);
MODULE_AUTHOR("Muhammad Usama Anjum <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ChromeOS specific ACPI extensions");
| linux-master | drivers/platform/chrome/chromeos_acpi.c |
// SPDX-License-Identifier: GPL-2.0+
// Expose the Chromebook Pixel lightbar to userspace
//
// Copyright (C) 2014 Google, Inc.
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#define DRV_NAME "cros-ec-lightbar"
/* Rate-limit the lightbar interface to prevent DoS. */
static unsigned long lb_interval_jiffies = 50 * HZ / 1000;
/*
* Whether or not we have given userspace control of the lightbar.
* If this is true, we won't do anything during suspend/resume.
*/
static bool userspace_control;
/* Show the current rate-limit interval in milliseconds. */
static ssize_t interval_msec_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	unsigned long msec = lb_interval_jiffies * 1000 / HZ;

	return sysfs_emit(buf, "%lu\n", msec);
}

/* Set the rate-limit interval (milliseconds, stored as jiffies). */
static ssize_t interval_msec_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned long msec;

	if (kstrtoul(buf, 0, &msec))
		return -EINVAL;

	lb_interval_jiffies = msec * HZ / 1000;

	return count;
}
static DEFINE_MUTEX(lb_mutex);
/*
 * Enforce the lb_interval_jiffies rate limit by sleeping (interruptibly)
 * until the next allowed timeslot. Return 0 if able to throttle correctly,
 * -EINTR if the sleep was cut short by a signal.
 */
static int lb_throttle(void)
{
	static unsigned long last_access;
	unsigned long now, next_timeslot;
	long delay;
	int ret = 0;

	mutex_lock(&lb_mutex);

	now = jiffies;
	next_timeslot = last_access + lb_interval_jiffies;

	if (time_before(now, next_timeslot)) {
		delay = (long)(next_timeslot) - (long)now;
		set_current_state(TASK_INTERRUPTIBLE);
		if (schedule_timeout(delay) > 0) {
			/* interrupted - just abort */
			ret = -EINTR;
			goto out;
		}
		now = jiffies;
	}

	last_access = now;
out:
	mutex_unlock(&lb_mutex);

	return ret;
}
static struct cros_ec_command *alloc_lightbar_cmd_msg(struct cros_ec_dev *ec)
{
struct cros_ec_command *msg;
int len;
len = max(sizeof(struct ec_params_lightbar),
sizeof(struct ec_response_lightbar));
msg = kmalloc(sizeof(*msg) + len, GFP_KERNEL);
if (!msg)
return NULL;
msg->version = 0;
msg->command = EC_CMD_LIGHTBAR_CMD + ec->cmd_offset;
msg->outsize = sizeof(struct ec_params_lightbar);
msg->insize = sizeof(struct ec_response_lightbar);
return msg;
}
/*
 * Query the EC for its lightbar version.
 *
 * Returns 1 if a lightbar is present (filling *ver_ptr / *flg_ptr when
 * non-NULL), 0 if there is no lightbar or the transfer failed. The
 * original Pixel EC predates LIGHTBAR_CMD_VERSION and answers
 * EC_RES_INVALID_PARAM; that still counts as "present" with version and
 * flags of 0.
 */
static int get_lightbar_version(struct cros_ec_dev *ec,
				uint32_t *ver_ptr, uint32_t *flg_ptr)
{
	struct ec_params_lightbar *param;
	struct ec_response_lightbar *resp;
	struct cros_ec_command *msg;
	int ret;

	msg = alloc_lightbar_cmd_msg(ec);
	if (!msg)
		return 0;

	param = (struct ec_params_lightbar *)msg->data;
	param->cmd = LIGHTBAR_CMD_VERSION;
	msg->outsize = sizeof(param->cmd);
	/*
	 * Fix: size the expected response via msg->insize. The previous code
	 * wrote this into msg->result (the EC status byte), which both left
	 * insize at the full response size and clobbered the result field
	 * inspected below.
	 */
	msg->insize = sizeof(resp->version);
	ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
	if (ret < 0 && ret != -EINVAL) {
		ret = 0;
		goto exit;
	}

	switch (msg->result) {
	case EC_RES_INVALID_PARAM:
		/* Pixel had no version command. */
		if (ver_ptr)
			*ver_ptr = 0;
		if (flg_ptr)
			*flg_ptr = 0;
		ret = 1;
		goto exit;

	case EC_RES_SUCCESS:
		resp = (struct ec_response_lightbar *)msg->data;

		/* Future devices w/lightbars should implement this command */
		if (ver_ptr)
			*ver_ptr = resp->version.num;
		if (flg_ptr)
			*flg_ptr = resp->version.flags;
		ret = 1;
		goto exit;
	}

	/* Anything else (ie, EC_RES_INVALID_COMMAND) - no lightbar */
	ret = 0;
exit:
	kfree(msg);
	return ret;
}
/* Show "<version> <flags>" as reported by the EC's lightbar. */
static ssize_t version_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	uint32_t version = 0, flags = 0;
	struct cros_ec_dev *ec = to_cros_ec_dev(dev);
	int ret;

	ret = lb_throttle();
	if (ret)
		return ret;

	/* This should always succeed, because we check during init. */
	if (!get_lightbar_version(ec, &version, &flags))
		return -EIO;

	return sysfs_emit(buf, "%d %d\n", version, flags);
}
/* Set the lightbar brightness from a user-supplied integer string. */
static ssize_t brightness_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct cros_ec_dev *ec = to_cros_ec_dev(dev);
	struct ec_params_lightbar *param;
	struct cros_ec_command *msg;
	unsigned int brightness;
	int ret;

	if (kstrtouint(buf, 0, &brightness))
		return -EINVAL;

	msg = alloc_lightbar_cmd_msg(ec);
	if (!msg)
		return -ENOMEM;

	param = (struct ec_params_lightbar *)msg->data;
	param->cmd = LIGHTBAR_CMD_SET_BRIGHTNESS;
	param->set_brightness.num = brightness;

	ret = lb_throttle();
	if (!ret) {
		ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
		if (ret >= 0)
			ret = count;
	}

	kfree(msg);
	return ret;
}
/*
 * We expect numbers, and we'll keep reading until we find them, skipping over
 * any whitespace (sysfs guarantees that the input is null-terminated). Every
 * four numbers are sent to the lightbar as <LED,R,G,B>. We fail at the first
 * parsing error, if we don't parse any numbers, or if we have numbers left
 * over.
 */
static ssize_t led_rgb_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ec_params_lightbar *param;
	struct cros_ec_command *msg;
	struct cros_ec_dev *ec = to_cros_ec_dev(dev);
	unsigned int val[4];
	int ret, i = 0, j = 0, ok = 0;

	msg = alloc_lightbar_cmd_msg(ec);
	if (!msg)
		return -ENOMEM;

	do {
		/* Skip any whitespace */
		while (*buf && isspace(*buf))
			buf++;

		if (!*buf)
			break;

		ret = sscanf(buf, "%i", &val[i++]);
		if (ret == 0)
			goto exit;

		/* A complete <LED,R,G,B> quadruple: send it. */
		if (i == 4) {
			param = (struct ec_params_lightbar *)msg->data;
			param->cmd = LIGHTBAR_CMD_SET_RGB;
			param->set_rgb.led = val[0];
			param->set_rgb.red = val[1];
			param->set_rgb.green = val[2];
			param->set_rgb.blue = val[3];

			/*
			 * Throttle only the first of every four transactions,
			 * so that the user can update all four LEDs at once.
			 */
			if ((j++ % 4) == 0) {
				ret = lb_throttle();
				if (ret)
					goto exit;
			}

			ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
			if (ret < 0)
				goto exit;

			i = 0;
			ok = 1;
		}

		/* Skip over the number we just read */
		while (*buf && !isspace(*buf))
			buf++;

	} while (*buf);

exit:
	kfree(msg);
	/* Success only if we parsed at least one quadruple with none left over. */
	return (ok && i == 0) ? count : -EINVAL;
}
/* Human-readable names for the EC lightbar sequences, indexed by number. */
static char const *seqname[] = {
	"ERROR", "S5", "S3", "S0", "S5S3", "S3S0",
	"S0S3", "S3S5", "STOP", "RUN", "KONAMI",
	"TAP", "PROGRAM",
};

/* Show the currently-running sequence by name (raw number if unknown). */
static ssize_t sequence_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct ec_params_lightbar *param;
	struct ec_response_lightbar *resp;
	struct cros_ec_command *msg;
	int ret;
	struct cros_ec_dev *ec = to_cros_ec_dev(dev);

	msg = alloc_lightbar_cmd_msg(ec);
	if (!msg)
		return -ENOMEM;

	param = (struct ec_params_lightbar *)msg->data;
	param->cmd = LIGHTBAR_CMD_GET_SEQ;
	ret = lb_throttle();
	if (ret)
		goto exit;

	ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
	if (ret < 0) {
		ret = sysfs_emit(buf, "XFER / EC ERROR %d / %d\n", ret, msg->result);
		goto exit;
	}

	resp = (struct ec_response_lightbar *)msg->data;
	if (resp->get_seq.num >= ARRAY_SIZE(seqname))
		ret = sysfs_emit(buf, "%d\n", resp->get_seq.num);
	else
		ret = sysfs_emit(buf, "%s\n", seqname[resp->get_seq.num]);

exit:
	kfree(msg);
	return ret;
}
/* Send a parameterless lightbar command @cmd to the EC; 0 or -errno. */
static int lb_send_empty_cmd(struct cros_ec_dev *ec, uint8_t cmd)
{
	struct ec_params_lightbar *param;
	struct cros_ec_command *msg;
	int ret;

	msg = alloc_lightbar_cmd_msg(ec);
	if (!msg)
		return -ENOMEM;

	param = (struct ec_params_lightbar *)msg->data;
	param->cmd = cmd;

	ret = lb_throttle();
	if (ret)
		goto error;

	ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
	if (ret >= 0)
		ret = 0;

error:
	kfree(msg);
	return ret;
}
/*
 * Enable/disable manual (host-driven) suspend control of the lightbar.
 * While enabled, the EC expects the host to send SUSPEND/RESUME sequences.
 * Returns 0 or a negative errno.
 */
static int lb_manual_suspend_ctrl(struct cros_ec_dev *ec, uint8_t enable)
{
	struct ec_params_lightbar *param;
	struct cros_ec_command *msg;
	int ret;

	msg = alloc_lightbar_cmd_msg(ec);
	if (!msg)
		return -ENOMEM;

	param = (struct ec_params_lightbar *)msg->data;

	param->cmd = LIGHTBAR_CMD_MANUAL_SUSPEND_CTRL;
	param->manual_suspend_ctrl.enable = enable;

	ret = lb_throttle();
	if (ret)
		goto error;

	ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
	if (ret < 0)
		goto error;

	ret = 0;
error:
	kfree(msg);

	return ret;
}
/*
 * Set the lightbar sequence, accepting either a case-insensitive name
 * from seqname[] or a raw sequence number.
 */
static ssize_t sequence_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct ec_params_lightbar *param;
	struct cros_ec_command *msg;
	unsigned int num;
	int ret, len;
	struct cros_ec_dev *ec = to_cros_ec_dev(dev);

	/* Measure the leading alphanumeric token. */
	for (len = 0; len < count; len++)
		if (!isalnum(buf[len]))
			break;

	for (num = 0; num < ARRAY_SIZE(seqname); num++)
		if (!strncasecmp(seqname[num], buf, len))
			break;

	/* Not a known name: try to parse it as a number instead. */
	if (num >= ARRAY_SIZE(seqname)) {
		ret = kstrtouint(buf, 0, &num);
		if (ret)
			return ret;
	}

	msg = alloc_lightbar_cmd_msg(ec);
	if (!msg)
		return -ENOMEM;

	param = (struct ec_params_lightbar *)msg->data;
	param->cmd = LIGHTBAR_CMD_SEQ;
	param->seq.num = num;
	ret = lb_throttle();
	if (ret)
		goto exit;

	ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
	if (ret < 0)
		goto exit;

	ret = count;
exit:
	kfree(msg);
	return ret;
}
/* Upload a lightbar program blob to the EC (LIGHTBAR_CMD_SET_PROGRAM). */
static ssize_t program_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	int extra_bytes, max_size, ret;
	struct ec_params_lightbar *param;
	struct cros_ec_command *msg;
	struct cros_ec_dev *ec = to_cros_ec_dev(dev);

	/*
	 * We might need to reject the program for size reasons. The EC
	 * enforces a maximum program size, but we also don't want to try
	 * and send a program that is too big for the protocol. In order
	 * to ensure the latter, we also need to ensure we have extra bytes
	 * to represent the rest of the packet.
	 */
	extra_bytes = sizeof(*param) - sizeof(param->set_program.data);
	max_size = min(EC_LB_PROG_LEN, ec->ec_dev->max_request - extra_bytes);
	if (count > max_size) {
		dev_err(dev, "Program is %u bytes, too long to send (max: %u)",
			(unsigned int)count, max_size);

		return -EINVAL;
	}

	msg = alloc_lightbar_cmd_msg(ec);
	if (!msg)
		return -ENOMEM;

	ret = lb_throttle();
	if (ret)
		goto exit;

	dev_info(dev, "Copying %zu byte program to EC", count);

	param = (struct ec_params_lightbar *)msg->data;
	param->cmd = LIGHTBAR_CMD_SET_PROGRAM;

	param->set_program.size = count;
	memcpy(param->set_program.data, buf, count);

	/*
	 * We need to set the message size manually or else it will use
	 * EC_LB_PROG_LEN. This might be too long, and the program
	 * is unlikely to use all of the space.
	 */
	msg->outsize = count + extra_bytes;

	ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
	if (ret < 0)
		goto exit;

	ret = count;
exit:
	kfree(msg);

	return ret;
}
/* Report whether userspace currently owns the lightbar (skips PM hooks). */
static ssize_t userspace_control_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return sysfs_emit(buf, "%d\n", userspace_control);
}

/* Hand lightbar control to userspace (true) or back to the kernel (false). */
static ssize_t userspace_control_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf,
				       size_t count)
{
	bool val;
	int err;

	err = kstrtobool(buf, &val);
	if (err < 0)
		return err;

	userspace_control = val;

	return count;
}
/* Module initialization */

static DEVICE_ATTR_RW(interval_msec);
static DEVICE_ATTR_RO(version);
static DEVICE_ATTR_WO(brightness);
static DEVICE_ATTR_WO(led_rgb);
static DEVICE_ATTR_RW(sequence);
static DEVICE_ATTR_WO(program);
static DEVICE_ATTR_RW(userspace_control);

/* All lightbar attributes, exposed under the "lightbar" sysfs directory. */
static struct attribute *__lb_cmds_attrs[] = {
	&dev_attr_interval_msec.attr,
	&dev_attr_version.attr,
	&dev_attr_brightness.attr,
	&dev_attr_led_rgb.attr,
	&dev_attr_sequence.attr,
	&dev_attr_program.attr,
	&dev_attr_userspace_control.attr,
	NULL,
};

static const struct attribute_group cros_ec_lightbar_attr_group = {
	.name = "lightbar",
	.attrs = __lb_cmds_attrs,
};
/*
 * Bind to the cros_ec parent: only the main "cros_ec" device may have a
 * lightbar, and only if the EC answers the version query. On success the
 * lightbar is placed under manual suspend control and the sysfs group is
 * created on the parent class device.
 */
static int cros_ec_lightbar_probe(struct platform_device *pd)
{
	struct cros_ec_dev *ec_dev = dev_get_drvdata(pd->dev.parent);
	struct cros_ec_platform *pdata = dev_get_platdata(ec_dev->dev);
	struct device *dev = &pd->dev;
	int ret;

	/*
	 * Only instantiate the lightbar if the EC name is 'cros_ec'. Other EC
	 * devices like 'cros_pd' doesn't have a lightbar.
	 */
	if (strcmp(pdata->ec_name, CROS_EC_DEV_NAME) != 0)
		return -ENODEV;

	/*
	 * Ask then for the lightbar version, if it's 0 then the 'cros_ec'
	 * doesn't have a lightbar.
	 */
	if (!get_lightbar_version(ec_dev, NULL, NULL))
		return -ENODEV;

	/* Take control of the lightbar from the EC. */
	lb_manual_suspend_ctrl(ec_dev, 1);

	ret = sysfs_create_group(&ec_dev->class_dev.kobj,
				 &cros_ec_lightbar_attr_group);
	if (ret < 0)
		dev_err(dev, "failed to create %s attributes. err=%d\n",
			cros_ec_lightbar_attr_group.name, ret);

	return ret;
}
/* Unbind: remove the sysfs group and hand lightbar control back to the EC. */
static int cros_ec_lightbar_remove(struct platform_device *pd)
{
	struct cros_ec_dev *ec_dev = dev_get_drvdata(pd->dev.parent);

	sysfs_remove_group(&ec_dev->class_dev.kobj,
			   &cros_ec_lightbar_attr_group);

	/* Let the EC take over the lightbar again. */
	lb_manual_suspend_ctrl(ec_dev, 0);

	return 0;
}

/* PM resume: run the EC resume sequence, unless userspace owns the bar. */
static int __maybe_unused cros_ec_lightbar_resume(struct device *dev)
{
	struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);

	if (userspace_control)
		return 0;

	return lb_send_empty_cmd(ec_dev, LIGHTBAR_CMD_RESUME);
}

/* PM suspend: run the EC suspend sequence, unless userspace owns the bar. */
static int __maybe_unused cros_ec_lightbar_suspend(struct device *dev)
{
	struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);

	if (userspace_control)
		return 0;

	return lb_send_empty_cmd(ec_dev, LIGHTBAR_CMD_SUSPEND);
}
static SIMPLE_DEV_PM_OPS(cros_ec_lightbar_pm_ops,
			 cros_ec_lightbar_suspend, cros_ec_lightbar_resume);

/* Platform driver bound by the cros_ec MFD cell named DRV_NAME. */
static struct platform_driver cros_ec_lightbar_driver = {
	.driver = {
		.name = DRV_NAME,
		.pm = &cros_ec_lightbar_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe = cros_ec_lightbar_probe,
	.remove = cros_ec_lightbar_remove,
};

module_platform_driver(cros_ec_lightbar_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Expose the Chromebook Pixel's lightbar to userspace");
MODULE_ALIAS("platform:" DRV_NAME);
| linux-master | drivers/platform/chrome/cros_ec_lightbar.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Kunit tests for ChromeOS Embedded Controller protocol.
*/
#include <kunit/test.h>
#include <asm/unaligned.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include "cros_ec.h"
#include "cros_kunit_util.h"
#define BUFSIZE 512

/* Per-test fixture: a fake EC device plus statically-sized I/O buffers. */
struct cros_ec_proto_test_priv {
	struct cros_ec_device ec_dev;
	u8 dout[BUFSIZE];
	u8 din[BUFSIZE];
	struct cros_ec_command *msg;
	u8 _msg[BUFSIZE];
};
/*
 * Protocol v2 framing: verify header bytes, payload copy, zero padding
 * of the unused tail, and the trailing additive checksum.
 */
static void cros_ec_proto_test_prepare_tx_legacy_normal(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct cros_ec_command *msg = priv->msg;
	int ret, i;
	u8 csum;

	ec_dev->proto_version = 2;

	msg->command = EC_CMD_HELLO;
	msg->outsize = EC_PROTO2_MAX_PARAM_SIZE;
	msg->data[0] = 0xde;
	msg->data[1] = 0xad;
	msg->data[2] = 0xbe;
	msg->data[3] = 0xef;

	ret = cros_ec_prepare_tx(ec_dev, msg);

	KUNIT_EXPECT_EQ(test, ret, EC_MSG_TX_PROTO_BYTES + EC_PROTO2_MAX_PARAM_SIZE);

	KUNIT_EXPECT_EQ(test, ec_dev->dout[0], EC_CMD_VERSION0);
	KUNIT_EXPECT_EQ(test, ec_dev->dout[1], EC_CMD_HELLO);
	KUNIT_EXPECT_EQ(test, ec_dev->dout[2], EC_PROTO2_MAX_PARAM_SIZE);
	KUNIT_EXPECT_EQ(test, EC_MSG_TX_HEADER_BYTES, 3);
	KUNIT_EXPECT_EQ(test, ec_dev->dout[EC_MSG_TX_HEADER_BYTES + 0], 0xde);
	KUNIT_EXPECT_EQ(test, ec_dev->dout[EC_MSG_TX_HEADER_BYTES + 1], 0xad);
	KUNIT_EXPECT_EQ(test, ec_dev->dout[EC_MSG_TX_HEADER_BYTES + 2], 0xbe);
	KUNIT_EXPECT_EQ(test, ec_dev->dout[EC_MSG_TX_HEADER_BYTES + 3], 0xef);
	/* The rest of the payload must be zero-padded. */
	for (i = 4; i < EC_PROTO2_MAX_PARAM_SIZE; ++i)
		KUNIT_EXPECT_EQ(test, ec_dev->dout[EC_MSG_TX_HEADER_BYTES + i], 0);

	csum = EC_CMD_VERSION0;
	csum += EC_CMD_HELLO;
	csum += EC_PROTO2_MAX_PARAM_SIZE;
	csum += 0xde;
	csum += 0xad;
	csum += 0xbe;
	csum += 0xef;
	KUNIT_EXPECT_EQ(test,
			ec_dev->dout[EC_MSG_TX_HEADER_BYTES + EC_PROTO2_MAX_PARAM_SIZE],
			csum);
}

/* Protocol v2: an oversized outsize must be rejected with -EINVAL. */
static void cros_ec_proto_test_prepare_tx_legacy_bad_msg_outsize(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct cros_ec_command *msg = priv->msg;
	int ret;

	ec_dev->proto_version = 2;

	msg->outsize = EC_PROTO2_MAX_PARAM_SIZE + 1;

	ret = cros_ec_prepare_tx(ec_dev, msg);
	KUNIT_EXPECT_EQ(test, ret, -EINVAL);
}
/*
 * Protocol v3 framing: verify the ec_host_request header fields, payload
 * copy, zero padding, and the two's-complement checksum.
 */
static void cros_ec_proto_test_prepare_tx_normal(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct cros_ec_command *msg = priv->msg;
	struct ec_host_request *request = (struct ec_host_request *)ec_dev->dout;
	int ret, i;
	u8 csum;

	msg->command = EC_CMD_HELLO;
	msg->outsize = 0x88;
	msg->data[0] = 0xde;
	msg->data[1] = 0xad;
	msg->data[2] = 0xbe;
	msg->data[3] = 0xef;

	ret = cros_ec_prepare_tx(ec_dev, msg);

	KUNIT_EXPECT_EQ(test, ret, sizeof(*request) + 0x88);

	KUNIT_EXPECT_EQ(test, request->struct_version, EC_HOST_REQUEST_VERSION);
	KUNIT_EXPECT_EQ(test, request->command, EC_CMD_HELLO);
	KUNIT_EXPECT_EQ(test, request->command_version, 0);
	KUNIT_EXPECT_EQ(test, request->data_len, 0x88);
	KUNIT_EXPECT_EQ(test, ec_dev->dout[sizeof(*request) + 0], 0xde);
	KUNIT_EXPECT_EQ(test, ec_dev->dout[sizeof(*request) + 1], 0xad);
	KUNIT_EXPECT_EQ(test, ec_dev->dout[sizeof(*request) + 2], 0xbe);
	KUNIT_EXPECT_EQ(test, ec_dev->dout[sizeof(*request) + 3], 0xef);
	/* The rest of the payload must be zero-padded. */
	for (i = 4; i < 0x88; ++i)
		KUNIT_EXPECT_EQ(test, ec_dev->dout[sizeof(*request) + i], 0);

	csum = EC_HOST_REQUEST_VERSION;
	csum += EC_CMD_HELLO;
	csum += 0x88;
	csum += 0xde;
	csum += 0xad;
	csum += 0xbe;
	csum += 0xef;
	/* v3 checksum is the negated 8-bit sum of header + payload. */
	KUNIT_EXPECT_EQ(test, request->checksum, (u8)-csum);
}

/* Protocol v3: outsize exceeding dout capacity must yield -EINVAL. */
static void cros_ec_proto_test_prepare_tx_bad_msg_outsize(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct cros_ec_command *msg = priv->msg;
	int ret;

	msg->outsize = ec_dev->dout_size - sizeof(struct ec_host_request) + 1;

	ret = cros_ec_prepare_tx(ec_dev, msg);
	KUNIT_EXPECT_EQ(test, ret, -EINVAL);
}
/*
 * cros_ec_check_result(): every final EC status code maps to 0; only the
 * transient EC_RES_IN_PROGRESS maps to -EAGAIN (caller should retry).
 */
static void cros_ec_proto_test_check_result(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct cros_ec_command *msg = priv->msg;
	int ret, i;
	static enum ec_status status[] = {
		EC_RES_SUCCESS,
		EC_RES_INVALID_COMMAND,
		EC_RES_ERROR,
		EC_RES_INVALID_PARAM,
		EC_RES_ACCESS_DENIED,
		EC_RES_INVALID_RESPONSE,
		EC_RES_INVALID_VERSION,
		EC_RES_INVALID_CHECKSUM,
		EC_RES_UNAVAILABLE,
		EC_RES_TIMEOUT,
		EC_RES_OVERFLOW,
		EC_RES_INVALID_HEADER,
		EC_RES_REQUEST_TRUNCATED,
		EC_RES_RESPONSE_TOO_BIG,
		EC_RES_BUS_ERROR,
		EC_RES_BUSY,
		EC_RES_INVALID_HEADER_VERSION,
		EC_RES_INVALID_HEADER_CRC,
		EC_RES_INVALID_DATA_CRC,
		EC_RES_DUP_UNAVAILABLE,
	};

	for (i = 0; i < ARRAY_SIZE(status); ++i) {
		msg->result = status[i];
		ret = cros_ec_check_result(ec_dev, msg);
		KUNIT_EXPECT_EQ(test, ret, 0);
	}

	msg->result = EC_RES_IN_PROGRESS;
	ret = cros_ec_check_result(ec_dev, msg);
	KUNIT_EXPECT_EQ(test, ret, -EAGAIN);
}
/* Reset fixture buffers before exercising cros_ec_query_all(). */
static void cros_ec_proto_test_query_all_pretest(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;

	/*
	 * cros_ec_query_all() will free din and dout and allocate them again to fit the usage by
	 * calling devm_kfree() and devm_kzalloc(). Set them to NULL as they aren't managed by
	 * ec_dev->dev but allocated statically in struct cros_ec_proto_test_priv
	 * (see cros_ec_proto_test_init()).
	 */
	ec_dev->din = NULL;
	ec_dev->dout = NULL;
}
/*
 * Happy path for cros_ec_query_all(): the EC answers every probe, so the
 * device ends up on protocol v3 with passthru sizes, MKBP event support,
 * host sleep v1 support and a host event wake mask.
 *
 * The mock transfer queue is FIFO: replies are enqueued with
 * cros_kunit_ec_xfer_mock_add() in the exact order cros_ec_query_all()
 * issues commands, then each recorded request is popped with
 * cros_kunit_ec_xfer_mock_next() and its fields verified.
 */
static void cros_ec_proto_test_query_all_normal(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct ec_xfer_mock *mock;
	int ret;
	/* For cros_ec_get_proto_info() without passthru. */
	{
		struct ec_response_get_protocol_info *data;
		mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
		data = (struct ec_response_get_protocol_info *)mock->o_data;
		/* Highest set bit (3) is expected to win as proto_version. */
		data->protocol_versions = BIT(3) | BIT(2);
		data->max_request_packet_size = 0xbe;
		data->max_response_packet_size = 0xef;
	}
	/* For cros_ec_get_proto_info() with passthru. */
	{
		struct ec_response_get_protocol_info *data;
		mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
		data = (struct ec_response_get_protocol_info *)mock->o_data;
		data->max_request_packet_size = 0xbf;
	}
	/* For cros_ec_get_host_command_version_mask() for MKBP. */
	{
		struct ec_response_get_cmd_versions *data;
		mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
		data = (struct ec_response_get_cmd_versions *)mock->o_data;
		/* Highest set bit (6) is expected as mkbp_event_supported + 1. */
		data->version_mask = BIT(6) | BIT(5);
	}
	/* For cros_ec_get_host_command_version_mask() for host sleep v1. */
	{
		struct ec_response_get_cmd_versions *data;
		mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
		data = (struct ec_response_get_cmd_versions *)mock->o_data;
		data->version_mask = BIT(1);
	}
	/* For cros_ec_get_host_event_wake_mask(). */
	{
		struct ec_response_host_event_mask *data;
		mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
		data = (struct ec_response_host_event_mask *)mock->o_data;
		data->mask = 0xbeef;
	}
	cros_ec_proto_test_query_all_pretest(test);
	ret = cros_ec_query_all(ec_dev);
	KUNIT_EXPECT_EQ(test, ret, 0);
	/* For cros_ec_get_proto_info() without passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_protocol_info));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
		/* Packet sizes minus the header overhead become max payloads. */
		KUNIT_EXPECT_EQ(test, ec_dev->max_request, 0xbe - sizeof(struct ec_host_request));
		KUNIT_EXPECT_EQ(test, ec_dev->max_response, 0xef - sizeof(struct ec_host_response));
		KUNIT_EXPECT_EQ(test, ec_dev->proto_version, 3);
		KUNIT_EXPECT_EQ(test, ec_dev->din_size, 0xef + EC_MAX_RESPONSE_OVERHEAD);
		KUNIT_EXPECT_EQ(test, ec_dev->dout_size, 0xbe + EC_MAX_REQUEST_OVERHEAD);
	}
	/* For cros_ec_get_proto_info() with passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command,
				EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
				EC_CMD_GET_PROTOCOL_INFO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_protocol_info));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
		KUNIT_EXPECT_EQ(test, ec_dev->max_passthru, 0xbf - sizeof(struct ec_host_request));
	}
	/* For cros_ec_get_host_command_version_mask() for MKBP. */
	{
		struct ec_params_get_cmd_versions *data;
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_cmd_versions));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
		data = (struct ec_params_get_cmd_versions *)mock->i_data;
		KUNIT_EXPECT_EQ(test, data->cmd, EC_CMD_GET_NEXT_EVENT);
		KUNIT_EXPECT_EQ(test, ec_dev->mkbp_event_supported, 7);
	}
	/* For cros_ec_get_host_command_version_mask() for host sleep v1. */
	{
		struct ec_params_get_cmd_versions *data;
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_cmd_versions));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
		data = (struct ec_params_get_cmd_versions *)mock->i_data;
		KUNIT_EXPECT_EQ(test, data->cmd, EC_CMD_HOST_SLEEP_EVENT);
		KUNIT_EXPECT_TRUE(test, ec_dev->host_sleep_v1);
	}
	/* For cros_ec_get_host_event_wake_mask(). */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HOST_EVENT_GET_WAKE_MASK);
		KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_host_event_mask));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
		KUNIT_EXPECT_EQ(test, ec_dev->host_event_wake_mask, 0xbeef);
	}
}
/*
 * If the PD passthru probe fails with an EC error status, cros_ec_query_all()
 * must still succeed overall and reset max_passthru to 0.
 */
static void cros_ec_proto_test_query_all_no_pd_return_error(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct ec_xfer_mock *mock;
	int ret;
	/* Set some garbage bytes. */
	ec_dev->max_passthru = 0xbf;
	/* For cros_ec_get_proto_info() without passthru. */
	{
		struct ec_response_get_protocol_info *data;
		mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
		/*
		 * Although it doesn't check the value, provides valid sizes so that
		 * cros_ec_query_all() allocates din and dout correctly.
		 */
		data = (struct ec_response_get_protocol_info *)mock->o_data;
		data->max_request_packet_size = 0xbe;
		data->max_response_packet_size = 0xef;
	}
	/* For cros_ec_get_proto_info() with passthru. */
	{
		/* The PD probe replies with EC_RES_INVALID_COMMAND. */
		mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	cros_ec_proto_test_query_all_pretest(test);
	ret = cros_ec_query_all(ec_dev);
	KUNIT_EXPECT_EQ(test, ret, 0);
	/* For cros_ec_get_proto_info() without passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_protocol_info));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
	}
	/* For cros_ec_get_proto_info() with passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command,
				EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
				EC_CMD_GET_PROTOCOL_INFO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_protocol_info));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
		/* The garbage value must have been cleared. */
		KUNIT_EXPECT_EQ(test, ec_dev->max_passthru, 0);
	}
}
/*
 * If the PD passthru probe "succeeds" but returns a zero-length payload,
 * cros_ec_query_all() must still succeed overall and reset max_passthru to 0.
 */
static void cros_ec_proto_test_query_all_no_pd_return0(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct ec_xfer_mock *mock;
	int ret;
	/* Set some garbage bytes. */
	ec_dev->max_passthru = 0xbf;
	/* For cros_ec_get_proto_info() without passthru. */
	{
		struct ec_response_get_protocol_info *data;
		mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
		/*
		 * Although it doesn't check the value, provides valid sizes so that
		 * cros_ec_query_all() allocates din and dout correctly.
		 */
		data = (struct ec_response_get_protocol_info *)mock->o_data;
		data->max_request_packet_size = 0xbe;
		data->max_response_packet_size = 0xef;
	}
	/* For cros_ec_get_proto_info() with passthru. */
	{
		/* Success status but an empty (0-byte) reply. */
		mock = cros_kunit_ec_xfer_mock_add(test, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	cros_ec_proto_test_query_all_pretest(test);
	ret = cros_ec_query_all(ec_dev);
	KUNIT_EXPECT_EQ(test, ret, 0);
	/* For cros_ec_get_proto_info() without passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_protocol_info));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
	}
	/* For cros_ec_get_proto_info() with passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command,
				EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
				EC_CMD_GET_PROTOCOL_INFO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_protocol_info));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
		/* The garbage value must have been cleared. */
		KUNIT_EXPECT_EQ(test, ec_dev->max_passthru, 0);
	}
}
/*
 * When the v3 protocol probe fails with an EC error status,
 * cros_ec_query_all() must fall back to the legacy (v2) probe via
 * EC_CMD_HELLO and configure the device with protocol-2 limits.
 */
static void cros_ec_proto_test_query_all_legacy_normal_v3_return_error(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct ec_xfer_mock *mock;
	int ret;
	/* For cros_ec_get_proto_info() without passthru. */
	{
		/* v3 probe answered with EC_RES_INVALID_COMMAND. */
		mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	/* For cros_ec_get_proto_info_legacy(). */
	{
		struct ec_response_hello *data;
		mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
		data = (struct ec_response_hello *)mock->o_data;
		/* Expected echo of in_data 0xa0b0c0d0 + 0x01020304. */
		data->out_data = 0xa1b2c3d4;
	}
	cros_ec_proto_test_query_all_pretest(test);
	ret = cros_ec_query_all(ec_dev);
	KUNIT_EXPECT_EQ(test, ret, 0);
	/* For cros_ec_get_proto_info() without passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_protocol_info));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
	}
	/* For cros_ec_get_proto_info_legacy(). */
	{
		struct ec_params_hello *data;
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
		data = (struct ec_params_hello *)mock->i_data;
		KUNIT_EXPECT_EQ(test, data->in_data, 0xa0b0c0d0);
		/* Legacy fallback pins the device to protocol-2 parameters. */
		KUNIT_EXPECT_EQ(test, ec_dev->proto_version, 2);
		KUNIT_EXPECT_EQ(test, ec_dev->max_request, EC_PROTO2_MAX_PARAM_SIZE);
		KUNIT_EXPECT_EQ(test, ec_dev->max_response, EC_PROTO2_MAX_PARAM_SIZE);
		KUNIT_EXPECT_EQ(test, ec_dev->max_passthru, 0);
		KUNIT_EXPECT_PTR_EQ(test, ec_dev->pkt_xfer, NULL);
		KUNIT_EXPECT_EQ(test, ec_dev->din_size, EC_PROTO2_MSG_BYTES);
		KUNIT_EXPECT_EQ(test, ec_dev->dout_size, EC_PROTO2_MSG_BYTES);
	}
}
/*
 * Same as the _v3_return_error case, but the v3 probe "succeeds" with a
 * zero-length payload: cros_ec_query_all() must also fall back to the
 * legacy (v2) probe and configure protocol-2 limits.
 */
static void cros_ec_proto_test_query_all_legacy_normal_v3_return0(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct ec_xfer_mock *mock;
	int ret;
	/* For cros_ec_get_proto_info() without passthru. */
	{
		/* Success status, but empty (0-byte) reply. */
		mock = cros_kunit_ec_xfer_mock_add(test, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	/* For cros_ec_get_proto_info_legacy(). */
	{
		struct ec_response_hello *data;
		mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
		data = (struct ec_response_hello *)mock->o_data;
		/* Expected echo of in_data 0xa0b0c0d0 + 0x01020304. */
		data->out_data = 0xa1b2c3d4;
	}
	cros_ec_proto_test_query_all_pretest(test);
	ret = cros_ec_query_all(ec_dev);
	KUNIT_EXPECT_EQ(test, ret, 0);
	/* For cros_ec_get_proto_info() without passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_protocol_info));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
	}
	/* For cros_ec_get_proto_info_legacy(). */
	{
		struct ec_params_hello *data;
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
		data = (struct ec_params_hello *)mock->i_data;
		KUNIT_EXPECT_EQ(test, data->in_data, 0xa0b0c0d0);
		/* Legacy fallback pins the device to protocol-2 parameters. */
		KUNIT_EXPECT_EQ(test, ec_dev->proto_version, 2);
		KUNIT_EXPECT_EQ(test, ec_dev->max_request, EC_PROTO2_MAX_PARAM_SIZE);
		KUNIT_EXPECT_EQ(test, ec_dev->max_response, EC_PROTO2_MAX_PARAM_SIZE);
		KUNIT_EXPECT_EQ(test, ec_dev->max_passthru, 0);
		KUNIT_EXPECT_PTR_EQ(test, ec_dev->pkt_xfer, NULL);
		KUNIT_EXPECT_EQ(test, ec_dev->din_size, EC_PROTO2_MSG_BYTES);
		KUNIT_EXPECT_EQ(test, ec_dev->dout_size, EC_PROTO2_MSG_BYTES);
	}
}
/*
 * If the legacy EC_CMD_HELLO transfer itself fails (-EIO at the transport
 * layer), cros_ec_query_all() must propagate -EIO and leave the protocol
 * version unknown.
 */
static void cros_ec_proto_test_query_all_legacy_xfer_error(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct ec_xfer_mock *mock;
	int ret;
	/* For cros_ec_get_proto_info() without passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	/* For cros_ec_get_proto_info_legacy(). */
	{
		/* Transport-level failure on the HELLO command. */
		mock = cros_kunit_ec_xfer_mock_addx(test, -EIO, EC_RES_SUCCESS, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	cros_ec_proto_test_query_all_pretest(test);
	ret = cros_ec_query_all(ec_dev);
	KUNIT_EXPECT_EQ(test, ret, -EIO);
	KUNIT_EXPECT_EQ(test, ec_dev->proto_version, EC_PROTO_VERSION_UNKNOWN);
	/* For cros_ec_get_proto_info() without passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_protocol_info));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
	}
	/* For cros_ec_get_proto_info_legacy(). */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_hello));
	}
}
/*
 * If both the v3 probe and the legacy EC_CMD_HELLO probe are rejected by the
 * EC, cros_ec_query_all() must fail with -EOPNOTSUPP and leave the protocol
 * version unknown.
 */
static void cros_ec_proto_test_query_all_legacy_return_error(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct ec_xfer_mock *mock;
	int ret;
	/* For cros_ec_get_proto_info() without passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	/* For cros_ec_get_proto_info_legacy(). */
	{
		mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	cros_ec_proto_test_query_all_pretest(test);
	ret = cros_ec_query_all(ec_dev);
	KUNIT_EXPECT_EQ(test, ret, -EOPNOTSUPP);
	KUNIT_EXPECT_EQ(test, ec_dev->proto_version, EC_PROTO_VERSION_UNKNOWN);
	/* For cros_ec_get_proto_info() without passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_protocol_info));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
	}
	/* For cros_ec_get_proto_info_legacy(). */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_hello));
	}
}
/*
 * If the legacy EC_CMD_HELLO reply carries an unexpected echo value,
 * cros_ec_query_all() must fail with -EBADMSG and leave the protocol
 * version unknown.
 */
static void cros_ec_proto_test_query_all_legacy_data_error(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct ec_xfer_mock *mock;
	int ret;
	/* For cros_ec_get_proto_info() without passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	/* For cros_ec_get_proto_info_legacy(). */
	{
		struct ec_response_hello *data;
		mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
		/* Wrong echo: anything other than 0xa1b2c3d4 is a data error. */
		data = (struct ec_response_hello *)mock->o_data;
		data->out_data = 0xbeefbfbf;
	}
	cros_ec_proto_test_query_all_pretest(test);
	ret = cros_ec_query_all(ec_dev);
	KUNIT_EXPECT_EQ(test, ret, -EBADMSG);
	KUNIT_EXPECT_EQ(test, ec_dev->proto_version, EC_PROTO_VERSION_UNKNOWN);
	/* For cros_ec_get_proto_info() without passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_protocol_info));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
	}
	/* For cros_ec_get_proto_info_legacy(). */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_hello));
	}
}
/*
 * If the legacy EC_CMD_HELLO reply is truncated (0 bytes received),
 * cros_ec_query_all() must fail with -EPROTO and leave the protocol
 * version unknown.
 */
static void cros_ec_proto_test_query_all_legacy_return0(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct ec_xfer_mock *mock;
	int ret;
	/* For cros_ec_get_proto_info() without passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	/* For cros_ec_get_proto_info_legacy(). */
	{
		/* Success status but an empty (0-byte) HELLO reply. */
		mock = cros_kunit_ec_xfer_mock_add(test, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	cros_ec_proto_test_query_all_pretest(test);
	ret = cros_ec_query_all(ec_dev);
	KUNIT_EXPECT_EQ(test, ret, -EPROTO);
	KUNIT_EXPECT_EQ(test, ec_dev->proto_version, EC_PROTO_VERSION_UNKNOWN);
	/* For cros_ec_get_proto_info() without passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_protocol_info));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
	}
	/* For cros_ec_get_proto_info_legacy(). */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_hello));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_hello));
	}
}
/*
 * If the EC reports a zero version mask for EC_CMD_GET_NEXT_EVENT,
 * cros_ec_query_all() must record that MKBP events are unsupported
 * (mkbp_event_supported == 0) while still succeeding overall.
 */
static void cros_ec_proto_test_query_all_no_mkbp(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct ec_xfer_mock *mock;
	int ret;
	/* Set some garbage bytes. */
	ec_dev->mkbp_event_supported = 0xbf;
	/* For cros_ec_get_proto_info() without passthru. */
	{
		struct ec_response_get_protocol_info *data;
		mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
		/*
		 * Although it doesn't check the value, provides valid sizes so that
		 * cros_ec_query_all() allocates din and dout correctly.
		 */
		data = (struct ec_response_get_protocol_info *)mock->o_data;
		data->max_request_packet_size = 0xbe;
		data->max_response_packet_size = 0xef;
	}
	/* For cros_ec_get_proto_info() with passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	/* For cros_ec_get_host_command_version_mask() for MKBP. */
	{
		struct ec_response_get_cmd_versions *data;
		mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
		/* No supported versions -> MKBP events unavailable. */
		data = (struct ec_response_get_cmd_versions *)mock->o_data;
		data->version_mask = 0;
	}
	cros_ec_proto_test_query_all_pretest(test);
	ret = cros_ec_query_all(ec_dev);
	KUNIT_EXPECT_EQ(test, ret, 0);
	/* For cros_ec_get_proto_info() without passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_protocol_info));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
	}
	/* For cros_ec_get_proto_info() with passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command,
				EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
				EC_CMD_GET_PROTOCOL_INFO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_protocol_info));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
	}
	/* For cros_ec_get_host_command_version_mask() for MKBP. */
	{
		struct ec_params_get_cmd_versions *data;
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_cmd_versions));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
		data = (struct ec_params_get_cmd_versions *)mock->i_data;
		KUNIT_EXPECT_EQ(test, data->cmd, EC_CMD_GET_NEXT_EVENT);
		/* The garbage value must have been cleared. */
		KUNIT_EXPECT_EQ(test, ec_dev->mkbp_event_supported, 0);
	}
}
/*
 * If the EC rejects the MKBP version query with an error status,
 * cros_ec_query_all() must record MKBP events as unsupported
 * (mkbp_event_supported == 0) while still succeeding overall.
 */
static void cros_ec_proto_test_query_all_no_mkbp_return_error(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct ec_xfer_mock *mock;
	int ret;
	/* Set some garbage bytes. */
	ec_dev->mkbp_event_supported = 0xbf;
	/* For cros_ec_get_proto_info() without passthru. */
	{
		struct ec_response_get_protocol_info *data;
		mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
		/*
		 * Although it doesn't check the value, provides valid sizes so that
		 * cros_ec_query_all() allocates din and dout correctly.
		 */
		data = (struct ec_response_get_protocol_info *)mock->o_data;
		data->max_request_packet_size = 0xbe;
		data->max_response_packet_size = 0xef;
	}
	/* For cros_ec_get_proto_info() with passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	/* For cros_ec_get_host_command_version_mask() for MKBP. */
	{
		/* The version query itself is rejected. */
		mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	cros_ec_proto_test_query_all_pretest(test);
	ret = cros_ec_query_all(ec_dev);
	KUNIT_EXPECT_EQ(test, ret, 0);
	/* For cros_ec_get_proto_info() without passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_protocol_info));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
	}
	/* For cros_ec_get_proto_info() with passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command,
				EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
				EC_CMD_GET_PROTOCOL_INFO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_protocol_info));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
	}
	/* For cros_ec_get_host_command_version_mask() for MKBP. */
	{
		struct ec_params_get_cmd_versions *data;
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_cmd_versions));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
		data = (struct ec_params_get_cmd_versions *)mock->i_data;
		KUNIT_EXPECT_EQ(test, data->cmd, EC_CMD_GET_NEXT_EVENT);
		/* The garbage value must have been cleared. */
		KUNIT_EXPECT_EQ(test, ec_dev->mkbp_event_supported, 0);
	}
}
/*
 * If the MKBP version query "succeeds" but returns a zero-length payload,
 * cros_ec_query_all() must record MKBP events as unsupported
 * (mkbp_event_supported == 0) while still succeeding overall.
 */
static void cros_ec_proto_test_query_all_no_mkbp_return0(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct ec_xfer_mock *mock;
	int ret;
	/* Set some garbage bytes. */
	ec_dev->mkbp_event_supported = 0xbf;
	/* For cros_ec_get_proto_info() without passthru. */
	{
		struct ec_response_get_protocol_info *data;
		mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
		/*
		 * Although it doesn't check the value, provides valid sizes so that
		 * cros_ec_query_all() allocates din and dout correctly.
		 */
		data = (struct ec_response_get_protocol_info *)mock->o_data;
		data->max_request_packet_size = 0xbe;
		data->max_response_packet_size = 0xef;
	}
	/* For cros_ec_get_proto_info() with passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	/* For cros_ec_get_host_command_version_mask() for MKBP. */
	{
		/* Success status but an empty (0-byte) reply. */
		mock = cros_kunit_ec_xfer_mock_add(test, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	cros_ec_proto_test_query_all_pretest(test);
	ret = cros_ec_query_all(ec_dev);
	KUNIT_EXPECT_EQ(test, ret, 0);
	/* For cros_ec_get_proto_info() without passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_protocol_info));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
	}
	/* For cros_ec_get_proto_info() with passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command,
				EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
				EC_CMD_GET_PROTOCOL_INFO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_protocol_info));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
	}
	/* For cros_ec_get_host_command_version_mask() for MKBP. */
	{
		struct ec_params_get_cmd_versions *data;
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_cmd_versions));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));
		data = (struct ec_params_get_cmd_versions *)mock->i_data;
		KUNIT_EXPECT_EQ(test, data->cmd, EC_CMD_GET_NEXT_EVENT);
		/* The garbage value must have been cleared. */
		KUNIT_EXPECT_EQ(test, ec_dev->mkbp_event_supported, 0);
	}
}
/*
 * If EC_CMD_HOST_SLEEP_EVENT reports a version mask without bit 1 set,
 * cros_ec_query_all() must clear host_sleep_v1 while still succeeding
 * overall.
 */
static void cros_ec_proto_test_query_all_no_host_sleep(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct ec_xfer_mock *mock;
	int ret;
	/* Set some garbage bytes. */
	ec_dev->host_sleep_v1 = true;
	/* For cros_ec_get_proto_info() without passthru. */
	{
		struct ec_response_get_protocol_info *data;
		mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
		/*
		 * Although it doesn't check the value, provides valid sizes so that
		 * cros_ec_query_all() allocates din and dout correctly.
		 */
		data = (struct ec_response_get_protocol_info *)mock->o_data;
		data->max_request_packet_size = 0xbe;
		data->max_response_packet_size = 0xef;
	}
	/* For cros_ec_get_proto_info() with passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	/* For cros_ec_get_host_command_version_mask() for MKBP. */
	{
		mock = cros_kunit_ec_xfer_mock_add(test, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	/* For cros_ec_get_host_command_version_mask() for host sleep v1. */
	{
		struct ec_response_get_cmd_versions *data;
		mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
		/* Bit 1 (v1) not set -> host sleep v1 unsupported. */
		data = (struct ec_response_get_cmd_versions *)mock->o_data;
		data->version_mask = 0;
	}
	cros_ec_proto_test_query_all_pretest(test);
	ret = cros_ec_query_all(ec_dev);
	KUNIT_EXPECT_EQ(test, ret, 0);
	/* For cros_ec_get_proto_info() without passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_protocol_info));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
	}
	/* For cros_ec_get_proto_info() with passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command,
				EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
				EC_CMD_GET_PROTOCOL_INFO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_protocol_info));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
	}
	/* For cros_ec_get_host_command_version_mask() for MKBP. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_cmd_versions));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
	}
	/* For cros_ec_get_host_command_version_mask() for host sleep v1. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_cmd_versions));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
		/* The garbage value must have been cleared. */
		KUNIT_EXPECT_FALSE(test, ec_dev->host_sleep_v1);
	}
}
/*
 * If the host-sleep version query "succeeds" but returns a zero-length
 * payload, cros_ec_query_all() must clear host_sleep_v1 while still
 * succeeding overall.  The preceding MKBP query deliberately fills the
 * shared response buffer with a non-zero mask to prove the zero-length
 * reply is not misread as stale data.
 */
static void cros_ec_proto_test_query_all_no_host_sleep_return0(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct ec_xfer_mock *mock;
	int ret;
	/* Set some garbage bytes. */
	ec_dev->host_sleep_v1 = true;
	/* For cros_ec_get_proto_info() without passthru. */
	{
		struct ec_response_get_protocol_info *data;
		mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
		/*
		 * Although it doesn't check the value, provides valid sizes so that
		 * cros_ec_query_all() allocates din and dout correctly.
		 */
		data = (struct ec_response_get_protocol_info *)mock->o_data;
		data->max_request_packet_size = 0xbe;
		data->max_response_packet_size = 0xef;
	}
	/* For cros_ec_get_proto_info() with passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	/* For cros_ec_get_host_command_version_mask() for MKBP. */
	{
		struct ec_response_get_cmd_versions *data;
		mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
		/* In order to pollute next cros_ec_get_host_command_version_mask(). */
		data = (struct ec_response_get_cmd_versions *)mock->o_data;
		data->version_mask = 0xbeef;
	}
	/* For cros_ec_get_host_command_version_mask() for host sleep v1. */
	{
		/* Success status but an empty (0-byte) reply. */
		mock = cros_kunit_ec_xfer_mock_add(test, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	cros_ec_proto_test_query_all_pretest(test);
	ret = cros_ec_query_all(ec_dev);
	KUNIT_EXPECT_EQ(test, ret, 0);
	/* For cros_ec_get_proto_info() without passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_protocol_info));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
	}
	/* For cros_ec_get_proto_info() with passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command,
				EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
				EC_CMD_GET_PROTOCOL_INFO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_protocol_info));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
	}
	/* For cros_ec_get_host_command_version_mask() for MKBP. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_cmd_versions));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
	}
	/* For cros_ec_get_host_command_version_mask() for host sleep v1. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_cmd_versions));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
		/* The garbage value must have been cleared. */
		KUNIT_EXPECT_FALSE(test, ec_dev->host_sleep_v1);
	}
}
/*
 * cros_ec_query_all() when EC_CMD_HOST_EVENT_GET_WAKE_MASK fails with
 * EC_RES_INVALID_COMMAND: the query must still succeed (return 0) and the
 * driver must install a default wake mask with the events checked at the
 * bottom of this test cleared.  The first half queues the mocked replies in
 * the exact order cros_ec_query_all() issues host commands; the second half
 * replays the mocks and verifies each request's header fields.
 */
static void cros_ec_proto_test_query_all_default_wake_mask_return_error(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct ec_xfer_mock *mock;
	int ret;
	/* Set some garbage bytes. */
	ec_dev->host_event_wake_mask = U32_MAX;
	/* For cros_ec_get_proto_info() without passthru. */
	{
		struct ec_response_get_protocol_info *data;
		mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
		/*
		 * Although it doesn't check the value, provides valid sizes so that
		 * cros_ec_query_all() allocates din and dout correctly.
		 */
		data = (struct ec_response_get_protocol_info *)mock->o_data;
		data->max_request_packet_size = 0xbe;
		data->max_response_packet_size = 0xef;
	}
	/* For cros_ec_get_proto_info() with passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	/* For cros_ec_get_host_command_version_mask() for MKBP. */
	{
		mock = cros_kunit_ec_xfer_mock_add(test, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	/* For cros_ec_get_host_command_version_mask() for host sleep v1. */
	{
		mock = cros_kunit_ec_xfer_mock_add(test, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	/* For cros_ec_get_host_event_wake_mask(): EC reports the command as invalid. */
	{
		mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	cros_ec_proto_test_query_all_pretest(test);
	ret = cros_ec_query_all(ec_dev);
	KUNIT_EXPECT_EQ(test, ret, 0);
	/* For cros_ec_get_proto_info() without passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_protocol_info));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
	}
	/* For cros_ec_get_proto_info() with passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command,
				EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
				EC_CMD_GET_PROTOCOL_INFO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_protocol_info));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
	}
	/* For cros_ec_get_host_command_version_mask() for MKBP. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_cmd_versions));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
	}
	/* For cros_ec_get_host_command_version_mask() for host sleep v1. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_cmd_versions));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
	}
	/* For cros_ec_get_host_event_wake_mask(). */
	{
		u32 mask;
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HOST_EVENT_GET_WAKE_MASK);
		KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_host_event_mask));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
		/* The fallback default mask must exclude these non-wake events. */
		mask = ec_dev->host_event_wake_mask;
		KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_LID_CLOSED), 0);
		KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_AC_DISCONNECTED), 0);
		KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_LOW), 0);
		KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_CRITICAL), 0);
		KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY), 0);
		KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU), 0);
		KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_STATUS), 0);
	}
}
/*
 * cros_ec_query_all() when EC_CMD_HOST_EVENT_GET_WAKE_MASK succeeds but
 * returns a zero-length payload: the reply is unusable, so the driver must
 * fall back to the same default wake mask as the error case above — the
 * events checked at the bottom of this test must be cleared.  Mocks are
 * queued in the exact order cros_ec_query_all() issues host commands, then
 * replayed to verify each request's header fields.
 */
static void cros_ec_proto_test_query_all_default_wake_mask_return0(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct ec_xfer_mock *mock;
	int ret;
	/* Set some garbage bytes. */
	ec_dev->host_event_wake_mask = U32_MAX;
	/* For cros_ec_get_proto_info() without passthru. */
	{
		struct ec_response_get_protocol_info *data;
		mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
		/*
		 * Although it doesn't check the value, provides valid sizes so that
		 * cros_ec_query_all() allocates din and dout correctly.
		 */
		data = (struct ec_response_get_protocol_info *)mock->o_data;
		data->max_request_packet_size = 0xbe;
		data->max_response_packet_size = 0xef;
	}
	/* For cros_ec_get_proto_info() with passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	/* For cros_ec_get_host_command_version_mask() for MKBP. */
	{
		mock = cros_kunit_ec_xfer_mock_add(test, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	/* For cros_ec_get_host_command_version_mask() for host sleep v1. */
	{
		mock = cros_kunit_ec_xfer_mock_add(test, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	/* For get_host_event_wake_mask(): success but zero bytes of payload. */
	{
		mock = cros_kunit_ec_xfer_mock_add(test, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	cros_ec_proto_test_query_all_pretest(test);
	ret = cros_ec_query_all(ec_dev);
	KUNIT_EXPECT_EQ(test, ret, 0);
	/* For cros_ec_get_proto_info() without passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_PROTOCOL_INFO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_protocol_info));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
	}
	/* For cros_ec_get_proto_info() with passthru. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command,
				EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) |
				EC_CMD_GET_PROTOCOL_INFO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_protocol_info));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
	}
	/* For cros_ec_get_host_command_version_mask() for MKBP. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_cmd_versions));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
	}
	/* For cros_ec_get_host_command_version_mask() for host sleep v1. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_CMD_VERSIONS);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_cmd_versions));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(struct ec_params_get_cmd_versions));
	}
	/* For get_host_event_wake_mask(). */
	{
		u32 mask;
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HOST_EVENT_GET_WAKE_MASK);
		KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_host_event_mask));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
		/* The fallback default mask must exclude these non-wake events. */
		mask = ec_dev->host_event_wake_mask;
		KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_LID_CLOSED), 0);
		KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_AC_DISCONNECTED), 0);
		KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_LOW), 0);
		KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_CRITICAL), 0);
		KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY), 0);
		KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU), 0);
		KUNIT_EXPECT_EQ(test, mask & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_STATUS), 0);
	}
}
/*
 * Happy path for cros_ec_cmd_xfer(): a 2-byte EC_CMD_HELLO request is sent
 * and a 4-byte mocked reply comes back.  Verifies that the request header
 * and outbound payload reach the transfer hook unchanged and that the
 * mocked reply bytes are copied back into the caller's buffer.
 */
static void cros_ec_proto_test_cmd_xfer_normal(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct ec_xfer_mock *mock;
	int ret;
	/* Command header followed by in-place request/response payload. */
	struct {
		struct cros_ec_command msg;
		u8 data[0x100];
	} __packed buf;
	ec_dev->max_request = 0xff;
	ec_dev->max_response = 0xee;
	ec_dev->max_passthru = 0xdd;
	buf.msg.version = 0;
	buf.msg.command = EC_CMD_HELLO;
	buf.msg.insize = 4;
	buf.msg.outsize = 2;
	buf.data[0] = 0x55;
	buf.data[1] = 0xaa;
	/* Queue a 4-byte reply for the transfer. */
	{
		u8 *data;
		mock = cros_kunit_ec_xfer_mock_add(test, 4);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
		data = (u8 *)mock->o_data;
		data[0] = 0xaa;
		data[1] = 0x55;
		data[2] = 0xcc;
		data[3] = 0x33;
	}
	ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
	KUNIT_EXPECT_EQ(test, ret, 4);
	/* Replay the mock: check what was sent and what was received. */
	{
		u8 *data;
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
		KUNIT_EXPECT_EQ(test, mock->msg.insize, 4);
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 2);
		data = (u8 *)mock->i_data;
		KUNIT_EXPECT_EQ(test, data[0], 0x55);
		KUNIT_EXPECT_EQ(test, data[1], 0xaa);
		KUNIT_EXPECT_EQ(test, buf.data[0], 0xaa);
		KUNIT_EXPECT_EQ(test, buf.data[1], 0x55);
		KUNIT_EXPECT_EQ(test, buf.data[2], 0xcc);
		KUNIT_EXPECT_EQ(test, buf.data[3], 0x33);
	}
}
/*
 * cros_ec_cmd_xfer() with msg.insize one byte larger than the EC's
 * max_response (0xee): the request is not rejected — the insize is clamped
 * down to max_response before reaching the transfer hook.
 */
static void cros_ec_proto_test_cmd_xfer_excess_msg_insize(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct ec_xfer_mock *mock;
	int ret;
	struct {
		struct cros_ec_command msg;
		u8 data[0x100];
	} __packed buf;
	ec_dev->max_request = 0xff;
	ec_dev->max_response = 0xee;
	ec_dev->max_passthru = 0xdd;
	buf.msg.version = 0;
	buf.msg.command = EC_CMD_HELLO;
	buf.msg.insize = 0xee + 1;
	buf.msg.outsize = 2;
	{
		mock = cros_kunit_ec_xfer_mock_add(test, 0xcc);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
	KUNIT_EXPECT_EQ(test, ret, 0xcc);
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_HELLO);
		/* insize was clamped from 0xee + 1 down to max_response. */
		KUNIT_EXPECT_EQ(test, mock->msg.insize, 0xee);
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 2);
	}
}
static void cros_ec_proto_test_cmd_xfer_excess_msg_outsize_without_passthru(struct kunit *test)
{
struct cros_ec_proto_test_priv *priv = test->priv;
struct cros_ec_device *ec_dev = &priv->ec_dev;
int ret;
struct {
struct cros_ec_command msg;
u8 data[0x100];
} __packed buf;
ec_dev->max_request = 0xff;
ec_dev->max_response = 0xee;
ec_dev->max_passthru = 0xdd;
buf.msg.version = 0;
buf.msg.command = EC_CMD_HELLO;
buf.msg.insize = 4;
buf.msg.outsize = 0xff + 1;
ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
KUNIT_EXPECT_EQ(test, ret, -EMSGSIZE);
}
static void cros_ec_proto_test_cmd_xfer_excess_msg_outsize_with_passthru(struct kunit *test)
{
struct cros_ec_proto_test_priv *priv = test->priv;
struct cros_ec_device *ec_dev = &priv->ec_dev;
int ret;
struct {
struct cros_ec_command msg;
u8 data[0x100];
} __packed buf;
ec_dev->max_request = 0xff;
ec_dev->max_response = 0xee;
ec_dev->max_passthru = 0xdd;
buf.msg.version = 0;
buf.msg.command = EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX) + EC_CMD_HELLO;
buf.msg.insize = 4;
buf.msg.outsize = 0xdd + 1;
ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
KUNIT_EXPECT_EQ(test, ret, -EMSGSIZE);
}
static void cros_ec_proto_test_cmd_xfer_protocol_v3_normal(struct kunit *test)
{
struct cros_ec_proto_test_priv *priv = test->priv;
struct cros_ec_device *ec_dev = &priv->ec_dev;
int ret;
struct cros_ec_command msg;
memset(&msg, 0, sizeof(msg));
ec_dev->proto_version = 3;
ec_dev->cmd_xfer = cros_kunit_ec_cmd_xfer_mock;
ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
ret = cros_ec_cmd_xfer(ec_dev, &msg);
KUNIT_EXPECT_EQ(test, ret, 0);
KUNIT_EXPECT_EQ(test, cros_kunit_ec_cmd_xfer_mock_called, 0);
KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 1);
}
static void cros_ec_proto_test_cmd_xfer_protocol_v3_no_op(struct kunit *test)
{
struct cros_ec_proto_test_priv *priv = test->priv;
struct cros_ec_device *ec_dev = &priv->ec_dev;
int ret;
struct cros_ec_command msg;
memset(&msg, 0, sizeof(msg));
ec_dev->proto_version = 3;
ec_dev->cmd_xfer = cros_kunit_ec_cmd_xfer_mock;
ec_dev->pkt_xfer = NULL;
ret = cros_ec_cmd_xfer(ec_dev, &msg);
KUNIT_EXPECT_EQ(test, ret, -EIO);
}
static void cros_ec_proto_test_cmd_xfer_protocol_v2_normal(struct kunit *test)
{
struct cros_ec_proto_test_priv *priv = test->priv;
struct cros_ec_device *ec_dev = &priv->ec_dev;
int ret;
struct cros_ec_command msg;
memset(&msg, 0, sizeof(msg));
ec_dev->proto_version = 2;
ec_dev->cmd_xfer = cros_kunit_ec_cmd_xfer_mock;
ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
ret = cros_ec_cmd_xfer(ec_dev, &msg);
KUNIT_EXPECT_EQ(test, ret, 0);
KUNIT_EXPECT_EQ(test, cros_kunit_ec_cmd_xfer_mock_called, 1);
KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 0);
}
static void cros_ec_proto_test_cmd_xfer_protocol_v2_no_op(struct kunit *test)
{
struct cros_ec_proto_test_priv *priv = test->priv;
struct cros_ec_device *ec_dev = &priv->ec_dev;
int ret;
struct cros_ec_command msg;
memset(&msg, 0, sizeof(msg));
ec_dev->proto_version = 2;
ec_dev->cmd_xfer = NULL;
ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
ret = cros_ec_cmd_xfer(ec_dev, &msg);
KUNIT_EXPECT_EQ(test, ret, -EIO);
}
/*
 * EC_RES_IN_PROGRESS handling: when the first transfer reports that the
 * command is still in progress, cros_ec_cmd_xfer() polls with
 * EC_CMD_GET_COMMS_STATUS.  Here the first status reply has flags == 0
 * (processing finished), so the call completes with EC_RES_SUCCESS after
 * exactly two transfers, returning the size of the comms-status reply.
 */
static void cros_ec_proto_test_cmd_xfer_in_progress_normal(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct ec_xfer_mock *mock;
	int ret;
	struct cros_ec_command msg;
	memset(&msg, 0, sizeof(msg));
	ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
	/* For the first host command to return EC_RES_IN_PROGRESS. */
	{
		mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	/* For EC_CMD_GET_COMMS_STATUS. */
	{
		struct ec_response_get_comms_status *data;
		mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
		/* flags == 0: no EC_COMMS_STATUS_PROCESSING, command is done. */
		data = (struct ec_response_get_comms_status *)mock->o_data;
		data->flags = 0;
	}
	ret = cros_ec_cmd_xfer(ec_dev, &msg);
	KUNIT_EXPECT_EQ(test, ret, sizeof(struct ec_response_get_comms_status));
	KUNIT_EXPECT_EQ(test, msg.result, EC_RES_SUCCESS);
	/* For the first host command to return EC_RES_IN_PROGRESS. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
	}
	/* For EC_CMD_GET_COMMS_STATUS. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_COMMS_STATUS);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_comms_status));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
	}
	KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 2);
}
/*
 * EC_RES_IN_PROGRESS followed by EC_CMD_GET_COMMS_STATUS polls that all
 * fail with -EAGAIN: the retry budget is exhausted and the error is
 * propagated to the caller.  The 51 observed transfers are the initial
 * command plus the status polls (presumably EC_COMMAND_RETRIES == 50 —
 * confirm against cros_ec_proto.c).
 */
static void cros_ec_proto_test_cmd_xfer_in_progress_retries_eagain(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct ec_xfer_mock *mock;
	int ret;
	struct cros_ec_command msg;
	memset(&msg, 0, sizeof(msg));
	ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
	/* For the first host command to return EC_RES_IN_PROGRESS. */
	{
		mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	/* For EC_CMD_GET_COMMS_STATUS EC_COMMAND_RETRIES times. */
	cros_kunit_ec_xfer_mock_default_ret = -EAGAIN;
	ret = cros_ec_cmd_xfer(ec_dev, &msg);
	KUNIT_EXPECT_EQ(test, ret, -EAGAIN);
	/* For EC_CMD_GET_COMMS_STATUS EC_COMMAND_RETRIES times. */
	KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 51);
}
/*
 * EC_RES_IN_PROGRESS where every EC_CMD_GET_COMMS_STATUS poll still
 * reports EC_COMMS_STATUS_PROCESSING: after the full retry budget (50
 * polls) the driver gives up with -EAGAIN.  Total transfers are the
 * initial command plus 50 status polls = 51.
 */
static void cros_ec_proto_test_cmd_xfer_in_progress_retries_status_processing(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct ec_xfer_mock *mock;
	int ret;
	struct cros_ec_command msg;
	memset(&msg, 0, sizeof(msg));
	ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
	/* For the first host command to return EC_RES_IN_PROGRESS. */
	{
		mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	/* For EC_CMD_GET_COMMS_STATUS EC_COMMAND_RETRIES times. */
	{
		struct ec_response_get_comms_status *data;
		int i;
		for (i = 0; i < 50; ++i) {
			mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
			KUNIT_ASSERT_PTR_NE(test, mock, NULL);
			/* Every poll still claims the command is processing. */
			data = (struct ec_response_get_comms_status *)mock->o_data;
			data->flags |= EC_COMMS_STATUS_PROCESSING;
		}
	}
	ret = cros_ec_cmd_xfer(ec_dev, &msg);
	KUNIT_EXPECT_EQ(test, ret, -EAGAIN);
	/* For EC_CMD_GET_COMMS_STATUS EC_COMMAND_RETRIES times. */
	KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 51);
}
/*
 * EC_RES_IN_PROGRESS where the follow-up EC_CMD_GET_COMMS_STATUS transfer
 * itself fails at the bus level (-EIO): the transfer error is propagated
 * straight to the caller.
 */
static void cros_ec_proto_test_cmd_xfer_in_progress_xfer_error(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct ec_xfer_mock *mock;
	int ret;
	struct cros_ec_command msg;
	memset(&msg, 0, sizeof(msg));
	/* For the first host command to return EC_RES_IN_PROGRESS. */
	{
		mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	/* For EC_CMD_GET_COMMS_STATUS: the transfer itself returns -EIO. */
	{
		mock = cros_kunit_ec_xfer_mock_addx(test, -EIO, EC_RES_SUCCESS, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	ret = cros_ec_cmd_xfer(ec_dev, &msg);
	KUNIT_EXPECT_EQ(test, ret, -EIO);
}
/*
 * EC_RES_IN_PROGRESS where the EC_CMD_GET_COMMS_STATUS poll transfers OK
 * but the EC answers with an error result (EC_RES_INVALID_COMMAND): the
 * call returns 0 bytes and exposes the EC result in msg.result, after
 * exactly two transfers.
 */
static void cros_ec_proto_test_cmd_xfer_in_progress_return_error(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct ec_xfer_mock *mock;
	int ret;
	struct cros_ec_command msg;
	memset(&msg, 0, sizeof(msg));
	ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
	/* For the first host command to return EC_RES_IN_PROGRESS. */
	{
		mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	/* For EC_CMD_GET_COMMS_STATUS. */
	{
		mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_INVALID_COMMAND, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	ret = cros_ec_cmd_xfer(ec_dev, &msg);
	KUNIT_EXPECT_EQ(test, ret, 0);
	KUNIT_EXPECT_EQ(test, msg.result, EC_RES_INVALID_COMMAND);
	KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 2);
}
/*
 * EC_RES_IN_PROGRESS where the EC_CMD_GET_COMMS_STATUS poll succeeds but
 * carries zero payload bytes: the status reply is too short to be valid,
 * so the call fails with -EPROTO after exactly two transfers.
 */
static void cros_ec_proto_test_cmd_xfer_in_progress_return0(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct ec_xfer_mock *mock;
	int ret;
	struct cros_ec_command msg;
	memset(&msg, 0, sizeof(msg));
	ec_dev->pkt_xfer = cros_kunit_ec_pkt_xfer_mock;
	/* For the first host command to return EC_RES_IN_PROGRESS. */
	{
		mock = cros_kunit_ec_xfer_mock_addx(test, 0, EC_RES_IN_PROGRESS, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	/* For EC_CMD_GET_COMMS_STATUS: success, but a zero-length reply. */
	{
		mock = cros_kunit_ec_xfer_mock_add(test, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
	}
	ret = cros_ec_cmd_xfer(ec_dev, &msg);
	KUNIT_EXPECT_EQ(test, ret, -EPROTO);
	KUNIT_EXPECT_EQ(test, cros_kunit_ec_pkt_xfer_mock_called, 2);
}
static void cros_ec_proto_test_cmd_xfer_status_normal(struct kunit *test)
{
struct cros_ec_proto_test_priv *priv = test->priv;
struct cros_ec_device *ec_dev = &priv->ec_dev;
struct ec_xfer_mock *mock;
int ret;
struct cros_ec_command msg;
memset(&msg, 0, sizeof(msg));
/* For cros_ec_cmd_xfer(). */
{
mock = cros_kunit_ec_xfer_mock_add(test, 0);
KUNIT_ASSERT_PTR_NE(test, mock, NULL);
}
ret = cros_ec_cmd_xfer_status(ec_dev, &msg);
KUNIT_EXPECT_EQ(test, ret, 0);
}
static void cros_ec_proto_test_cmd_xfer_status_xfer_error(struct kunit *test)
{
struct cros_ec_proto_test_priv *priv = test->priv;
struct cros_ec_device *ec_dev = &priv->ec_dev;
struct ec_xfer_mock *mock;
int ret;
struct cros_ec_command msg;
memset(&msg, 0, sizeof(msg));
/* For cros_ec_cmd_xfer(). */
{
mock = cros_kunit_ec_xfer_mock_addx(test, -EPROTO, EC_RES_SUCCESS, 0);
KUNIT_ASSERT_PTR_NE(test, mock, NULL);
}
ret = cros_ec_cmd_xfer_status(ec_dev, &msg);
KUNIT_EXPECT_EQ(test, ret, -EPROTO);
}
/*
 * Exhaustive check of the EC result → errno mapping used by
 * cros_ec_cmd_xfer_status(): for every EC_RES_* code in the table, queue a
 * transfer that succeeds at the bus level but returns that result, and
 * verify the wrapper converts it to the expected errno.
 */
static void cros_ec_proto_test_cmd_xfer_status_return_error(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct ec_xfer_mock *mock;
	int ret, i;
	struct cros_ec_command msg;
	/* Expected errno, indexed by EC result code (cros_ec_map_error()). */
	static const int map[] = {
		[EC_RES_SUCCESS] = 0,
		[EC_RES_INVALID_COMMAND] = -EOPNOTSUPP,
		[EC_RES_ERROR] = -EIO,
		[EC_RES_INVALID_PARAM] = -EINVAL,
		[EC_RES_ACCESS_DENIED] = -EACCES,
		[EC_RES_INVALID_RESPONSE] = -EPROTO,
		[EC_RES_INVALID_VERSION] = -ENOPROTOOPT,
		[EC_RES_INVALID_CHECKSUM] = -EBADMSG,
		/*
		 * EC_RES_IN_PROGRESS is special because cros_ec_send_command() has extra logic to
		 * handle it. Note that default cros_kunit_ec_xfer_mock_default_ret == 0 thus
		 * cros_ec_xfer_command() in cros_ec_wait_until_complete() returns 0. As a result,
		 * it returns -EPROTO without calling cros_ec_map_error().
		 */
		[EC_RES_IN_PROGRESS] = -EPROTO,
		[EC_RES_UNAVAILABLE] = -ENODATA,
		[EC_RES_TIMEOUT] = -ETIMEDOUT,
		[EC_RES_OVERFLOW] = -EOVERFLOW,
		[EC_RES_INVALID_HEADER] = -EBADR,
		[EC_RES_REQUEST_TRUNCATED] = -EBADR,
		[EC_RES_RESPONSE_TOO_BIG] = -EFBIG,
		[EC_RES_BUS_ERROR] = -EFAULT,
		[EC_RES_BUSY] = -EBUSY,
		[EC_RES_INVALID_HEADER_VERSION] = -EBADMSG,
		[EC_RES_INVALID_HEADER_CRC] = -EBADMSG,
		[EC_RES_INVALID_DATA_CRC] = -EBADMSG,
		[EC_RES_DUP_UNAVAILABLE] = -ENODATA,
	};
	memset(&msg, 0, sizeof(msg));
	for (i = 0; i < ARRAY_SIZE(map); ++i) {
		/* Transfer succeeds (0), EC result code is i. */
		mock = cros_kunit_ec_xfer_mock_addx(test, 0, i, 0);
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
		ret = cros_ec_cmd_xfer_status(ec_dev, &msg);
		KUNIT_EXPECT_EQ(test, ret, map[i]);
	}
}
/*
 * cros_ec_get_next_event() on an EC without MKBP event support: the driver
 * falls back to polling the keyboard matrix with EC_CMD_MKBP_STATE.  The
 * event is reported as EC_MKBP_EVENT_KEY_MATRIX, flagged as a wake event,
 * with no further events pending.
 */
static void cros_ec_proto_test_get_next_event_no_mkbp_event(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct ec_xfer_mock *mock;
	int ret;
	bool wake_event, more_events;
	ec_dev->max_request = 0xff;
	ec_dev->max_response = 0xee;
	ec_dev->mkbp_event_supported = 0;
	/* Set some garbage bytes. */
	wake_event = false;
	more_events = true;
	/* For get_keyboard_state_event(). */
	{
		union ec_response_get_next_data_v1 *data;
		mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
		data = (union ec_response_get_next_data_v1 *)mock->o_data;
		data->host_event = 0xbeef;
	}
	ret = cros_ec_get_next_event(ec_dev, &wake_event, &more_events);
	KUNIT_EXPECT_EQ(test, ret, sizeof(union ec_response_get_next_data_v1));
	KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_KEY_MATRIX);
	KUNIT_EXPECT_EQ(test, ec_dev->event_data.data.host_event, 0xbeef);
	KUNIT_EXPECT_TRUE(test, wake_event);
	KUNIT_EXPECT_FALSE(test, more_events);
	/* For get_keyboard_state_event(). */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_MKBP_STATE);
		KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(union ec_response_get_next_data_v1));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
	}
}
/*
 * cros_ec_get_next_event() while the EC is marked suspended: the call
 * must bail out with -EHOSTDOWN without issuing any host command (note
 * that no mock is queued).
 */
static void cros_ec_proto_test_get_next_event_mkbp_event_ec_suspended(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;

	ec_dev->mkbp_event_supported = 1;
	ec_dev->suspended = true;

	KUNIT_EXPECT_EQ(test, cros_ec_get_next_event(ec_dev, NULL, NULL), -EHOSTDOWN);
}
/*
 * cros_ec_get_next_event() with MKBP event support at command version 0
 * (mkbp_event_supported == 1): EC_CMD_GET_NEXT_EVENT is issued at version
 * 0 with the v0 response size.  The EC_MKBP_HAS_MORE_EVENTS bit in
 * event_type must be stripped before the event is stored, and must set
 * more_events; a sensor-FIFO event is not a wake event.
 */
static void cros_ec_proto_test_get_next_event_mkbp_event_version0(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct ec_xfer_mock *mock;
	int ret;
	bool wake_event, more_events;
	ec_dev->max_request = 0xff;
	ec_dev->max_response = 0xee;
	ec_dev->mkbp_event_supported = 1;
	/* Set some garbage bytes. */
	wake_event = true;
	more_events = false;
	/* For get_next_event_xfer(). */
	{
		struct ec_response_get_next_event *data;
		mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
		data = (struct ec_response_get_next_event *)mock->o_data;
		data->event_type = EC_MKBP_EVENT_SENSOR_FIFO | EC_MKBP_HAS_MORE_EVENTS;
		data->data.sysrq = 0xbeef;
	}
	ret = cros_ec_get_next_event(ec_dev, &wake_event, &more_events);
	KUNIT_EXPECT_EQ(test, ret, sizeof(struct ec_response_get_next_event));
	/* The HAS_MORE_EVENTS flag is consumed, not stored. */
	KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_SENSOR_FIFO);
	KUNIT_EXPECT_EQ(test, ec_dev->event_data.data.sysrq, 0xbeef);
	KUNIT_EXPECT_FALSE(test, wake_event);
	KUNIT_EXPECT_TRUE(test, more_events);
	/* For get_next_event_xfer(). */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_NEXT_EVENT);
		KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_get_next_event));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
	}
}
/*
 * cros_ec_get_next_event() with MKBP event support up to command version 2
 * (mkbp_event_supported == 3, i.e. max supported version + 1):
 * EC_CMD_GET_NEXT_EVENT must be issued at version 2 with the v1 response
 * layout.  A fingerprint event counts as a wake event, and without the
 * EC_MKBP_HAS_MORE_EVENTS flag more_events stays false.
 */
static void cros_ec_proto_test_get_next_event_mkbp_event_version2(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct ec_xfer_mock *mock;
	int ret;
	bool wake_event, more_events;
	ec_dev->max_request = 0xff;
	ec_dev->max_response = 0xee;
	ec_dev->mkbp_event_supported = 3;
	/* Set some garbage bytes. */
	wake_event = false;
	more_events = true;
	/* For get_next_event_xfer(). */
	{
		struct ec_response_get_next_event_v1 *data;
		mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
		data = (struct ec_response_get_next_event_v1 *)mock->o_data;
		data->event_type = EC_MKBP_EVENT_FINGERPRINT;
		data->data.sysrq = 0xbeef;
	}
	ret = cros_ec_get_next_event(ec_dev, &wake_event, &more_events);
	KUNIT_EXPECT_EQ(test, ret, sizeof(struct ec_response_get_next_event_v1));
	KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_FINGERPRINT);
	KUNIT_EXPECT_EQ(test, ec_dev->event_data.data.sysrq, 0xbeef);
	KUNIT_EXPECT_TRUE(test, wake_event);
	KUNIT_EXPECT_FALSE(test, more_events);
	/* For get_next_event_xfer(). */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 2);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_NEXT_EVENT);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_next_event_v1));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
	}
}
/*
 * cros_ec_get_next_event() delivering an EC_MKBP_EVENT_HOST_EVENT carrying
 * EC_HOST_EVENT_RTC with a fully-enabled wake mask: wake_event is still
 * expected to be false.  NOTE(review): RTC host events appear to be
 * excluded from wake reporting even when unmasked — confirm the special
 * case in cros_ec_proto.c's host-event handling.
 */
static void cros_ec_proto_test_get_next_event_mkbp_event_host_event_rtc(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct ec_xfer_mock *mock;
	int ret;
	bool wake_event;
	struct ec_response_get_next_event_v1 *data;
	ec_dev->max_request = 0xff;
	ec_dev->max_response = 0xee;
	ec_dev->mkbp_event_supported = 3;
	/* All host events enabled in the wake mask. */
	ec_dev->host_event_wake_mask = U32_MAX;
	/* Set some garbage bytes. */
	wake_event = true;
	/* For get_next_event_xfer(): reply holds event_type + host_event only. */
	{
		mock = cros_kunit_ec_xfer_mock_add(test,
						   sizeof(data->event_type) +
						   sizeof(data->data.host_event));
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
		data = (struct ec_response_get_next_event_v1 *)mock->o_data;
		data->event_type = EC_MKBP_EVENT_HOST_EVENT;
		put_unaligned_le32(EC_HOST_EVENT_MASK(EC_HOST_EVENT_RTC), &data->data.host_event);
	}
	ret = cros_ec_get_next_event(ec_dev, &wake_event, NULL);
	KUNIT_EXPECT_EQ(test, ret, sizeof(data->event_type) + sizeof(data->data.host_event));
	KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_HOST_EVENT);
	KUNIT_EXPECT_FALSE(test, wake_event);
	/* For get_next_event_xfer(). */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 2);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_NEXT_EVENT);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_next_event_v1));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
	}
}
/*
 * cros_ec_get_next_event() delivering a host event whose bit is cleared in
 * host_event_wake_mask (EC_HOST_EVENT_AC_DISCONNECTED): the event is still
 * stored, but wake_event must come back false because the event is masked
 * out of wake reporting.
 */
static void cros_ec_proto_test_get_next_event_mkbp_event_host_event_masked(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct ec_xfer_mock *mock;
	int ret;
	bool wake_event;
	struct ec_response_get_next_event_v1 *data;
	ec_dev->max_request = 0xff;
	ec_dev->max_response = 0xee;
	ec_dev->mkbp_event_supported = 3;
	/* Everything is a wake event except AC_DISCONNECTED. */
	ec_dev->host_event_wake_mask = U32_MAX & ~EC_HOST_EVENT_MASK(EC_HOST_EVENT_AC_DISCONNECTED);
	/* Set some garbage bytes. */
	wake_event = true;
	/* For get_next_event_xfer(): reply holds event_type + host_event only. */
	{
		mock = cros_kunit_ec_xfer_mock_add(test,
						   sizeof(data->event_type) +
						   sizeof(data->data.host_event));
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
		data = (struct ec_response_get_next_event_v1 *)mock->o_data;
		data->event_type = EC_MKBP_EVENT_HOST_EVENT;
		put_unaligned_le32(EC_HOST_EVENT_MASK(EC_HOST_EVENT_AC_DISCONNECTED),
				   &data->data.host_event);
	}
	ret = cros_ec_get_next_event(ec_dev, &wake_event, NULL);
	KUNIT_EXPECT_EQ(test, ret, sizeof(data->event_type) + sizeof(data->data.host_event));
	KUNIT_EXPECT_EQ(test, ec_dev->event_data.event_type, EC_MKBP_EVENT_HOST_EVENT);
	KUNIT_EXPECT_FALSE(test, wake_event);
	/* For get_next_event_xfer(). */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 2);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_NEXT_EVENT);
		KUNIT_EXPECT_EQ(test, mock->msg.insize,
				sizeof(struct ec_response_get_next_event_v1));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
	}
}
/*
 * cros_ec_get_host_event() on an EC without MKBP event support: there is
 * nothing to decode, so the function returns 0.
 */
static void cros_ec_proto_test_get_host_event_no_mkbp_event(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;

	ec_dev->mkbp_event_supported = 0;

	KUNIT_EXPECT_EQ(test, cros_ec_get_host_event(ec_dev), 0);
}
/*
 * cros_ec_get_host_event() when the pending MKBP event is not a host
 * event (here: fingerprint): the function returns 0.
 */
static void cros_ec_proto_test_get_host_event_not_host_event(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;

	ec_dev->mkbp_event_supported = 1;
	ec_dev->event_data.event_type = EC_MKBP_EVENT_FINGERPRINT;

	KUNIT_EXPECT_EQ(test, cros_ec_get_host_event(ec_dev), 0);
}
/*
 * cros_ec_get_host_event() with a host event whose recorded payload size
 * is wrong (0xff instead of sizeof(host_event)): the event is rejected
 * and 0 is returned.
 */
static void cros_ec_proto_test_get_host_event_wrong_event_size(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;

	ec_dev->mkbp_event_supported = 1;
	ec_dev->event_data.event_type = EC_MKBP_EVENT_HOST_EVENT;
	ec_dev->event_size = 0xff;

	KUNIT_EXPECT_EQ(test, cros_ec_get_host_event(ec_dev), 0);
}
/*
 * Happy path for cros_ec_get_host_event(): a well-formed host event with
 * the correct payload size decodes to the stored little-endian event mask.
 */
static void cros_ec_proto_test_get_host_event_normal(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;

	ec_dev->mkbp_event_supported = 1;
	ec_dev->event_data.event_type = EC_MKBP_EVENT_HOST_EVENT;
	ec_dev->event_size = sizeof(ec_dev->event_data.data.host_event);
	put_unaligned_le32(EC_HOST_EVENT_MASK(EC_HOST_EVENT_RTC),
			   &ec_dev->event_data.data.host_event);

	KUNIT_EXPECT_EQ(test, cros_ec_get_host_event(ec_dev),
			EC_HOST_EVENT_MASK(EC_HOST_EVENT_RTC));
}
/*
 * cros_ec_check_features() with a pre-populated feature cache: no host
 * command is needed (no mock is queued), and only the two cached features
 * report true.
 */
static void cros_ec_proto_test_check_features_cached(struct kunit *test)
{
	static struct cros_ec_dev ec;
	int i;

	/* Seed the cache directly: fingerprint in word 0, SCP in word 1. */
	ec.features.flags[0] = EC_FEATURE_MASK_0(EC_FEATURE_FINGERPRINT);
	ec.features.flags[1] = EC_FEATURE_MASK_0(EC_FEATURE_SCP);

	for (i = 0; i < EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK; ++i) {
		if (i == EC_FEATURE_FINGERPRINT || i == EC_FEATURE_SCP)
			KUNIT_EXPECT_TRUE(test, cros_ec_check_features(&ec, i));
		else
			KUNIT_EXPECT_FALSE(test, cros_ec_check_features(&ec, i));
	}
}
/*
 * cros_ec_check_features() with an invalid cache (flags all-ones sentinel):
 * the first query must fetch EC_CMD_GET_FEATURES from the EC exactly once,
 * cache the reply, and answer every subsequent lookup from the cache —
 * only one mock is queued for the whole feature loop.
 */
static void cros_ec_proto_test_check_features_not_cached(struct kunit *test)
{
	struct cros_ec_proto_test_priv *priv = test->priv;
	struct cros_ec_device *ec_dev = &priv->ec_dev;
	struct ec_xfer_mock *mock;
	int ret, i;
	static struct cros_ec_dev ec;
	ec_dev->max_request = 0xff;
	ec_dev->max_response = 0xee;
	ec.ec_dev = ec_dev;
	ec.dev = ec_dev->dev;
	ec.cmd_offset = 0;
	/* All-ones marks the cache as not yet populated. */
	ec.features.flags[0] = -1;
	ec.features.flags[1] = -1;
	/* For EC_CMD_GET_FEATURES. */
	{
		struct ec_response_get_features *data;
		mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
		KUNIT_ASSERT_PTR_NE(test, mock, NULL);
		data = (struct ec_response_get_features *)mock->o_data;
		data->flags[0] = EC_FEATURE_MASK_0(EC_FEATURE_FINGERPRINT);
		data->flags[1] = EC_FEATURE_MASK_0(EC_FEATURE_SCP);
	}
	for (i = 0; i < EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK; ++i) {
		ret = cros_ec_check_features(&ec, i);
		switch (i) {
		case EC_FEATURE_FINGERPRINT:
		case EC_FEATURE_SCP:
			KUNIT_EXPECT_TRUE(test, ret);
			break;
		default:
			KUNIT_EXPECT_FALSE(test, ret);
			break;
		}
	}
	/* For EC_CMD_GET_FEATURES. */
	{
		mock = cros_kunit_ec_xfer_mock_next();
		KUNIT_EXPECT_PTR_NE(test, mock, NULL);
		KUNIT_EXPECT_EQ(test, mock->msg.version, 0);
		KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_GET_FEATURES);
		KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_get_features));
		KUNIT_EXPECT_EQ(test, mock->msg.outsize, 0);
	}
}
/* MOTIONSENSE_CMD_DUMP succeeds: the dump's sensor_count is returned as-is. */
static void cros_ec_proto_test_get_sensor_count_normal(struct kunit *test)
{
        struct cros_ec_proto_test_priv *priv = test->priv;
        struct cros_ec_device *ec_dev = &priv->ec_dev;
        struct ec_xfer_mock *mock;
        int ret;
        static struct cros_ec_dev ec;

        ec_dev->max_request = 0xff;
        ec_dev->max_response = 0xee;
        ec.ec_dev = ec_dev;
        ec.dev = ec_dev->dev;
        ec.cmd_offset = 0;

        /* For EC_CMD_MOTION_SENSE_CMD: queue the mocked EC response. */
        {
                struct ec_response_motion_sense *data;

                mock = cros_kunit_ec_xfer_mock_add(test, sizeof(*data));
                KUNIT_ASSERT_PTR_NE(test, mock, NULL);

                data = (struct ec_response_motion_sense *)mock->o_data;
                data->dump.sensor_count = 0xbf;
        }

        ret = cros_ec_get_sensor_count(&ec);
        KUNIT_EXPECT_EQ(test, ret, 0xbf);

        /* For EC_CMD_MOTION_SENSE_CMD: verify the request the driver sent. */
        {
                struct ec_params_motion_sense *data;

                mock = cros_kunit_ec_xfer_mock_next();
                KUNIT_EXPECT_PTR_NE(test, mock, NULL);

                KUNIT_EXPECT_EQ(test, mock->msg.version, 1);
                KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_MOTION_SENSE_CMD);
                KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_motion_sense));
                KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));

                data = (struct ec_params_motion_sense *)mock->i_data;
                KUNIT_EXPECT_EQ(test, data->cmd, MOTIONSENSE_CMD_DUMP);
        }
}
/* A transfer failure (and no legacy readmem fallback) propagates -EPROTO. */
static void cros_ec_proto_test_get_sensor_count_xfer_error(struct kunit *test)
{
        struct cros_ec_proto_test_priv *priv = test->priv;
        struct cros_ec_device *ec_dev = &priv->ec_dev;
        struct ec_xfer_mock *mock;
        int ret;
        static struct cros_ec_dev ec;

        ec_dev->max_request = 0xff;
        ec_dev->max_response = 0xee;
        ec.ec_dev = ec_dev;
        ec.dev = ec_dev->dev;
        ec.cmd_offset = 0;

        /* For EC_CMD_MOTION_SENSE_CMD: make the transfer itself fail. */
        {
                mock = cros_kunit_ec_xfer_mock_addx(test, -EPROTO, EC_RES_SUCCESS, 0);
                KUNIT_ASSERT_PTR_NE(test, mock, NULL);
        }

        ret = cros_ec_get_sensor_count(&ec);
        KUNIT_EXPECT_EQ(test, ret, -EPROTO);

        /* For EC_CMD_MOTION_SENSE_CMD: verify the request the driver sent. */
        {
                struct ec_params_motion_sense *data;

                mock = cros_kunit_ec_xfer_mock_next();
                KUNIT_EXPECT_PTR_NE(test, mock, NULL);

                KUNIT_EXPECT_EQ(test, mock->msg.version, 1);
                KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_MOTION_SENSE_CMD);
                KUNIT_EXPECT_EQ(test, mock->msg.insize, sizeof(struct ec_response_motion_sense));
                KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));

                data = (struct ec_params_motion_sense *)mock->i_data;
                KUNIT_EXPECT_EQ(test, data->cmd, MOTIONSENSE_CMD_DUMP);
        }
}
/*
 * When MOTIONSENSE_CMD_DUMP fails but a cmd_readmem hook exists, the count
 * comes from the EC_MEMMAP_ACC_STATUS presence bit: absent -> 0 sensors,
 * present -> 2 (legacy ECs expose exactly two accelerometers).
 * NOTE(review): the "2 when present" rule is inferred from the table below;
 * confirm against cros_ec_get_sensor_count().
 */
static void cros_ec_proto_test_get_sensor_count_legacy(struct kunit *test)
{
        struct cros_ec_proto_test_priv *priv = test->priv;
        struct cros_ec_device *ec_dev = &priv->ec_dev;
        struct ec_xfer_mock *mock;
        int ret, i;
        static struct cros_ec_dev ec;
        struct {
                u8 readmem_data;        /* value returned by the readmem mock */
                int expected_result;    /* expected sensor count */
        } test_data[] = {
                { 0, 0 },
                { EC_MEMMAP_ACC_STATUS_PRESENCE_BIT, 2 },
        };

        ec_dev->max_request = 0xff;
        ec_dev->max_response = 0xee;
        ec_dev->cmd_readmem = cros_kunit_readmem_mock;
        ec.ec_dev = ec_dev;
        ec.dev = ec_dev->dev;
        ec.cmd_offset = 0;

        for (i = 0; i < ARRAY_SIZE(test_data); ++i) {
                /* For EC_CMD_MOTION_SENSE_CMD: force the modern path to fail. */
                {
                        mock = cros_kunit_ec_xfer_mock_addx(test, -EPROTO, EC_RES_SUCCESS, 0);
                        KUNIT_ASSERT_PTR_NE(test, mock, NULL);
                }

                /* For readmem: stage this iteration's memory-map byte. */
                {
                        cros_kunit_readmem_mock_data = kunit_kzalloc(test, 1, GFP_KERNEL);
                        KUNIT_ASSERT_PTR_NE(test, cros_kunit_readmem_mock_data, NULL);
                        cros_kunit_readmem_mock_data[0] = test_data[i].readmem_data;

                        cros_kunit_ec_xfer_mock_default_ret = 1;
                }

                ret = cros_ec_get_sensor_count(&ec);
                KUNIT_EXPECT_EQ(test, ret, test_data[i].expected_result);

                /* For EC_CMD_MOTION_SENSE_CMD: verify the attempted request. */
                {
                        struct ec_params_motion_sense *data;

                        mock = cros_kunit_ec_xfer_mock_next();
                        KUNIT_EXPECT_PTR_NE(test, mock, NULL);

                        KUNIT_EXPECT_EQ(test, mock->msg.version, 1);
                        KUNIT_EXPECT_EQ(test, mock->msg.command, EC_CMD_MOTION_SENSE_CMD);
                        KUNIT_EXPECT_EQ(test, mock->msg.insize,
                                        sizeof(struct ec_response_motion_sense));
                        KUNIT_EXPECT_EQ(test, mock->msg.outsize, sizeof(*data));

                        data = (struct ec_params_motion_sense *)mock->i_data;
                        KUNIT_EXPECT_EQ(test, data->cmd, MOTIONSENSE_CMD_DUMP);
                }

                /* For readmem: verify the fallback read the right offset. */
                {
                        KUNIT_EXPECT_EQ(test, cros_kunit_readmem_mock_offset, EC_MEMMAP_ACC_STATUS);
                }
        }
}
/*
 * cros_ec_cmd() round trip: the outbound bytes reach the mock unchanged,
 * version/command/insize/outsize are forwarded, and the return value is
 * the mocked response length.
 */
static void cros_ec_proto_test_ec_cmd(struct kunit *test)
{
        struct cros_ec_proto_test_priv *priv = test->priv;
        struct cros_ec_device *ec_dev = &priv->ec_dev;
        struct ec_xfer_mock *mock;
        int ret;
        u8 out[3], in[2];

        ec_dev->max_request = 0xff;
        ec_dev->max_response = 0xee;

        out[0] = 0xdd;
        out[1] = 0xcc;
        out[2] = 0xbb;

        /* Queue a 2-byte mocked response. */
        {
                u8 *data;

                mock = cros_kunit_ec_xfer_mock_add(test, 2);
                KUNIT_ASSERT_PTR_NE(test, mock, NULL);

                data = (u8 *)mock->o_data;
                data[0] = 0xaa;
                data[1] = 0x99;
        }

        ret = cros_ec_cmd(ec_dev, 0x88, 0x77, out, ARRAY_SIZE(out), in, ARRAY_SIZE(in));
        KUNIT_EXPECT_EQ(test, ret, 2);

        /* Verify the message the helper built. */
        {
                u8 *data;

                mock = cros_kunit_ec_xfer_mock_next();
                KUNIT_EXPECT_PTR_NE(test, mock, NULL);

                KUNIT_EXPECT_EQ(test, mock->msg.version, 0x88);
                KUNIT_EXPECT_EQ(test, mock->msg.command, 0x77);
                KUNIT_EXPECT_EQ(test, mock->msg.insize, ARRAY_SIZE(in));
                KUNIT_EXPECT_EQ(test, mock->msg.outsize, ARRAY_SIZE(out));

                data = (u8 *)mock->i_data;
                KUNIT_EXPECT_EQ(test, data[0], 0xdd);
                KUNIT_EXPECT_EQ(test, data[1], 0xcc);
                KUNIT_EXPECT_EQ(test, data[2], 0xbb);
        }
}
/* No-op release callback for the test fixture's stub struct device. */
static void cros_ec_proto_test_release(struct device *dev)
{
}
/*
 * Per-test fixture setup: build a fake cros_ec_device whose transfer
 * callbacks are the KUnit mocks, backed by a minimal stub struct device.
 */
static int cros_ec_proto_test_init(struct kunit *test)
{
        struct cros_ec_proto_test_priv *priv;
        struct cros_ec_device *ec_dev;

        priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
        test->priv = priv;

        ec_dev = &priv->ec_dev;
        ec_dev->dout = (u8 *)priv->dout;
        ec_dev->dout_size = ARRAY_SIZE(priv->dout);
        ec_dev->din = (u8 *)priv->din;
        ec_dev->din_size = ARRAY_SIZE(priv->din);
        ec_dev->proto_version = EC_HOST_REQUEST_VERSION;
        ec_dev->dev = kunit_kzalloc(test, sizeof(*ec_dev->dev), GFP_KERNEL);
        if (!ec_dev->dev)
                return -ENOMEM;
        device_initialize(ec_dev->dev);
        dev_set_name(ec_dev->dev, "cros_ec_proto_test");
        /* kunit_kzalloc() owns the memory; release has nothing to free. */
        ec_dev->dev->release = cros_ec_proto_test_release;
        /* Route both v2 and v3 transfers through the same mock. */
        ec_dev->cmd_xfer = cros_kunit_ec_xfer_mock;
        ec_dev->pkt_xfer = cros_kunit_ec_xfer_mock;

        priv->msg = (struct cros_ec_command *)priv->_msg;

        /* Clear any mock state left over from a previous test case. */
        cros_kunit_mock_reset();

        return 0;
}
/* Per-test teardown: drop the reference taken by device_initialize(). */
static void cros_ec_proto_test_exit(struct kunit *test)
{
        struct cros_ec_proto_test_priv *priv = test->priv;
        struct cros_ec_device *ec_dev = &priv->ec_dev;

        put_device(ec_dev->dev);
}
/* Registry of every case in the cros_ec protocol suite. */
static struct kunit_case cros_ec_proto_test_cases[] = {
        KUNIT_CASE(cros_ec_proto_test_prepare_tx_legacy_normal),
        KUNIT_CASE(cros_ec_proto_test_prepare_tx_legacy_bad_msg_outsize),
        KUNIT_CASE(cros_ec_proto_test_prepare_tx_normal),
        KUNIT_CASE(cros_ec_proto_test_prepare_tx_bad_msg_outsize),
        KUNIT_CASE(cros_ec_proto_test_check_result),
        KUNIT_CASE(cros_ec_proto_test_query_all_normal),
        KUNIT_CASE(cros_ec_proto_test_query_all_no_pd_return_error),
        KUNIT_CASE(cros_ec_proto_test_query_all_no_pd_return0),
        KUNIT_CASE(cros_ec_proto_test_query_all_legacy_normal_v3_return_error),
        KUNIT_CASE(cros_ec_proto_test_query_all_legacy_normal_v3_return0),
        KUNIT_CASE(cros_ec_proto_test_query_all_legacy_xfer_error),
        KUNIT_CASE(cros_ec_proto_test_query_all_legacy_return_error),
        KUNIT_CASE(cros_ec_proto_test_query_all_legacy_data_error),
        KUNIT_CASE(cros_ec_proto_test_query_all_legacy_return0),
        KUNIT_CASE(cros_ec_proto_test_query_all_no_mkbp),
        KUNIT_CASE(cros_ec_proto_test_query_all_no_mkbp_return_error),
        KUNIT_CASE(cros_ec_proto_test_query_all_no_mkbp_return0),
        KUNIT_CASE(cros_ec_proto_test_query_all_no_host_sleep),
        KUNIT_CASE(cros_ec_proto_test_query_all_no_host_sleep_return0),
        KUNIT_CASE(cros_ec_proto_test_query_all_default_wake_mask_return_error),
        KUNIT_CASE(cros_ec_proto_test_query_all_default_wake_mask_return0),
        KUNIT_CASE(cros_ec_proto_test_cmd_xfer_normal),
        KUNIT_CASE(cros_ec_proto_test_cmd_xfer_excess_msg_insize),
        KUNIT_CASE(cros_ec_proto_test_cmd_xfer_excess_msg_outsize_without_passthru),
        KUNIT_CASE(cros_ec_proto_test_cmd_xfer_excess_msg_outsize_with_passthru),
        KUNIT_CASE(cros_ec_proto_test_cmd_xfer_protocol_v3_normal),
        KUNIT_CASE(cros_ec_proto_test_cmd_xfer_protocol_v3_no_op),
        KUNIT_CASE(cros_ec_proto_test_cmd_xfer_protocol_v2_normal),
        KUNIT_CASE(cros_ec_proto_test_cmd_xfer_protocol_v2_no_op),
        KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_normal),
        KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_retries_eagain),
        KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_retries_status_processing),
        KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_xfer_error),
        KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_return_error),
        KUNIT_CASE(cros_ec_proto_test_cmd_xfer_in_progress_return0),
        KUNIT_CASE(cros_ec_proto_test_cmd_xfer_status_normal),
        KUNIT_CASE(cros_ec_proto_test_cmd_xfer_status_xfer_error),
        KUNIT_CASE(cros_ec_proto_test_cmd_xfer_status_return_error),
        KUNIT_CASE(cros_ec_proto_test_get_next_event_no_mkbp_event),
        KUNIT_CASE(cros_ec_proto_test_get_next_event_mkbp_event_ec_suspended),
        KUNIT_CASE(cros_ec_proto_test_get_next_event_mkbp_event_version0),
        KUNIT_CASE(cros_ec_proto_test_get_next_event_mkbp_event_version2),
        KUNIT_CASE(cros_ec_proto_test_get_next_event_mkbp_event_host_event_rtc),
        KUNIT_CASE(cros_ec_proto_test_get_next_event_mkbp_event_host_event_masked),
        KUNIT_CASE(cros_ec_proto_test_get_host_event_no_mkbp_event),
        KUNIT_CASE(cros_ec_proto_test_get_host_event_not_host_event),
        KUNIT_CASE(cros_ec_proto_test_get_host_event_wrong_event_size),
        KUNIT_CASE(cros_ec_proto_test_get_host_event_normal),
        KUNIT_CASE(cros_ec_proto_test_check_features_cached),
        KUNIT_CASE(cros_ec_proto_test_check_features_not_cached),
        KUNIT_CASE(cros_ec_proto_test_get_sensor_count_normal),
        KUNIT_CASE(cros_ec_proto_test_get_sensor_count_xfer_error),
        KUNIT_CASE(cros_ec_proto_test_get_sensor_count_legacy),
        KUNIT_CASE(cros_ec_proto_test_ec_cmd),
        {}
};
/* Suite wiring: per-case init/exit plus the case table above. */
static struct kunit_suite cros_ec_proto_test_suite = {
        .name = "cros_ec_proto_test",
        .init = cros_ec_proto_test_init,
        .exit = cros_ec_proto_test_exit,
        .test_cases = cros_ec_proto_test_cases,
};

kunit_test_suite(cros_ec_proto_test_suite);

MODULE_LICENSE("GPL");
| linux-master | drivers/platform/chrome/cros_ec_proto_test.c |
// SPDX-License-Identifier: GPL-2.0+
// Expose the vboot context nvram to userspace
//
// Copyright (C) 2012 Google, Inc.
// Copyright (C) 2015 Collabora Ltd.
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/slab.h>
#define DRV_NAME "cros-ec-vbc"
/*
 * Read the raw vboot context block from the EC.
 *
 * Sends EC_CMD_VBNV_CONTEXT with the READ opcode and copies the EC's
 * response into @buf.  @pos and @count are unused: the bin attribute is
 * declared with the exact block size, so sysfs hands us the whole block.
 *
 * Returns the block size on success or a negative errno.
 *
 * Improvement over the previous version: the two error paths duplicated
 * the kfree()+return sequence; they now share a single goto-based exit
 * (CERT MEM12-C), so future error paths cannot forget the cleanup.
 */
static ssize_t vboot_context_read(struct file *filp, struct kobject *kobj,
                                  struct bin_attribute *att, char *buf,
                                  loff_t pos, size_t count)
{
        struct device *dev = kobj_to_dev(kobj);
        struct cros_ec_dev *ec = to_cros_ec_dev(dev);
        struct cros_ec_device *ecdev = ec->ec_dev;
        struct ec_params_vbnvcontext *params;
        struct cros_ec_command *msg;
        ssize_t ret;
        const size_t para_sz = sizeof(params->op);
        const size_t resp_sz = sizeof(struct ec_response_vbnvcontext);
        /* One buffer serves both directions; size it for the larger. */
        const size_t payload = max(para_sz, resp_sz);

        msg = kmalloc(sizeof(*msg) + payload, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        /* NB: we only kmalloc()ated enough space for the op field */
        params = (struct ec_params_vbnvcontext *)msg->data;
        params->op = EC_VBNV_CONTEXT_OP_READ;

        msg->version = EC_VER_VBNV_CONTEXT;
        msg->command = EC_CMD_VBNV_CONTEXT;
        msg->outsize = para_sz;
        msg->insize = resp_sz;

        ret = cros_ec_cmd_xfer_status(ecdev, msg);
        if (ret < 0) {
                dev_err(dev, "Error sending read request: %zd\n", ret);
                goto out;
        }

        memcpy(buf, msg->data, resp_sz);
        ret = resp_sz;
out:
        kfree(msg);
        return ret;
}
/*
 * Write a full vboot context block to the EC.
 *
 * Only whole-block writes are accepted (@count must equal the context
 * block size); the block is sent with EC_CMD_VBNV_CONTEXT / WRITE.
 *
 * Returns the number of bytes consumed on success or a negative errno.
 *
 * Improvement over the previous version: the error path duplicated the
 * kfree()+return sequence; success and failure now share a single
 * goto-based exit (CERT MEM12-C).
 */
static ssize_t vboot_context_write(struct file *filp, struct kobject *kobj,
                                   struct bin_attribute *attr, char *buf,
                                   loff_t pos, size_t count)
{
        struct device *dev = kobj_to_dev(kobj);
        struct cros_ec_dev *ec = to_cros_ec_dev(dev);
        struct cros_ec_device *ecdev = ec->ec_dev;
        struct ec_params_vbnvcontext *params;
        struct cros_ec_command *msg;
        ssize_t ret;
        const size_t para_sz = sizeof(*params);
        const size_t data_sz = sizeof(params->block);

        /* Only write full values */
        if (count != data_sz)
                return -EINVAL;

        msg = kmalloc(sizeof(*msg) + para_sz, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        params = (struct ec_params_vbnvcontext *)msg->data;
        params->op = EC_VBNV_CONTEXT_OP_WRITE;
        memcpy(params->block, buf, data_sz);

        msg->version = EC_VER_VBNV_CONTEXT;
        msg->command = EC_CMD_VBNV_CONTEXT;
        msg->outsize = para_sz;
        msg->insize = 0;

        ret = cros_ec_cmd_xfer_status(ecdev, msg);
        if (ret < 0) {
                dev_err(dev, "Error sending write request: %zd\n", ret);
                goto out;
        }

        ret = data_sz;
out:
        kfree(msg);
        return ret;
}
/* 16-byte read/write binary attribute, exposed under the "vbc" group. */
static BIN_ATTR_RW(vboot_context, 16);

static struct bin_attribute *cros_ec_vbc_bin_attrs[] = {
        &bin_attr_vboot_context,
        NULL
};

static const struct attribute_group cros_ec_vbc_attr_group = {
        .name = "vbc",
        .bin_attrs = cros_ec_vbc_bin_attrs,
};
/*
 * Create the "vbc" sysfs group on the parent cros_ec class device.
 * Returns 0 on success or the sysfs_create_group() error.
 */
static int cros_ec_vbc_probe(struct platform_device *pd)
{
        struct cros_ec_dev *ec_dev = dev_get_drvdata(pd->dev.parent);
        struct device *dev = &pd->dev;
        int ret;

        ret = sysfs_create_group(&ec_dev->class_dev.kobj,
                                 &cros_ec_vbc_attr_group);
        if (ret < 0)
                dev_err(dev, "failed to create %s attributes. err=%d\n",
                        cros_ec_vbc_attr_group.name, ret);

        return ret;
}
/* Tear down the "vbc" sysfs group created in probe. */
static int cros_ec_vbc_remove(struct platform_device *pd)
{
        struct cros_ec_dev *ec_dev = dev_get_drvdata(pd->dev.parent);

        sysfs_remove_group(&ec_dev->class_dev.kobj,
                           &cros_ec_vbc_attr_group);

        return 0;
}
/* Platform driver glue; the device is instantiated by the cros_ec core. */
static struct platform_driver cros_ec_vbc_driver = {
        .driver = {
                .name = DRV_NAME,
        },
        .probe = cros_ec_vbc_probe,
        .remove = cros_ec_vbc_remove,
};

module_platform_driver(cros_ec_vbc_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Expose the vboot context nvram to userspace");
MODULE_ALIAS("platform:" DRV_NAME);
| linux-master | drivers/platform/chrome/cros_ec_vbc.c |
// SPDX-License-Identifier: GPL-2.0
// Trace events for the ChromeOS Embedded Controller
//
// Copyright 2019 Google LLC.
/* Pair a constant with its name, for trace-event symbolic printing. */
#define TRACE_SYMBOL(a) {a, #a}

// Generate the list using the following script:
// sed -n 's/^#define \(EC_CMD_[[:alnum:]_]*\)\s.*/\tTRACE_SYMBOL(\1), \\/p' include/linux/platform_data/cros_ec_commands.h

/* Symbolic names for every EC host command (used by __print_symbolic). */
#define EC_CMDS \
        TRACE_SYMBOL(EC_CMD_ACPI_READ), \
        TRACE_SYMBOL(EC_CMD_ACPI_WRITE), \
        TRACE_SYMBOL(EC_CMD_ACPI_BURST_ENABLE), \
        TRACE_SYMBOL(EC_CMD_ACPI_BURST_DISABLE), \
        TRACE_SYMBOL(EC_CMD_ACPI_QUERY_EVENT), \
        TRACE_SYMBOL(EC_CMD_PROTO_VERSION), \
        TRACE_SYMBOL(EC_CMD_HELLO), \
        TRACE_SYMBOL(EC_CMD_GET_VERSION), \
        TRACE_SYMBOL(EC_CMD_READ_TEST), \
        TRACE_SYMBOL(EC_CMD_GET_BUILD_INFO), \
        TRACE_SYMBOL(EC_CMD_GET_CHIP_INFO), \
        TRACE_SYMBOL(EC_CMD_GET_BOARD_VERSION), \
        TRACE_SYMBOL(EC_CMD_READ_MEMMAP), \
        TRACE_SYMBOL(EC_CMD_GET_CMD_VERSIONS), \
        TRACE_SYMBOL(EC_CMD_GET_COMMS_STATUS), \
        TRACE_SYMBOL(EC_CMD_TEST_PROTOCOL), \
        TRACE_SYMBOL(EC_CMD_GET_PROTOCOL_INFO), \
        TRACE_SYMBOL(EC_CMD_GSV_PAUSE_IN_S5), \
        TRACE_SYMBOL(EC_CMD_GET_FEATURES), \
        TRACE_SYMBOL(EC_CMD_GET_SKU_ID), \
        TRACE_SYMBOL(EC_CMD_SET_SKU_ID), \
        TRACE_SYMBOL(EC_CMD_FLASH_INFO), \
        TRACE_SYMBOL(EC_CMD_FLASH_READ), \
        TRACE_SYMBOL(EC_CMD_FLASH_WRITE), \
        TRACE_SYMBOL(EC_CMD_FLASH_ERASE), \
        TRACE_SYMBOL(EC_CMD_FLASH_PROTECT), \
        TRACE_SYMBOL(EC_CMD_FLASH_REGION_INFO), \
        TRACE_SYMBOL(EC_CMD_VBNV_CONTEXT), \
        TRACE_SYMBOL(EC_CMD_FLASH_SPI_INFO), \
        TRACE_SYMBOL(EC_CMD_FLASH_SELECT), \
        TRACE_SYMBOL(EC_CMD_PWM_GET_FAN_TARGET_RPM), \
        TRACE_SYMBOL(EC_CMD_PWM_SET_FAN_TARGET_RPM), \
        TRACE_SYMBOL(EC_CMD_PWM_GET_KEYBOARD_BACKLIGHT), \
        TRACE_SYMBOL(EC_CMD_PWM_SET_KEYBOARD_BACKLIGHT), \
        TRACE_SYMBOL(EC_CMD_PWM_SET_FAN_DUTY), \
        TRACE_SYMBOL(EC_CMD_PWM_SET_DUTY), \
        TRACE_SYMBOL(EC_CMD_PWM_GET_DUTY), \
        TRACE_SYMBOL(EC_CMD_LIGHTBAR_CMD), \
        TRACE_SYMBOL(EC_CMD_LED_CONTROL), \
        TRACE_SYMBOL(EC_CMD_VBOOT_HASH), \
        TRACE_SYMBOL(EC_CMD_MOTION_SENSE_CMD), \
        TRACE_SYMBOL(EC_CMD_FORCE_LID_OPEN), \
        TRACE_SYMBOL(EC_CMD_CONFIG_POWER_BUTTON), \
        TRACE_SYMBOL(EC_CMD_USB_CHARGE_SET_MODE), \
        TRACE_SYMBOL(EC_CMD_PSTORE_INFO), \
        TRACE_SYMBOL(EC_CMD_PSTORE_READ), \
        TRACE_SYMBOL(EC_CMD_PSTORE_WRITE), \
        TRACE_SYMBOL(EC_CMD_RTC_GET_VALUE), \
        TRACE_SYMBOL(EC_CMD_RTC_GET_ALARM), \
        TRACE_SYMBOL(EC_CMD_RTC_SET_VALUE), \
        TRACE_SYMBOL(EC_CMD_RTC_SET_ALARM), \
        TRACE_SYMBOL(EC_CMD_PORT80_LAST_BOOT), \
        TRACE_SYMBOL(EC_CMD_PORT80_READ), \
        TRACE_SYMBOL(EC_CMD_VSTORE_INFO), \
        TRACE_SYMBOL(EC_CMD_VSTORE_READ), \
        TRACE_SYMBOL(EC_CMD_VSTORE_WRITE), \
        TRACE_SYMBOL(EC_CMD_THERMAL_SET_THRESHOLD), \
        TRACE_SYMBOL(EC_CMD_THERMAL_GET_THRESHOLD), \
        TRACE_SYMBOL(EC_CMD_THERMAL_AUTO_FAN_CTRL), \
        TRACE_SYMBOL(EC_CMD_TMP006_GET_CALIBRATION), \
        TRACE_SYMBOL(EC_CMD_TMP006_SET_CALIBRATION), \
        TRACE_SYMBOL(EC_CMD_TMP006_GET_RAW), \
        TRACE_SYMBOL(EC_CMD_MKBP_STATE), \
        TRACE_SYMBOL(EC_CMD_MKBP_INFO), \
        TRACE_SYMBOL(EC_CMD_MKBP_SIMULATE_KEY), \
        TRACE_SYMBOL(EC_CMD_GET_KEYBOARD_ID), \
        TRACE_SYMBOL(EC_CMD_MKBP_SET_CONFIG), \
        TRACE_SYMBOL(EC_CMD_MKBP_GET_CONFIG), \
        TRACE_SYMBOL(EC_CMD_KEYSCAN_SEQ_CTRL), \
        TRACE_SYMBOL(EC_CMD_GET_NEXT_EVENT), \
        TRACE_SYMBOL(EC_CMD_KEYBOARD_FACTORY_TEST), \
        TRACE_SYMBOL(EC_CMD_TEMP_SENSOR_GET_INFO), \
        TRACE_SYMBOL(EC_CMD_HOST_EVENT_GET_B), \
        TRACE_SYMBOL(EC_CMD_HOST_EVENT_GET_SMI_MASK), \
        TRACE_SYMBOL(EC_CMD_HOST_EVENT_GET_SCI_MASK), \
        TRACE_SYMBOL(EC_CMD_HOST_EVENT_GET_WAKE_MASK), \
        TRACE_SYMBOL(EC_CMD_HOST_EVENT_SET_SMI_MASK), \
        TRACE_SYMBOL(EC_CMD_HOST_EVENT_SET_SCI_MASK), \
        TRACE_SYMBOL(EC_CMD_HOST_EVENT_CLEAR), \
        TRACE_SYMBOL(EC_CMD_HOST_EVENT_SET_WAKE_MASK), \
        TRACE_SYMBOL(EC_CMD_HOST_EVENT_CLEAR_B), \
        TRACE_SYMBOL(EC_CMD_HOST_EVENT), \
        TRACE_SYMBOL(EC_CMD_SWITCH_ENABLE_BKLIGHT), \
        TRACE_SYMBOL(EC_CMD_SWITCH_ENABLE_WIRELESS), \
        TRACE_SYMBOL(EC_CMD_GPIO_SET), \
        TRACE_SYMBOL(EC_CMD_GPIO_GET), \
        TRACE_SYMBOL(EC_CMD_I2C_READ), \
        TRACE_SYMBOL(EC_CMD_I2C_WRITE), \
        TRACE_SYMBOL(EC_CMD_CHARGE_CONTROL), \
        TRACE_SYMBOL(EC_CMD_CONSOLE_SNAPSHOT), \
        TRACE_SYMBOL(EC_CMD_CONSOLE_READ), \
        TRACE_SYMBOL(EC_CMD_BATTERY_CUT_OFF), \
        TRACE_SYMBOL(EC_CMD_USB_MUX), \
        TRACE_SYMBOL(EC_CMD_LDO_SET), \
        TRACE_SYMBOL(EC_CMD_LDO_GET), \
        TRACE_SYMBOL(EC_CMD_POWER_INFO), \
        TRACE_SYMBOL(EC_CMD_I2C_PASSTHRU), \
        TRACE_SYMBOL(EC_CMD_HANG_DETECT), \
        TRACE_SYMBOL(EC_CMD_CHARGE_STATE), \
        TRACE_SYMBOL(EC_CMD_CHARGE_CURRENT_LIMIT), \
        TRACE_SYMBOL(EC_CMD_EXTERNAL_POWER_LIMIT), \
        TRACE_SYMBOL(EC_CMD_OVERRIDE_DEDICATED_CHARGER_LIMIT), \
        TRACE_SYMBOL(EC_CMD_HIBERNATION_DELAY), \
        TRACE_SYMBOL(EC_CMD_HOST_SLEEP_EVENT), \
        TRACE_SYMBOL(EC_CMD_DEVICE_EVENT), \
        TRACE_SYMBOL(EC_CMD_SB_READ_WORD), \
        TRACE_SYMBOL(EC_CMD_SB_WRITE_WORD), \
        TRACE_SYMBOL(EC_CMD_SB_READ_BLOCK), \
        TRACE_SYMBOL(EC_CMD_SB_WRITE_BLOCK), \
        TRACE_SYMBOL(EC_CMD_BATTERY_VENDOR_PARAM), \
        TRACE_SYMBOL(EC_CMD_SB_FW_UPDATE), \
        TRACE_SYMBOL(EC_CMD_ENTERING_MODE), \
        TRACE_SYMBOL(EC_CMD_I2C_PASSTHRU_PROTECT), \
        TRACE_SYMBOL(EC_CMD_CEC_WRITE_MSG), \
        TRACE_SYMBOL(EC_CMD_CEC_SET), \
        TRACE_SYMBOL(EC_CMD_CEC_GET), \
        TRACE_SYMBOL(EC_CMD_EC_CODEC), \
        TRACE_SYMBOL(EC_CMD_EC_CODEC_DMIC), \
        TRACE_SYMBOL(EC_CMD_EC_CODEC_I2S_RX), \
        TRACE_SYMBOL(EC_CMD_EC_CODEC_WOV), \
        TRACE_SYMBOL(EC_CMD_REBOOT_EC), \
        TRACE_SYMBOL(EC_CMD_GET_PANIC_INFO), \
        TRACE_SYMBOL(EC_CMD_REBOOT), \
        TRACE_SYMBOL(EC_CMD_RESEND_RESPONSE), \
        TRACE_SYMBOL(EC_CMD_VERSION0), \
        TRACE_SYMBOL(EC_CMD_PD_EXCHANGE_STATUS), \
        TRACE_SYMBOL(EC_CMD_PD_HOST_EVENT_STATUS), \
        TRACE_SYMBOL(EC_CMD_USB_PD_CONTROL), \
        TRACE_SYMBOL(EC_CMD_USB_PD_PORTS), \
        TRACE_SYMBOL(EC_CMD_USB_PD_POWER_INFO), \
        TRACE_SYMBOL(EC_CMD_CHARGE_PORT_COUNT), \
        TRACE_SYMBOL(EC_CMD_USB_PD_FW_UPDATE), \
        TRACE_SYMBOL(EC_CMD_USB_PD_RW_HASH_ENTRY), \
        TRACE_SYMBOL(EC_CMD_USB_PD_DEV_INFO), \
        TRACE_SYMBOL(EC_CMD_USB_PD_DISCOVERY), \
        TRACE_SYMBOL(EC_CMD_PD_CHARGE_PORT_OVERRIDE), \
        TRACE_SYMBOL(EC_CMD_PD_GET_LOG_ENTRY), \
        TRACE_SYMBOL(EC_CMD_USB_PD_GET_AMODE), \
        TRACE_SYMBOL(EC_CMD_USB_PD_SET_AMODE), \
        TRACE_SYMBOL(EC_CMD_PD_WRITE_LOG_ENTRY), \
        TRACE_SYMBOL(EC_CMD_PD_CONTROL), \
        TRACE_SYMBOL(EC_CMD_USB_PD_MUX_INFO), \
        TRACE_SYMBOL(EC_CMD_PD_CHIP_INFO), \
        TRACE_SYMBOL(EC_CMD_RWSIG_CHECK_STATUS), \
        TRACE_SYMBOL(EC_CMD_RWSIG_ACTION), \
        TRACE_SYMBOL(EC_CMD_EFS_VERIFY), \
        TRACE_SYMBOL(EC_CMD_GET_CROS_BOARD_INFO), \
        TRACE_SYMBOL(EC_CMD_SET_CROS_BOARD_INFO), \
        TRACE_SYMBOL(EC_CMD_GET_UPTIME_INFO), \
        TRACE_SYMBOL(EC_CMD_ADD_ENTROPY), \
        TRACE_SYMBOL(EC_CMD_ADC_READ), \
        TRACE_SYMBOL(EC_CMD_ROLLBACK_INFO), \
        TRACE_SYMBOL(EC_CMD_AP_RESET), \
        TRACE_SYMBOL(EC_CMD_REGULATOR_GET_INFO), \
        TRACE_SYMBOL(EC_CMD_REGULATOR_ENABLE), \
        TRACE_SYMBOL(EC_CMD_REGULATOR_IS_ENABLED), \
        TRACE_SYMBOL(EC_CMD_REGULATOR_SET_VOLTAGE), \
        TRACE_SYMBOL(EC_CMD_REGULATOR_GET_VOLTAGE), \
        TRACE_SYMBOL(EC_CMD_CR51_BASE), \
        TRACE_SYMBOL(EC_CMD_CR51_LAST), \
        TRACE_SYMBOL(EC_CMD_FP_PASSTHRU), \
        TRACE_SYMBOL(EC_CMD_FP_MODE), \
        TRACE_SYMBOL(EC_CMD_FP_INFO), \
        TRACE_SYMBOL(EC_CMD_FP_FRAME), \
        TRACE_SYMBOL(EC_CMD_FP_TEMPLATE), \
        TRACE_SYMBOL(EC_CMD_FP_CONTEXT), \
        TRACE_SYMBOL(EC_CMD_FP_STATS), \
        TRACE_SYMBOL(EC_CMD_FP_SEED), \
        TRACE_SYMBOL(EC_CMD_FP_ENC_STATUS), \
        TRACE_SYMBOL(EC_CMD_TP_SELF_TEST), \
        TRACE_SYMBOL(EC_CMD_TP_FRAME_INFO), \
        TRACE_SYMBOL(EC_CMD_TP_FRAME_SNAPSHOT), \
        TRACE_SYMBOL(EC_CMD_TP_FRAME_GET), \
        TRACE_SYMBOL(EC_CMD_BATTERY_GET_STATIC), \
        TRACE_SYMBOL(EC_CMD_BATTERY_GET_DYNAMIC), \
        TRACE_SYMBOL(EC_CMD_CHARGER_CONTROL), \
        TRACE_SYMBOL(EC_CMD_BOARD_SPECIFIC_BASE), \
        TRACE_SYMBOL(EC_CMD_BOARD_SPECIFIC_LAST)
/* See the enum ec_status in include/linux/platform_data/cros_ec_commands.h */
/* Symbolic names for EC result codes (used by __print_symbolic). */
#define EC_RESULT \
        TRACE_SYMBOL(EC_RES_SUCCESS), \
        TRACE_SYMBOL(EC_RES_INVALID_COMMAND), \
        TRACE_SYMBOL(EC_RES_ERROR), \
        TRACE_SYMBOL(EC_RES_INVALID_PARAM), \
        TRACE_SYMBOL(EC_RES_ACCESS_DENIED), \
        TRACE_SYMBOL(EC_RES_INVALID_RESPONSE), \
        TRACE_SYMBOL(EC_RES_INVALID_VERSION), \
        TRACE_SYMBOL(EC_RES_INVALID_CHECKSUM), \
        TRACE_SYMBOL(EC_RES_IN_PROGRESS), \
        TRACE_SYMBOL(EC_RES_UNAVAILABLE), \
        TRACE_SYMBOL(EC_RES_TIMEOUT), \
        TRACE_SYMBOL(EC_RES_OVERFLOW), \
        TRACE_SYMBOL(EC_RES_INVALID_HEADER), \
        TRACE_SYMBOL(EC_RES_REQUEST_TRUNCATED), \
        TRACE_SYMBOL(EC_RES_RESPONSE_TOO_BIG), \
        TRACE_SYMBOL(EC_RES_BUS_ERROR), \
        TRACE_SYMBOL(EC_RES_BUSY), \
        TRACE_SYMBOL(EC_RES_INVALID_HEADER_VERSION), \
        TRACE_SYMBOL(EC_RES_INVALID_HEADER_CRC), \
        TRACE_SYMBOL(EC_RES_INVALID_DATA_CRC), \
        TRACE_SYMBOL(EC_RES_DUP_UNAVAILABLE)
#define CREATE_TRACE_POINTS
#include "cros_ec_trace.h"
| linux-master | drivers/platform/chrome/cros_ec_trace.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2020 Google LLC
*
* This driver serves as the receiver of cros_ec PD host events.
*/
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_data/cros_usbpd_notify.h>
#include <linux/platform_device.h>
#define DRV_NAME "cros-usbpd-notify"
#define DRV_NAME_PLAT_ACPI "cros-usbpd-notify-acpi"
#define ACPI_DRV_NAME "GOOG0003"
static BLOCKING_NOTIFIER_HEAD(cros_usbpd_notifier_list);
/* Per-device state for the PD-event notifier. */
struct cros_usbpd_notify_data {
        struct device *dev;             /* this platform device */
        struct cros_ec_device *ec;      /* EC used to query PD status; may be NULL */
        struct notifier_block nb;       /* hook into the EC's event_notifier chain */
};
/**
 * cros_usbpd_register_notify - Register a notifier callback for PD events.
 * @nb: Notifier block pointer to register
 *
 * On ACPI platforms this corresponds to host events on the ECPD
 * "GOOG0003" ACPI device. On non-ACPI platforms this will filter mkbp events
 * for USB PD events.
 *
 * Return: 0 on success or negative error code.
 */
int cros_usbpd_register_notify(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&cros_usbpd_notifier_list,
                                                nb);
}
EXPORT_SYMBOL_GPL(cros_usbpd_register_notify);
/**
 * cros_usbpd_unregister_notify - Unregister notifier callback for PD events.
 * @nb: Notifier block pointer to unregister
 *
 * Unregister a notifier callback that was previously registered with
 * cros_usbpd_register_notify().
 */
void cros_usbpd_unregister_notify(struct notifier_block *nb)
{
        blocking_notifier_chain_unregister(&cros_usbpd_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(cros_usbpd_unregister_notify);
/*
 * Query the EC for its PD host-event status and broadcast it on the
 * cros_usbpd notifier chain.  If the EC is unreachable (or absent on
 * older device hierarchies) a 0 status is broadcast instead, so
 * listeners still get woken.
 */
static void cros_usbpd_get_event_and_notify(struct device *dev,
                                            struct cros_ec_device *ec_dev)
{
        struct ec_response_host_event_status host_event_status;
        u32 event = 0;
        int ret;

        /*
         * We still send a 0 event out to older devices which don't
         * have the updated device heirarchy.
         */
        if (!ec_dev) {
                dev_dbg(dev,
                        "EC device inaccessible; sending 0 event status.\n");
                goto send_notify;
        }

        /* Check for PD host events on EC. */
        ret = cros_ec_cmd(ec_dev, 0, EC_CMD_PD_HOST_EVENT_STATUS,
                          NULL, 0, &host_event_status, sizeof(host_event_status));
        if (ret < 0) {
                dev_warn(dev, "Can't get host event status (err: %d)\n", ret);
                goto send_notify;
        }

        event = host_event_status.status;

send_notify:
        blocking_notifier_call_chain(&cros_usbpd_notifier_list, event, NULL);
}
#ifdef CONFIG_ACPI
/* ACPI notify handler: any GOOG0003 notification triggers a PD status query. */
static void cros_usbpd_notify_acpi(acpi_handle device, u32 event, void *data)
{
        struct cros_usbpd_notify_data *pdnotify = data;

        cros_usbpd_get_event_and_notify(pdnotify->dev, pdnotify->ec);
}
static int cros_usbpd_notify_probe_acpi(struct platform_device *pdev)
{
struct cros_usbpd_notify_data *pdnotify;
struct device *dev = &pdev->dev;
struct acpi_device *adev;
struct cros_ec_device *ec_dev;
acpi_status status;
adev = ACPI_COMPANION(dev);
pdnotify = devm_kzalloc(dev, sizeof(*pdnotify), GFP_KERNEL);
if (!pdnotify)
return -ENOMEM;
/* Get the EC device pointer needed to talk to the EC. */
ec_dev = dev_get_drvdata(dev->parent);
if (!ec_dev) {
/*
* We continue even for older devices which don't have the
* correct device heirarchy, namely, GOOG0003 is a child
* of GOOG0004.
*/
dev_warn(dev, "Couldn't get Chrome EC device pointer.\n");
}
pdnotify->dev = dev;
pdnotify->ec = ec_dev;
status = acpi_install_notify_handler(adev->handle,
ACPI_ALL_NOTIFY,
cros_usbpd_notify_acpi,
pdnotify);
if (ACPI_FAILURE(status)) {
dev_warn(dev, "Failed to register notify handler %08x\n",
status);
return -EINVAL;
}
return 0;
}
/* Undo probe: detach the ACPI notify handler from the companion device. */
static int cros_usbpd_notify_remove_acpi(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct acpi_device *adev = ACPI_COMPANION(dev);

        acpi_remove_notify_handler(adev->handle, ACPI_ALL_NOTIFY,
                                   cros_usbpd_notify_acpi);

        return 0;
}
/* Bind to the ECPD "GOOG0003" ACPI device. */
static const struct acpi_device_id cros_usbpd_notify_acpi_device_ids[] = {
        { ACPI_DRV_NAME, 0 },
        { }
};
MODULE_DEVICE_TABLE(acpi, cros_usbpd_notify_acpi_device_ids);

static struct platform_driver cros_usbpd_notify_acpi_driver = {
        .driver = {
                .name = DRV_NAME_PLAT_ACPI,
                .acpi_match_table = cros_usbpd_notify_acpi_device_ids,
        },
        .probe = cros_usbpd_notify_probe_acpi,
        .remove = cros_usbpd_notify_remove_acpi,
};
#endif /* CONFIG_ACPI */
/*
 * MKBP event filter for non-ACPI platforms: forward only host events
 * carrying a PD MCU or USB mux bit; everything else (including spurious
 * zero events) is left for other listeners.
 */
static int cros_usbpd_notify_plat(struct notifier_block *nb,
                                  unsigned long queued_during_suspend,
                                  void *data)
{
        struct cros_usbpd_notify_data *pdnotify =
                container_of(nb, struct cros_usbpd_notify_data, nb);
        struct cros_ec_device *ec_dev = data;
        const u32 pd_mask = EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU) |
                            EC_HOST_EVENT_MASK(EC_HOST_EVENT_USB_MUX);
        u32 host_event = cros_ec_get_host_event(ec_dev);

        if (!(host_event & pd_mask))
                return NOTIFY_DONE;

        cros_usbpd_get_event_and_notify(pdnotify->dev, ec_dev);
        return NOTIFY_OK;
}
/*
 * Probe for the non-ACPI flavour: hook our filter into the parent EC's
 * MKBP event notifier chain.
 */
static int cros_usbpd_notify_probe_plat(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct cros_ec_dev *ecdev = dev_get_drvdata(dev->parent);
        struct cros_usbpd_notify_data *pdnotify;
        int ret;

        pdnotify = devm_kzalloc(dev, sizeof(*pdnotify), GFP_KERNEL);
        if (!pdnotify)
                return -ENOMEM;

        pdnotify->dev = dev;
        pdnotify->ec = ecdev->ec_dev;
        pdnotify->nb.notifier_call = cros_usbpd_notify_plat;

        dev_set_drvdata(dev, pdnotify);

        ret = blocking_notifier_chain_register(&ecdev->ec_dev->event_notifier,
                                               &pdnotify->nb);
        if (ret < 0) {
                dev_err(dev, "Failed to register notifier\n");
                return ret;
        }

        return 0;
}
/* Undo probe: unhook our filter from the EC's MKBP event notifier chain. */
static int cros_usbpd_notify_remove_plat(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct cros_ec_dev *ecdev = dev_get_drvdata(dev->parent);
        struct cros_usbpd_notify_data *pdnotify = dev_get_drvdata(dev);

        blocking_notifier_chain_unregister(&ecdev->ec_dev->event_notifier,
                                           &pdnotify->nb);

        return 0;
}
/* Non-ACPI platform driver; the device is instantiated by the cros_ec core. */
static struct platform_driver cros_usbpd_notify_plat_driver = {
        .driver = {
                .name = DRV_NAME,
        },
        .probe = cros_usbpd_notify_probe_plat,
        .remove = cros_usbpd_notify_remove_plat,
};
/*
 * Register both flavours; if the ACPI variant fails, unwind the platform
 * one so the module loads all-or-nothing.
 */
static int __init cros_usbpd_notify_init(void)
{
        int ret;

        ret = platform_driver_register(&cros_usbpd_notify_plat_driver);
        if (ret < 0)
                return ret;

#ifdef CONFIG_ACPI
        ret = platform_driver_register(&cros_usbpd_notify_acpi_driver);
        if (ret) {
                platform_driver_unregister(&cros_usbpd_notify_plat_driver);
                return ret;
        }
#endif
        return 0;
}
/* Unregister both flavours in reverse registration order. */
static void __exit cros_usbpd_notify_exit(void)
{
#ifdef CONFIG_ACPI
        platform_driver_unregister(&cros_usbpd_notify_acpi_driver);
#endif
        platform_driver_unregister(&cros_usbpd_notify_plat_driver);
}

module_init(cros_usbpd_notify_init);
module_exit(cros_usbpd_notify_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ChromeOS power delivery notifier device");
MODULE_AUTHOR("Jon Flatley <[email protected]>");
MODULE_ALIAS("platform:" DRV_NAME);
| linux-master | drivers/platform/chrome/cros_usbpd_notify.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2020 Google LLC
*
* This driver provides the ability to view and manage Type C ports through the
* Chrome OS EC.
*/
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_usbpd_notify.h>
#include <linux/platform_device.h>
#include <linux/usb/pd_vdo.h>
#include <linux/usb/typec_dp.h>
#include <linux/usb/typec_tbt.h>
#include "cros_ec_typec.h"
#include "cros_typec_vdm.h"
#define DRV_NAME "cros-ec-typec"

/*
 * DisplayPort alt-mode VDO advertised for the port: pin assignments C and D,
 * DFP_D capable, receptacle connector.
 */
#define DP_PORT_VDO (DP_CONF_SET_PIN_ASSIGN(BIT(DP_PIN_ASSIGN_C) | BIT(DP_PIN_ASSIGN_D)) | \
                     DP_CAP_DFP_D | DP_CAP_RECEPTACLE)
/*
 * Fill @cap from the firmware node describing one Type-C port.
 *
 * The "power-role" and "data-role" string properties are mandatory;
 * "try-power-role" is optional and defaults to no preferred role.
 *
 * Returns 0 on success or a negative errno for a missing or
 * unrecognized mandatory property.
 */
static int cros_typec_parse_port_props(struct typec_capability *cap,
                                       struct fwnode_handle *fwnode,
                                       struct device *dev)
{
        const char *buf;
        int ret;

        memset(cap, 0, sizeof(*cap));
        ret = fwnode_property_read_string(fwnode, "power-role", &buf);
        if (ret) {
                dev_err(dev, "power-role not found: %d\n", ret);
                return ret;
        }

        ret = typec_find_port_power_role(buf);
        if (ret < 0)
                return ret;
        cap->type = ret;

        ret = fwnode_property_read_string(fwnode, "data-role", &buf);
        if (ret) {
                dev_err(dev, "data-role not found: %d\n", ret);
                return ret;
        }

        ret = typec_find_port_data_role(buf);
        if (ret < 0)
                return ret;
        cap->data = ret;

        /* Try-power-role is optional. */
        ret = fwnode_property_read_string(fwnode, "try-power-role", &buf);
        if (ret) {
                dev_warn(dev, "try-power-role not found: %d\n", ret);
                cap->prefer_role = TYPEC_NO_PREFERRED_ROLE;
        } else {
                ret = typec_find_power_role(buf);
                if (ret < 0)
                        return ret;
                cap->prefer_role = ret;
        }

        cap->fwnode = fwnode;

        return 0;
}
/*
 * Acquire the mux, retimer, orientation switch and USB role switch tied
 * to this port's fwnode.  All four are required; on any lookup failure
 * the handles acquired so far are released in reverse order (and NULLed)
 * and the error is returned.
 */
static int cros_typec_get_switch_handles(struct cros_typec_port *port,
                                         struct fwnode_handle *fwnode,
                                         struct device *dev)
{
        int ret = 0;

        port->mux = fwnode_typec_mux_get(fwnode);
        if (IS_ERR(port->mux)) {
                ret = PTR_ERR(port->mux);
                dev_dbg(dev, "Mux handle not found: %d.\n", ret);
                goto mux_err;
        }

        port->retimer = fwnode_typec_retimer_get(fwnode);
        if (IS_ERR(port->retimer)) {
                ret = PTR_ERR(port->retimer);
                dev_dbg(dev, "Retimer handle not found: %d.\n", ret);
                goto retimer_sw_err;
        }

        port->ori_sw = fwnode_typec_switch_get(fwnode);
        if (IS_ERR(port->ori_sw)) {
                ret = PTR_ERR(port->ori_sw);
                dev_dbg(dev, "Orientation switch handle not found: %d\n", ret);
                goto ori_sw_err;
        }

        port->role_sw = fwnode_usb_role_switch_get(fwnode);
        if (IS_ERR(port->role_sw)) {
                ret = PTR_ERR(port->role_sw);
                dev_dbg(dev, "USB role switch handle not found: %d\n", ret);
                goto role_sw_err;
        }

        return 0;

role_sw_err:
        typec_switch_put(port->ori_sw);
        port->ori_sw = NULL;
ori_sw_err:
        typec_retimer_put(port->retimer);
        port->retimer = NULL;
retimer_sw_err:
        typec_mux_put(port->mux);
        port->mux = NULL;
mux_err:
        return ret;
}
/*
 * Register a Type-C partner device for @port_num.
 * @pd_en: whether the partner is USB PD capable.
 * Returns 0 on success, negative error code otherwise (port->partner is
 * left NULL on failure).
 */
static int cros_typec_add_partner(struct cros_typec_data *typec, int port_num,
				  bool pd_en)
{
	struct cros_typec_port *port = typec->ports[port_num];
	struct typec_partner *partner;
	struct typec_partner_desc desc = {
		.usb_pd = pd_en,
		/*
		 * Point at an initial (zeroed) PD identity, which will then be
		 * updated with info from the EC once discovery completes.
		 */
		.identity = &port->p_identity,
	};

	partner = typec_register_partner(port->port, &desc);
	if (IS_ERR(partner)) {
		port->partner = NULL;
		return PTR_ERR(partner);
	}

	port->partner = partner;
	return 0;
}
/*
 * Drain and unregister every alt mode on the partner (@is_partner == true)
 * or cable-plug list of @port_num, freeing the tracking nodes.
 */
static void cros_typec_unregister_altmodes(struct cros_typec_data *typec, int port_num,
					   bool is_partner)
{
	struct cros_typec_port *port = typec->ports[port_num];
	struct cros_typec_altmode_node *pos, *next;
	struct list_head *modes;

	modes = is_partner ? &port->partner_mode_list : &port->plug_mode_list;

	list_for_each_entry_safe(pos, next, modes, list) {
		list_del(&pos->list);
		typec_unregister_altmode(pos->amode);
		devm_kfree(typec->dev, pos);
	}
}
/*
 * Map the Type-C mux state to retimer state and call the retimer set function.
 * We need this because the driver re-uses the Type-C mux state for retimers.
 */
static int cros_typec_retimer_set(struct typec_retimer *retimer, struct typec_mux_state state)
{
	struct typec_retimer_state rstate = { };

	/* Field-for-field translation; the structs carry the same members. */
	rstate.alt = state.alt;
	rstate.mode = state.mode;
	rstate.data = state.data;

	return typec_retimer_set(retimer, &rstate);
}
/*
 * Drive the port back to the disconnected state: plain USB mode, no alt
 * mode, no data, role and orientation cleared. Only the final mux write's
 * result is returned; role/orientation/retimer errors are ignored.
 */
static int cros_typec_usb_disconnect_state(struct cros_typec_port *port)
{
	port->state.alt = NULL;
	port->state.mode = TYPEC_STATE_USB;
	port->state.data = NULL;

	usb_role_switch_set_role(port->role_sw, USB_ROLE_NONE);
	typec_switch_set(port->ori_sw, TYPEC_ORIENTATION_NONE);
	cros_typec_retimer_set(port->retimer, port->state);

	return typec_mux_set(port->mux, &port->state);
}
/*
 * Tear down everything registered for the currently attached partner on
 * @port_num: alt modes, PD capability objects, the partner device itself,
 * and the cached identity/discovery state. Also resets the mux back to the
 * disconnected state. No-op if no partner is registered.
 */
static void cros_typec_remove_partner(struct cros_typec_data *typec,
				      int port_num)
{
	struct cros_typec_port *port = typec->ports[port_num];

	if (!port->partner)
		return;

	cros_typec_unregister_altmodes(typec, port_num, true);

	/* Detach and drop the partner's power-delivery objects first. */
	typec_partner_set_usb_power_delivery(port->partner, NULL);
	usb_power_delivery_unregister_capabilities(port->partner_sink_caps);
	port->partner_sink_caps = NULL;
	usb_power_delivery_unregister_capabilities(port->partner_src_caps);
	port->partner_src_caps = NULL;
	usb_power_delivery_unregister(port->partner_pd);
	port->partner_pd = NULL;

	cros_typec_usb_disconnect_state(port);
	port->mux_flags = USB_PD_MUX_NONE;

	typec_unregister_partner(port->partner);
	port->partner = NULL;

	/* Clear cached identity so a new attach restarts discovery cleanly. */
	memset(&port->p_identity, 0, sizeof(port->p_identity));
	port->sop_disc_done = false;
}
/*
 * Tear down the registered cable on @port_num: plug alt modes, the SOP'
 * plug, the cable device, and the cached cable identity. The plug must be
 * unregistered before the cable it hangs off. No-op if no cable exists.
 */
static void cros_typec_remove_cable(struct cros_typec_data *typec,
				    int port_num)
{
	struct cros_typec_port *port = typec->ports[port_num];

	if (!port->cable)
		return;

	cros_typec_unregister_altmodes(typec, port_num, false);

	typec_unregister_plug(port->plug);
	port->plug = NULL;
	typec_unregister_cable(port->cable);
	port->cable = NULL;

	memset(&port->c_identity, 0, sizeof(port->c_identity));
	port->sop_prime_disc_done = false;
}
/* Unregister every port-registered alt mode (DP, TBT, ...). */
static void cros_typec_unregister_port_altmodes(struct cros_typec_port *port)
{
	unsigned int idx;

	for (idx = 0; idx < CROS_EC_ALTMODE_MAX; idx++)
		typec_unregister_altmode(port->port_altmode[idx]);
}
/*
 * Tear down every initialized port: remove any partner/cable state, release
 * the switch/mux/retimer handles taken in cros_typec_get_switch_handles()
 * (in reverse acquisition order), then unregister alt modes and the Type-C
 * port itself. Skips slots that were never populated.
 */
static void cros_unregister_ports(struct cros_typec_data *typec)
{
	int i;

	for (i = 0; i < typec->num_ports; i++) {
		if (!typec->ports[i])
			continue;
		cros_typec_remove_partner(typec, i);
		cros_typec_remove_cable(typec, i);
		usb_role_switch_put(typec->ports[i]->role_sw);
		typec_switch_put(typec->ports[i]->ori_sw);
		/*
		 * Fix: the retimer reference acquired via
		 * fwnode_typec_retimer_get() was previously never released
		 * here, leaking it on teardown. typec_retimer_put() is
		 * NULL-safe, so this is fine for ports without a retimer.
		 */
		typec_retimer_put(typec->ports[i]->retimer);
		typec_mux_put(typec->ports[i]->mux);
		cros_typec_unregister_port_altmodes(typec->ports[i]);
		typec_unregister_port(typec->ports[i]->port);
	}
}
/*
* Register port alt modes with known values till we start retrieving
* port capabilities from the EC.
*/
static int cros_typec_register_port_altmodes(struct cros_typec_data *typec,
int port_num)
{
struct cros_typec_port *port = typec->ports[port_num];
struct typec_altmode_desc desc;
struct typec_altmode *amode;
/* All PD capable CrOS devices are assumed to support DP altmode. */
desc.svid = USB_TYPEC_DP_SID,
desc.mode = USB_TYPEC_DP_MODE,
desc.vdo = DP_PORT_VDO,
amode = typec_port_register_altmode(port->port, &desc);
if (IS_ERR(amode))
return PTR_ERR(amode);
port->port_altmode[CROS_EC_ALTMODE_DP] = amode;
typec_altmode_set_drvdata(amode, port);
amode->ops = &port_amode_ops;
/*
* Register TBT compatibility alt mode. The EC will not enter the mode
* if it doesn't support it, so it's safe to register it unconditionally
* here for now.
*/
memset(&desc, 0, sizeof(desc));
desc.svid = USB_TYPEC_TBT_SID,
desc.mode = TYPEC_ANY_MODE,
amode = typec_port_register_altmode(port->port, &desc);
if (IS_ERR(amode))
return PTR_ERR(amode);
port->port_altmode[CROS_EC_ALTMODE_TBT] = amode;
typec_altmode_set_drvdata(amode, port);
amode->ops = &port_amode_ops;
port->state.alt = NULL;
port->state.mode = TYPEC_STATE_USB;
port->state.data = NULL;
return 0;
}
static int cros_typec_init_ports(struct cros_typec_data *typec)
{
struct device *dev = typec->dev;
struct typec_capability *cap;
struct fwnode_handle *fwnode;
struct cros_typec_port *cros_port;
const char *port_prop;
int ret;
int nports;
u32 port_num = 0;
nports = device_get_child_node_count(dev);
if (nports == 0) {
dev_err(dev, "No port entries found.\n");
return -ENODEV;
}
if (nports > typec->num_ports) {
dev_err(dev, "More ports listed than can be supported.\n");
return -EINVAL;
}
/* DT uses "reg" to specify port number. */
port_prop = dev->of_node ? "reg" : "port-number";
device_for_each_child_node(dev, fwnode) {
if (fwnode_property_read_u32(fwnode, port_prop, &port_num)) {
ret = -EINVAL;
dev_err(dev, "No port-number for port, aborting.\n");
goto unregister_ports;
}
if (port_num >= typec->num_ports) {
dev_err(dev, "Invalid port number.\n");
ret = -EINVAL;
goto unregister_ports;
}
dev_dbg(dev, "Registering port %d\n", port_num);
cros_port = devm_kzalloc(dev, sizeof(*cros_port), GFP_KERNEL);
if (!cros_port) {
ret = -ENOMEM;
goto unregister_ports;
}
cros_port->port_num = port_num;
cros_port->typec_data = typec;
typec->ports[port_num] = cros_port;
cap = &cros_port->caps;
ret = cros_typec_parse_port_props(cap, fwnode, dev);
if (ret < 0)
goto unregister_ports;
cros_port->port = typec_register_port(dev, cap);
if (IS_ERR(cros_port->port)) {
ret = PTR_ERR(cros_port->port);
dev_err_probe(dev, ret, "Failed to register port %d\n", port_num);
goto unregister_ports;
}
ret = cros_typec_get_switch_handles(cros_port, fwnode, dev);
if (ret) {
dev_dbg(dev, "No switch control for port %d, err: %d\n", port_num, ret);
if (ret == -EPROBE_DEFER)
goto unregister_ports;
}
ret = cros_typec_register_port_altmodes(typec, port_num);
if (ret) {
dev_err(dev, "Failed to register port altmodes\n");
goto unregister_ports;
}
cros_port->disc_data = devm_kzalloc(dev, EC_PROTO2_MAX_RESPONSE_SIZE, GFP_KERNEL);
if (!cros_port->disc_data) {
ret = -ENOMEM;
goto unregister_ports;
}
INIT_LIST_HEAD(&cros_port->partner_mode_list);
INIT_LIST_HEAD(&cros_port->plug_mode_list);
}
return 0;
unregister_ports:
cros_unregister_ports(typec);
return ret;
}
/*
 * Program the Type-C "Safe" state: retimer first, then the mux.
 * The mux is only written if the retimer write succeeded.
 */
static int cros_typec_usb_safe_state(struct cros_typec_port *port)
{
	int ret;

	port->state.mode = TYPEC_STATE_SAFE;

	ret = cros_typec_retimer_set(port->retimer, port->state);
	if (ret)
		return ret;

	return typec_mux_set(port->mux, &port->state);
}
/**
 * cros_typec_get_cable_vdo() - Get Cable VDO of the connected cable
 * @port: Type-C port data
 * @svid: Standard or Vendor ID to match
 *
 * Returns the Cable VDO if match is found and returns 0 if match is not found.
 */
static int cros_typec_get_cable_vdo(struct cros_typec_port *port, u16 svid)
{
	struct cros_typec_altmode_node *node;

	/* Scan the cable plug's registered alt modes for a matching SVID. */
	list_for_each_entry(node, &port->plug_mode_list, list) {
		if (node->amode->svid == svid)
			return node->amode->vdo;
	}

	return 0;
}
/*
 * Spoof the VDOs that were likely communicated by the partner for TBT alt
 * mode.
 */
static int cros_typec_enable_tbt(struct cros_typec_data *typec,
				 int port_num,
				 struct ec_response_usb_pd_control_v2 *pd_ctrl)
{
	struct cros_typec_port *port = typec->ports[port_num];
	struct typec_thunderbolt_data data;
	int ret;

	/* The cable/control flags used below only exist in v2 responses. */
	if (typec->pd_ctrl_ver < 2) {
		dev_err(typec->dev,
			"PD_CTRL version too old: %d\n", typec->pd_ctrl_ver);
		return -ENOTSUPP;
	}

	/* Device Discover Mode VDO */
	data.device_mode = TBT_MODE;

	if (pd_ctrl->control_flags & USB_PD_CTRL_TBT_LEGACY_ADAPTER)
		data.device_mode = TBT_SET_ADAPTER(TBT_ADAPTER_TBT3);

	/* Cable Discover Mode VDO */
	data.cable_mode = TBT_MODE;

	data.cable_mode |= cros_typec_get_cable_vdo(port, USB_TYPEC_TBT_SID);

	data.cable_mode |= TBT_SET_CABLE_SPEED(pd_ctrl->cable_speed);

	if (pd_ctrl->control_flags & USB_PD_CTRL_OPTICAL_CABLE)
		data.cable_mode |= TBT_CABLE_OPTICAL;

	if (pd_ctrl->control_flags & USB_PD_CTRL_ACTIVE_LINK_UNIDIR)
		data.cable_mode |= TBT_CABLE_LINK_TRAINING;

	data.cable_mode |= TBT_SET_CABLE_ROUNDED(pd_ctrl->cable_gen);

	/* Enter Mode VDO */
	data.enter_vdo = TBT_SET_CABLE_SPEED(pd_ctrl->cable_speed);

	if (pd_ctrl->control_flags & USB_PD_CTRL_ACTIVE_CABLE)
		data.enter_vdo |= TBT_ENTER_MODE_ACTIVE_CABLE;

	/* First entry into the alt mode goes through the Safe state. */
	if (!port->state.alt) {
		port->state.alt = port->port_altmode[CROS_EC_ALTMODE_TBT];
		ret = cros_typec_usb_safe_state(port);
		if (ret)
			return ret;
	}

	/*
	 * NOTE(review): port->state.data points at stack-local @data here and
	 * remains set after this function returns — presumably consumers copy
	 * it during typec_mux_set(); confirm nothing dereferences it later.
	 */
	port->state.data = &data;
	port->state.mode = TYPEC_TBT_MODE;

	return typec_mux_set(port->mux, &port->state);
}
/* Spoof the VDOs that were likely communicated by the partner. */
static int cros_typec_enable_dp(struct cros_typec_data *typec,
				int port_num,
				struct ec_response_usb_pd_control_v2 *pd_ctrl)
{
	struct cros_typec_port *port = typec->ports[port_num];
	struct typec_displayport_data dp_data;
	int ret;

	/* dp_mode is only present in v2 responses. */
	if (typec->pd_ctrl_ver < 2) {
		dev_err(typec->dev,
			"PD_CTRL version too old: %d\n", typec->pd_ctrl_ver);
		return -ENOTSUPP;
	}

	if (!pd_ctrl->dp_mode) {
		dev_err(typec->dev, "No valid DP mode provided.\n");
		return -EINVAL;
	}

	/* Status VDO. */
	dp_data.status = DP_STATUS_ENABLED;
	if (port->mux_flags & USB_PD_MUX_HPD_IRQ)
		dp_data.status |= DP_STATUS_IRQ_HPD;
	if (port->mux_flags & USB_PD_MUX_HPD_LVL)
		dp_data.status |= DP_STATUS_HPD_STATE;

	/* Configuration VDO. */
	dp_data.conf = DP_CONF_SET_PIN_ASSIGN(pd_ctrl->dp_mode);
	/* First entry into the alt mode goes through the Safe state. */
	if (!port->state.alt) {
		port->state.alt = port->port_altmode[CROS_EC_ALTMODE_DP];
		ret = cros_typec_usb_safe_state(port);
		if (ret)
			return ret;
	}

	/*
	 * NOTE(review): dp_data is stack-local but stays referenced by
	 * port->state.data after return; confirm consumers only use it
	 * within the retimer/mux set calls below.
	 */
	port->state.data = &dp_data;
	/* dp_mode is a pin-assignment bitmask; ffs() picks the lowest set one. */
	port->state.mode = TYPEC_MODAL_STATE(ffs(pd_ctrl->dp_mode));

	ret = cros_typec_retimer_set(port->retimer, port->state);
	if (!ret)
		ret = typec_mux_set(port->mux, &port->state);

	return ret;
}
/*
 * Build an Enter_USB Data Object (EUDO) from the EC's PD control response
 * and program the mux into USB4 mode.
 */
static int cros_typec_enable_usb4(struct cros_typec_data *typec,
				  int port_num,
				  struct ec_response_usb_pd_control_v2 *pd_ctrl)
{
	struct cros_typec_port *port = typec->ports[port_num];
	struct enter_usb_data data;

	data.eudo = EUDO_USB_MODE_USB4 << EUDO_USB_MODE_SHIFT;

	/* Cable Speed */
	data.eudo |= pd_ctrl->cable_speed << EUDO_CABLE_SPEED_SHIFT;

	/* Cable Type */
	if (pd_ctrl->control_flags & USB_PD_CTRL_OPTICAL_CABLE)
		data.eudo |= EUDO_CABLE_TYPE_OPTICAL << EUDO_CABLE_TYPE_SHIFT;
	else if (cros_typec_get_cable_vdo(port, USB_TYPEC_TBT_SID) & TBT_CABLE_RETIMER)
		data.eudo |= EUDO_CABLE_TYPE_RE_TIMER << EUDO_CABLE_TYPE_SHIFT;
	else if (pd_ctrl->control_flags & USB_PD_CTRL_ACTIVE_CABLE)
		data.eudo |= EUDO_CABLE_TYPE_RE_DRIVER << EUDO_CABLE_TYPE_SHIFT;

	data.active_link_training = !!(pd_ctrl->control_flags &
				       USB_PD_CTRL_ACTIVE_LINK_UNIDIR);

	/*
	 * NOTE(review): @data is stack-local but port->state.data keeps
	 * pointing at it after return — same pattern as the TBT/DP paths.
	 */
	port->state.alt = NULL;
	port->state.data = &data;
	port->state.mode = TYPEC_MODE_USB4;

	return typec_mux_set(port->mux, &port->state);
}
/*
 * Query the EC for the port's current mux flags and program the local
 * switches/mux accordingly (USB4 > TBT > DP > Safe > USB priority order).
 * If the EC requires it, a mux-configuration ACK is sent back at the end.
 * Exits early with 0 when neither the mux flags nor the role changed.
 */
static int cros_typec_configure_mux(struct cros_typec_data *typec, int port_num,
				struct ec_response_usb_pd_control_v2 *pd_ctrl)
{
	struct cros_typec_port *port = typec->ports[port_num];
	struct ec_response_usb_pd_mux_info resp;
	struct ec_params_usb_pd_mux_info req = {
		.port = port_num,
	};
	struct ec_params_usb_pd_mux_ack mux_ack;
	enum typec_orientation orientation;
	int ret;

	ret = cros_ec_cmd(typec->ec, 0, EC_CMD_USB_PD_MUX_INFO,
			  &req, sizeof(req), &resp, sizeof(resp));
	if (ret < 0) {
		dev_warn(typec->dev, "Failed to get mux info for port: %d, err = %d\n",
			 port_num, ret);
		return ret;
	}

	/* No change needs to be made, let's exit early. */
	if (port->mux_flags == resp.flags && port->role == pd_ctrl->role)
		return 0;

	port->mux_flags = resp.flags;
	port->role = pd_ctrl->role;

	if (port->mux_flags == USB_PD_MUX_NONE) {
		ret = cros_typec_usb_disconnect_state(port);
		goto mux_ack;
	}

	if (port->mux_flags & USB_PD_MUX_POLARITY_INVERTED)
		orientation = TYPEC_ORIENTATION_REVERSE;
	else
		orientation = TYPEC_ORIENTATION_NORMAL;

	ret = typec_switch_set(port->ori_sw, orientation);
	if (ret)
		return ret;

	ret = usb_role_switch_set_role(typec->ports[port_num]->role_sw,
					pd_ctrl->role & PD_CTRL_RESP_ROLE_DATA
					? USB_ROLE_HOST : USB_ROLE_DEVICE);
	if (ret)
		return ret;

	/* Dispatch on the highest-priority mode bit the EC reported. */
	if (port->mux_flags & USB_PD_MUX_USB4_ENABLED) {
		ret = cros_typec_enable_usb4(typec, port_num, pd_ctrl);
	} else if (port->mux_flags & USB_PD_MUX_TBT_COMPAT_ENABLED) {
		ret = cros_typec_enable_tbt(typec, port_num, pd_ctrl);
	} else if (port->mux_flags & USB_PD_MUX_DP_ENABLED) {
		ret = cros_typec_enable_dp(typec, port_num, pd_ctrl);
	} else if (port->mux_flags & USB_PD_MUX_SAFE_MODE) {
		ret = cros_typec_usb_safe_state(port);
	} else if (port->mux_flags & USB_PD_MUX_USB_ENABLED) {
		port->state.alt = NULL;
		port->state.mode = TYPEC_STATE_USB;

		ret = cros_typec_retimer_set(port->retimer, port->state);
		if (!ret)
			ret = typec_mux_set(port->mux, &port->state);
	} else {
		/* ret is 0 here (from the role-switch call above). */
		dev_dbg(typec->dev,
			"Unrecognized mode requested, mux flags: %x\n",
			port->mux_flags);
	}

mux_ack:
	if (!typec->needs_mux_ack)
		return ret;

	/* Sending Acknowledgment to EC */
	mux_ack.port = port_num;

	if (cros_ec_cmd(typec->ec, 0, EC_CMD_USB_PD_MUX_ACK, &mux_ack,
			sizeof(mux_ack), NULL, 0) < 0)
		dev_warn(typec->dev,
			 "Failed to send Mux ACK to EC for port: %d\n",
			 port_num);

	return ret;
}
static void cros_typec_set_port_params_v0(struct cros_typec_data *typec,
int port_num, struct ec_response_usb_pd_control *resp)
{
struct typec_port *port = typec->ports[port_num]->port;
enum typec_orientation polarity;
if (!resp->enabled)
polarity = TYPEC_ORIENTATION_NONE;
else if (!resp->polarity)
polarity = TYPEC_ORIENTATION_NORMAL;
else
polarity = TYPEC_ORIENTATION_REVERSE;
typec_set_pwr_role(port, resp->role ? TYPEC_SOURCE : TYPEC_SINK);
typec_set_orientation(port, polarity);
}
/*
 * Apply orientation and data/power/VCONN roles from a v1+ response, and
 * register or remove the partner as the connection state dictates.
 */
static void cros_typec_set_port_params_v1(struct cros_typec_data *typec,
		int port_num, struct ec_response_usb_pd_control_v1 *resp)
{
	struct typec_port *port = typec->ports[port_num]->port;
	enum typec_orientation polarity;
	bool pd_en;
	int ret;

	if (!(resp->enabled & PD_CTRL_RESP_ENABLED_CONNECTED))
		polarity = TYPEC_ORIENTATION_NONE;
	else if (!resp->polarity)
		polarity = TYPEC_ORIENTATION_NORMAL;
	else
		polarity = TYPEC_ORIENTATION_REVERSE;
	typec_set_orientation(port, polarity);
	typec_set_data_role(port, resp->role & PD_CTRL_RESP_ROLE_DATA ?
			TYPEC_HOST : TYPEC_DEVICE);
	typec_set_pwr_role(port, resp->role & PD_CTRL_RESP_ROLE_POWER ?
			TYPEC_SOURCE : TYPEC_SINK);
	typec_set_vconn_role(port, resp->role & PD_CTRL_RESP_ROLE_VCONN ?
			TYPEC_SOURCE : TYPEC_SINK);

	/* Register/remove partners when a connect/disconnect occurs. */
	if (resp->enabled & PD_CTRL_RESP_ENABLED_CONNECTED) {
		/* Already registered on a previous pass — nothing to do. */
		if (typec->ports[port_num]->partner)
			return;

		pd_en = resp->enabled & PD_CTRL_RESP_ENABLED_PD_CAPABLE;
		ret = cros_typec_add_partner(typec, port_num, pd_en);
		if (ret)
			dev_warn(typec->dev,
				 "Failed to register partner on port: %d\n",
				 port_num);
	} else {
		cros_typec_remove_partner(typec, port_num);
		cros_typec_remove_cable(typec, port_num);
	}
}
/*
 * Helper function to register partner/plug altmodes.
 *
 * Walks the SVID/mode matrix from the port's cached EC discovery response
 * and registers one alt mode per (svid, mode) pair, tracking each in the
 * partner or plug mode list so it can be unregistered later. On any
 * failure, everything registered so far is rolled back.
 */
static int cros_typec_register_altmodes(struct cros_typec_data *typec, int port_num,
					bool is_partner)
{
	struct cros_typec_port *port = typec->ports[port_num];
	struct ec_response_typec_discovery *sop_disc = port->disc_data;
	struct cros_typec_altmode_node *node;
	struct typec_altmode_desc desc;
	struct typec_altmode *amode;
	int num_altmodes = 0;
	int ret = 0;
	int i, j;

	for (i = 0; i < sop_disc->svid_count; i++) {
		for (j = 0; j < sop_disc->svids[i].mode_count; j++) {
			memset(&desc, 0, sizeof(desc));
			desc.svid = sop_disc->svids[i].svid;
			/* Alt mode indices are 1-based per the PD spec. */
			desc.mode = j + 1;
			desc.vdo = sop_disc->svids[i].mode_vdo[j];

			if (is_partner)
				amode = typec_partner_register_altmode(port->partner, &desc);
			else
				amode = typec_plug_register_altmode(port->plug, &desc);

			if (IS_ERR(amode)) {
				ret = PTR_ERR(amode);
				goto err_cleanup;
			}

			/* If no memory is available we should unregister and exit. */
			node = devm_kzalloc(typec->dev, sizeof(*node), GFP_KERNEL);
			if (!node) {
				ret = -ENOMEM;
				typec_unregister_altmode(amode);
				goto err_cleanup;
			}

			node->amode = amode;

			if (is_partner)
				list_add_tail(&node->list, &port->partner_mode_list);
			else
				list_add_tail(&node->list, &port->plug_mode_list);
			num_altmodes++;
		}
	}

	if (is_partner)
		ret = typec_partner_set_num_altmodes(port->partner, num_altmodes);
	else
		ret = typec_plug_set_num_altmodes(port->plug, num_altmodes);

	if (ret < 0) {
		dev_err(typec->dev, "Unable to set %s num_altmodes for port: %d\n",
			is_partner ? "partner" : "plug", port_num);
		goto err_cleanup;
	}

	return 0;

err_cleanup:
	cros_typec_unregister_altmodes(typec, port_num, is_partner);
	return ret;
}
/*
 * Parse the PD identity data from the EC PD discovery responses and copy that
 * to the supplied PD identity struct.
 *
 * Fields beyond identity_count are left untouched, so callers rely on the
 * target struct having been zeroed beforehand (remove_partner/remove_cable
 * memset the cached identities).
 */
static void cros_typec_parse_pd_identity(struct usb_pd_identity *id,
					 struct ec_response_typec_discovery *disc)
{
	int i;

	/* First, update the PD identity VDOs for the partner. */
	if (disc->identity_count > 0)
		id->id_header = disc->discovery_vdo[0];
	if (disc->identity_count > 1)
		id->cert_stat = disc->discovery_vdo[1];
	if (disc->identity_count > 2)
		id->product = disc->discovery_vdo[2];

	/* Copy the remaining identity VDOs till a maximum of 6. */
	for (i = 3; i < disc->identity_count && i < VDO_MAX_OBJECTS; i++)
		id->vdo[i - 3] = disc->discovery_vdo[i];
}
/*
 * Fetch SOP' (cable) discovery data from the EC and register the cable, its
 * SOP' plug, and the plug's alt modes. On failure the partially registered
 * cable state is removed. @pd_revision is the cable's PD revision in the
 * Type-C framework's format.
 */
static int cros_typec_handle_sop_prime_disc(struct cros_typec_data *typec, int port_num, u16 pd_revision)
{
	struct cros_typec_port *port = typec->ports[port_num];
	struct ec_response_typec_discovery *disc = port->disc_data;
	struct typec_cable_desc c_desc = {};
	struct typec_plug_desc p_desc;
	struct ec_params_typec_discovery req = {
		.port = port_num,
		.partner_type = TYPEC_PARTNER_SOP_PRIME,
	};
	u32 cable_plug_type;
	int ret = 0;

	memset(disc, 0, EC_PROTO2_MAX_RESPONSE_SIZE);
	ret = cros_ec_cmd(typec->ec, 0, EC_CMD_TYPEC_DISCOVERY, &req, sizeof(req),
			  disc, EC_PROTO2_MAX_RESPONSE_SIZE);
	if (ret < 0) {
		dev_err(typec->dev, "Failed to get SOP' discovery data for port: %d\n", port_num);
		goto sop_prime_disc_exit;
	}

	/* Parse the PD identity data, even if only 0s were returned. */
	cros_typec_parse_pd_identity(&port->c_identity, disc);

	if (disc->identity_count != 0) {
		/* Derive the plug type from the cable identity VDO. */
		cable_plug_type = VDO_TYPEC_CABLE_TYPE(port->c_identity.vdo[0]);
		switch (cable_plug_type) {
		case CABLE_ATYPE:
			c_desc.type = USB_PLUG_TYPE_A;
			break;
		case CABLE_BTYPE:
			c_desc.type = USB_PLUG_TYPE_B;
			break;
		case CABLE_CTYPE:
			c_desc.type = USB_PLUG_TYPE_C;
			break;
		case CABLE_CAPTIVE:
			c_desc.type = USB_PLUG_CAPTIVE;
			break;
		default:
			c_desc.type = USB_PLUG_NONE;
		}
		/* Active cables advertise the active-cable product type. */
		c_desc.active = PD_IDH_PTYPE(port->c_identity.id_header) == IDH_PTYPE_ACABLE;
	}

	c_desc.identity = &port->c_identity;
	c_desc.pd_revision = pd_revision;

	port->cable = typec_register_cable(port->port, &c_desc);
	if (IS_ERR(port->cable)) {
		ret = PTR_ERR(port->cable);
		port->cable = NULL;
		goto sop_prime_disc_exit;
	}

	p_desc.index = TYPEC_PLUG_SOP_P;
	port->plug = typec_register_plug(port->cable, &p_desc);
	if (IS_ERR(port->plug)) {
		ret = PTR_ERR(port->plug);
		port->plug = NULL;
		goto sop_prime_disc_exit;
	}

	ret = cros_typec_register_altmodes(typec, port_num, false);
	if (ret < 0) {
		dev_err(typec->dev, "Failed to register plug altmodes, port: %d\n", port_num);
		goto sop_prime_disc_exit;
	}

	return 0;

sop_prime_disc_exit:
	cros_typec_remove_cable(typec, port_num);
	return ret;
}
/*
 * Fetch SOP (partner) discovery data from the EC, update the partner's PD
 * revision and identity, and register the partner's alt modes. Requires a
 * partner to already be registered on the port.
 */
static int cros_typec_handle_sop_disc(struct cros_typec_data *typec, int port_num, u16 pd_revision)
{
	struct cros_typec_port *port = typec->ports[port_num];
	struct ec_response_typec_discovery *sop_disc = port->disc_data;
	struct ec_params_typec_discovery req = {
		.port = port_num,
		.partner_type = TYPEC_PARTNER_SOP,
	};
	int ret = 0;

	if (!port->partner) {
		dev_err(typec->dev,
			"SOP Discovery received without partner registered, port: %d\n",
			port_num);
		ret = -EINVAL;
		goto disc_exit;
	}

	typec_partner_set_pd_revision(port->partner, pd_revision);

	memset(sop_disc, 0, EC_PROTO2_MAX_RESPONSE_SIZE);
	ret = cros_ec_cmd(typec->ec, 0, EC_CMD_TYPEC_DISCOVERY, &req, sizeof(req),
			  sop_disc, EC_PROTO2_MAX_RESPONSE_SIZE);
	if (ret < 0) {
		dev_err(typec->dev, "Failed to get SOP discovery data for port: %d\n", port_num);
		goto disc_exit;
	}

	cros_typec_parse_pd_identity(&port->p_identity, sop_disc);

	ret = typec_partner_set_identity(port->partner);
	if (ret < 0) {
		dev_err(typec->dev, "Failed to update partner PD identity, port: %d\n", port_num);
		goto disc_exit;
	}

	ret = cros_typec_register_altmodes(typec, port_num, true);
	if (ret < 0) {
		dev_err(typec->dev, "Failed to register partner altmodes, port: %d\n", port_num);
		goto disc_exit;
	}

disc_exit:
	return ret;
}
/* Ask the EC to clear the given PD status event bits for @port_num. */
static int cros_typec_send_clear_event(struct cros_typec_data *typec, int port_num, u32 events_mask)
{
	struct ec_params_typec_control req = {};

	req.port = port_num;
	req.command = TYPEC_CONTROL_COMMAND_CLEAR_EVENTS;
	req.clear_events_mask = events_mask;

	return cros_ec_cmd(typec->ec, 0, EC_CMD_TYPEC_CONTROL, &req,
			   sizeof(req), NULL, 0);
}
/*
 * Register the partner's USB Power Delivery device and its source/sink
 * capability PDOs from the EC's Type-C status response. Does nothing if
 * there is no partner, the PD device already exists, or the EC reported no
 * capabilities. Capability registration failures are warned about but not
 * treated as fatal.
 */
static void cros_typec_register_partner_pdos(struct cros_typec_data *typec,
					     struct ec_response_typec_status *resp, int port_num)
{
	struct usb_power_delivery_capabilities_desc caps_desc = {};
	struct usb_power_delivery_desc desc = {
		/* Convert BCD revision to the Type-C framework's format. */
		.revision = (le16_to_cpu(resp->sop_revision) & 0xff00) >> 4,
	};
	struct cros_typec_port *port = typec->ports[port_num];

	if (!port->partner || port->partner_pd)
		return;

	/* If no caps are available, don't bother creating a device. */
	if (!resp->source_cap_count && !resp->sink_cap_count)
		return;

	port->partner_pd = typec_partner_usb_power_delivery_register(port->partner, &desc);
	if (IS_ERR(port->partner_pd)) {
		dev_warn(typec->dev, "Failed to register partner PD device, port: %d\n", port_num);
		return;
	}

	typec_partner_set_usb_power_delivery(port->partner, port->partner_pd);

	memcpy(caps_desc.pdo, resp->source_cap_pdos, sizeof(u32) * resp->source_cap_count);
	caps_desc.role = TYPEC_SOURCE;
	port->partner_src_caps = usb_power_delivery_register_capabilities(port->partner_pd,
									  &caps_desc);
	if (IS_ERR(port->partner_src_caps))
		dev_warn(typec->dev, "Failed to register source caps, port: %d\n", port_num);

	/* Reuse the descriptor for the sink capabilities. */
	memset(&caps_desc, 0, sizeof(caps_desc));
	memcpy(caps_desc.pdo, resp->sink_cap_pdos, sizeof(u32) * resp->sink_cap_count);
	caps_desc.role = TYPEC_SINK;
	port->partner_sink_caps = usb_power_delivery_register_capabilities(port->partner_pd,
									   &caps_desc);
	if (IS_ERR(port->partner_sink_caps))
		dev_warn(typec->dev, "Failed to register sink caps, port: %d\n", port_num);
}
static void cros_typec_handle_status(struct cros_typec_data *typec, int port_num)
{
struct ec_response_typec_status resp;
struct ec_params_typec_status req = {
.port = port_num,
};
int ret;
ret = cros_ec_cmd(typec->ec, 0, EC_CMD_TYPEC_STATUS, &req, sizeof(req),
&resp, sizeof(resp));
if (ret < 0) {
dev_warn(typec->dev, "EC_CMD_TYPEC_STATUS failed for port: %d\n", port_num);
return;
}
/* If we got a hard reset, unregister everything and return. */
if (resp.events & PD_STATUS_EVENT_HARD_RESET) {
cros_typec_remove_partner(typec, port_num);
cros_typec_remove_cable(typec, port_num);
ret = cros_typec_send_clear_event(typec, port_num,
PD_STATUS_EVENT_HARD_RESET);
if (ret < 0)
dev_warn(typec->dev,
"Failed hard reset event clear, port: %d\n", port_num);
return;
}
/* Handle any events appropriately. */
if (resp.events & PD_STATUS_EVENT_SOP_DISC_DONE && !typec->ports[port_num]->sop_disc_done) {
u16 sop_revision;
/* Convert BCD to the format preferred by the TypeC framework */
sop_revision = (le16_to_cpu(resp.sop_revision) & 0xff00) >> 4;
ret = cros_typec_handle_sop_disc(typec, port_num, sop_revision);
if (ret < 0)
dev_err(typec->dev, "Couldn't parse SOP Disc data, port: %d\n", port_num);
else {
typec->ports[port_num]->sop_disc_done = true;
ret = cros_typec_send_clear_event(typec, port_num,
PD_STATUS_EVENT_SOP_DISC_DONE);
if (ret < 0)
dev_warn(typec->dev,
"Failed SOP Disc event clear, port: %d\n", port_num);
}
if (resp.sop_connected)
typec_set_pwr_opmode(typec->ports[port_num]->port, TYPEC_PWR_MODE_PD);
cros_typec_register_partner_pdos(typec, &resp, port_num);
}
if (resp.events & PD_STATUS_EVENT_SOP_PRIME_DISC_DONE &&
!typec->ports[port_num]->sop_prime_disc_done) {
u16 sop_prime_revision;
/* Convert BCD to the format preferred by the TypeC framework */
sop_prime_revision = (le16_to_cpu(resp.sop_prime_revision) & 0xff00) >> 4;
ret = cros_typec_handle_sop_prime_disc(typec, port_num, sop_prime_revision);
if (ret < 0)
dev_err(typec->dev, "Couldn't parse SOP' Disc data, port: %d\n", port_num);
else {
typec->ports[port_num]->sop_prime_disc_done = true;
ret = cros_typec_send_clear_event(typec, port_num,
PD_STATUS_EVENT_SOP_PRIME_DISC_DONE);
if (ret < 0)
dev_warn(typec->dev,
"Failed SOP Disc event clear, port: %d\n", port_num);
}
}
if (resp.events & PD_STATUS_EVENT_VDM_REQ_REPLY) {
cros_typec_handle_vdm_response(typec, port_num);
ret = cros_typec_send_clear_event(typec, port_num, PD_STATUS_EVENT_VDM_REQ_REPLY);
if (ret < 0)
dev_warn(typec->dev, "Failed VDM Reply event clear, port: %d\n", port_num);
}
if (resp.events & PD_STATUS_EVENT_VDM_ATTENTION) {
cros_typec_handle_vdm_attention(typec, port_num);
ret = cros_typec_send_clear_event(typec, port_num, PD_STATUS_EVENT_VDM_ATTENTION);
if (ret < 0)
dev_warn(typec->dev, "Failed VDM attention event clear, port: %d\n",
port_num);
}
}
/*
 * Refresh the state of one port: query EC_CMD_USB_PD_CONTROL (at the
 * negotiated version), reconfigure the muxes/switches, update the Type-C
 * framework's view of the port, and process pending status events when the
 * EC supports the Type-C status command. Mux failures are only warned about.
 */
static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
{
	struct ec_params_usb_pd_control req;
	struct ec_response_usb_pd_control_v2 resp;
	int ret;

	if (port_num < 0 || port_num >= typec->num_ports) {
		dev_err(typec->dev, "cannot get status for invalid port %d\n",
			port_num);
		return -EINVAL;
	}

	/* Pure query: request no role/mux/swap changes. */
	req.port = port_num;
	req.role = USB_PD_CTRL_ROLE_NO_CHANGE;
	req.mux = USB_PD_CTRL_MUX_NO_CHANGE;
	req.swap = USB_PD_CTRL_SWAP_NONE;

	ret = cros_ec_cmd(typec->ec, typec->pd_ctrl_ver,
			  EC_CMD_USB_PD_CONTROL, &req, sizeof(req),
			  &resp, sizeof(resp));
	if (ret < 0)
		return ret;

	/* Update the switches if they exist, according to requested state */
	ret = cros_typec_configure_mux(typec, port_num, &resp);
	if (ret)
		dev_warn(typec->dev, "Configure muxes failed, err = %d\n", ret);

	dev_dbg(typec->dev, "Enabled %d: 0x%hhx\n", port_num, resp.enabled);
	dev_dbg(typec->dev, "Role %d: 0x%hhx\n", port_num, resp.role);
	dev_dbg(typec->dev, "Polarity %d: 0x%hhx\n", port_num, resp.polarity);
	dev_dbg(typec->dev, "State %d: %s\n", port_num, resp.state);

	/* v1 responses are a prefix of v2, so the casts below are safe. */
	if (typec->pd_ctrl_ver != 0)
		cros_typec_set_port_params_v1(typec, port_num,
			(struct ec_response_usb_pd_control_v1 *)&resp);
	else
		cros_typec_set_port_params_v0(typec, port_num,
			(struct ec_response_usb_pd_control *) &resp);

	if (typec->typec_cmd_supported)
		cros_typec_handle_status(typec, port_num);

	return 0;
}
/*
 * Query the EC for the supported versions of EC_CMD_USB_PD_CONTROL and
 * cache the highest supported one (0..2) in typec->pd_ctrl_ver.
 */
static int cros_typec_get_cmd_version(struct cros_typec_data *typec)
{
	struct ec_params_get_cmd_versions_v1 req_v1;
	struct ec_response_get_cmd_versions resp;
	int ret, ver;

	/* We're interested in the PD control command version. */
	req_v1.cmd = EC_CMD_USB_PD_CONTROL;
	ret = cros_ec_cmd(typec->ec, 1, EC_CMD_GET_CMD_VERSIONS,
			  &req_v1, sizeof(req_v1), &resp, sizeof(resp));
	if (ret < 0)
		return ret;

	/* Pick the newest version the EC advertises, falling back to 0. */
	for (ver = 2; ver > 0; ver--) {
		if (resp.version_mask & EC_VER_MASK(ver))
			break;
	}
	typec->pd_ctrl_ver = ver;

	dev_dbg(typec->dev, "PD Control has version mask 0x%02x\n",
		typec->pd_ctrl_ver & 0xff);

	return 0;
}
/*
 * Deferred-work handler: refresh every port's state. A failing port is
 * warned about but does not stop the remaining ports from being updated.
 */
static void cros_typec_port_work(struct work_struct *work)
{
	struct cros_typec_data *typec = container_of(work, struct cros_typec_data, port_work);
	int port;

	for (port = 0; port < typec->num_ports; port++) {
		if (cros_typec_port_update(typec, port) < 0)
			dev_warn(typec->dev, "Update failed for port: %d\n", port);
	}
}
/*
 * USB PD host-event notifier callback. Flush any in-flight port-update work
 * before queueing a fresh pass, so this event is processed against current
 * EC state rather than coalesced into a run that already started.
 */
static int cros_ec_typec_event(struct notifier_block *nb,
			       unsigned long host_event, void *_notify)
{
	struct cros_typec_data *typec = container_of(nb, struct cros_typec_data, nb);

	flush_work(&typec->port_work);
	schedule_work(&typec->port_work);

	return NOTIFY_OK;
}
#ifdef CONFIG_ACPI
/* ACPI ID the ChromeOS EC firmware exposes for the Type-C control device. */
static const struct acpi_device_id cros_typec_acpi_id[] = {
	{ "GOOG0014", 0 },
	{}
};
MODULE_DEVICE_TABLE(acpi, cros_typec_acpi_id);
#endif

#ifdef CONFIG_OF
/* Devicetree match table for DT-based ChromeOS platforms. */
static const struct of_device_id cros_typec_of_match[] = {
	{ .compatible = "google,cros-ec-typec", },
	{}
};
MODULE_DEVICE_TABLE(of, cros_typec_of_match);
#endif
/*
 * Probe: locate the parent EC device, negotiate the PD control command
 * version, query feature flags and the number of PD ports, register the
 * ports from firmware description, run an initial state pass, and finally
 * hook up the USB PD event notifier.
 */
static int cros_typec_probe(struct platform_device *pdev)
{
	struct cros_ec_dev *ec_dev = NULL;
	struct device *dev = &pdev->dev;
	struct cros_typec_data *typec;
	struct ec_response_usb_pd_ports resp;
	int ret, i;

	typec = devm_kzalloc(dev, sizeof(*typec), GFP_KERNEL);
	if (!typec)
		return -ENOMEM;

	typec->dev = dev;

	typec->ec = dev_get_drvdata(pdev->dev.parent);
	if (!typec->ec) {
		dev_err(dev, "couldn't find parent EC device\n");
		return -ENODEV;
	}

	platform_set_drvdata(pdev, typec);

	ret = cros_typec_get_cmd_version(typec);
	if (ret < 0) {
		dev_err(dev, "failed to get PD command version info\n");
		return ret;
	}

	ec_dev = dev_get_drvdata(&typec->ec->ec->dev);
	if (!ec_dev)
		return -EPROBE_DEFER;

	/* Feature flags steer status handling and mux acknowledgment. */
	typec->typec_cmd_supported = cros_ec_check_features(ec_dev, EC_FEATURE_TYPEC_CMD);
	typec->needs_mux_ack = cros_ec_check_features(ec_dev, EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK);

	ret = cros_ec_cmd(typec->ec, 0, EC_CMD_USB_PD_PORTS, NULL, 0,
			  &resp, sizeof(resp));
	if (ret < 0)
		return ret;

	typec->num_ports = resp.num_ports;
	if (typec->num_ports > EC_USB_PD_MAX_PORTS) {
		dev_warn(typec->dev,
			 "Too many ports reported: %d, limiting to max: %d\n",
			 typec->num_ports, EC_USB_PD_MAX_PORTS);
		typec->num_ports = EC_USB_PD_MAX_PORTS;
	}

	ret = cros_typec_init_ports(typec);
	if (ret < 0)
		return ret;

	INIT_WORK(&typec->port_work, cros_typec_port_work);

	/*
	 * Safe to call port update here, since we haven't registered the
	 * PD notifier yet.
	 */
	for (i = 0; i < typec->num_ports; i++) {
		ret = cros_typec_port_update(typec, i);
		if (ret < 0)
			goto unregister_ports;
	}

	typec->nb.notifier_call = cros_ec_typec_event;
	ret = cros_usbpd_register_notify(&typec->nb);
	if (ret < 0)
		goto unregister_ports;

	return 0;

unregister_ports:
	cros_unregister_ports(typec);
	return ret;
}
/* System suspend: wait for any in-flight port update to finish. */
static int __maybe_unused cros_typec_suspend(struct device *dev)
{
	struct cros_typec_data *typec = dev_get_drvdata(dev);

	cancel_work_sync(&typec->port_work);

	return 0;
}
/* System resume: re-query all ports, since state may have changed in sleep. */
static int __maybe_unused cros_typec_resume(struct device *dev)
{
	struct cros_typec_data *typec = dev_get_drvdata(dev);

	/* Refresh port state. */
	schedule_work(&typec->port_work);

	return 0;
}
static const struct dev_pm_ops cros_typec_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(cros_typec_suspend, cros_typec_resume)
};

/* No .remove: all resources are devm-managed or torn down via notifier. */
static struct platform_driver cros_typec_driver = {
	.driver	= {
		.name = DRV_NAME,
		.acpi_match_table = ACPI_PTR(cros_typec_acpi_id),
		.of_match_table = of_match_ptr(cros_typec_of_match),
		.pm = &cros_typec_pm_ops,
	},
	.probe = cros_typec_probe,
};

module_platform_driver(cros_typec_driver);

MODULE_AUTHOR("Prashant Malani <[email protected]>");
MODULE_DESCRIPTION("Chrome OS EC Type C control");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/chrome/cros_ec_typec.c |
// SPDX-License-Identifier: GPL-2.0
//
// Copyright 2018 Google LLC.
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_device.h>
#include <linux/rpmsg.h>
#include <linux/slab.h>
#include "cros_ec.h"
/* How long to wait for the EC to acknowledge a host command, in ms. */
#define EC_MSG_TIMEOUT_MS 200
/* Message-type tags carried in cros_ec_rpmsg_response.type. */
#define HOST_COMMAND_MARK 1
#define HOST_EVENT_MARK 2
/**
 * struct cros_ec_rpmsg_response - rpmsg message format from the EC.
 *
 * @type: The type of message, should be either HOST_COMMAND_MARK or
 *        HOST_EVENT_MARK, representing that the message is a response to
 *        host command, or a host event.
 * @data: ec_host_response for host command.
 */
struct cros_ec_rpmsg_response {
	u8 type;
	u8 data[] __aligned(4);
};
/**
 * struct cros_ec_rpmsg - information about an EC over rpmsg.
 *
 * @rpdev: rpmsg device we are connected to
 * @xfer_ack: completion for host command transfer.
 * @host_event_work: Work struct for pending host event.
 * @ept: The rpmsg endpoint of this channel.
 * @has_pending_host_event: true if a host event arrived before probe finished;
 *                          it is delivered once @probe_done is set.
 * @probe_done: Flag to indicate that probe is done.
 */
struct cros_ec_rpmsg {
	struct rpmsg_device *rpdev;
	struct completion xfer_ack;
	struct work_struct host_event_work;
	struct rpmsg_endpoint *ept;
	bool has_pending_host_event;
	bool probe_done;
};
/**
 * cros_ec_cmd_xfer_rpmsg - Transfer a message over rpmsg and receive the reply
 *
 * @ec_dev: ChromeOS EC device
 * @ec_msg: Message to transfer
 *
 * This is only used for old EC proto version, and is not supported for this
 * driver.
 *
 * Return: -EINVAL
 */
static int cros_ec_cmd_xfer_rpmsg(struct cros_ec_device *ec_dev,
				  struct cros_ec_command *ec_msg)
{
	/* Pre-v3 (non-packet) protocol is not supported over rpmsg. */
	return -EINVAL;
}
/**
 * cros_ec_pkt_xfer_rpmsg - Transfer a packet over rpmsg and receive the reply
 *
 * @ec_dev: ChromeOS EC device
 * @ec_msg: Message to transfer
 *
 * Return: number of bytes of the reply on success or negative error code.
 */
static int cros_ec_pkt_xfer_rpmsg(struct cros_ec_device *ec_dev,
				  struct cros_ec_command *ec_msg)
{
	struct cros_ec_rpmsg *ec_rpmsg = ec_dev->priv;
	struct ec_host_response *response;
	unsigned long timeout;
	int len;
	int ret;
	u8 sum;
	int i;

	ec_msg->result = 0;
	/* Serialize the request into ec_dev->dout. */
	len = cros_ec_prepare_tx(ec_dev, ec_msg);
	if (len < 0)
		return len;
	dev_dbg(ec_dev->dev, "prepared, len=%d\n", len);

	reinit_completion(&ec_rpmsg->xfer_ack);
	ret = rpmsg_send(ec_rpmsg->ept, ec_dev->dout, len);
	if (ret) {
		dev_err(ec_dev->dev, "rpmsg send failed\n");
		return ret;
	}

	/* The reply is copied into ec_dev->din by cros_ec_rpmsg_callback(). */
	timeout = msecs_to_jiffies(EC_MSG_TIMEOUT_MS);
	ret = wait_for_completion_timeout(&ec_rpmsg->xfer_ack, timeout);
	if (!ret) {
		dev_err(ec_dev->dev, "rpmsg send timeout\n");
		return -EIO;
	}

	/* check response error code */
	response = (struct ec_host_response *)ec_dev->din;
	ec_msg->result = response->result;

	ret = cros_ec_check_result(ec_dev, ec_msg);
	if (ret)
		goto exit;

	if (response->data_len > ec_msg->insize) {
		dev_err(ec_dev->dev, "packet too long (%d bytes, expected %d)",
			response->data_len, ec_msg->insize);
		ret = -EMSGSIZE;
		goto exit;
	}

	/* copy response packet payload and compute checksum */
	memcpy(ec_msg->data, ec_dev->din + sizeof(*response),
	       response->data_len);

	/* Header plus payload must sum to zero modulo 256. */
	sum = 0;
	for (i = 0; i < sizeof(*response) + response->data_len; i++)
		sum += ec_dev->din[i];
	if (sum) {
		dev_err(ec_dev->dev, "bad packet checksum, calculated %x\n",
			sum);
		ret = -EBADMSG;
		goto exit;
	}

	ret = response->data_len;
exit:
	/* The EC is about to reboot; let it come back before the next cmd. */
	if (ec_msg->command == EC_CMD_REBOOT_EC)
		msleep(EC_REBOOT_DELAY_MS);

	return ret;
}
/* Work handler: run the shared cros_ec event thread for a queued host event. */
static void
cros_ec_rpmsg_host_event_function(struct work_struct *host_event_work)
{
	struct cros_ec_rpmsg *ec_rpmsg;

	ec_rpmsg = container_of(host_event_work, struct cros_ec_rpmsg,
				host_event_work);
	cros_ec_irq_thread(0, dev_get_drvdata(&ec_rpmsg->rpdev->dev));
}
/*
 * rpmsg receive callback. A HOST_COMMAND_MARK message carries the reply to
 * the in-flight host command (copied to ec_dev->din, then the waiter in
 * cros_ec_pkt_xfer_rpmsg() is woken); a HOST_EVENT_MARK message signals a
 * host event, which is handled from a workqueue.
 */
static int cros_ec_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
				  int len, void *priv, u32 src)
{
	struct cros_ec_device *ec_dev = dev_get_drvdata(&rpdev->dev);
	struct cros_ec_rpmsg *ec_rpmsg = ec_dev->priv;
	struct cros_ec_rpmsg_response *resp;

	if (!len) {
		dev_warn(ec_dev->dev, "rpmsg received empty response");
		return -EINVAL;
	}

	resp = data;
	/* Strip the type marker; len becomes the payload length. */
	len -= offsetof(struct cros_ec_rpmsg_response, data);
	if (resp->type == HOST_COMMAND_MARK) {
		if (len > ec_dev->din_size) {
			dev_warn(ec_dev->dev,
				 "received length %d > din_size %d, truncating",
				 len, ec_dev->din_size);
			len = ec_dev->din_size;
		}

		memcpy(ec_dev->din, resp->data, len);
		complete(&ec_rpmsg->xfer_ack);
	} else if (resp->type == HOST_EVENT_MARK) {
		/*
		 * If the host event is sent before cros_ec_register is
		 * finished, queue the host event.
		 */
		if (ec_rpmsg->probe_done)
			schedule_work(&ec_rpmsg->host_event_work);
		else
			ec_rpmsg->has_pending_host_event = true;
	} else {
		dev_warn(ec_dev->dev, "rpmsg received invalid type = %d",
			 resp->type);
		return -EINVAL;
	}

	return 0;
}
static struct rpmsg_endpoint *
cros_ec_rpmsg_create_ept(struct rpmsg_device *rpdev)
{
struct rpmsg_channel_info chinfo = {};
strscpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE);
chinfo.src = rpdev->src;
chinfo.dst = RPMSG_ADDR_ANY;
return rpmsg_create_ept(rpdev, cros_ec_rpmsg_callback, NULL, chinfo);
}
/* Probe: set up the rpmsg transport and register with the cros_ec core. */
static int cros_ec_rpmsg_probe(struct rpmsg_device *rpdev)
{
	struct device *dev = &rpdev->dev;
	struct cros_ec_rpmsg *ec_rpmsg;
	struct cros_ec_device *ec_dev;
	int ret;

	ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL);
	if (!ec_dev)
		return -ENOMEM;

	ec_rpmsg = devm_kzalloc(dev, sizeof(*ec_rpmsg), GFP_KERNEL);
	if (!ec_rpmsg)
		return -ENOMEM;

	/* Wire the generic cros_ec transport callbacks to rpmsg. */
	ec_dev->dev = dev;
	ec_dev->priv = ec_rpmsg;
	ec_dev->cmd_xfer = cros_ec_cmd_xfer_rpmsg;
	ec_dev->pkt_xfer = cros_ec_pkt_xfer_rpmsg;
	ec_dev->phys_name = dev_name(&rpdev->dev);
	/* Initial buffer sizes, large enough for the protocol-info exchange. */
	ec_dev->din_size = sizeof(struct ec_host_response) +
			   sizeof(struct ec_response_get_protocol_info);
	ec_dev->dout_size = sizeof(struct ec_host_request);
	dev_set_drvdata(dev, ec_dev);

	ec_rpmsg->rpdev = rpdev;
	init_completion(&ec_rpmsg->xfer_ack);
	INIT_WORK(&ec_rpmsg->host_event_work,
		  cros_ec_rpmsg_host_event_function);

	ec_rpmsg->ept = cros_ec_rpmsg_create_ept(rpdev);
	if (!ec_rpmsg->ept)
		return -ENOMEM;

	ret = cros_ec_register(ec_dev);
	if (ret < 0) {
		rpmsg_destroy_ept(ec_rpmsg->ept);
		cancel_work_sync(&ec_rpmsg->host_event_work);
		return ret;
	}

	ec_rpmsg->probe_done = true;

	/* Deliver any host event that arrived while we were registering. */
	if (ec_rpmsg->has_pending_host_event)
		schedule_work(&ec_rpmsg->host_event_work);

	return 0;
}
/* Tear down in reverse probe order: core devices, endpoint, pending work. */
static void cros_ec_rpmsg_remove(struct rpmsg_device *rpdev)
{
	struct cros_ec_device *ec_dev = dev_get_drvdata(&rpdev->dev);
	struct cros_ec_rpmsg *priv = ec_dev->priv;

	cros_ec_unregister(ec_dev);
	rpmsg_destroy_ept(priv->ept);
	cancel_work_sync(&priv->host_event_work);
}
#ifdef CONFIG_PM_SLEEP
/* Forward system suspend handling to the cros_ec core. */
static int cros_ec_rpmsg_suspend(struct device *dev)
{
	struct cros_ec_device *ec_dev = dev_get_drvdata(dev);

	return cros_ec_suspend(ec_dev);
}

/* Forward system resume handling to the cros_ec core. */
static int cros_ec_rpmsg_resume(struct device *dev)
{
	struct cros_ec_device *ec_dev = dev_get_drvdata(dev);

	return cros_ec_resume(ec_dev);
}
#endif
/* Sleep PM ops; the hooks above exist only under CONFIG_PM_SLEEP. */
static SIMPLE_DEV_PM_OPS(cros_ec_rpmsg_pm_ops, cros_ec_rpmsg_suspend,
			 cros_ec_rpmsg_resume);

static const struct of_device_id cros_ec_rpmsg_of_match[] = {
	{ .compatible = "google,cros-ec-rpmsg", },
	{ }
};
MODULE_DEVICE_TABLE(of, cros_ec_rpmsg_of_match);
/* rpmsg driver glue; matched through the DT compatible above. */
static struct rpmsg_driver cros_ec_driver_rpmsg = {
	.drv = {
		.name	= "cros-ec-rpmsg",
		.of_match_table = cros_ec_rpmsg_of_match,
		.pm	= &cros_ec_rpmsg_pm_ops,
	},
	.probe		= cros_ec_rpmsg_probe,
	.remove		= cros_ec_rpmsg_remove,
};

module_rpmsg_driver(cros_ec_driver_rpmsg);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("ChromeOS EC multi function device (rpmsg)");
| linux-master | drivers/platform/chrome/cros_ec_rpmsg.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for Chrome OS EC Sensor hub FIFO.
*
* Copyright 2020 Google LLC
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/iio/iio.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_data/cros_ec_sensorhub.h>
#include <linux/platform_device.h>
#include <linux/sort.h>
#include <linux/slab.h>
#define CREATE_TRACE_POINTS
#include "cros_ec_sensorhub_trace.h"
/* Precision of fixed point for the m values from the filter */
#define M_PRECISION BIT(23)
/* Only activate the filter once we have at least this many elements. */
#define TS_HISTORY_THRESHOLD 8
/*
* If we don't have any history entries for this long, empty the filter to
* make sure there are no big discontinuities.
*/
#define TS_HISTORY_BORED_US 500000
/* To measure by how much the filter is overshooting, if it happens. */
#define FUTURE_TS_ANALYTICS_COUNT_MAX 100
/*
 * Forward one decoded FIFO sample to the IIO callback registered for its
 * sensor, if any. Flush markers are consumed here and never forwarded.
 * Returns -EINVAL for an out-of-range sensor id, 0 when nothing is
 * registered or the sample is a flush, otherwise the callback's result.
 */
static inline int
cros_sensorhub_send_sample(struct cros_ec_sensorhub *sensorhub,
			   struct cros_ec_sensors_ring_sample *sample)
{
	const int id = sample->sensor_id;
	cros_ec_sensorhub_push_data_cb_t cb;

	if (id >= sensorhub->sensor_num)
		return -EINVAL;

	cb = sensorhub->push_data[id].push_data_cb;
	if (!cb || (sample->flag & MOTIONSENSE_SENSOR_FLAG_FLUSH))
		return 0;

	return cb(sensorhub->push_data[id].indio_dev, sample->vector,
		  sample->timestamp);
}
/**
 * cros_ec_sensorhub_register_push_data() - register the callback to the hub.
 *
 * @sensorhub : Sensor Hub object
 * @sensor_num : The sensor the caller is interested in.
 * @indio_dev : The iio device to use when a sample arrives.
 * @cb : The callback to call when a sample arrives.
 *
 * The callback cb will be used by cros_ec_sensorhub_ring to distribute events
 * from the EC.
 *
 * Return: 0 when callback is registered.
 *         EINVAL is the sensor number is invalid or the slot already used.
 */
int cros_ec_sensorhub_register_push_data(struct cros_ec_sensorhub *sensorhub,
					 u8 sensor_num,
					 struct iio_dev *indio_dev,
					 cros_ec_sensorhub_push_data_cb_t cb)
{
	/* Reject out-of-range sensors and slots that are already claimed. */
	if (sensor_num >= sensorhub->sensor_num ||
	    sensorhub->push_data[sensor_num].indio_dev)
		return -EINVAL;

	sensorhub->push_data[sensor_num].indio_dev = indio_dev;
	sensorhub->push_data[sensor_num].push_data_cb = cb;

	return 0;
}
EXPORT_SYMBOL_GPL(cros_ec_sensorhub_register_push_data);
/**
 * cros_ec_sensorhub_unregister_push_data() - unregister the callback so the
 * ring no longer forwards samples for this sensor.
 *
 * @sensorhub : Sensor Hub object
 * @sensor_num : The sensor to unregister. No bounds check is done here; the
 *               caller must pass a value previously accepted by
 *               cros_ec_sensorhub_register_push_data().
 */
void cros_ec_sensorhub_unregister_push_data(struct cros_ec_sensorhub *sensorhub,
					    u8 sensor_num)
{
	sensorhub->push_data[sensor_num].indio_dev = NULL;
	sensorhub->push_data[sensor_num].push_data_cb = NULL;
}
EXPORT_SYMBOL_GPL(cros_ec_sensorhub_unregister_push_data);
/**
 * cros_ec_sensorhub_ring_fifo_enable() - Enable or disable interrupt generation
 *	for FIFO events.
 * @sensorhub: Sensor Hub object
 * @on: true when events are requested.
 *
 * To be called before sleeping or when no one is listening.
 * Return: 0 on success, or an error when we can not communicate with the EC.
 *
 */
int cros_ec_sensorhub_ring_fifo_enable(struct cros_ec_sensorhub *sensorhub,
				       bool on)
{
	int ret, i;

	mutex_lock(&sensorhub->cmd_lock);
	if (sensorhub->tight_timestamps)
		/*
		 * Reset batch spreading state — presumably stale across an
		 * enable/disable gap. NOTE(review): confirm against
		 * cros_ec_sensor_ring_spread_add() expectations.
		 */
		for (i = 0; i < sensorhub->sensor_num; i++)
			sensorhub->batch_state[i].last_len = 0;

	sensorhub->params->cmd = MOTIONSENSE_CMD_FIFO_INT_ENABLE;
	sensorhub->params->fifo_int_enable.enable = on;

	sensorhub->msg->outsize = sizeof(struct ec_params_motion_sense);
	sensorhub->msg->insize = sizeof(struct ec_response_motion_sense);

	ret = cros_ec_cmd_xfer_status(sensorhub->ec->ec_dev, sensorhub->msg);
	mutex_unlock(&sensorhub->cmd_lock);

	/* We expect to receive a payload of 4 bytes, ignore. */
	if (ret > 0)
		ret = 0;

	return ret;
}
/* qsort-style three-way comparator for s64 values, used by sort(). */
static int cros_ec_sensor_ring_median_cmp(const void *pv1, const void *pv2)
{
	const s64 a = *(const s64 *)pv1;
	const s64 b = *(const s64 *)pv2;

	if (a == b)
		return 0;

	return a > b ? 1 : -1;
}
/*
 * cros_ec_sensor_ring_median: Gets median of an array of numbers
 *
 * For now it's implemented with an O(n log n) sort then returning
 * the middle element. A more optimal method would be something like
 * quickselect, but given that n = 64 we can probably live with it in the
 * name of clarity.
 *
 * Warning: the input array gets modified (sorted)!
 */
static s64 cros_ec_sensor_ring_median(s64 *array, size_t length)
{
	sort(array, length, sizeof(s64), cros_ec_sensor_ring_median_cmp, NULL);
	return array[length / 2];
}
/*
* IRQ Timestamp Filtering
*
* Lower down in cros_ec_sensor_ring_process_event(), for each sensor event
 * we have to calculate its timestamp in the AP timebase. There are 3 time
* points:
* a - EC timebase, sensor event
* b - EC timebase, IRQ
* c - AP timebase, IRQ
 * a' - what we want: sensor event in AP timebase
*
* While a and b are recorded at accurate times (due to the EC real time
* nature); c is pretty untrustworthy, even though it's recorded the
 * first thing in ec_irq_handler(). There is a very good chance we'll get
 * added latency due to:
* other irqs
* ddrfreq
* cpuidle
*
* Normally a' = c - b + a, but if we do that naive math any jitter in c
* will get coupled in a', which we don't want. We want a function
* a' = cros_ec_sensor_ring_ts_filter(a) which will filter out outliers in c.
*
* Think of a graph of AP time(b) on the y axis vs EC time(c) on the x axis.
* The slope of the line won't be exactly 1, there will be some clock drift
* between the 2 chips for various reasons (mechanical stress, temperature,
* voltage). We need to extrapolate values for a future x, without trusting
* recent y values too much.
*
* We use a median filter for the slope, then another median filter for the
* y-intercept to calculate this function:
* dx[n] = x[n-1] - x[n]
 * dy[n] = y[n-1] - y[n]
* m[n] = dy[n] / dx[n]
* median_m = median(m[n-k:n])
* error[i] = y[n-i] - median_m * x[n-i]
* median_error = median(error[:k])
* predicted_y = median_m * x + median_error
*
* Implementation differences from above:
* - Redefined y to be actually c - b, this gives us a lot more precision
* to do the math. (c-b)/b variations are more obvious than c/b variations.
* - Since we don't have floating point, any operations involving slope are
* done using fixed point math (*M_PRECISION)
* - Since x and y grow with time, we keep zeroing the graph (relative to
* the last sample), this way math involving *x[n-i] will not overflow
* - EC timestamps are kept in us, it improves the slope calculation precision
*/
/**
 * cros_ec_sensor_ring_ts_filter_update() - Update filter history.
 *
 * @state: Filter information.
 * @b: IRQ timestamp, EC timebase (us)
 * @c: IRQ timestamp, AP timebase (ns)
 *
 * Given a new IRQ timestamp pair (EC and AP timebases), add it to the filter
 * history.
 */
static void
cros_ec_sensor_ring_ts_filter_update(struct cros_ec_sensors_ts_filter_state
				     *state,
				     s64 b, s64 c)
{
	s64 x, y;
	s64 dx, dy;
	s64 m; /* stored as *M_PRECISION */
	/*
	 * Both scratch pointers alias state->temp_buf; they are used in
	 * disjoint phases (slope copy first, then error computation).
	 */
	s64 *m_history_copy = state->temp_buf;
	s64 *error = state->temp_buf;
	int i;

	/* we trust b the most, that'll be our independent variable */
	x = b;
	/* y is the offset between AP and EC times, in ns */
	y = c - b * 1000;

	dx = (state->x_history[0] + state->x_offset) - x;
	if (dx == 0)
		return; /* we already have this irq in the history */
	dy = (state->y_history[0] + state->y_offset) - y;
	m = div64_s64(dy * M_PRECISION, dx);

	/* Empty filter if we haven't seen any action in a while. */
	if (-dx > TS_HISTORY_BORED_US)
		state->history_len = 0;

	/* Move everything over, also update offset to all absolute coords. */
	for (i = state->history_len - 1; i >= 1; i--) {
		state->x_history[i] = state->x_history[i - 1] + dx;
		state->y_history[i] = state->y_history[i - 1] + dy;

		state->m_history[i] = state->m_history[i - 1];
		/*
		 * Also use the same loop to copy m_history for future
		 * median extraction.
		 */
		m_history_copy[i] = state->m_history[i - 1];
	}

	/* Store the x and y, but remember offset is actually last sample. */
	state->x_offset = x;
	state->y_offset = y;
	state->x_history[0] = 0;
	state->y_history[0] = 0;

	state->m_history[0] = m;
	m_history_copy[0] = m;

	if (state->history_len < CROS_EC_SENSORHUB_TS_HISTORY_SIZE)
		state->history_len++;

	/* Precalculate things for the filter. */
	if (state->history_len > TS_HISTORY_THRESHOLD) {
		state->median_m =
			cros_ec_sensor_ring_median(m_history_copy,
						   state->history_len - 1);

		/*
		 * Calculate y-intercepts as if m_median is the slope and
		 * points in the history are on the line. median_error will
		 * still be in the offset coordinate system.
		 */
		for (i = 0; i < state->history_len; i++)
			error[i] = state->y_history[i] -
				div_s64(state->median_m * state->x_history[i],
					M_PRECISION);

		state->median_error =
			cros_ec_sensor_ring_median(error, state->history_len);
	} else {
		state->median_m = 0;
		state->median_error = 0;
	}
	trace_cros_ec_sensorhub_filter(state, dx, dy);
}
/**
 * cros_ec_sensor_ring_ts_filter() - Translate EC timebase timestamp to AP
 * timebase
 *
 * @state: filter information.
 * @x: any ec timestamp (us).
 *
 * cros_ec_sensor_ring_ts_filter(a) => a' event timestamp, AP timebase
 * cros_ec_sensor_ring_ts_filter(b) => calculated timestamp when the EC IRQ
 * should have happened on the AP, with low jitter
 *
 * Note: The filter will only activate once state->history_len goes
 * over TS_HISTORY_THRESHOLD. Otherwise it'll just do the naive c - b + a
 * transform (median_m and median_error are both 0 then).
 *
 * How to derive the formula, starting from:
 *   f(x) = median_m * x + median_error
 * That's the calculated AP - EC offset (at the x point in time)
 * Undo the coordinate system transform:
 *   f(x) = median_m * (x - x_offset) + median_error + y_offset
 * Remember to undo the "y = c - b * 1000" modification:
 *   f(x) = median_m * (x - x_offset) + median_error + y_offset + x * 1000
 *
 * Return: timestamp in AP timebase (ns)
 */
static s64
cros_ec_sensor_ring_ts_filter(struct cros_ec_sensors_ts_filter_state *state,
			      s64 x)
{
	return div_s64(state->median_m * (x - state->x_offset), M_PRECISION)
	       + state->median_error + state->y_offset + x * 1000;
}
/*
 * Since a and b were originally 32 bit values from the EC,
 * they overflow relatively often, casting is not enough, so we need to
 * add an offset.
 */
static void
cros_ec_sensor_ring_fix_overflow(s64 *ts,
				 const s64 overflow_period,
				 struct cros_ec_sensors_ec_overflow_state
				 *state)
{
	s64 adjust;

	/* Apply the wrap-around correction accumulated so far. */
	*ts += state->offset;
	if (abs(state->last - *ts) > (overflow_period / 2)) {
		/* A jump of over half a period means the counter wrapped. */
		adjust = state->last > *ts ? overflow_period : -overflow_period;
		state->offset += adjust;
		*ts += adjust;
	}
	state->last = *ts;
}
/*
 * Clamp a sample that goes backwards in time for its sensor: it is given the
 * sensor's last batch timestamp so the spreading code will re-space it.
 */
static void
cros_ec_sensor_ring_check_for_past_timestamp(struct cros_ec_sensorhub
					     *sensorhub,
					     struct cros_ec_sensors_ring_sample
					     *sample)
{
	const u8 sensor_id = sample->sensor_id;

	/* If this event is earlier than one we saw before... */
	if (sensorhub->batch_state[sensor_id].newest_sensor_event >
	    sample->timestamp)
		/* mark it for spreading. */
		sample->timestamp =
			sensorhub->batch_state[sensor_id].last_ts;
	else
		sensorhub->batch_state[sensor_id].newest_sensor_event =
			sample->timestamp;
}
/**
 * cros_ec_sensor_ring_process_event() - Process one EC FIFO event
 *
 * @sensorhub: Sensor Hub object.
 * @fifo_info: FIFO information from the EC (includes b point, EC timebase).
 * @fifo_timestamp: EC IRQ, kernel timebase (aka c).
 * @current_timestamp: calculated event timestamp, kernel timebase (aka a').
 * @in: incoming FIFO event from EC (includes a point, EC timebase).
 * @out: outgoing event to user space (includes a').
 *
 * Process one EC event, add it in the ring if necessary.
 *
 * Return: true if out event has been populated.
 */
static bool
cros_ec_sensor_ring_process_event(struct cros_ec_sensorhub *sensorhub,
				const struct ec_response_motion_sense_fifo_info
				*fifo_info,
				const ktime_t fifo_timestamp,
				ktime_t *current_timestamp,
				struct ec_response_motion_sensor_data *in,
				struct cros_ec_sensors_ring_sample *out)
{
	const s64 now = cros_ec_get_time_ns();
	int axis, async_flags;

	/* Do not populate the filter based on asynchronous events. */
	async_flags = in->flags &
		(MOTIONSENSE_SENSOR_FLAG_ODR | MOTIONSENSE_SENSOR_FLAG_FLUSH);

	if (in->flags & MOTIONSENSE_SENSOR_FLAG_TIMESTAMP && !async_flags) {
		s64 a = in->timestamp;
		s64 b = fifo_info->timestamp;
		s64 c = fifo_timestamp;

		/* Both EC timestamps are 32-bit counters; unwrap them. */
		cros_ec_sensor_ring_fix_overflow(&a, 1LL << 32,
						 &sensorhub->overflow_a);
		cros_ec_sensor_ring_fix_overflow(&b, 1LL << 32,
						 &sensorhub->overflow_b);

		if (sensorhub->tight_timestamps) {
			cros_ec_sensor_ring_ts_filter_update(
					&sensorhub->filter, b, c);
			*current_timestamp = cros_ec_sensor_ring_ts_filter(
					&sensorhub->filter, a);
		} else {
			s64 new_timestamp;

			/*
			 * Disable filtering since we might add more jitter
			 * if b is in a random point in time.
			 */
			new_timestamp = c - b * 1000 + a * 1000;
			/*
			 * The timestamp can be stale if we had to use the fifo
			 * info timestamp.
			 */
			if (new_timestamp - *current_timestamp > 0)
				*current_timestamp = new_timestamp;
		}
		trace_cros_ec_sensorhub_timestamp(in->timestamp,
						  fifo_info->timestamp,
						  fifo_timestamp,
						  *current_timestamp,
						  now);
	}

	if (in->flags & MOTIONSENSE_SENSOR_FLAG_ODR) {
		if (sensorhub->tight_timestamps) {
			sensorhub->batch_state[in->sensor_num].last_len = 0;
			sensorhub->batch_state[in->sensor_num].penul_len = 0;
		}
		/*
		 * ODR change is only useful for the sensor_ring, it does not
		 * convey information to clients.
		 */
		return false;
	}

	if (in->flags & MOTIONSENSE_SENSOR_FLAG_FLUSH) {
		out->sensor_id = in->sensor_num;
		out->timestamp = *current_timestamp;
		out->flag = in->flags;
		if (sensorhub->tight_timestamps)
			sensorhub->batch_state[out->sensor_id].last_len = 0;
		/*
		 * No other payload information provided with
		 * flush ack.
		 */
		return true;
	}

	if (in->flags & MOTIONSENSE_SENSOR_FLAG_TIMESTAMP)
		/* If we just have a timestamp, skip this entry. */
		return false;

	/* Regular sample */
	out->sensor_id = in->sensor_num;
	trace_cros_ec_sensorhub_data(in->sensor_num,
				     fifo_info->timestamp,
				     fifo_timestamp,
				     *current_timestamp,
				     now);

	if (*current_timestamp - now > 0) {
		/*
		 * This fix is needed to overcome the timestamp filter putting
		 * events in the future.
		 */
		sensorhub->future_timestamp_total_ns +=
			*current_timestamp - now;
		if (++sensorhub->future_timestamp_count ==
				FUTURE_TS_ANALYTICS_COUNT_MAX) {
			s64 avg = div_s64(sensorhub->future_timestamp_total_ns,
					  sensorhub->future_timestamp_count);

			dev_warn_ratelimited(sensorhub->dev,
					     "100 timestamps in the future, %lldns shaved on average\n",
					     avg);
			sensorhub->future_timestamp_count = 0;
			sensorhub->future_timestamp_total_ns = 0;
		}
		/* Clamp the emitted timestamp to "now". */
		out->timestamp = now;
	} else {
		out->timestamp = *current_timestamp;
	}

	out->flag = in->flags;
	for (axis = 0; axis < 3; axis++)
		out->vector[axis] = in->data[axis];

	if (sensorhub->tight_timestamps)
		cros_ec_sensor_ring_check_for_past_timestamp(sensorhub, out);

	return true;
}
/*
 * cros_ec_sensor_ring_spread_add: Calculate proper timestamps then add to
 * ringbuffer.
 *
 * This is the new spreading code, assumes every sample's timestamp
 * precedes the sample. Run if tight_timestamps == true.
 *
 * Sometimes the EC receives only one interrupt (hence timestamp) for
 * a batch of samples. Only the first sample will have the correct
 * timestamp. So we must interpolate the other samples.
 * We use the previous batch timestamp and our current batch timestamp
 * as a way to calculate period, then spread the samples evenly.
 *
 * s0 int, 0ms
 * s1 int, 10ms
 * s2 int, 20ms
 * 30ms point goes by, no interrupt, previous one is still asserted
 * downloading s2 and s3
 * s3 sample, 20ms (incorrect timestamp)
 * s4 int, 40ms
 *
 * The batches are [(s0), (s1), (s2, s3), (s4)]. Since the 3rd batch
 * has 2 samples in them, we adjust the timestamp of s3.
 * s2 - s1 = 10ms, so s3 must be s2 + 10ms => 20ms. If s1 would have
 * been part of a bigger batch things would have gotten a little
 * more complicated.
 *
 * Note: we also assume another sensor sample doesn't break up a batch
 * in 2 or more partitions. Example, there can't ever be a sync sensor
 * in between S2 and S3. This simplifies the following code.
 */
static void
cros_ec_sensor_ring_spread_add(struct cros_ec_sensorhub *sensorhub,
			       unsigned long sensor_mask,
			       struct cros_ec_sensors_ring_sample *last_out)
{
	struct cros_ec_sensors_ring_sample *batch_start, *next_batch_start;
	int id;

	for_each_set_bit(id, &sensor_mask, sensorhub->sensor_num) {
		for (batch_start = sensorhub->ring; batch_start < last_out;
		     batch_start = next_batch_start) {
			/*
			 * For each batch (where all samples have the same
			 * timestamp).
			 */
			int batch_len, sample_idx;
			struct cros_ec_sensors_ring_sample *batch_end =
				batch_start;
			struct cros_ec_sensors_ring_sample *s;
			s64 batch_timestamp = batch_start->timestamp;
			s64 sample_period;

			/*
			 * Skip over batches that start with the sensor types
			 * we're not looking at right now.
			 */
			if (batch_start->sensor_id != id) {
				next_batch_start = batch_start + 1;
				continue;
			}

			/*
			 * Do not start a batch
			 * from a flush, as it happens asynchronously to the
			 * regular flow of events.
			 */
			if (batch_start->flag & MOTIONSENSE_SENSOR_FLAG_FLUSH) {
				cros_sensorhub_send_sample(sensorhub,
							   batch_start);
				next_batch_start = batch_start + 1;
				continue;
			}

			if (batch_start->timestamp <=
			    sensorhub->batch_state[id].last_ts) {
				/*
				 * This sample still belongs to the previous
				 * batch: resume it and roll the state back
				 * one step.
				 */
				batch_timestamp =
					sensorhub->batch_state[id].last_ts;
				batch_len = sensorhub->batch_state[id].last_len;

				sample_idx = batch_len;

				sensorhub->batch_state[id].last_ts =
					sensorhub->batch_state[id].penul_ts;
				sensorhub->batch_state[id].last_len =
					sensorhub->batch_state[id].penul_len;
			} else {
				/*
				 * Push first sample in the batch to the
				 * kfifo, it's guaranteed to be correct, the
				 * rest will follow later on.
				 */
				sample_idx = 1;
				batch_len = 1;
				cros_sensorhub_send_sample(sensorhub,
							   batch_start);
				batch_start++;
			}

			/* Find all samples have the same timestamp. */
			for (s = batch_start; s < last_out; s++) {
				if (s->sensor_id != id)
					/*
					 * Skip over other sensor types that
					 * are interleaved, don't count them.
					 */
					continue;
				if (s->timestamp != batch_timestamp)
					/* we discovered the next batch */
					break;
				if (s->flag & MOTIONSENSE_SENSOR_FLAG_FLUSH)
					/* break on flush packets */
					break;
				batch_end = s;
				batch_len++;
			}

			if (batch_len == 1)
				goto done_with_this_batch;

			/* Can we calculate period? */
			if (sensorhub->batch_state[id].last_len == 0) {
				dev_warn(sensorhub->dev, "Sensor %d: lost %d samples when spreading\n",
					 id, batch_len - 1);
				goto done_with_this_batch;
				/*
				 * Note: we're dropping the rest of the samples
				 * in this batch since we have no idea where
				 * they're supposed to go without a period
				 * calculation.
				 */
			}

			sample_period = div_s64(batch_timestamp -
				sensorhub->batch_state[id].last_ts,
				sensorhub->batch_state[id].last_len);
			dev_dbg(sensorhub->dev,
				"Adjusting %d samples, sensor %d last_batch @%lld (%d samples) batch_timestamp=%lld => period=%lld\n",
				batch_len, id,
				sensorhub->batch_state[id].last_ts,
				sensorhub->batch_state[id].last_len,
				batch_timestamp,
				sample_period);

			/*
			 * Adjust timestamps of the samples then push them to
			 * kfifo.
			 */
			for (s = batch_start; s <= batch_end; s++) {
				if (s->sensor_id != id)
					/*
					 * Skip over other sensor types that
					 * are interleaved, don't change them.
					 */
					continue;

				s->timestamp = batch_timestamp +
					sample_period * sample_idx;
				sample_idx++;

				cros_sensorhub_send_sample(sensorhub, s);
			}

done_with_this_batch:
			sensorhub->batch_state[id].penul_ts =
				sensorhub->batch_state[id].last_ts;
			sensorhub->batch_state[id].penul_len =
				sensorhub->batch_state[id].last_len;

			sensorhub->batch_state[id].last_ts =
				batch_timestamp;
			sensorhub->batch_state[id].last_len = batch_len;

			next_batch_start = batch_end + 1;
		}
	}
}
/*
 * cros_ec_sensor_ring_spread_add_legacy: Calculate proper timestamps then
 * add to ringbuffer (legacy).
 *
 * Note: This assumes we're running old firmware, where timestamp
 * is inserted after its sample(s). There can be several samples between
 * timestamps, so several samples can have the same timestamp.
 *
 *       timestamp | count
 *       -----------------
 * 1st sample --> TS1 | 1
 *                TS2 | 2
 *                TS2 | 3
 *                TS3 | 4
 * last_out -->
 *
 *
 * We spread time for the samples using period p = (current - TS1)/4.
 * between TS1 and TS2: [TS1+p/4, TS1+2p/4, TS1+3p/4, current_timestamp].
 *
 */
static void
cros_ec_sensor_ring_spread_add_legacy(struct cros_ec_sensorhub *sensorhub,
				      unsigned long sensor_mask,
				      s64 current_timestamp,
				      struct cros_ec_sensors_ring_sample
				      *last_out)
{
	struct cros_ec_sensors_ring_sample *out;
	int i;

	for_each_set_bit(i, &sensor_mask, sensorhub->sensor_num) {
		s64 timestamp;
		int count = 0;
		s64 time_period;

		for (out = sensorhub->ring; out < last_out; out++) {
			if (out->sensor_id != i)
				continue;

			/* Timestamp to start with */
			timestamp = out->timestamp;
			out++;
			count = 1;
			break;
		}
		for (; out < last_out; out++) {
			/* Find last sample. */
			if (out->sensor_id != i)
				continue;
			count++;
		}
		if (count == 0)
			continue;

		/* Spread uniformly between the first and last samples. */
		time_period = div_s64(current_timestamp - timestamp, count);

		for (out = sensorhub->ring; out < last_out; out++) {
			if (out->sensor_id != i)
				continue;
			timestamp += time_period;
			out->timestamp = timestamp;
		}
	}

	/* Push the event into the kfifo */
	for (out = sensorhub->ring; out < last_out; out++)
		cros_sensorhub_send_sample(sensorhub, out);
}
/**
 * cros_ec_sensorhub_ring_handler() - The trigger handler function
 *
 * @sensorhub: Sensor Hub object.
 *
 * Called by the notifier, process the EC sensor FIFO queue: drain every
 * pending event from the EC, compute AP-timebase timestamps for each sample,
 * then spread/push them to the registered IIO consumers.
 */
static void cros_ec_sensorhub_ring_handler(struct cros_ec_sensorhub *sensorhub)
{
	struct ec_response_motion_sense_fifo_info *fifo_info =
		sensorhub->fifo_info;
	struct cros_ec_dev *ec = sensorhub->ec;
	ktime_t fifo_timestamp, current_timestamp;
	int i, j, number_data, ret;
	unsigned long sensor_mask = 0;
	struct ec_response_motion_sensor_data *in;
	struct cros_ec_sensors_ring_sample *out, *last_out;

	mutex_lock(&sensorhub->cmd_lock);

	/* Get FIFO information if there are lost vectors. */
	if (fifo_info->total_lost) {
		int fifo_info_length =
			sizeof(struct ec_response_motion_sense_fifo_info) +
			sizeof(u16) * sensorhub->sensor_num;

		/* Need to retrieve the number of lost vectors per sensor */
		sensorhub->params->cmd = MOTIONSENSE_CMD_FIFO_INFO;
		sensorhub->msg->outsize = 1;
		sensorhub->msg->insize = fifo_info_length;

		if (cros_ec_cmd_xfer_status(ec->ec_dev, sensorhub->msg) < 0)
			goto error;

		memcpy(fifo_info, &sensorhub->resp->fifo_info,
		       fifo_info_length);

		/*
		 * Update collection time, will not be as precise as the
		 * non-error case.
		 */
		fifo_timestamp = cros_ec_get_time_ns();
	} else {
		fifo_timestamp = sensorhub->fifo_timestamp[
			CROS_EC_SENSOR_NEW_TS];
	}

	if (fifo_info->count > sensorhub->fifo_size ||
	    fifo_info->size != sensorhub->fifo_size) {
		dev_warn(sensorhub->dev,
			 "Mismatch EC data: count %d, size %d - expected %d\n",
			 fifo_info->count, fifo_info->size,
			 sensorhub->fifo_size);
		goto error;
	}

	/* Copy elements in the main fifo */
	current_timestamp = sensorhub->fifo_timestamp[CROS_EC_SENSOR_LAST_TS];
	out = sensorhub->ring;
	for (i = 0; i < fifo_info->count; i += number_data) {
		sensorhub->params->cmd = MOTIONSENSE_CMD_FIFO_READ;
		sensorhub->params->fifo_read.max_data_vector =
			fifo_info->count - i;
		sensorhub->msg->outsize =
			sizeof(struct ec_params_motion_sense);
		sensorhub->msg->insize =
			sizeof(sensorhub->resp->fifo_read) +
			sensorhub->params->fifo_read.max_data_vector *
			sizeof(struct ec_response_motion_sensor_data);
		ret = cros_ec_cmd_xfer_status(ec->ec_dev, sensorhub->msg);
		if (ret < 0) {
			dev_warn(sensorhub->dev, "Fifo error: %d\n", ret);
			break;
		}
		number_data = sensorhub->resp->fifo_read.number_data;
		if (number_data == 0) {
			dev_dbg(sensorhub->dev, "Unexpected empty FIFO\n");
			break;
		}
		if (number_data > fifo_info->count - i) {
			dev_warn(sensorhub->dev,
				 "Invalid EC data: too many entry received: %d, expected %d\n",
				 number_data, fifo_info->count - i);
			break;
		}
		if (out + number_data >
		    sensorhub->ring + fifo_info->count) {
			dev_warn(sensorhub->dev,
				 "Too many samples: %d (%zd data) to %d entries for expected %d entries\n",
				 i, out - sensorhub->ring, i + number_data,
				 fifo_info->count);
			break;
		}

		for (in = sensorhub->resp->fifo_read.data, j = 0;
		     j < number_data; j++, in++) {
			/*
			 * BUGFIX: the argument below had been corrupted to
			 * "¤t_timestamp" (HTML-entity mangling of
			 * "&current_timestamp"); restored the address-of
			 * expression so the file compiles again.
			 */
			if (cros_ec_sensor_ring_process_event(
						sensorhub, fifo_info,
						fifo_timestamp,
						&current_timestamp,
						in, out)) {
				sensor_mask |= BIT(in->sensor_num);
				out++;
			}
		}
	}
	mutex_unlock(&sensorhub->cmd_lock);
	last_out = out;

	if (out == sensorhub->ring)
		/* Unexpected empty FIFO. */
		goto ring_handler_end;

	/*
	 * Check if current_timestamp is ahead of the last sample. Normally,
	 * the EC appends a timestamp after the last sample, but if the AP
	 * is slow to respond to the IRQ, the EC may have added new samples.
	 * Use the FIFO info timestamp as last timestamp then.
	 */
	if (!sensorhub->tight_timestamps &&
	    (last_out - 1)->timestamp == current_timestamp)
		current_timestamp = fifo_timestamp;

	/* Warn on lost samples. */
	if (fifo_info->total_lost)
		for (i = 0; i < sensorhub->sensor_num; i++) {
			if (fifo_info->lost[i]) {
				dev_warn_ratelimited(sensorhub->dev,
						     "Sensor %d: lost: %d out of %d\n",
						     i, fifo_info->lost[i],
						     fifo_info->total_lost);
				if (sensorhub->tight_timestamps)
					sensorhub->batch_state[i].last_len = 0;
			}
		}

	/*
	 * Spread samples in case of batching, then add them to the
	 * ringbuffer.
	 */
	if (sensorhub->tight_timestamps)
		cros_ec_sensor_ring_spread_add(sensorhub, sensor_mask,
					       last_out);
	else
		cros_ec_sensor_ring_spread_add_legacy(sensorhub, sensor_mask,
						      current_timestamp,
						      last_out);

ring_handler_end:
	sensorhub->fifo_timestamp[CROS_EC_SENSOR_LAST_TS] = current_timestamp;
	return;

error:
	mutex_unlock(&sensorhub->cmd_lock);
}
/*
 * MKBP notifier callback: on a sensor-FIFO event, snapshot the FIFO info and
 * event time from the EC, then drain the FIFO via the ring handler. Events
 * queued during suspend are acknowledged but not processed.
 */
static int cros_ec_sensorhub_event(struct notifier_block *nb,
				   unsigned long queued_during_suspend,
				   void *_notify)
{
	struct cros_ec_sensorhub *sensorhub;
	struct cros_ec_device *ec_dev;

	sensorhub = container_of(nb, struct cros_ec_sensorhub, notifier);
	ec_dev = sensorhub->ec->ec_dev;

	if (ec_dev->event_data.event_type != EC_MKBP_EVENT_SENSOR_FIFO)
		return NOTIFY_DONE;

	if (ec_dev->event_size != sizeof(ec_dev->event_data.data.sensor_fifo)) {
		dev_warn(ec_dev->dev, "Invalid fifo info size\n");
		return NOTIFY_DONE;
	}

	if (queued_during_suspend)
		return NOTIFY_OK;

	memcpy(sensorhub->fifo_info, &ec_dev->event_data.data.sensor_fifo.info,
	       sizeof(*sensorhub->fifo_info));
	sensorhub->fifo_timestamp[CROS_EC_SENSOR_NEW_TS] =
		ec_dev->last_event_time;
	cros_ec_sensorhub_ring_handler(sensorhub);

	return NOTIFY_OK;
}
/**
* cros_ec_sensorhub_ring_allocate() - Prepare the FIFO functionality if the EC
* supports it.
*
* @sensorhub : Sensor Hub object.
*
* Return: 0 on success.
*/
int cros_ec_sensorhub_ring_allocate(struct cros_ec_sensorhub *sensorhub)
{
int fifo_info_length =
sizeof(struct ec_response_motion_sense_fifo_info) +
sizeof(u16) * sensorhub->sensor_num;
/* Allocate the array for lost events. */
sensorhub->fifo_info = devm_kzalloc(sensorhub->dev, fifo_info_length,
GFP_KERNEL);
if (!sensorhub->fifo_info)
return -ENOMEM;
/*
* Allocate the callback area based on the number of sensors.
* Add one for the sensor ring.
*/
sensorhub->push_data = devm_kcalloc(sensorhub->dev,
sensorhub->sensor_num,
sizeof(*sensorhub->push_data),
GFP_KERNEL);
if (!sensorhub->push_data)
return -ENOMEM;
sensorhub->tight_timestamps = cros_ec_check_features(
sensorhub->ec,
EC_FEATURE_MOTION_SENSE_TIGHT_TIMESTAMPS);
if (sensorhub->tight_timestamps) {
sensorhub->batch_state = devm_kcalloc(sensorhub->dev,
sensorhub->sensor_num,
sizeof(*sensorhub->batch_state),
GFP_KERNEL);
if (!sensorhub->batch_state)
return -ENOMEM;
}
return 0;
}
/**
* cros_ec_sensorhub_ring_add() - Add the FIFO functionality if the EC
* supports it.
*
* @sensorhub : Sensor Hub object.
*
* Return: 0 on success.
*/
int cros_ec_sensorhub_ring_add(struct cros_ec_sensorhub *sensorhub)
{
	struct cros_ec_dev *ec = sensorhub->ec;
	int ret;
	/* FIFO info response carries one u16 lost-counter per sensor. */
	int fifo_info_length =
		sizeof(struct ec_response_motion_sense_fifo_info) +
		sizeof(u16) * sensorhub->sensor_num;

	/* Retrieve FIFO information */
	sensorhub->msg->version = 2;
	sensorhub->params->cmd = MOTIONSENSE_CMD_FIFO_INFO;
	sensorhub->msg->outsize = 1;
	sensorhub->msg->insize = fifo_info_length;

	ret = cros_ec_cmd_xfer_status(ec->ec_dev, sensorhub->msg);
	if (ret < 0)
		return ret;

	/*
	 * Allocate the full fifo. We need to copy the whole FIFO to set
	 * timestamps properly.
	 */
	sensorhub->fifo_size = sensorhub->resp->fifo_info.size;
	sensorhub->ring = devm_kcalloc(sensorhub->dev, sensorhub->fifo_size,
				       sizeof(*sensorhub->ring), GFP_KERNEL);
	if (!sensorhub->ring)
		return -ENOMEM;

	/* Seed the last-timestamp before the first event can arrive. */
	sensorhub->fifo_timestamp[CROS_EC_SENSOR_LAST_TS] =
		cros_ec_get_time_ns();

	/* Register the notifier that will act as a top half interrupt. */
	sensorhub->notifier.notifier_call = cros_ec_sensorhub_event;
	ret = blocking_notifier_chain_register(&ec->ec_dev->event_notifier,
					       &sensorhub->notifier);
	if (ret < 0)
		return ret;

	/* Start collection samples. */
	return cros_ec_sensorhub_ring_fifo_enable(sensorhub, true);
}
/*
 * cros_ec_sensorhub_ring_remove() - Tear down FIFO collection.
 * @arg: the struct cros_ec_sensorhub pointer.
 *
 * Quiesces the EC first so it stops interrupting the AP, then detaches
 * the event notifier.
 */
void cros_ec_sensorhub_ring_remove(void *arg)
{
	struct cros_ec_sensorhub *sensorhub = arg;

	cros_ec_sensorhub_ring_fifo_enable(sensorhub, false);
	blocking_notifier_chain_unregister(&sensorhub->ec->ec_dev->event_notifier,
					   &sensorhub->notifier);
}
| linux-master | drivers/platform/chrome/cros_ec_sensorhub_ring.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the ChromeOS human presence sensor (HPS), attached via I2C.
*
* The driver exposes HPS as a character device, although currently no read or
* write operations are supported. Instead, the driver only controls the power
* state of the sensor, keeping it on only while userspace holds an open file
* descriptor to the HPS device.
*
* Copyright 2022 Google LLC.
*/
#include <linux/acpi.h>
#include <linux/fs.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#define HPS_ACPI_ID "GOOG0020"
/**
 * struct hps_drvdata - per-device state for the HPS driver.
 * @client: backing I2C client.
 * @misc_device: character device exposed to userspace.
 * @enable_gpio: GPIO line controlling sensor power.
 */
struct hps_drvdata {
	struct i2c_client *client;
	struct miscdevice misc_device;
	struct gpio_desc *enable_gpio;
};
/* Drive the enable GPIO: true powers the sensor on, false powers it off. */
static void hps_set_power(struct hps_drvdata *hps, bool state)
{
	gpiod_set_value_cansleep(hps->enable_gpio, state);
}
/*
 * Opening the device takes a runtime-PM reference so the sensor is kept
 * powered while userspace holds the file open; the matching release
 * drops the reference again.
 */
static int hps_open(struct inode *inode, struct file *file)
{
	/* file->private_data is the miscdevice, set up by misc_register(). */
	struct hps_drvdata *hps = container_of(file->private_data,
					       struct hps_drvdata, misc_device);
	struct device *dev = &hps->client->dev;

	return pm_runtime_resume_and_get(dev);
}

static int hps_release(struct inode *inode, struct file *file)
{
	struct hps_drvdata *hps = container_of(file->private_data,
					       struct hps_drvdata, misc_device);
	struct device *dev = &hps->client->dev;

	return pm_runtime_put(dev);
}
static const struct file_operations hps_fops = {
.owner = THIS_MODULE,
.open = hps_open,
.release = hps_release,
};
static int hps_i2c_probe(struct i2c_client *client)
{
	struct hps_drvdata *hps;
	int ret;

	hps = devm_kzalloc(&client->dev, sizeof(*hps), GFP_KERNEL);
	if (!hps)
		return -ENOMEM;

	hps->misc_device.parent = &client->dev;
	hps->misc_device.minor = MISC_DYNAMIC_MINOR;
	hps->misc_device.name = "cros-hps";
	hps->misc_device.fops = &hps_fops;

	i2c_set_clientdata(client, hps);
	hps->client = client;

	/*
	 * HPS is powered on from firmware before entering the kernel, so we
	 * acquire the line with GPIOD_OUT_HIGH here to preserve the existing
	 * state. The peripheral is powered off after successful probe below.
	 */
	hps->enable_gpio = devm_gpiod_get(&client->dev, "enable", GPIOD_OUT_HIGH);
	if (IS_ERR(hps->enable_gpio)) {
		ret = PTR_ERR(hps->enable_gpio);
		dev_err(&client->dev, "failed to get enable gpio: %d\n", ret);
		return ret;
	}

	ret = misc_register(&hps->misc_device);
	if (ret) {
		dev_err(&client->dev, "failed to initialize misc device: %d\n", ret);
		return ret;
	}

	/* Power down until userspace opens the device (see hps_open()). */
	hps_set_power(hps, false);
	pm_runtime_enable(&client->dev);
	return 0;
}
/* Unwind probe: stop runtime PM, remove the chardev, leave HPS powered. */
static void hps_i2c_remove(struct i2c_client *client)
{
	struct hps_drvdata *hps = i2c_get_clientdata(client);

	pm_runtime_disable(&client->dev);
	misc_deregister(&hps->misc_device);

	/*
	 * Re-enable HPS, in order to return it to its default state
	 * (i.e. powered on).
	 */
	hps_set_power(hps, true);
}
/*
 * PM callbacks (wired up via UNIVERSAL_DEV_PM_OPS below): the sensor is
 * simply powered off while suspended and back on at resume.
 */
static int hps_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct hps_drvdata *hps = i2c_get_clientdata(client);

	hps_set_power(hps, false);
	return 0;
}

static int hps_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct hps_drvdata *hps = i2c_get_clientdata(client);

	hps_set_power(hps, true);
	return 0;
}
static UNIVERSAL_DEV_PM_OPS(hps_pm_ops, hps_suspend, hps_resume, NULL);
static const struct i2c_device_id hps_i2c_id[] = {
{ "cros-hps", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, hps_i2c_id);
#ifdef CONFIG_ACPI
static const struct acpi_device_id hps_acpi_id[] = {
{ HPS_ACPI_ID, 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, hps_acpi_id);
#endif /* CONFIG_ACPI */
static struct i2c_driver hps_i2c_driver = {
.probe = hps_i2c_probe,
.remove = hps_i2c_remove,
.id_table = hps_i2c_id,
.driver = {
.name = "cros-hps",
.pm = &hps_pm_ops,
.acpi_match_table = ACPI_PTR(hps_acpi_id),
},
};
module_i2c_driver(hps_i2c_driver);
MODULE_ALIAS("acpi:" HPS_ACPI_ID);
MODULE_AUTHOR("Sami Kyöstilä <[email protected]>");
MODULE_DESCRIPTION("Driver for ChromeOS HPS");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/chrome/cros_hps_i2c.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* UART interface for ChromeOS Embedded Controller
*
* Copyright 2020-2022 Google LLC.
*/
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/serdev.h>
#include <linux/slab.h>
#include <uapi/linux/sched/types.h>
#include "cros_ec.h"
/*
* EC sends contiguous bytes of response packet on UART AP RX.
* TTY driver in AP accumulates incoming bytes and calls the registered callback
* function. Byte count can range from 1 to MAX bytes supported by EC.
* This driver should wait for long time for all callbacks to be processed.
* Considering the worst case scenario, wait for 500 msec. This timeout should
* account for max latency and some additional guard time.
* Best case: Entire packet is received in ~200 ms, wait queue will be released
* and packet will be processed.
* Worst case: TTY driver sends bytes in multiple callbacks. In this case this
* driver will wait for ~1 sec beyond which it will timeout.
* This timeout value should not exceed ~500 msec because in case if
* EC_CMD_REBOOT_EC sent, high level driver should be able to intercept EC
* in RO.
*/
#define EC_MSG_DEADLINE_MS 500
/**
* struct response_info - Encapsulate EC response related
* information for passing between function
* cros_ec_uart_pkt_xfer() and cros_ec_uart_rx_bytes()
* callback.
* @data: Copy the data received from EC here.
* @max_size: Max size allocated for the @data buffer. If the
* received data exceeds this value, we log an error.
* @size: Actual size of data received from EC. This is also
* used to accumulate byte count with response is received
* in dma chunks.
* @exp_len: Expected bytes of response from EC including header.
* @status: Re-init to 0 before sending a cmd. Updated to 1 when
* a response is successfully received, or an error number
* on failure.
* @wait_queue: Wait queue EC response where the cros_ec sends request
* to EC and waits
*/
struct response_info {
void *data;
size_t max_size;
size_t size;
size_t exp_len;
int status;
wait_queue_head_t wait_queue;
};
/**
* struct cros_ec_uart - information about a uart-connected EC
*
* @serdev: serdev uart device we are connected to.
* @baudrate: UART baudrate of attached EC device.
* @flowcontrol: UART flowcontrol of attached device.
* @irq: Linux IRQ number of associated serial device.
* @response: Response info passing between cros_ec_uart_pkt_xfer()
* and cros_ec_uart_rx_bytes()
*/
struct cros_ec_uart {
struct serdev_device *serdev;
u32 baudrate;
u8 flowcontrol;
u32 irq;
struct response_info response;
};
/*
 * cros_ec_uart_rx_bytes() - TTY receive callback.
 *
 * Reassembles one EC response packet from possibly multiple chunks and
 * wakes the waiting sender once the full packet (header plus data_len
 * payload) has arrived, or on overflow.  Always consumes all @count bytes.
 */
static int cros_ec_uart_rx_bytes(struct serdev_device *serdev,
				 const u8 *data,
				 size_t count)
{
	struct ec_host_response *host_response;
	struct cros_ec_device *ec_dev = serdev_device_get_drvdata(serdev);
	struct cros_ec_uart *ec_uart = ec_dev->priv;
	struct response_info *resp = &ec_uart->response;

	/* Check if bytes were sent out of band */
	if (!resp->data) {
		/* Discard all bytes */
		dev_warn(ec_dev->dev, "Bytes received out of band, dropping them.\n");
		return count;
	}

	/*
	 * Check if incoming bytes + resp->size is greater than allocated
	 * buffer in din by cros_ec. This will ensure that if EC sends more
	 * bytes than max_size, waiting process will be notified with an error.
	 */
	if (resp->size + count > resp->max_size) {
		resp->status = -EMSGSIZE;
		wake_up(&resp->wait_queue);
		return count;
	}

	/* Accumulate this chunk into the response buffer. */
	memcpy(resp->data + resp->size, data, count);

	resp->size += count;

	/* Read data_len if we received response header and if exp_len was not read before. */
	if (resp->size >= sizeof(*host_response) && resp->exp_len == 0) {
		host_response = (struct ec_host_response *)resp->data;
		resp->exp_len = host_response->data_len + sizeof(*host_response);
	}

	/* If driver received response header and payload from EC, wake up the wait queue. */
	if (resp->size >= sizeof(*host_response) && resp->size == resp->exp_len) {
		resp->status = 1;
		wake_up(&resp->wait_queue);
	}

	return count;
}
/*
 * cros_ec_uart_pkt_xfer() - Send one host command over UART and wait for
 * the response reassembled by cros_ec_uart_rx_bytes().
 *
 * Returns the number of response payload bytes on success, negative
 * errno on transport failure, timeout, or checksum mismatch.
 */
static int cros_ec_uart_pkt_xfer(struct cros_ec_device *ec_dev,
				 struct cros_ec_command *ec_msg)
{
	struct cros_ec_uart *ec_uart = ec_dev->priv;
	struct serdev_device *serdev = ec_uart->serdev;
	struct response_info *resp = &ec_uart->response;
	struct ec_host_response *host_response;
	unsigned int len;
	int ret, i;
	u8 sum;

	len = cros_ec_prepare_tx(ec_dev, ec_msg);
	dev_dbg(ec_dev->dev, "Prepared len=%d\n", len);

	/* Setup for incoming response */
	resp->data = ec_dev->din;
	resp->max_size = ec_dev->din_size;
	resp->size = 0;
	resp->exp_len = 0;
	resp->status = 0;

	ret = serdev_device_write_buf(serdev, ec_dev->dout, len);
	if (ret < 0 || ret < len) {
		dev_err(ec_dev->dev, "Unable to write data\n");
		if (ret >= 0)
			ret = -EIO;
		goto exit;
	}

	/* Rx callback sets resp->status (1 or -errno) and wakes us up. */
	ret = wait_event_timeout(resp->wait_queue, resp->status,
				 msecs_to_jiffies(EC_MSG_DEADLINE_MS));
	if (ret == 0) {
		dev_warn(ec_dev->dev, "Timed out waiting for response.\n");
		ret = -ETIMEDOUT;
		goto exit;
	}

	if (resp->status < 0) {
		ret = resp->status;
		dev_warn(ec_dev->dev, "Error response received: %d\n", ret);
		goto exit;
	}

	host_response = (struct ec_host_response *)ec_dev->din;
	ec_msg->result = host_response->result;

	if (host_response->data_len > ec_msg->insize) {
		dev_err(ec_dev->dev, "Resp too long (%d bytes, expected %d)\n",
			host_response->data_len, ec_msg->insize);
		ret = -ENOSPC;
		goto exit;
	}

	/* Validate checksum */
	sum = 0;
	for (i = 0; i < sizeof(*host_response) + host_response->data_len; i++)
		sum += ec_dev->din[i];
	if (sum) {
		dev_err(ec_dev->dev, "Bad packet checksum calculated %x\n", sum);
		ret = -EBADMSG;
		goto exit;
	}

	memcpy(ec_msg->data, ec_dev->din + sizeof(*host_response), host_response->data_len);

	ret = host_response->data_len;

exit:
	/* Invalidate response buffer to guard against out of band rx data */
	resp->data = NULL;

	/* Give the EC time to reboot before the next command arrives. */
	if (ec_msg->command == EC_CMD_REBOOT_EC)
		msleep(EC_REBOOT_DELAY_MS);

	return ret;
}
/*
 * ACPI resource walker callback: record UART baudrate and flow control
 * from the serial-bus resource describing the EC link.
 */
static int cros_ec_uart_resource(struct acpi_resource *ares, void *data)
{
	struct cros_ec_uart *ec_uart = data;
	struct acpi_resource_uart_serialbus *sb = &ares->data.uart_serial_bus;

	/* Ignore anything that is not a UART serial-bus resource. */
	if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS ||
	    sb->type != ACPI_RESOURCE_SERIAL_TYPE_UART)
		return 0;

	ec_uart->baudrate = sb->default_baud_rate;
	dev_dbg(&ec_uart->serdev->dev, "Baudrate %d\n", ec_uart->baudrate);

	ec_uart->flowcontrol = sb->flow_control;
	dev_dbg(&ec_uart->serdev->dev, "Flow control %d\n", ec_uart->flowcontrol);

	return 0;
}
/*
 * cros_ec_uart_acpi_probe() - Pull UART parameters and the EC interrupt
 * out of the ACPI companion's resources.
 */
static int cros_ec_uart_acpi_probe(struct cros_ec_uart *ec_uart)
{
	int ret;
	LIST_HEAD(resources);
	struct acpi_device *adev = ACPI_COMPANION(&ec_uart->serdev->dev);

	/* Walk _CRS; cros_ec_uart_resource() fills in baudrate/flow control. */
	ret = acpi_dev_get_resources(adev, &resources, cros_ec_uart_resource, ec_uart);
	if (ret < 0)
		return ret;

	acpi_dev_free_resource_list(&resources);

	/* Retrieve GpioInt and translate it to Linux IRQ number */
	ret = acpi_dev_gpio_irq_get(adev, 0);
	if (ret < 0)
		return ret;

	ec_uart->irq = ret;
	dev_dbg(&ec_uart->serdev->dev, "IRQ number %d\n", ec_uart->irq);

	return 0;
}
static const struct serdev_device_ops cros_ec_uart_client_ops = {
.receive_buf = cros_ec_uart_rx_bytes,
};
/*
 * cros_ec_uart_probe() - Set up the serdev link and register the EC.
 *
 * Pulls UART parameters and the EC interrupt from ACPI, configures the
 * serdev accordingly, then hands the device off to the cros_ec core.
 */
static int cros_ec_uart_probe(struct serdev_device *serdev)
{
	struct device *dev = &serdev->dev;
	struct cros_ec_device *ec_dev;
	struct cros_ec_uart *ec_uart;
	int ret;

	ec_uart = devm_kzalloc(dev, sizeof(*ec_uart), GFP_KERNEL);
	if (!ec_uart)
		return -ENOMEM;

	ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL);
	if (!ec_dev)
		return -ENOMEM;

	ret = devm_serdev_device_open(dev, serdev);
	if (ret) {
		dev_err(dev, "Unable to open UART device");
		return ret;
	}

	serdev_device_set_drvdata(serdev, ec_dev);
	init_waitqueue_head(&ec_uart->response.wait_queue);

	ec_uart->serdev = serdev;

	ret = cros_ec_uart_acpi_probe(ec_uart);
	if (ret < 0) {
		dev_err(dev, "Failed to get ACPI info (%d)", ret);
		return ret;
	}

	ret = serdev_device_set_baudrate(serdev, ec_uart->baudrate);
	if (ret < 0) {
		dev_err(dev, "Failed to set up host baud rate (%d)", ret);
		return ret;
	}

	serdev_device_set_flow_control(serdev, ec_uart->flowcontrol);

	/* Initialize ec_dev for cros_ec */
	ec_dev->phys_name = dev_name(dev);
	ec_dev->dev = dev;
	ec_dev->priv = ec_uart;
	ec_dev->irq = ec_uart->irq;
	/* Only packet-style transfers are implemented for this transport. */
	ec_dev->cmd_xfer = NULL;
	ec_dev->pkt_xfer = cros_ec_uart_pkt_xfer;
	/* Initial buffers sized for the protocol-info handshake. */
	ec_dev->din_size = sizeof(struct ec_host_response) +
			   sizeof(struct ec_response_get_protocol_info);
	ec_dev->dout_size = sizeof(struct ec_host_request);

	serdev_device_set_client_ops(serdev, &cros_ec_uart_client_ops);

	return cros_ec_register(ec_dev);
}
/*
 * cros_ec_uart_remove() - serdev removal hook; unregister the EC device.
 *
 * Fix: drop the stray semicolon that followed the function body (it was
 * a useless empty file-scope declaration).
 */
static void cros_ec_uart_remove(struct serdev_device *serdev)
{
	struct cros_ec_device *ec_dev = serdev_device_get_drvdata(serdev);

	cros_ec_unregister(ec_dev);
}
/* System sleep hooks simply forward to the cros_ec core helpers. */
static int __maybe_unused cros_ec_uart_suspend(struct device *dev)
{
	struct cros_ec_device *ec_dev = dev_get_drvdata(dev);

	return cros_ec_suspend(ec_dev);
}

static int __maybe_unused cros_ec_uart_resume(struct device *dev)
{
	struct cros_ec_device *ec_dev = dev_get_drvdata(dev);

	return cros_ec_resume(ec_dev);
}
static SIMPLE_DEV_PM_OPS(cros_ec_uart_pm_ops, cros_ec_uart_suspend,
cros_ec_uart_resume);
static const struct of_device_id cros_ec_uart_of_match[] = {
{ .compatible = "google,cros-ec-uart" },
{}
};
MODULE_DEVICE_TABLE(of, cros_ec_uart_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id cros_ec_uart_acpi_id[] = {
{ "GOOG0019", 0 },
{}
};
MODULE_DEVICE_TABLE(acpi, cros_ec_uart_acpi_id);
#endif
static struct serdev_device_driver cros_ec_uart_driver = {
.driver = {
.name = "cros-ec-uart",
.acpi_match_table = ACPI_PTR(cros_ec_uart_acpi_id),
.of_match_table = cros_ec_uart_of_match,
.pm = &cros_ec_uart_pm_ops,
},
.probe = cros_ec_uart_probe,
.remove = cros_ec_uart_remove,
};
module_serdev_device_driver(cros_ec_uart_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("UART interface for ChromeOS Embedded Controller");
MODULE_AUTHOR("Bhanu Prakash Maiya <[email protected]>");
| linux-master | drivers/platform/chrome/cros_ec_uart.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022 Google LLC
*
* This driver provides the ability to configure Type-C muxes and retimers which are controlled by
* the ChromeOS EC.
*/
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_device.h>
#include <linux/usb/typec_altmode.h>
#include <linux/usb/typec_dp.h>
#include <linux/usb/typec_mux.h>
#include <linux/usb/typec_retimer.h>
/* Handles and other relevant data required for each port's switches. */
struct cros_typec_port {
int port_num;
struct typec_mux_dev *mode_switch;
struct typec_retimer *retimer;
struct cros_typec_switch_data *sdata;
};
/* Driver-specific data. */
struct cros_typec_switch_data {
struct device *dev;
struct cros_ec_device *ec;
struct cros_typec_port *ports[EC_USB_PD_MAX_PORTS];
};
/* Ask the EC to program mux @index on @port_num with USB_PD_MUX_* flags. */
static int cros_typec_cmd_mux_set(struct cros_typec_switch_data *sdata, int port_num, u8 index,
				  u8 state)
{
	struct ec_params_typec_control req = {
		.port = port_num,
		.command = TYPEC_CONTROL_COMMAND_USB_MUX_SET,
		.mux_params = {
			.mux_index = index,
			.mux_flags = state,
		},
	};

	return cros_ec_cmd(sdata->ec, 0, EC_CMD_TYPEC_CONTROL, &req, sizeof(req), NULL, 0);
}
/*
 * Map a Type-C connector state to the EC's USB_PD_MUX_* flags.
 * Returns the flags, or -EOPNOTSUPP for states the EC cannot express.
 */
static int cros_typec_get_mux_state(unsigned long mode, struct typec_altmode *alt)
{
	u8 pin_assign;

	if (mode == TYPEC_STATE_SAFE)
		return USB_PD_MUX_SAFE_MODE;

	if (mode == TYPEC_STATE_USB)
		return USB_PD_MUX_USB_ENABLED;

	if (alt && alt->svid == USB_TYPEC_DP_SID) {
		/* Pin assignment D carries USB alongside DisplayPort. */
		pin_assign = mode - TYPEC_STATE_MODAL;
		if (pin_assign & DP_PIN_ASSIGN_D)
			return USB_PD_MUX_DP_ENABLED | USB_PD_MUX_USB_ENABLED;
		return USB_PD_MUX_DP_ENABLED;
	}

	return -EOPNOTSUPP;
}
/* Ask the EC to clear the given PD_STATUS_EVENT_* bits for @port_num. */
static int cros_typec_send_clear_event(struct cros_typec_switch_data *sdata, int port_num,
				       u32 events_mask)
{
	struct ec_params_typec_control req = {
		.port = port_num,
		.command = TYPEC_CONTROL_COMMAND_CLEAR_EVENTS,
		.clear_events_mask = events_mask,
	};

	return cros_ec_cmd(sdata->ec, 0, EC_CMD_TYPEC_CONTROL, &req, sizeof(req), NULL, 0);
}
/*
 * Query port status and report whether any event bit in @mask is set.
 * Command failures are logged and treated as "no event".
 */
static bool cros_typec_check_event(struct cros_typec_switch_data *sdata, int port_num, u32 mask)
{
	struct ec_params_typec_status req = {
		.port = port_num,
	};
	struct ec_response_typec_status resp;
	int ret;

	ret = cros_ec_cmd(sdata->ec, 0, EC_CMD_TYPEC_STATUS, &req, sizeof(req),
			  &resp, sizeof(resp));
	if (ret < 0) {
		dev_warn(sdata->dev, "EC_CMD_TYPEC_STATUS failed for port: %d\n", port_num);
		return false;
	}

	return !!(resp.events & mask);
}
/*
* The ChromeOS EC treats both mode-switches and retimers as "muxes" for the purposes of the
* host command API. This common function configures and verifies the retimer/mode-switch
* according to the provided setting.
*/
static int cros_typec_configure_mux(struct cros_typec_switch_data *sdata, int port_num, int index,
				    unsigned long mode, struct typec_altmode *alt)
{
	unsigned long end;
	u32 event_mask;
	u8 mux_state;
	int ret;

	/* Translate the connector state into USB_PD_MUX_* flags. */
	ret = cros_typec_get_mux_state(mode, alt);
	if (ret < 0)
		return ret;
	mux_state = (u8)ret;

	/* Clear any old mux set done event. */
	if (index == 0)
		event_mask = PD_STATUS_EVENT_MUX_0_SET_DONE;
	else
		event_mask = PD_STATUS_EVENT_MUX_1_SET_DONE;
	ret = cros_typec_send_clear_event(sdata, port_num, event_mask);
	if (ret < 0)
		return ret;

	/* Send the set command. */
	ret = cros_typec_cmd_mux_set(sdata, port_num, index, mux_state);
	if (ret < 0)
		return ret;

	/* Check for the mux set done event, polling for up to 1 second. */
	end = jiffies + msecs_to_jiffies(1000);
	do {
		if (cros_typec_check_event(sdata, port_num, event_mask))
			return 0;
		usleep_range(500, 1000);
	} while (time_before(jiffies, end));

	dev_err(sdata->dev, "Timed out waiting for mux set done on index: %d, state: %d\n",
		index, mux_state);

	return -ETIMEDOUT;
}
/* typec_mux set() callback: mode switches occupy EC mux index 0. */
static int cros_typec_mode_switch_set(struct typec_mux_dev *mode_switch,
				      struct typec_mux_state *state)
{
	struct cros_typec_port *port = typec_mux_get_drvdata(mode_switch);

	/* Mode switches have index 0. */
	return cros_typec_configure_mux(port->sdata, port->port_num, 0, state->mode, state->alt);
}

/* typec_retimer set() callback: retimers occupy EC mux index 1. */
static int cros_typec_retimer_set(struct typec_retimer *retimer, struct typec_retimer_state *state)
{
	struct cros_typec_port *port = typec_retimer_get_drvdata(retimer);

	/* Retimers have index 1. */
	return cros_typec_configure_mux(port->sdata, port->port_num, 1, state->mode, state->alt);
}
static void cros_typec_unregister_switches(struct cros_typec_switch_data *sdata)
{
int i;
for (i = 0; i < EC_USB_PD_MAX_PORTS; i++) {
if (!sdata->ports[i])
continue;
typec_retimer_unregister(sdata->ports[i]->retimer);
typec_mux_unregister(sdata->ports[i]->mode_switch);
}
}
/* Register a Type-C mode switch for @port, named after its fwnode. */
static int cros_typec_register_mode_switch(struct cros_typec_port *port,
					   struct fwnode_handle *fwnode)
{
	struct typec_mux_desc mode_switch_desc = {
		.fwnode = fwnode,
		.drvdata = port,
		.name = fwnode_get_name(fwnode),
		.set = cros_typec_mode_switch_set,
	};

	port->mode_switch = typec_mux_register(port->sdata->dev, &mode_switch_desc);

	return PTR_ERR_OR_ZERO(port->mode_switch);
}

/* Register a Type-C retimer for @port, named after its fwnode. */
static int cros_typec_register_retimer(struct cros_typec_port *port, struct fwnode_handle *fwnode)
{
	struct typec_retimer_desc retimer_desc = {
		.fwnode = fwnode,
		.drvdata = port,
		.name = fwnode_get_name(fwnode),
		.set = cros_typec_retimer_set,
	};

	port->retimer = typec_retimer_register(port->sdata->dev, &retimer_desc);

	return PTR_ERR_OR_ZERO(port->retimer);
}
/*
 * Walk the ACPI child nodes and create a retimer and/or mode switch for
 * each one, keyed by the port number encoded in the child's _ADR.
 * On failure, everything registered so far is torn down.
 */
static int cros_typec_register_switches(struct cros_typec_switch_data *sdata)
{
	struct cros_typec_port *port;
	struct device *dev = sdata->dev;
	struct fwnode_handle *fwnode;
	struct acpi_device *adev;
	unsigned long long index;
	int nports, ret;

	nports = device_get_child_node_count(dev);
	if (nports == 0) {
		dev_err(dev, "No switch devices found.\n");
		return -ENODEV;
	}

	device_for_each_child_node(dev, fwnode) {
		port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
		if (!port) {
			ret = -ENOMEM;
			goto err_switch;
		}

		adev = to_acpi_device_node(fwnode);
		if (!adev) {
			dev_err(fwnode->dev, "Couldn't get ACPI device handle\n");
			ret = -ENODEV;
			goto err_switch;
		}

		/* _ADR encodes the Type-C port number of this child. */
		ret = acpi_evaluate_integer(adev->handle, "_ADR", NULL, &index);
		if (ACPI_FAILURE(ret)) {
			dev_err(fwnode->dev, "_ADR wasn't evaluated\n");
			ret = -ENODATA;
			goto err_switch;
		}

		if (index >= EC_USB_PD_MAX_PORTS) {
			dev_err(fwnode->dev, "Invalid port index number: %llu\n", index);
			ret = -EINVAL;
			goto err_switch;
		}
		port->sdata = sdata;
		port->port_num = index;
		sdata->ports[index] = port;

		if (fwnode_property_present(fwnode, "retimer-switch")) {
			ret = cros_typec_register_retimer(port, fwnode);
			if (ret) {
				dev_err(dev, "Retimer switch register failed\n");
				goto err_switch;
			}

			dev_dbg(dev, "Retimer switch registered for index %llu\n", index);
		}

		if (!fwnode_property_present(fwnode, "mode-switch"))
			continue;

		ret = cros_typec_register_mode_switch(port, fwnode);
		if (ret) {
			dev_err(dev, "Mode switch register failed\n");
			goto err_switch;
		}

		dev_dbg(dev, "Mode switch registered for index %llu\n", index);
	}

	return 0;
err_switch:
	/* Drop the node reference held by the aborted iteration. */
	fwnode_handle_put(fwnode);
	cros_typec_unregister_switches(sdata);
	return ret;
}
static int cros_typec_switch_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct cros_typec_switch_data *sdata;

	sdata = devm_kzalloc(dev, sizeof(*sdata), GFP_KERNEL);
	if (!sdata)
		return -ENOMEM;

	sdata->dev = dev;
	/* Parent device holds the cros_ec handle; reuse it for commands. */
	sdata->ec = dev_get_drvdata(pdev->dev.parent);

	platform_set_drvdata(pdev, sdata);

	return cros_typec_register_switches(sdata);
}
static int cros_typec_switch_remove(struct platform_device *pdev)
{
	struct cros_typec_switch_data *sdata = platform_get_drvdata(pdev);

	/* Unregister every mux/retimer created at probe time. */
	cros_typec_unregister_switches(sdata);
	return 0;
}
#ifdef CONFIG_ACPI
static const struct acpi_device_id cros_typec_switch_acpi_id[] = {
{ "GOOG001A", 0 },
{}
};
MODULE_DEVICE_TABLE(acpi, cros_typec_switch_acpi_id);
#endif
static struct platform_driver cros_typec_switch_driver = {
.driver = {
.name = "cros-typec-switch",
.acpi_match_table = ACPI_PTR(cros_typec_switch_acpi_id),
},
.probe = cros_typec_switch_probe,
.remove = cros_typec_switch_remove,
};
module_platform_driver(cros_typec_switch_driver);
MODULE_AUTHOR("Prashant Malani <[email protected]>");
MODULE_DESCRIPTION("ChromeOS EC Type-C Switch control");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/chrome/cros_typec_switch.c |
// SPDX-License-Identifier: GPL-2.0
// LPC variant I/O for Microchip EC
//
// Copyright (C) 2016 Google, Inc
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include "cros_ec_lpc_mec.h"
/*
* This mutex must be held while accessing the EMI unit. We can't rely on the
* EC mutex because memmap data may be accessed without it being held.
*/
static DEFINE_MUTEX(io_mutex);
static u16 mec_emi_base, mec_emi_end;
/**
* cros_ec_lpc_mec_emi_write_address() - Initialize EMI at a given address.
*
* @addr: Starting read / write address
* @access_type: Type of access, typically 32-bit auto-increment
*/
static void cros_ec_lpc_mec_emi_write_address(u16 addr,
				enum cros_ec_lpc_mec_emi_access_mode access_type)
{
	/* B0: address bits [7:2] combined with the 2-bit access mode. */
	outb((addr & 0xfc) | access_type, MEC_EMI_EC_ADDRESS_B0(mec_emi_base));
	/* B1: address bits [14:8]. */
	outb((addr >> 8) & 0x7f, MEC_EMI_EC_ADDRESS_B1(mec_emi_base));
}
/**
* cros_ec_lpc_mec_in_range() - Determine if addresses are in MEC EMI range.
*
* @offset: Address offset
* @length: Number of bytes to check
*
* Return: 1 if in range, 0 if not, and -EINVAL on failure
* such as the mec range not being initialized
*/
int cros_ec_lpc_mec_in_range(unsigned int offset, unsigned int length)
{
	if (length == 0)
		return -EINVAL;

	/* The EMI window must have been set via cros_ec_lpc_mec_init(). */
	if (WARN_ON(mec_emi_base == 0 || mec_emi_end == 0))
		return -EINVAL;

	if (offset >= mec_emi_base && offset < mec_emi_end) {
		/* Starts inside the window: must not run past its end. */
		if (WARN_ON(offset + length - 1 >= mec_emi_end))
			return -EINVAL;
		return 1;
	}

	/* Starts outside the window: must not straddle into it. */
	if (WARN_ON(offset + length > mec_emi_base && offset < mec_emi_end))
		return -EINVAL;

	return 0;
}
/**
* cros_ec_lpc_io_bytes_mec() - Read / write bytes to MEC EMI port.
*
* @io_type: MEC_IO_READ or MEC_IO_WRITE, depending on request
* @offset: Base read / write address
* @length: Number of bytes to read / write
* @buf: Destination / source buffer
*
* Return: 8-bit checksum of all bytes read / written
*/
u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type,
			    unsigned int offset, unsigned int length,
			    u8 *buf)
{
	int i = 0;
	int io_addr;
	u8 sum = 0;
	enum cros_ec_lpc_mec_emi_access_mode access, new_access;

	/* Return checksum of 0 if window is not initialized */
	WARN_ON(mec_emi_base == 0 || mec_emi_end == 0);
	if (mec_emi_base == 0 || mec_emi_end == 0)
		return 0;

	/*
	 * Long access cannot be used on misaligned data since reading B0 loads
	 * the data register and writing B3 flushes.
	 */
	if (offset & 0x3 || length < 4)
		access = ACCESS_TYPE_BYTE;
	else
		access = ACCESS_TYPE_LONG_AUTO_INCREMENT;

	mutex_lock(&io_mutex);

	/* Initialize I/O at desired address */
	cros_ec_lpc_mec_emi_write_address(offset, access);

	/* Skip bytes in case of misaligned offset */
	io_addr = MEC_EMI_EC_DATA_B0(mec_emi_base) + (offset & 0x3);
	while (i < length) {
		/* Transfer through the 4-byte data window [B0, B3]. */
		while (io_addr <= MEC_EMI_EC_DATA_B3(mec_emi_base)) {
			if (io_type == MEC_IO_READ)
				buf[i] = inb(io_addr++);
			else
				outb(buf[i], io_addr++);

			/* Checksum accumulates every byte transferred. */
			sum += buf[i++];
			offset++;

			/* Extra bounds check in case of misaligned length */
			if (i == length)
				goto done;
		}

		/*
		 * Use long auto-increment access except for misaligned write,
		 * since writing B3 triggers the flush.
		 */
		if (length - i < 4 && io_type == MEC_IO_WRITE)
			new_access = ACCESS_TYPE_BYTE;
		else
			new_access = ACCESS_TYPE_LONG_AUTO_INCREMENT;

		if (new_access != access ||
		    access != ACCESS_TYPE_LONG_AUTO_INCREMENT) {
			access = new_access;
			cros_ec_lpc_mec_emi_write_address(offset, access);
		}

		/* Access [B0, B3] on each loop pass */
		io_addr = MEC_EMI_EC_DATA_B0(mec_emi_base);
	}

done:
	mutex_unlock(&io_mutex);

	return sum;
}
EXPORT_SYMBOL(cros_ec_lpc_io_bytes_mec);
/**
 * cros_ec_lpc_mec_init() - Record the EMI I/O window of this EC.
 * @base: first I/O port of the MEC EMI region.
 * @end: first I/O port past the MEC EMI region (exclusive bound).
 */
void cros_ec_lpc_mec_init(unsigned int base, unsigned int end)
{
	mec_emi_base = base;
	mec_emi_end = end;
}
EXPORT_SYMBOL(cros_ec_lpc_mec_init);
| linux-master | drivers/platform/chrome/cros_ec_lpc_mec.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ChromeOS EC multi-function device
*
* Copyright (C) 2012 Google, Inc
*
* The ChromeOS EC multi function device is used to mux all the requests
* to the EC device for its multiple features: keyboard controller,
* battery charging and regulator control, firmware update.
*/
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include "cros_ec.h"
static struct cros_ec_platform ec_p = {
.ec_name = CROS_EC_DEV_NAME,
.cmd_offset = EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_EC_INDEX),
};
static struct cros_ec_platform pd_p = {
.ec_name = CROS_EC_DEV_PD_NAME,
.cmd_offset = EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX),
};
/**
* cros_ec_irq_handler() - top half part of the interrupt handler
* @irq: IRQ id
* @data: (ec_dev) Device with events to process.
*
* Return: Wakeup the bottom half
*/
static irqreturn_t cros_ec_irq_handler(int irq, void *data)
{
	struct cros_ec_device *ec_dev = data;

	/* Record the IRQ time so events can be timestamped accurately. */
	ec_dev->last_event_time = cros_ec_get_time_ns();

	return IRQ_WAKE_THREAD;
}
/**
* cros_ec_handle_event() - process and forward pending events on EC
* @ec_dev: Device with events to process.
*
* Call this function in a loop when the kernel is notified that the EC has
* pending events.
*
* Return: true if more events are still pending and this function should be
* called again.
*/
static bool cros_ec_handle_event(struct cros_ec_device *ec_dev)
{
	bool wake_event;
	bool ec_has_more_events;
	int ret;

	ret = cros_ec_get_next_event(ec_dev, &wake_event, &ec_has_more_events);

	/*
	 * Signal only if wake host events or any interrupt if
	 * cros_ec_get_next_event() returned an error (default value for
	 * wake_event is true)
	 */
	if (wake_event && device_may_wakeup(ec_dev->dev))
		pm_wakeup_event(ec_dev->dev, 0);

	/* Forward to subscribers only when an event was actually read. */
	if (ret > 0)
		blocking_notifier_call_chain(&ec_dev->event_notifier,
					     0, ec_dev);

	return ec_has_more_events;
}
/**
* cros_ec_irq_thread() - bottom half part of the interrupt handler
* @irq: IRQ id
* @data: (ec_dev) Device with events to process.
*
* Return: Interrupt handled.
*/
irqreturn_t cros_ec_irq_thread(int irq, void *data)
{
	struct cros_ec_device *ec_dev = data;

	/* Drain every pending event the EC has queued before returning. */
	while (cros_ec_handle_event(ec_dev))
		;

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(cros_ec_irq_thread);
/*
 * cros_ec_sleep_event() - Notify the EC of a host suspend/resume transition.
 *
 * Uses the v1 form of EC_CMD_HOST_SLEEP_EVENT when the EC supports it,
 * which also carries a suspend timeout and, on resume, returns
 * sleep-transition statistics that are checked for timeouts below.
 */
static int cros_ec_sleep_event(struct cros_ec_device *ec_dev, u8 sleep_event)
{
	int ret;
	/* Command header plus a union of the v0/v1 request and v1 response. */
	struct {
		struct cros_ec_command msg;
		union {
			struct ec_params_host_sleep_event req0;
			struct ec_params_host_sleep_event_v1 req1;
			struct ec_response_host_sleep_event_v1 resp1;
		} u;
	} __packed buf;

	memset(&buf, 0, sizeof(buf));

	if (ec_dev->host_sleep_v1) {
		buf.u.req1.sleep_event = sleep_event;
		buf.u.req1.suspend_params.sleep_timeout_ms =
				ec_dev->suspend_timeout_ms;

		buf.msg.outsize = sizeof(buf.u.req1);
		/* Only resume events produce a response payload. */
		if ((sleep_event == HOST_SLEEP_EVENT_S3_RESUME) ||
		    (sleep_event == HOST_SLEEP_EVENT_S0IX_RESUME))
			buf.msg.insize = sizeof(buf.u.resp1);

		buf.msg.version = 1;

	} else {
		buf.u.req0.sleep_event = sleep_event;
		buf.msg.outsize = sizeof(buf.u.req0);
	}

	buf.msg.command = EC_CMD_HOST_SLEEP_EVENT;

	ret = cros_ec_cmd_xfer_status(ec_dev, &buf.msg);
	/* Report failure to transition to system wide suspend with a warning. */
	if (ret >= 0 && ec_dev->host_sleep_v1 &&
	    (sleep_event == HOST_SLEEP_EVENT_S0IX_RESUME ||
	     sleep_event == HOST_SLEEP_EVENT_S3_RESUME)) {
		ec_dev->last_resume_result =
			buf.u.resp1.resume_response.sleep_transitions;

		WARN_ONCE(buf.u.resp1.resume_response.sleep_transitions &
			  EC_HOST_RESUME_SLEEP_TIMEOUT,
			  "EC detected sleep transition timeout. Total sleep transitions: %d",
			  buf.u.resp1.resume_response.sleep_transitions &
			  EC_HOST_RESUME_SLEEP_TRANSITIONS_MASK);
	}

	return ret;
}
/*
 * Notifier callback: when the EC signals INTERFACE_READY, re-query its
 * protocol/feature information under the transfer lock.
 */
static int cros_ec_ready_event(struct notifier_block *nb,
			       unsigned long queued_during_suspend,
			       void *_notify)
{
	struct cros_ec_device *ec_dev = container_of(nb, struct cros_ec_device,
						     notifier_ready);
	u32 host_event = cros_ec_get_host_event(ec_dev);

	if (!(host_event & EC_HOST_EVENT_MASK(EC_HOST_EVENT_INTERFACE_READY)))
		return NOTIFY_DONE;

	mutex_lock(&ec_dev->lock);
	cros_ec_query_all(ec_dev);
	mutex_unlock(&ec_dev->lock);

	return NOTIFY_OK;
}
/**
 * cros_ec_register() - Register a new ChromeOS EC, using the provided info.
 * @ec_dev: Device to register.
 *
 * Before calling this, allocate a pointer to a new device and then fill
 * in all the fields up to the --private-- marker.
 *
 * Return: 0 on success or negative error code.
 */
int cros_ec_register(struct cros_ec_device *ec_dev)
{
	struct device *dev = ec_dev->dev;
	int err = 0;

	BLOCKING_INIT_NOTIFIER_HEAD(&ec_dev->event_notifier);
	BLOCKING_INIT_NOTIFIER_HEAD(&ec_dev->panic_notifier);

	/* Conservative defaults until cros_ec_query_all() probes the EC. */
	ec_dev->max_request = sizeof(struct ec_params_hello);
	ec_dev->max_response = sizeof(struct ec_response_get_protocol_info);
	ec_dev->max_passthru = 0;
	ec_dev->ec = NULL;
	ec_dev->pd = NULL;
	ec_dev->suspend_timeout_ms = EC_HOST_SLEEP_TIMEOUT_DEFAULT;

	/* Transport buffers; sizes were filled in by the bus driver. */
	ec_dev->din = devm_kzalloc(dev, ec_dev->din_size, GFP_KERNEL);
	if (!ec_dev->din)
		return -ENOMEM;

	ec_dev->dout = devm_kzalloc(dev, ec_dev->dout_size, GFP_KERNEL);
	if (!ec_dev->dout)
		return -ENOMEM;

	/* Per-device lock class so lockdep can tell EC instances apart. */
	lockdep_register_key(&ec_dev->lockdep_key);
	mutex_init(&ec_dev->lock);
	lockdep_set_class(&ec_dev->lock, &ec_dev->lockdep_key);

	err = cros_ec_query_all(ec_dev);
	if (err) {
		dev_err(dev, "Cannot identify the EC: error %d\n", err);
		goto exit;
	}

	if (ec_dev->irq > 0) {
		err = devm_request_threaded_irq(dev, ec_dev->irq,
						cros_ec_irq_handler,
						cros_ec_irq_thread,
						IRQF_TRIGGER_LOW | IRQF_ONESHOT,
						"chromeos-ec", ec_dev);
		if (err) {
			dev_err(dev, "Failed to request IRQ %d: %d\n",
				ec_dev->irq, err);
			goto exit;
		}
	}

	/* Register a platform device for the main EC instance */
	ec_dev->ec = platform_device_register_data(ec_dev->dev, "cros-ec-dev",
					PLATFORM_DEVID_AUTO, &ec_p,
					sizeof(struct cros_ec_platform));
	if (IS_ERR(ec_dev->ec)) {
		dev_err(ec_dev->dev,
			"Failed to create CrOS EC platform device\n");
		err = PTR_ERR(ec_dev->ec);
		goto exit;
	}

	if (ec_dev->max_passthru) {
		/*
		 * Register a platform device for the PD behind the main EC.
		 * We make the following assumptions:
		 * - behind an EC, we have a pd
		 * - only one device added.
		 * - the EC is responsive at init time (it is not true for a
		 *   sensor hub).
		 */
		ec_dev->pd = platform_device_register_data(ec_dev->dev,
					"cros-ec-dev",
					PLATFORM_DEVID_AUTO, &pd_p,
					sizeof(struct cros_ec_platform));
		if (IS_ERR(ec_dev->pd)) {
			dev_err(ec_dev->dev,
				"Failed to create CrOS PD platform device\n");
			err = PTR_ERR(ec_dev->pd);
			goto exit;
		}
	}

	if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
		err = devm_of_platform_populate(dev);
		if (err) {
			dev_err(dev, "Failed to register sub-devices\n");
			goto exit;
		}
	}

	/*
	 * Clear sleep event - this will fail harmlessly on platforms that
	 * don't implement the sleep event host command.
	 */
	err = cros_ec_sleep_event(ec_dev, 0);
	if (err < 0)
		dev_dbg(ec_dev->dev, "Error %d clearing sleep event to ec\n",
			err);

	if (ec_dev->mkbp_event_supported) {
		/*
		 * Register the notifier for EC_HOST_EVENT_INTERFACE_READY
		 * event.
		 */
		ec_dev->notifier_ready.notifier_call = cros_ec_ready_event;
		err = blocking_notifier_chain_register(&ec_dev->event_notifier,
						      &ec_dev->notifier_ready);
		if (err)
			goto exit;
	}

	dev_info(dev, "Chrome EC device registered\n");

	/*
	 * Unlock EC that may be waiting for AP to process MKBP events.
	 * If the AP takes too long to answer, the EC would stop sending
	 * events.
	 */
	if (ec_dev->mkbp_event_supported)
		cros_ec_irq_thread(0, ec_dev);

	return 0;
exit:
	/* platform_device_unregister() tolerates NULL/ERR_PTR values. */
	platform_device_unregister(ec_dev->ec);
	platform_device_unregister(ec_dev->pd);
	mutex_destroy(&ec_dev->lock);
	lockdep_unregister_key(&ec_dev->lockdep_key);
	return err;
}
EXPORT_SYMBOL(cros_ec_register);
/**
 * cros_ec_unregister() - Remove a ChromeOS EC.
 * @ec_dev: Device to unregister.
 *
 * Call this to deregister a ChromeOS EC, then clean up any private data.
 */
void cros_ec_unregister(struct cros_ec_device *ec_dev)
{
	/* Drop the passthru (PD) child before the main EC instance. */
	platform_device_unregister(ec_dev->pd);
	platform_device_unregister(ec_dev->ec);
	mutex_destroy(&ec_dev->lock);
	lockdep_unregister_key(&ec_dev->lockdep_key);
}
EXPORT_SYMBOL(cros_ec_unregister);
#ifdef CONFIG_PM_SLEEP
/**
 * cros_ec_suspend() - Handle a suspend operation for the ChromeOS EC device.
 * @ec_dev: Device to suspend.
 *
 * This can be called by drivers to handle a suspend event.
 *
 * Return: 0 on success or negative error code.
 */
int cros_ec_suspend(struct cros_ec_device *ec_dev)
{
	struct device *dev = ec_dev->dev;
	u8 sleep_event;
	int ret;

	/* Without ACPI, or when firmware drives suspend, report S3. */
	if (!IS_ENABLED(CONFIG_ACPI) || pm_suspend_via_firmware())
		sleep_event = HOST_SLEEP_EVENT_S3_SUSPEND;
	else
		sleep_event = HOST_SLEEP_EVENT_S0IX_SUSPEND;

	ret = cros_ec_sleep_event(ec_dev, sleep_event);
	if (ret < 0)
		dev_dbg(ec_dev->dev, "Error %d sending suspend event to ec\n",
			ret);

	if (device_may_wakeup(dev))
		ec_dev->wake_enabled = !enable_irq_wake(ec_dev->irq);
	else
		ec_dev->wake_enabled = false;

	disable_irq(ec_dev->irq);
	ec_dev->suspended = true;

	return 0;
}
EXPORT_SYMBOL(cros_ec_suspend);
/* Replay MKBP events that arrived while suspended so clients see them. */
static void cros_ec_report_events_during_suspend(struct cros_ec_device *ec_dev)
{
	bool wake_event;

	if (!ec_dev->mkbp_event_supported)
		return;

	for (;;) {
		if (cros_ec_get_next_event(ec_dev, &wake_event, NULL) <= 0)
			break;

		blocking_notifier_call_chain(&ec_dev->event_notifier,
					     1, ec_dev);

		if (wake_event && device_may_wakeup(ec_dev->dev))
			pm_wakeup_event(ec_dev->dev, 0);
	}
}
/**
 * cros_ec_resume() - Handle a resume operation for the ChromeOS EC device.
 * @ec_dev: Device to resume.
 *
 * This can be called by drivers to handle a resume event.
 *
 * Return: 0 on success or negative error code.
 */
int cros_ec_resume(struct cros_ec_device *ec_dev)
{
	u8 sleep_event;
	int ret;

	ec_dev->suspended = false;
	enable_irq(ec_dev->irq);

	/* Mirror the state selection used on the suspend path. */
	if (!IS_ENABLED(CONFIG_ACPI) || pm_suspend_via_firmware())
		sleep_event = HOST_SLEEP_EVENT_S3_RESUME;
	else
		sleep_event = HOST_SLEEP_EVENT_S0IX_RESUME;

	ret = cros_ec_sleep_event(ec_dev, sleep_event);
	if (ret < 0)
		dev_dbg(ec_dev->dev, "Error %d sending resume event to ec\n",
			ret);

	if (ec_dev->wake_enabled)
		disable_irq_wake(ec_dev->irq);

	/*
	 * Let the mfd devices know about events that occur during
	 * suspend. This way the clients know what to do with them.
	 */
	cros_ec_report_events_during_suspend(ec_dev);

	return 0;
}
EXPORT_SYMBOL(cros_ec_resume);
#endif
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ChromeOS EC core driver");
| linux-master | drivers/platform/chrome/cros_ec.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Sensor HUB driver that discovers sensors behind a ChromeOS Embedded
* Controller.
*
* Copyright 2019 Google LLC
*/
#include <linux/init.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_data/cros_ec_sensorhub.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
#define DRV_NAME "cros-ec-sensorhub"
/* devm teardown callback: unregister a previously added sensor device. */
static void cros_ec_sensorhub_free_sensor(void *arg)
{
	platform_device_unregister((struct platform_device *)arg);
}
static int cros_ec_sensorhub_allocate_sensor(struct device *parent,
char *sensor_name,
int sensor_num)
{
struct cros_ec_sensor_platform sensor_platforms = {
.sensor_num = sensor_num,
};
struct platform_device *pdev;
pdev = platform_device_register_data(parent, sensor_name,
PLATFORM_DEVID_AUTO,
&sensor_platforms,
sizeof(sensor_platforms));
if (IS_ERR(pdev))
return PTR_ERR(pdev);
return devm_add_action_or_reset(parent,
cros_ec_sensorhub_free_sensor,
pdev);
}
/*
 * cros_ec_sensorhub_register() - enumerate EC sensors and register children.
 * @dev: sensor hub device (parent for the sensor platform devices).
 * @sensorhub: hub state; msg/params/resp share one preallocated buffer.
 *
 * Queries MOTIONSENSE_CMD_INFO for each sensor index, registers a typed
 * child device per known sensor type, and enables the keyboard wake-angle
 * attribute when two or more accelerometers are present.
 *
 * Return: 0 on success or negative error code.
 */
static int cros_ec_sensorhub_register(struct device *dev,
				      struct cros_ec_sensorhub *sensorhub)
{
	int sensor_type[MOTIONSENSE_TYPE_MAX] = { 0 };
	struct cros_ec_command *msg = sensorhub->msg;
	struct cros_ec_dev *ec = sensorhub->ec;
	int ret, i;
	char *name;

	msg->version = 1;
	msg->insize = sizeof(struct ec_response_motion_sense);
	msg->outsize = sizeof(struct ec_params_motion_sense);

	for (i = 0; i < sensorhub->sensor_num; i++) {
		sensorhub->params->cmd = MOTIONSENSE_CMD_INFO;
		sensorhub->params->info.sensor_num = i;

		ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
		if (ret < 0) {
			/* Skip sensors the EC cannot describe. */
			dev_warn(dev, "no info for EC sensor %d : %d/%d\n",
				 i, ret, msg->result);
			continue;
		}

		/* Map the EC sensor type to a child driver name. */
		switch (sensorhub->resp->info.type) {
		case MOTIONSENSE_TYPE_ACCEL:
			name = "cros-ec-accel";
			break;
		case MOTIONSENSE_TYPE_BARO:
			name = "cros-ec-baro";
			break;
		case MOTIONSENSE_TYPE_GYRO:
			name = "cros-ec-gyro";
			break;
		case MOTIONSENSE_TYPE_MAG:
			name = "cros-ec-mag";
			break;
		case MOTIONSENSE_TYPE_PROX:
			name = "cros-ec-prox";
			break;
		case MOTIONSENSE_TYPE_LIGHT:
			name = "cros-ec-light";
			break;
		case MOTIONSENSE_TYPE_ACTIVITY:
			name = "cros-ec-activity";
			break;
		default:
			dev_warn(dev, "unknown type %d\n",
				 sensorhub->resp->info.type);
			continue;
		}

		ret = cros_ec_sensorhub_allocate_sensor(dev, name, i);
		if (ret)
			return ret;

		sensor_type[sensorhub->resp->info.type]++;
	}

	/* Two accelerometers (lid + base) enable the wake-angle feature. */
	if (sensor_type[MOTIONSENSE_TYPE_ACCEL] >= 2)
		ec->has_kb_wake_angle = true;

	if (cros_ec_check_features(ec,
				   EC_FEATURE_REFINED_TABLET_MODE_HYSTERESIS)) {
		ret = cros_ec_sensorhub_allocate_sensor(dev,
							"cros-ec-lid-angle",
							0);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * Probe: allocate the shared motion-sense command buffer, then either
 * enumerate sensors on a real sensor hub (with optional FIFO ring
 * handling) or fall back to two legacy accelerometers.
 */
static int cros_ec_sensorhub_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct cros_ec_dev *ec = dev_get_drvdata(dev->parent);
	struct cros_ec_sensorhub *data;
	struct cros_ec_command *msg;
	int ret, i, sensor_num;

	/* Buffer must hold the largest of request params and any response. */
	msg = devm_kzalloc(dev, sizeof(struct cros_ec_command) +
			   max((u16)sizeof(struct ec_params_motion_sense),
			       ec->ec_dev->max_response), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;

	data = devm_kzalloc(dev, sizeof(struct cros_ec_sensorhub), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	mutex_init(&data->cmd_lock);

	data->dev = dev;
	data->ec = ec;
	data->msg = msg;
	/* params and resp alias the same msg->data area. */
	data->params = (struct ec_params_motion_sense *)msg->data;
	data->resp = (struct ec_response_motion_sense *)msg->data;

	dev_set_drvdata(dev, data);

	/* Check whether this EC is a sensor hub. */
	if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE)) {
		sensor_num = cros_ec_get_sensor_count(ec);
		if (sensor_num < 0) {
			dev_err(dev,
				"Unable to retrieve sensor information (err:%d)\n",
				sensor_num);
			return sensor_num;
		}
		if (sensor_num == 0) {
			dev_err(dev, "Zero sensors reported.\n");
			return -EINVAL;
		}
		data->sensor_num = sensor_num;

		/*
		 * Prepare the ring handler before enumerating the
		 * sensors.
		 */
		if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) {
			ret = cros_ec_sensorhub_ring_allocate(data);
			if (ret)
				return ret;
		}

		/* Enumerate the sensors. */
		ret = cros_ec_sensorhub_register(dev, data);
		if (ret)
			return ret;

		/*
		 * When the EC does not have a FIFO, the sensors will query
		 * their data themselves via sysfs or a software trigger.
		 */
		if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) {
			ret = cros_ec_sensorhub_ring_add(data);
			if (ret)
				return ret;
			/*
			 * The msg and its data is not under the control of the
			 * ring handler.
			 */
			return devm_add_action_or_reset(dev,
					cros_ec_sensorhub_ring_remove,
					data);
		}
	} else {
		/*
		 * If the device has sensors but does not claim to
		 * be a sensor hub, we are in legacy mode.
		 */
		data->sensor_num = 2;
		for (i = 0; i < data->sensor_num; i++) {
			ret = cros_ec_sensorhub_allocate_sensor(dev,
						"cros-ec-accel-legacy", i);
			if (ret)
				return ret;
		}
	}

	return 0;
}
#ifdef CONFIG_PM_SLEEP
/*
* When the EC is suspending, we must stop sending interrupt,
* we may use the same interrupt line for waking up the device.
* Tell the EC to stop sending non-interrupt event on the iio ring.
*/
static int cros_ec_sensorhub_suspend(struct device *dev)
{
	struct cros_ec_sensorhub *sensorhub = dev_get_drvdata(dev);

	/* Nothing to quiesce when the EC has no sensor FIFO. */
	if (!cros_ec_check_features(sensorhub->ec,
				    EC_FEATURE_MOTION_SENSE_FIFO))
		return 0;

	return cros_ec_sensorhub_ring_fifo_enable(sensorhub, false);
}
static int cros_ec_sensorhub_resume(struct device *dev)
{
	struct cros_ec_sensorhub *sensorhub = dev_get_drvdata(dev);

	/* Re-enable FIFO event delivery disabled during suspend. */
	if (!cros_ec_check_features(sensorhub->ec,
				    EC_FEATURE_MOTION_SENSE_FIFO))
		return 0;

	return cros_ec_sensorhub_ring_fifo_enable(sensorhub, true);
}
#endif
static SIMPLE_DEV_PM_OPS(cros_ec_sensorhub_pm_ops,
cros_ec_sensorhub_suspend,
cros_ec_sensorhub_resume);
static struct platform_driver cros_ec_sensorhub_driver = {
.driver = {
.name = DRV_NAME,
.pm = &cros_ec_sensorhub_pm_ops,
},
.probe = cros_ec_sensorhub_probe,
};
module_platform_driver(cros_ec_sensorhub_driver);
MODULE_ALIAS("platform:" DRV_NAME);
MODULE_AUTHOR("Gwendal Grignou <[email protected]>");
MODULE_DESCRIPTION("ChromeOS EC MEMS Sensor Hub Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/chrome/cros_ec_sensorhub.c |
// SPDX-License-Identifier: GPL-2.0+
// Expose the ChromeOS EC through sysfs
//
// Copyright (C) 2014 Google, Inc.
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#define DRV_NAME "cros-ec-sysfs"
/* Accessor functions */
/* List every verb reboot_store() understands, plus the optional flag. */
static ssize_t reboot_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf,
			  "ro|rw|cancel|cold|disable-jump|hibernate|cold-ap-off"
			  " [at-shutdown]\n");
}
/*
 * Parse a whitespace-separated list of reboot keywords from userspace and
 * issue EC_CMD_REBOOT_EC. Exactly one command word is required; the
 * "at-shutdown" flag may be combined with it.
 */
static ssize_t reboot_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	static const struct {
		const char * const str;
		uint8_t cmd;
		uint8_t flags;	/* non-zero entries are flags, not commands */
	} words[] = {
		{"cancel", EC_REBOOT_CANCEL, 0},
		{"ro", EC_REBOOT_JUMP_RO, 0},
		{"rw", EC_REBOOT_JUMP_RW, 0},
		{"cold-ap-off", EC_REBOOT_COLD_AP_OFF, 0},
		{"cold", EC_REBOOT_COLD, 0},
		{"disable-jump", EC_REBOOT_DISABLE_JUMP, 0},
		{"hibernate", EC_REBOOT_HIBERNATE, 0},
		/* flag-only entry; .cmd is never used (-1 truncated) */
		{"at-shutdown", -1, EC_REBOOT_FLAG_ON_AP_SHUTDOWN},
	};
	struct cros_ec_command *msg;
	struct ec_params_reboot_ec *param;
	int got_cmd = 0, offset = 0;
	int i;
	int ret;
	struct cros_ec_dev *ec = to_cros_ec_dev(dev);

	msg = kmalloc(sizeof(*msg) + sizeof(*param), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	param = (struct ec_params_reboot_ec *)msg->data;

	param->flags = 0;
	while (1) {
		/* Find word to start scanning */
		while (buf[offset] && isspace(buf[offset]))
			offset++;
		if (!buf[offset])
			break;

		/* Match by prefix, case-insensitively. */
		for (i = 0; i < ARRAY_SIZE(words); i++) {
			if (!strncasecmp(words[i].str, buf+offset,
					 strlen(words[i].str))) {
				if (words[i].flags) {
					param->flags |= words[i].flags;
				} else {
					param->cmd = words[i].cmd;
					got_cmd = 1;
				}
				break;
			}
		}

		/* On to the next word, if any */
		while (buf[offset] && !isspace(buf[offset]))
			offset++;
	}

	if (!got_cmd) {
		count = -EINVAL;
		goto exit;
	}

	msg->version = 0;
	msg->command = EC_CMD_REBOOT_EC + ec->cmd_offset;
	msg->outsize = sizeof(*param);
	msg->insize = 0;
	ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
	if (ret < 0)
		count = ret;
exit:
	kfree(msg);
	return count;
}
/*
 * Dump version, build, chip and board information. Each query after the
 * first is best-effort: a failure is reported inline in the output rather
 * than aborting, since partial information is still useful.
 */
static ssize_t version_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	static const char * const image_names[] = {"unknown", "RO", "RW"};
	struct ec_response_get_version *r_ver;
	struct ec_response_get_chip_info *r_chip;
	struct ec_response_board_version *r_board;
	struct cros_ec_command *msg;
	int ret;
	int count = 0;
	struct cros_ec_dev *ec = to_cros_ec_dev(dev);

	/* One buffer, reused for all four host commands. */
	msg = kmalloc(sizeof(*msg) + EC_HOST_PARAM_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	/* Get versions. RW may change. */
	msg->version = 0;
	msg->command = EC_CMD_GET_VERSION + ec->cmd_offset;
	msg->insize = sizeof(*r_ver);
	msg->outsize = 0;
	ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
	if (ret < 0) {
		count = ret;
		goto exit;
	}
	r_ver = (struct ec_response_get_version *)msg->data;
	/* Strings should be null-terminated, but let's be sure. */
	r_ver->version_string_ro[sizeof(r_ver->version_string_ro) - 1] = '\0';
	r_ver->version_string_rw[sizeof(r_ver->version_string_rw) - 1] = '\0';
	count += sysfs_emit_at(buf, count, "RO version: %s\n", r_ver->version_string_ro);
	count += sysfs_emit_at(buf, count, "RW version: %s\n", r_ver->version_string_rw);
	count += sysfs_emit_at(buf, count, "Firmware copy: %s\n",
			       (r_ver->current_image < ARRAY_SIZE(image_names) ?
				image_names[r_ver->current_image] : "?"));

	/* Get build info. */
	msg->command = EC_CMD_GET_BUILD_INFO + ec->cmd_offset;
	msg->insize = EC_HOST_PARAM_SIZE;
	ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
	if (ret < 0) {
		count += sysfs_emit_at(buf, count,
				       "Build info:    XFER / EC ERROR %d / %d\n",
				       ret, msg->result);
	} else {
		/* Build info is a raw string; force termination. */
		msg->data[EC_HOST_PARAM_SIZE - 1] = '\0';
		count += sysfs_emit_at(buf, count, "Build info:    %s\n", msg->data);
	}

	/* Get chip info. */
	msg->command = EC_CMD_GET_CHIP_INFO + ec->cmd_offset;
	msg->insize = sizeof(*r_chip);
	ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
	if (ret < 0) {
		count += sysfs_emit_at(buf, count,
				       "Chip info:     XFER / EC ERROR %d / %d\n",
				       ret, msg->result);
	} else {
		r_chip = (struct ec_response_get_chip_info *)msg->data;

		r_chip->vendor[sizeof(r_chip->vendor) - 1] = '\0';
		r_chip->name[sizeof(r_chip->name) - 1] = '\0';
		r_chip->revision[sizeof(r_chip->revision) - 1] = '\0';
		count += sysfs_emit_at(buf, count, "Chip vendor:   %s\n", r_chip->vendor);
		count += sysfs_emit_at(buf, count, "Chip name:     %s\n", r_chip->name);
		count += sysfs_emit_at(buf, count, "Chip revision: %s\n", r_chip->revision);
	}

	/* Get board version */
	msg->command = EC_CMD_GET_BOARD_VERSION + ec->cmd_offset;
	msg->insize = sizeof(*r_board);
	ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
	if (ret < 0) {
		count += sysfs_emit_at(buf, count,
				       "Board version: XFER / EC ERROR %d / %d\n",
				       ret, msg->result);
	} else {
		r_board = (struct ec_response_board_version *)msg->data;

		count += sysfs_emit_at(buf, count,
				       "Board version: %d\n",
				       r_board->board_version);
	}

exit:
	kfree(msg);
	return count;
}
/* Report EC flash geometry (sizes in bytes) via EC_CMD_FLASH_INFO. */
static ssize_t flashinfo_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct cros_ec_dev *ec = to_cros_ec_dev(dev);
	struct ec_response_flash_info *info;
	struct cros_ec_command *cmd;
	int ret;

	cmd = kmalloc(sizeof(*cmd) + sizeof(*info), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	/* The flash info shouldn't ever change, but ask each time anyway. */
	cmd->version = 0;
	cmd->command = EC_CMD_FLASH_INFO + ec->cmd_offset;
	cmd->insize = sizeof(*info);
	cmd->outsize = 0;

	ret = cros_ec_cmd_xfer_status(ec->ec_dev, cmd);
	if (ret < 0)
		goto exit;

	info = (struct ec_response_flash_info *)cmd->data;
	ret = sysfs_emit(buf,
			 "FlashSize %d\nWriteSize %d\n"
			 "EraseSize %d\nProtectSize %d\n",
			 info->flash_size, info->write_block_size,
			 info->erase_block_size, info->protect_block_size);
exit:
	kfree(cmd);
	return ret;
}
/* Keyboard wake angle control */

static ssize_t kb_wake_angle_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct cros_ec_dev *ec = to_cros_ec_dev(dev);
	struct ec_params_motion_sense *req;
	struct ec_response_motion_sense *reply;
	struct cros_ec_command *cmd;
	int ret;

	cmd = kmalloc(sizeof(*cmd) + EC_HOST_PARAM_SIZE, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	/* Writing the "no value" sentinel queries the current angle. */
	req = (struct ec_params_motion_sense *)cmd->data;
	req->cmd = MOTIONSENSE_CMD_KB_WAKE_ANGLE;
	req->kb_wake_angle.data = EC_MOTION_SENSE_NO_VALUE;
	cmd->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
	cmd->version = 2;
	cmd->outsize = sizeof(*req);
	cmd->insize = sizeof(*reply);

	ret = cros_ec_cmd_xfer_status(ec->ec_dev, cmd);
	if (ret < 0)
		goto exit;

	reply = (struct ec_response_motion_sense *)cmd->data;
	ret = sysfs_emit(buf, "%d\n", reply->kb_wake_angle.ret);
exit:
	kfree(cmd);
	return ret;
}
static ssize_t kb_wake_angle_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct cros_ec_dev *ec = to_cros_ec_dev(dev);
	struct ec_params_motion_sense *req;
	struct cros_ec_command *cmd;
	u16 angle;
	int ret;

	ret = kstrtou16(buf, 0, &angle);
	if (ret)
		return ret;

	cmd = kmalloc(sizeof(*cmd) + EC_HOST_PARAM_SIZE, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	/* Program the new wake angle. */
	req = (struct ec_params_motion_sense *)cmd->data;
	req->cmd = MOTIONSENSE_CMD_KB_WAKE_ANGLE;
	req->kb_wake_angle.data = angle;
	cmd->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
	cmd->version = 2;
	cmd->outsize = sizeof(*req);
	cmd->insize = sizeof(struct ec_response_motion_sense);

	ret = cros_ec_cmd_xfer_status(ec->ec_dev, cmd);
	kfree(cmd);

	return ret < 0 ? ret : count;
}
/* Module initialization */

static DEVICE_ATTR_RW(reboot);
static DEVICE_ATTR_RO(version);
static DEVICE_ATTR_RO(flashinfo);
static DEVICE_ATTR_RW(kb_wake_angle);

/* Attributes exposed under the cros_ec class device. */
static struct attribute *__ec_attrs[] = {
	&dev_attr_kb_wake_angle.attr,
	&dev_attr_reboot.attr,
	&dev_attr_version.attr,
	&dev_attr_flashinfo.attr,
	NULL,
};
/* Hide kb_wake_angle unless the EC reported the wake-angle capability. */
static umode_t cros_ec_ctrl_visible(struct kobject *kobj,
				    struct attribute *a, int n)
{
	struct cros_ec_dev *ec = to_cros_ec_dev(kobj_to_dev(kobj));

	if (!ec->has_kb_wake_angle && a == &dev_attr_kb_wake_angle.attr)
		return 0;

	return a->mode;
}
/* Group with per-attribute visibility handled by cros_ec_ctrl_visible(). */
static const struct attribute_group cros_ec_attr_group = {
	.attrs = __ec_attrs,
	.is_visible = cros_ec_ctrl_visible,
};
static int cros_ec_sysfs_probe(struct platform_device *pd)
{
	struct cros_ec_dev *ec = dev_get_drvdata(pd->dev.parent);
	int ret;

	/* Attach the attribute group to the parent's class device. */
	ret = sysfs_create_group(&ec->class_dev.kobj, &cros_ec_attr_group);
	if (ret < 0)
		dev_err(&pd->dev, "failed to create attributes. err=%d\n", ret);

	return ret;
}
static int cros_ec_sysfs_remove(struct platform_device *pd)
{
	struct cros_ec_dev *ec = dev_get_drvdata(pd->dev.parent);

	/* Tear down the group created in probe. */
	sysfs_remove_group(&ec->class_dev.kobj, &cros_ec_attr_group);
	return 0;
}
static struct platform_driver cros_ec_sysfs_driver = {
.driver = {
.name = DRV_NAME,
},
.probe = cros_ec_sysfs_probe,
.remove = cros_ec_sysfs_remove,
};
module_platform_driver(cros_ec_sysfs_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Expose the ChromeOS EC through sysfs");
MODULE_ALIAS("platform:" DRV_NAME);
| linux-master | drivers/platform/chrome/cros_ec_sysfs.c |
// SPDX-License-Identifier: GPL-2.0
// I2C interface for ChromeOS Embedded Controller
//
// Copyright (C) 2012 Google, Inc
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "cros_ec.h"
/*
 * Request format for protocol v3
 * byte 0	0xda (EC_COMMAND_PROTOCOL_3)
 * byte 1-8	struct ec_host_request
 * byte 9-	request data
 */
struct ec_host_request_i2c {
	/* Always 0xda to backward compatible with v2 struct */
	uint8_t  command_protocol;
	struct ec_host_request ec_request;
} __packed;
/*
 * Response format for protocol v3
 * byte 0	result code
 * byte 1	packet_length
 * byte 2-9	struct ec_host_response
 * byte 10-	response data
 */
struct ec_host_response_i2c {
	uint8_t result;
	/* Length of ec_host_response plus payload, excluding these 2 bytes */
	uint8_t packet_length;
	struct ec_host_response ec_response;
} __packed;
/* Map a generic device back to the EC instance stored as i2c clientdata. */
static inline struct cros_ec_device *to_ec_dev(struct device *dev)
{
	return i2c_get_clientdata(to_i2c_client(dev));
}
/*
 * cros_ec_pkt_xfer_i2c() - protocol-v3 transfer over I2C.
 * @ec_dev: EC device.
 * @msg: command to send, with response written back into msg->data.
 *
 * Frames the v3 request with a leading EC_COMMAND_PROTOCOL_3 byte, performs
 * a combined write+read transfer, then validates the response header and
 * checksum.
 *
 * Return: number of response payload bytes, or negative error code.
 */
static int cros_ec_pkt_xfer_i2c(struct cros_ec_device *ec_dev,
				struct cros_ec_command *msg)
{
	struct i2c_client *client = ec_dev->priv;
	int ret = -ENOMEM;
	int i;
	int packet_len;
	u8 *out_buf = NULL;
	u8 *in_buf = NULL;
	u8 sum;
	struct i2c_msg i2c_msg[2];
	struct ec_host_response *ec_response;
	struct ec_host_request_i2c *ec_request_i2c;
	struct ec_host_response_i2c *ec_response_i2c;
	int request_header_size = sizeof(struct ec_host_request_i2c);
	int response_header_size = sizeof(struct ec_host_response_i2c);

	i2c_msg[0].addr = client->addr;
	i2c_msg[0].flags = 0;
	i2c_msg[1].addr = client->addr;
	i2c_msg[1].flags = I2C_M_RD;

	/* response buffer must fit the I2C framing plus the payload */
	packet_len = msg->insize + response_header_size;
	if (packet_len > ec_dev->din_size) {
		ret = -EINVAL;
		goto done;
	}
	in_buf = ec_dev->din;
	i2c_msg[1].len = packet_len;
	i2c_msg[1].buf = (char *) in_buf;

	/* request buffer must fit the I2C framing plus the payload */
	packet_len = msg->outsize + request_header_size;
	if (packet_len > ec_dev->dout_size) {
		ret = -EINVAL;
		goto done;
	}
	out_buf = ec_dev->dout;
	i2c_msg[0].len = packet_len;
	i2c_msg[0].buf = (char *) out_buf;

	/* create request data */
	ec_request_i2c = (struct ec_host_request_i2c *) out_buf;
	ec_request_i2c->command_protocol = EC_COMMAND_PROTOCOL_3;

	/*
	 * Temporarily advance dout past the protocol byte so that
	 * cros_ec_prepare_tx() builds ec_host_request at the right offset,
	 * then restore it.
	 */
	ec_dev->dout++;
	ret = cros_ec_prepare_tx(ec_dev, msg);
	if (ret < 0)
		goto done;
	ec_dev->dout--;

	/* send command to EC and read answer */
	ret = i2c_transfer(client->adapter, i2c_msg, 2);
	if (ret < 0) {
		dev_dbg(ec_dev->dev, "i2c transfer failed: %d\n", ret);
		goto done;
	} else if (ret != 2) {
		dev_err(ec_dev->dev, "failed to get response: %d\n", ret);
		ret = -EIO;
		goto done;
	}

	ec_response_i2c = (struct ec_host_response_i2c *) in_buf;
	msg->result = ec_response_i2c->result;
	ec_response = &ec_response_i2c->ec_response;

	switch (msg->result) {
	case EC_RES_SUCCESS:
		break;
	case EC_RES_IN_PROGRESS:
		ret = -EAGAIN;
		dev_dbg(ec_dev->dev, "command 0x%02x in progress\n",
			msg->command);
		goto done;

	default:
		dev_dbg(ec_dev->dev, "command 0x%02x returned %d\n",
			msg->command, msg->result);
		/*
		 * When we send v3 request to v2 ec, ec won't recognize the
		 * 0xda (EC_COMMAND_PROTOCOL_3) and will return with status
		 * EC_RES_INVALID_COMMAND with zero data length.
		 *
		 * In case of invalid command for v3 protocol the data length
		 * will be at least sizeof(struct ec_host_response)
		 */
		if (ec_response_i2c->result == EC_RES_INVALID_COMMAND &&
		    ec_response_i2c->packet_length == 0) {
			ret = -EPROTONOSUPPORT;
			goto done;
		}
	}

	if (ec_response_i2c->packet_length < sizeof(struct ec_host_response)) {
		dev_err(ec_dev->dev,
			"response of %u bytes too short; not a full header\n",
			ec_response_i2c->packet_length);
		ret = -EBADMSG;
		goto done;
	}

	if (msg->insize < ec_response->data_len) {
		dev_err(ec_dev->dev,
			"response data size is too large: expected %u, got %u\n",
			msg->insize,
			ec_response->data_len);
		ret = -EMSGSIZE;
		goto done;
	}

	/* copy response packet payload and compute checksum */
	sum = 0;
	for (i = 0; i < sizeof(struct ec_host_response); i++)
		sum += ((u8 *)ec_response)[i];

	memcpy(msg->data,
	       in_buf + response_header_size,
	       ec_response->data_len);
	for (i = 0; i < ec_response->data_len; i++)
		sum += msg->data[i];

	/* All bytes should sum to zero */
	if (sum) {
		dev_err(ec_dev->dev, "bad packet checksum\n");
		ret = -EBADMSG;
		goto done;
	}

	ret = ec_response->data_len;

done:
	/* Give the EC time to come back after a requested reboot. */
	if (msg->command == EC_CMD_REBOOT_EC)
		msleep(EC_REBOOT_DELAY_MS);

	return ret;
}
/*
 * cros_ec_cmd_xfer_i2c() - legacy (pre-v3) transfer over I2C.
 * @ec_dev: EC device.
 * @msg: command to send, with response written back into msg->data.
 *
 * Frame: out = [version byte, command, length, payload..., checksum];
 * in = [result, length, payload..., checksum]. All checksum bytes are a
 * plain 8-bit sum of the preceding frame bytes.
 *
 * Return: number of response payload bytes, or negative error code.
 */
static int cros_ec_cmd_xfer_i2c(struct cros_ec_device *ec_dev,
				struct cros_ec_command *msg)
{
	struct i2c_client *client = ec_dev->priv;
	int ret = -ENOMEM;
	int i;
	int len;
	int packet_len;
	u8 *out_buf = NULL;
	u8 *in_buf = NULL;
	u8 sum;
	struct i2c_msg i2c_msg[2];

	i2c_msg[0].addr = client->addr;
	i2c_msg[0].flags = 0;
	i2c_msg[1].addr = client->addr;
	i2c_msg[1].flags = I2C_M_RD;

	/*
	 * allocate larger packet (one byte for checksum, one byte for
	 * length, and one for result code)
	 */
	packet_len = msg->insize + 3;
	in_buf = kzalloc(packet_len, GFP_KERNEL);
	if (!in_buf)
		goto done;
	i2c_msg[1].len = packet_len;
	i2c_msg[1].buf = (char *)in_buf;

	/*
	 * allocate larger packet (one byte for checksum, one for
	 * command code, one for length, and one for command version)
	 */
	packet_len = msg->outsize + 4;
	out_buf = kzalloc(packet_len, GFP_KERNEL);
	if (!out_buf)
		goto done;
	i2c_msg[0].len = packet_len;
	i2c_msg[0].buf = (char *)out_buf;

	out_buf[0] = EC_CMD_VERSION0 + msg->version;
	out_buf[1] = msg->command;
	out_buf[2] = msg->outsize;

	/* copy message payload and compute checksum */
	sum = out_buf[0] + out_buf[1] + out_buf[2];
	for (i = 0; i < msg->outsize; i++) {
		out_buf[3 + i] = msg->data[i];
		sum += out_buf[3 + i];
	}
	out_buf[3 + msg->outsize] = sum;

	/* send command to EC and read answer */
	ret = i2c_transfer(client->adapter, i2c_msg, 2);
	if (ret < 0) {
		dev_err(ec_dev->dev, "i2c transfer failed: %d\n", ret);
		goto done;
	} else if (ret != 2) {
		dev_err(ec_dev->dev, "failed to get response: %d\n", ret);
		ret = -EIO;
		goto done;
	}

	/* check response error code */
	msg->result = i2c_msg[1].buf[0];
	ret = cros_ec_check_result(ec_dev, msg);
	if (ret)
		goto done;

	len = in_buf[1];
	if (len > msg->insize) {
		dev_err(ec_dev->dev, "packet too long (%d bytes, expected %d)",
			len, msg->insize);
		ret = -ENOSPC;
		goto done;
	}

	/* copy response packet payload and compute checksum */
	sum = in_buf[0] + in_buf[1];
	for (i = 0; i < len; i++) {
		msg->data[i] = in_buf[2 + i];
		sum += in_buf[2 + i];
	}
	dev_dbg(ec_dev->dev, "packet: %*ph, sum = %02x\n",
		i2c_msg[1].len, in_buf, sum);
	if (sum != in_buf[2 + len]) {
		dev_err(ec_dev->dev, "bad packet checksum\n");
		ret = -EBADMSG;
		goto done;
	}

	ret = len;
done:
	kfree(in_buf);
	kfree(out_buf);
	/* Give the EC time to come back after a requested reboot. */
	if (msg->command == EC_CMD_REBOOT_EC)
		msleep(EC_REBOOT_DELAY_MS);

	return ret;
}
static int cros_ec_i2c_probe(struct i2c_client *client)
{
	struct device *dev = &client->dev;
	struct cros_ec_device *ec_dev;
	int ret;

	ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL);
	if (!ec_dev)
		return -ENOMEM;

	/* Fill in the transport-independent EC description. */
	ec_dev->dev = dev;
	ec_dev->priv = client;
	ec_dev->irq = client->irq;
	ec_dev->cmd_xfer = cros_ec_cmd_xfer_i2c;
	ec_dev->pkt_xfer = cros_ec_pkt_xfer_i2c;
	ec_dev->phys_name = client->adapter->name;
	ec_dev->din_size = sizeof(struct ec_host_response_i2c) +
			   sizeof(struct ec_response_get_protocol_info);
	ec_dev->dout_size = sizeof(struct ec_host_request_i2c);
	i2c_set_clientdata(client, ec_dev);

	ret = cros_ec_register(ec_dev);
	if (ret) {
		dev_err(dev, "cannot register EC\n");
		return ret;
	}

	return 0;
}
static void cros_ec_i2c_remove(struct i2c_client *client)
{
	cros_ec_unregister(i2c_get_clientdata(client));
}
#ifdef CONFIG_PM_SLEEP
/* Delegate suspend to the transport-independent EC core. */
static int cros_ec_i2c_suspend(struct device *dev)
{
	return cros_ec_suspend(to_ec_dev(dev));
}
/* Delegate resume to the transport-independent EC core. */
static int cros_ec_i2c_resume(struct device *dev)
{
	return cros_ec_resume(to_ec_dev(dev));
}
#endif
static const struct dev_pm_ops cros_ec_i2c_pm_ops = {
SET_LATE_SYSTEM_SLEEP_PM_OPS(cros_ec_i2c_suspend, cros_ec_i2c_resume)
};
#ifdef CONFIG_OF
static const struct of_device_id cros_ec_i2c_of_match[] = {
{ .compatible = "google,cros-ec-i2c", },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, cros_ec_i2c_of_match);
#endif
static const struct i2c_device_id cros_ec_i2c_id[] = {
{ "cros-ec-i2c", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, cros_ec_i2c_id);
#ifdef CONFIG_ACPI
static const struct acpi_device_id cros_ec_i2c_acpi_id[] = {
{ "GOOG0008", 0 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(acpi, cros_ec_i2c_acpi_id);
#endif
static struct i2c_driver cros_ec_driver = {
.driver = {
.name = "cros-ec-i2c",
.acpi_match_table = ACPI_PTR(cros_ec_i2c_acpi_id),
.of_match_table = of_match_ptr(cros_ec_i2c_of_match),
.pm = &cros_ec_i2c_pm_ops,
},
.probe = cros_ec_i2c_probe,
.remove = cros_ec_i2c_remove,
.id_table = cros_ec_i2c_id,
};
module_i2c_driver(cros_ec_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("I2C interface for ChromeOS Embedded Controller");
| linux-master | drivers/platform/chrome/cros_ec_i2c.c |
// SPDX-License-Identifier: GPL-2.0+
// Keyboard backlight LED driver for ChromeOS
//
// Copyright (C) 2012 Google, Inc.
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/slab.h>
/* Per-device state: registered LED class device plus optional EC handle. */
struct keyboard_led {
	struct led_classdev cdev;	/* LED class device registered at probe */
	struct cros_ec_device *ec;	/* set only by the EC PWM backend init */
};
/**
 * struct keyboard_led_drvdata - keyboard LED driver data.
 * @init: Init function.
 * @brightness_get: Get LED brightness level.
 * @brightness_set: Set LED brightness level. Must not sleep.
 * @brightness_set_blocking: Set LED brightness level. It can block the
 *                           caller for the time required for accessing a
 *                           LED device register.
 * @max_brightness: Maximum brightness.
 *
 * Each backend fills exactly one of @brightness_set (ACPI) or
 * @brightness_set_blocking (EC PWM).
 *
 * See struct led_classdev in include/linux/leds.h for more details.
 */
struct keyboard_led_drvdata {
	int (*init)(struct platform_device *pdev);
	enum led_brightness (*brightness_get)(struct led_classdev *led_cdev);
	void (*brightness_set)(struct led_classdev *led_cdev,
			       enum led_brightness brightness);
	int (*brightness_set_blocking)(struct led_classdev *led_cdev,
				       enum led_brightness brightness);
	enum led_brightness max_brightness;
};
/* Both backends express brightness as a percentage, 0-100. */
#define KEYBOARD_BACKLIGHT_MAX 100
#ifdef CONFIG_ACPI
/* Keyboard LED ACPI Device must be defined in firmware */
#define ACPI_KEYBOARD_BACKLIGHT_DEVICE "\\_SB.KBLT"
/* KBQC queries the current brightness; KBCM sets it. */
#define ACPI_KEYBOARD_BACKLIGHT_READ ACPI_KEYBOARD_BACKLIGHT_DEVICE ".KBQC"
#define ACPI_KEYBOARD_BACKLIGHT_WRITE ACPI_KEYBOARD_BACKLIGHT_DEVICE ".KBCM"
static void keyboard_led_set_brightness_acpi(struct led_classdev *cdev,
enum led_brightness brightness)
{
union acpi_object param;
struct acpi_object_list input;
acpi_status status;
param.type = ACPI_TYPE_INTEGER;
param.integer.value = brightness;
input.count = 1;
input.pointer = ¶m;
status = acpi_evaluate_object(NULL, ACPI_KEYBOARD_BACKLIGHT_WRITE,
&input, NULL);
if (ACPI_FAILURE(status))
dev_err(cdev->dev, "Error setting keyboard LED value: %d\n",
status);
}
static enum led_brightness
keyboard_led_get_brightness_acpi(struct led_classdev *cdev)
{
unsigned long long brightness;
acpi_status status;
status = acpi_evaluate_integer(NULL, ACPI_KEYBOARD_BACKLIGHT_READ,
NULL, &brightness);
if (ACPI_FAILURE(status)) {
dev_err(cdev->dev, "Error getting keyboard LED value: %d\n",
status);
return -EIO;
}
return brightness;
}
/* Verify the firmware actually provides the keyboard backlight device. */
static int keyboard_led_init_acpi(struct platform_device *pdev)
{
	acpi_handle handle;
	acpi_status status;

	status = acpi_get_handle(ACPI_ROOT_OBJECT,
				 ACPI_KEYBOARD_BACKLIGHT_DEVICE, &handle);
	if (ACPI_SUCCESS(status))
		return 0;

	dev_err(&pdev->dev, "Unable to find ACPI device %s: %d\n",
		ACPI_KEYBOARD_BACKLIGHT_DEVICE, status);
	return -ENXIO;
}
/* Operations bundle for the ACPI firmware backend. */
static const struct keyboard_led_drvdata keyboard_led_drvdata_acpi = {
	.init = keyboard_led_init_acpi,
	.brightness_set = keyboard_led_set_brightness_acpi,
	.brightness_get = keyboard_led_get_brightness_acpi,
	.max_brightness = KEYBOARD_BACKLIGHT_MAX,
};
#endif /* CONFIG_ACPI */
#if IS_ENABLED(CONFIG_CROS_EC)
static int
keyboard_led_set_brightness_ec_pwm(struct led_classdev *cdev,
enum led_brightness brightness)
{
struct {
struct cros_ec_command msg;
struct ec_params_pwm_set_keyboard_backlight params;
} __packed buf;
struct ec_params_pwm_set_keyboard_backlight *params = &buf.params;
struct cros_ec_command *msg = &buf.msg;
struct keyboard_led *keyboard_led = container_of(cdev, struct keyboard_led, cdev);
memset(&buf, 0, sizeof(buf));
msg->command = EC_CMD_PWM_SET_KEYBOARD_BACKLIGHT;
msg->outsize = sizeof(*params);
params->percent = brightness;
return cros_ec_cmd_xfer_status(keyboard_led->ec, msg);
}
static enum led_brightness
keyboard_led_get_brightness_ec_pwm(struct led_classdev *cdev)
{
struct {
struct cros_ec_command msg;
struct ec_response_pwm_get_keyboard_backlight resp;
} __packed buf;
struct ec_response_pwm_get_keyboard_backlight *resp = &buf.resp;
struct cros_ec_command *msg = &buf.msg;
struct keyboard_led *keyboard_led = container_of(cdev, struct keyboard_led, cdev);
int ret;
memset(&buf, 0, sizeof(buf));
msg->command = EC_CMD_PWM_GET_KEYBOARD_BACKLIGHT;
msg->insize = sizeof(*resp);
ret = cros_ec_cmd_xfer_status(keyboard_led->ec, msg);
if (ret < 0)
return ret;
return resp->percent;
}
/* Grab the EC transport handle supplied by the parent MFD device. */
static int keyboard_led_init_ec_pwm(struct platform_device *pdev)
{
	struct keyboard_led *led = platform_get_drvdata(pdev);

	led->ec = dev_get_drvdata(pdev->dev.parent);
	if (!led->ec) {
		dev_err(&pdev->dev, "no parent EC device\n");
		return -EINVAL;
	}

	return 0;
}
/* Operations bundle for the EC PWM host-command backend. */
static const __maybe_unused struct keyboard_led_drvdata keyboard_led_drvdata_ec_pwm = {
	.init = keyboard_led_init_ec_pwm,
	.brightness_set_blocking = keyboard_led_set_brightness_ec_pwm,
	.brightness_get = keyboard_led_get_brightness_ec_pwm,
	.max_brightness = KEYBOARD_BACKLIGHT_MAX,
};
#else /* IS_ENABLED(CONFIG_CROS_EC) */
/* Empty stub so the OF match table below still has data to reference. */
static const __maybe_unused struct keyboard_led_drvdata keyboard_led_drvdata_ec_pwm = {};
#endif /* IS_ENABLED(CONFIG_CROS_EC) */
/*
 * Bind the matched backend: run its optional init hook, then register a
 * "chromeos::kbd_backlight" LED class device wired to its operations.
 */
static int keyboard_led_probe(struct platform_device *pdev)
{
	const struct keyboard_led_drvdata *drvdata;
	struct keyboard_led *led;
	int err;

	drvdata = device_get_match_data(&pdev->dev);
	if (!drvdata)
		return -EINVAL;

	led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
	if (!led)
		return -ENOMEM;
	platform_set_drvdata(pdev, led);

	if (drvdata->init) {
		err = drvdata->init(pdev);
		if (err)
			return err;
	}

	led->cdev.name = "chromeos::kbd_backlight";
	led->cdev.flags |= LED_CORE_SUSPENDRESUME;
	led->cdev.max_brightness = drvdata->max_brightness;
	led->cdev.brightness_set = drvdata->brightness_set;
	led->cdev.brightness_set_blocking = drvdata->brightness_set_blocking;
	led->cdev.brightness_get = drvdata->brightness_get;

	return devm_led_classdev_register(&pdev->dev, &led->cdev);
}
#ifdef CONFIG_ACPI
/* ACPI match: GOOG0002 selects the ACPI backend drvdata. */
static const struct acpi_device_id keyboard_led_acpi_match[] = {
	{ "GOOG0002", (kernel_ulong_t)&keyboard_led_drvdata_acpi },
	{ }
};
MODULE_DEVICE_TABLE(acpi, keyboard_led_acpi_match);
#endif
#ifdef CONFIG_OF
/* Devicetree match: selects the EC PWM backend drvdata. */
static const struct of_device_id keyboard_led_of_match[] = {
	{
		.compatible = "google,cros-kbd-led-backlight",
		.data = &keyboard_led_drvdata_ec_pwm,
	},
	{}
};
MODULE_DEVICE_TABLE(of, keyboard_led_of_match);
#endif
/* Platform driver; backend is chosen by whichever table matched. */
static struct platform_driver keyboard_led_driver = {
	.driver		= {
		.name	= "chromeos-keyboard-leds",
		.acpi_match_table = ACPI_PTR(keyboard_led_acpi_match),
		.of_match_table = of_match_ptr(keyboard_led_of_match),
	},
	.probe		= keyboard_led_probe,
};
/* Standard module registration plumbing. */
module_platform_driver(keyboard_led_driver);
MODULE_AUTHOR("Simon Que <[email protected]>");
MODULE_DESCRIPTION("ChromeOS Keyboard backlight LED Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:chromeos-keyboard-leds");
| linux-master | drivers/platform/chrome/cros_kbd_led_backlight.c |
// SPDX-License-Identifier: GPL-2.0
// ISHTP interface for ChromeOS Embedded Controller
//
// Copyright (c) 2019, Intel Corporation.
//
// ISHTP client driver for talking to the Chrome OS EC firmware running
// on Intel Integrated Sensor Hub (ISH) using the ISH Transport protocol
// (ISH-TP).
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/intel-ish-client-if.h>
#include "cros_ec.h"
/*
 * ISH TX/RX ring buffer pool size
 *
 * The AP->ISH messages and corresponding ISH->AP responses are
 * serialized. We need 1 TX and 1 RX buffer for these.
 *
 * The MKBP ISH->AP events are serialized. We need one additional RX
 * buffer for them.
 */
#define CROS_ISH_CL_TX_RING_SIZE 8
#define CROS_ISH_CL_RX_RING_SIZE 8
/* ISH CrOS EC Host Commands */
enum cros_ec_ish_channel {
	CROS_EC_COMMAND = 1, /* AP->ISH message */
	CROS_MKBP_EVENT = 2, /* ISH->AP events */
};
/*
 * ISH firmware timeout for 1 message send failure is 1 second, and the
 * firmware will retry 2 times, so 3 seconds (3 * HZ jiffies) is used
 * for the timeout.
 */
#define ISHTP_SEND_TIMEOUT (3 * HZ)
/* ISH Transport CrOS EC ISH client unique GUID */
static const struct ishtp_device_id cros_ec_ishtp_id_table[] = {
	{ .guid = GUID_INIT(0x7b7154d0, 0x56f4, 0x4bdc,
		  0xb0, 0xd8, 0x9e, 0x7c, 0xda, 0xe0, 0xd6, 0xa0), },
	{ }
};
MODULE_DEVICE_TABLE(ishtp, cros_ec_ishtp_id_table);
/* Framing header prepended to every ISH-TP CrOS EC message. */
struct header {
	u8 channel;	/* enum cros_ec_ish_channel */
	u8 status;	/* non-zero indicates a firmware-reported error */
	u8 token;	/* pairs a response with its outstanding request */
	u8 reserved;
} __packed;
/* AP->ISH: framing header followed by the EC host request. */
struct cros_ish_out_msg {
	struct header hdr;
	struct ec_host_request ec_request;
} __packed;
/* ISH->AP: framing header followed by the EC host response. */
struct cros_ish_in_msg {
	struct header hdr;
	struct ec_host_response ec_response;
} __packed;
/* Byte offsets of the EC structures within the framed messages. */
#define IN_MSG_EC_RESPONSE_PREAMBLE \
	offsetof(struct cros_ish_in_msg, ec_response)
#define OUT_MSG_EC_REQUEST_PREAMBLE \
	offsetof(struct cros_ish_out_msg, ec_request)
#define cl_data_to_dev(client_data) ishtp_device((client_data)->cl_device)
/*
 * The Read-Write Semaphore is used to prevent message TX or RX while
 * the ishtp client is being initialized or undergoing reset.
 *
 * The readers are the kernel function calls responsible for IA->ISH
 * and ISH->AP messaging.
 *
 * The writers are the .reset() and .probe() functions.
 */
static DECLARE_RWSEM(init_lock);
/**
 * struct response_info - Encapsulate firmware response related
 * information for passing between function ish_send() and
 * process_recv() callback.
 *
 * @data: Copy the data received from firmware here.
 * @max_size: Max size allocated for the @data buffer. If the received
 * data exceeds this value, we log an error.
 * @size: Actual size of data received from firmware.
 * @error: 0 for success, negative error code for a failure in process_recv().
 * @token: Expected token for response that we are waiting on.
 * @received: Set to true on receiving a valid firmware response to host command
 * @wait_queue: Wait queue for host to wait for firmware response.
 */
struct response_info {
	void *data;
	size_t max_size;
	size_t size;
	int error;
	u8 token;
	bool received;
	wait_queue_head_t wait_queue;
};
/**
 * struct ishtp_cl_data - Encapsulate per ISH TP Client.
 *
 * @cros_ish_cl: ISHTP firmware client instance.
 * @cl_device: ISHTP client device instance.
 * @response: Response info passing between ish_send() and process_recv().
 * @work_ishtp_reset: Work queue for reset handling.
 * @work_ec_evt: Work queue for EC events.
 * @ec_dev: CrOS EC MFD device.
 *
 * This structure is used to store per client data.
 */
struct ishtp_cl_data {
	struct ishtp_cl *cros_ish_cl;
	struct ishtp_cl_device *cl_device;
	/*
	 * Used for passing firmware response information between
	 * ish_send() and process_recv() callback.
	 */
	struct response_info response;
	struct work_struct work_ishtp_reset;
	struct work_struct work_ec_evt;
	struct cros_ec_device *ec_dev;
};
/**
 * ish_evt_handler - ISH to AP event handler
 * @work: Work struct
 *
 * Runs in process context and hands the pending MKBP event to the
 * CrOS EC core.
 */
static void ish_evt_handler(struct work_struct *work)
{
	struct ishtp_cl_data *cl_data =
		container_of(work, struct ishtp_cl_data, work_ec_evt);

	cros_ec_irq_thread(0, cl_data->ec_dev);
}
/**
 * ish_send() - Send message from host to firmware
 *
 * @client_data: Client data instance
 * @out_msg: Message buffer to be sent to firmware
 * @out_size: Size of out going message
 * @in_msg: Message buffer where the incoming data is copied. This buffer
 * is allocated by the calling function.
 * @in_size: Max size of incoming message
 *
 * Callers are serialized by the EC protocol core, so the single
 * response slot in @client_data->response and the static token are safe.
 *
 * Return: Number of bytes copied in the in_msg on success, negative
 * error code on failure.
 */
static int ish_send(struct ishtp_cl_data *client_data,
		    u8 *out_msg, size_t out_size,
		    u8 *in_msg, size_t in_size)
{
	static u8 next_token;
	int rv;
	struct header *out_hdr = (struct header *)out_msg;
	struct ishtp_cl *cros_ish_cl = client_data->cros_ish_cl;
	dev_dbg(cl_data_to_dev(client_data),
		"%s: channel=%02u status=%02u\n",
		__func__, out_hdr->channel, out_hdr->status);
	/* Setup for incoming response */
	client_data->response.data = in_msg;
	client_data->response.max_size = in_size;
	client_data->response.error = 0;
	client_data->response.token = next_token++;
	client_data->response.received = false;
	out_hdr->token = client_data->response.token;
	rv = ishtp_cl_send(cros_ish_cl, out_msg, out_size);
	if (rv) {
		dev_err(cl_data_to_dev(client_data),
			"ishtp_cl_send error %d\n", rv);
		return rv;
	}
	/*
	 * NOTE(review): the wait's return value is ignored, so a signal
	 * that interrupts the wait is reported as -ETIMEDOUT below.
	 */
	wait_event_interruptible_timeout(client_data->response.wait_queue,
					 client_data->response.received,
					 ISHTP_SEND_TIMEOUT);
	if (!client_data->response.received) {
		dev_err(cl_data_to_dev(client_data),
			"Timed out for response to host message\n");
		return -ETIMEDOUT;
	}
	if (client_data->response.error < 0)
		return client_data->response.error;
	/* Success: report how many bytes process_recv() copied in. */
	return client_data->response.size;
}
/**
 * process_recv() - Receive and parse incoming packet
 * @cros_ish_cl: Client instance to get stats
 * @rb_in_proc: Host interface message buffer
 * @timestamp: Timestamp of when parent callback started
 *
 * Parse the incoming packet. If it is a response packet then it will
 * update per instance flags and wake up the caller waiting for the
 * response. If it is an event packet then it will schedule event work.
 *
 * @rb_in_proc is recycled on every path out of this function, either
 * explicitly in the success branches or by the end_error fallback.
 */
static void process_recv(struct ishtp_cl *cros_ish_cl,
			 struct ishtp_cl_rb *rb_in_proc, ktime_t timestamp)
{
	size_t data_len = rb_in_proc->buf_idx;
	struct ishtp_cl_data *client_data =
		ishtp_get_client_data(cros_ish_cl);
	struct device *dev = cl_data_to_dev(client_data);
	struct cros_ish_in_msg *in_msg =
		(struct cros_ish_in_msg *)rb_in_proc->buffer.data;
	/* Proceed only if reset or init is not in progress */
	if (!down_read_trylock(&init_lock)) {
		/* Free the buffer */
		ishtp_cl_io_rb_recycle(rb_in_proc);
		dev_warn(dev,
			 "Host is not ready to receive incoming messages\n");
		return;
	}
	/*
	 * All firmware messages contain a header. Check the buffer size
	 * before accessing elements inside.
	 */
	if (!rb_in_proc->buffer.data) {
		dev_warn(dev, "rb_in_proc->buffer.data returned null");
		client_data->response.error = -EBADMSG;
		goto end_error;
	}
	if (data_len < sizeof(struct header)) {
		dev_err(dev, "data size %zu is less than header %zu\n",
			data_len, sizeof(struct header));
		client_data->response.error = -EMSGSIZE;
		goto end_error;
	}
	dev_dbg(dev, "channel=%02u status=%02u\n",
		in_msg->hdr.channel, in_msg->hdr.status);
	switch (in_msg->hdr.channel) {
	case CROS_EC_COMMAND:
		if (client_data->response.received) {
			dev_err(dev,
				"Previous firmware message not yet processed\n");
			goto end_error;
		}
		/* Stale responses (token mismatch) are silently dropped. */
		if (client_data->response.token != in_msg->hdr.token) {
			dev_err_ratelimited(dev,
					    "Dropping old response token %d\n",
					    in_msg->hdr.token);
			goto end_error;
		}
		/* Sanity check */
		if (!client_data->response.data) {
			dev_err(dev,
				"Receiving buffer is null. Should be allocated by calling function\n");
			client_data->response.error = -EINVAL;
			goto error_wake_up;
		}
		if (data_len > client_data->response.max_size) {
			dev_err(dev,
				"Received buffer size %zu is larger than allocated buffer %zu\n",
				data_len, client_data->response.max_size);
			client_data->response.error = -EMSGSIZE;
			goto error_wake_up;
		}
		if (in_msg->hdr.status) {
			dev_err(dev, "firmware returned status %d\n",
				in_msg->hdr.status);
			client_data->response.error = -EIO;
			goto error_wake_up;
		}
		/* Update the actual received buffer size */
		client_data->response.size = data_len;
		/*
		 * Copy the buffer received in firmware response for the
		 * calling thread.
		 */
		memcpy(client_data->response.data,
		       rb_in_proc->buffer.data, data_len);
error_wake_up:
		/* Free the buffer since we copied data or didn't need it */
		ishtp_cl_io_rb_recycle(rb_in_proc);
		rb_in_proc = NULL;
		/* Set flag before waking up the caller */
		client_data->response.received = true;
		/* Wake the calling thread */
		wake_up_interruptible(&client_data->response.wait_queue);
		break;
	case CROS_MKBP_EVENT:
		/* Free the buffer. This is just an event without data */
		ishtp_cl_io_rb_recycle(rb_in_proc);
		rb_in_proc = NULL;
		/*
		 * Set timestamp from beginning of function since we actually
		 * got an incoming MKBP event
		 */
		client_data->ec_dev->last_event_time = timestamp;
		schedule_work(&client_data->work_ec_evt);
		break;
	default:
		dev_err(dev, "Invalid channel=%02d\n", in_msg->hdr.channel);
	}
end_error:
	/*
	 * Errors taken directly to this label do not wake the waiter;
	 * ish_send() will time out instead.
	 */
	/* Free the buffer if we already haven't */
	if (rb_in_proc)
		ishtp_cl_io_rb_recycle(rb_in_proc);
	up_read(&init_lock);
}
/**
 * ish_event_cb() - bus driver callback for incoming message
 * @cl_device: ISHTP client device for which this message is targeted.
 *
 * Drain every pending receive buffer, dispatching each one through
 * process_recv().
 */
static void ish_event_cb(struct ishtp_cl_device *cl_device)
{
	struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device);
	struct ishtp_cl_rb *rb;
	ktime_t timestamp;

	/*
	 * Take timestamp as close to hardware interrupt as possible for
	 * sensor timestamps.
	 */
	timestamp = cros_ec_get_time_ns();

	for (;;) {
		rb = ishtp_cl_rx_get_rb(cros_ish_cl);
		if (!rb)
			break;
		process_recv(cros_ish_cl, rb, timestamp);
	}
}
/**
 * cros_ish_init() - Init function for ISHTP client
 * @cros_ish_cl: ISHTP client instance
 *
 * This function completes the initialization of the client: link it,
 * connect to the matching firmware client and register the receive
 * callback.
 *
 * Return: 0 for success, negative error code for failure.
 */
static int cros_ish_init(struct ishtp_cl *cros_ish_cl)
{
	int rv;
	struct ishtp_device *dev;
	struct ishtp_fw_client *fw_client;
	struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl);
	rv = ishtp_cl_link(cros_ish_cl);
	if (rv) {
		dev_err(cl_data_to_dev(client_data),
			"ishtp_cl_link failed\n");
		return rv;
	}
	dev = ishtp_get_ishtp_device(cros_ish_cl);
	/* Connect to firmware client */
	ishtp_set_tx_ring_size(cros_ish_cl, CROS_ISH_CL_TX_RING_SIZE);
	ishtp_set_rx_ring_size(cros_ish_cl, CROS_ISH_CL_RX_RING_SIZE);
	/* Look up the firmware-side client by its GUID. */
	fw_client = ishtp_fw_cl_get_client(dev, &cros_ec_ishtp_id_table[0].guid);
	if (!fw_client) {
		dev_err(cl_data_to_dev(client_data),
			"ish client uuid not found\n");
		rv = -ENOENT;
		goto err_cl_unlink;
	}
	ishtp_cl_set_fw_client_id(cros_ish_cl,
				  ishtp_get_fw_client_id(fw_client));
	ishtp_set_connection_state(cros_ish_cl, ISHTP_CL_CONNECTING);
	rv = ishtp_cl_connect(cros_ish_cl);
	if (rv) {
		dev_err(cl_data_to_dev(client_data),
			"client connect fail\n");
		goto err_cl_unlink;
	}
	ishtp_register_event_cb(client_data->cl_device, ish_event_cb);
	return 0;
err_cl_unlink:
	ishtp_cl_unlink(cros_ish_cl);
	return rv;
}
/**
 * cros_ish_deinit() - Deinit function for ISHTP client
 * @cros_ish_cl: ISHTP client instance
 *
 * Disconnect, unlink and free the client; mirrors cros_ish_init().
 */
static void cros_ish_deinit(struct ishtp_cl *cros_ish_cl)
{
	ishtp_set_connection_state(cros_ish_cl, ISHTP_CL_DISCONNECTING);
	ishtp_cl_disconnect(cros_ish_cl);
	ishtp_cl_unlink(cros_ish_cl);
	ishtp_cl_flush_queues(cros_ish_cl);
	/* Disband and free all Tx and Rx client-level rings */
	ishtp_cl_free(cros_ish_cl);
}
/**
 * prepare_cros_ec_rx() - Check & prepare receive buffer
 * @ec_dev: CrOS EC MFD device.
 * @in_msg: Incoming message buffer
 * @msg: cros_ec command used to send & receive data
 *
 * Validate the EC result code and the packet checksum, copying the
 * response payload into @msg->data along the way.
 *
 * Return: 0 for success, negative error code for failure.
 */
static int prepare_cros_ec_rx(struct cros_ec_device *ec_dev,
			      const struct cros_ish_in_msg *in_msg,
			      struct cros_ec_command *msg)
{
	const u8 *bytes = (const u8 *)in_msg;
	u8 sum = 0;
	int i, ret;

	/* Propagate and validate the EC result code first. */
	msg->result = in_msg->ec_response.result;
	ret = cros_ec_check_result(ec_dev, msg);
	if (ret < 0)
		return ret;

	if (in_msg->ec_response.data_len > msg->insize) {
		dev_err(ec_dev->dev, "Packet too long (%d bytes, expected %d)",
			in_msg->ec_response.data_len, msg->insize);
		return -ENOSPC;
	}

	/* The checksum covers the EC response header... */
	for (i = 0; i < sizeof(struct ec_host_response); i++)
		sum += bytes[IN_MSG_EC_RESPONSE_PREAMBLE + i];

	/* ...and the payload, which is copied out while summing. */
	for (i = 0; i < in_msg->ec_response.data_len; i++) {
		msg->data[i] = bytes[sizeof(struct cros_ish_in_msg) + i];
		sum += msg->data[i];
	}

	if (sum) {
		dev_dbg(ec_dev->dev, "Bad received packet checksum %d\n", sum);
		return -EBADMSG;
	}

	return 0;
}
/*
 * cros_ec_pkt_xfer_ish() - transfer one EC host command over ISH-TP.
 *
 * Frames the EC request behind an ISH-TP header in ec_dev->dout, sends
 * it, waits for the response and unpacks it into @msg.
 *
 * Fix: the original advanced ec_dev->dout by the framing preamble and
 * only restored it on the success path; a cros_ec_prepare_tx() failure
 * left dout permanently shifted, corrupting every subsequent transfer.
 * The pointer is now restored unconditionally.
 *
 * Return: number of response payload bytes on success, negative error
 * code on failure.
 */
static int cros_ec_pkt_xfer_ish(struct cros_ec_device *ec_dev,
				struct cros_ec_command *msg)
{
	int rv;
	struct ishtp_cl *cros_ish_cl = ec_dev->priv;
	struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl);
	struct device *dev = cl_data_to_dev(client_data);
	struct cros_ish_in_msg *in_msg = (struct cros_ish_in_msg *)ec_dev->din;
	struct cros_ish_out_msg *out_msg =
		(struct cros_ish_out_msg *)ec_dev->dout;
	size_t in_size = sizeof(struct cros_ish_in_msg) + msg->insize;
	size_t out_size = sizeof(struct cros_ish_out_msg) + msg->outsize;

	/* Sanity checks */
	if (in_size > ec_dev->din_size) {
		dev_err(dev,
			"Incoming payload size %zu is too large for ec_dev->din_size %d\n",
			in_size, ec_dev->din_size);
		return -EMSGSIZE;
	}

	if (out_size > ec_dev->dout_size) {
		dev_err(dev,
			"Outgoing payload size %zu is too large for ec_dev->dout_size %d\n",
			out_size, ec_dev->dout_size);
		return -EMSGSIZE;
	}

	/* Proceed only if reset-init is not in progress */
	if (!down_read_trylock(&init_lock)) {
		dev_warn(dev,
			 "Host is not ready to send messages to ISH. Try again\n");
		return -EAGAIN;
	}

	/* Prepare the package to be sent over ISH TP */
	out_msg->hdr.channel = CROS_EC_COMMAND;
	out_msg->hdr.status = 0;

	/*
	 * Temporarily advance dout past the ISH-TP framing header so
	 * cros_ec_prepare_tx() serializes the EC request in the right
	 * place, then restore the pointer regardless of the outcome.
	 */
	ec_dev->dout += OUT_MSG_EC_REQUEST_PREAMBLE;
	rv = cros_ec_prepare_tx(ec_dev, msg);
	ec_dev->dout -= OUT_MSG_EC_REQUEST_PREAMBLE;
	if (rv < 0)
		goto end_error;

	dev_dbg(dev,
		"out_msg: struct_ver=0x%x checksum=0x%x command=0x%x command_ver=0x%x data_len=0x%x\n",
		out_msg->ec_request.struct_version,
		out_msg->ec_request.checksum,
		out_msg->ec_request.command,
		out_msg->ec_request.command_version,
		out_msg->ec_request.data_len);

	/* Send command to ISH EC firmware and read response */
	rv = ish_send(client_data,
		      (u8 *)out_msg, out_size,
		      (u8 *)in_msg, in_size);
	if (rv < 0)
		goto end_error;

	rv = prepare_cros_ec_rx(ec_dev, in_msg, msg);
	if (rv)
		goto end_error;

	rv = in_msg->ec_response.data_len;

	dev_dbg(dev,
		"in_msg: struct_ver=0x%x checksum=0x%x result=0x%x data_len=0x%x\n",
		in_msg->ec_response.struct_version,
		in_msg->ec_response.checksum,
		in_msg->ec_response.result,
		in_msg->ec_response.data_len);

end_error:
	/* Give the EC time to come back after a requested reboot. */
	if (msg->command == EC_CMD_REBOOT_EC)
		msleep(EC_REBOOT_DELAY_MS);

	up_read(&init_lock);

	return rv;
}
/* Allocate, populate and register the cros_ec_device for this client. */
static int cros_ec_dev_init(struct ishtp_cl_data *cl_data)
{
	struct device *dev = cl_data_to_dev(cl_data);
	struct cros_ec_device *ec_dev;

	ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL);
	if (!ec_dev)
		return -ENOMEM;

	cl_data->ec_dev = ec_dev;
	dev->driver_data = ec_dev;

	ec_dev->dev = dev;
	ec_dev->priv = cl_data->cros_ish_cl;
	ec_dev->cmd_xfer = NULL;
	ec_dev->pkt_xfer = cros_ec_pkt_xfer_ish;
	ec_dev->phys_name = dev_name(dev);
	/* Room for the framed protocol-info response during handshake. */
	ec_dev->din_size = sizeof(struct cros_ish_in_msg) +
			   sizeof(struct ec_response_get_protocol_info);
	ec_dev->dout_size = sizeof(struct cros_ish_out_msg);

	return cros_ec_register(ec_dev);
}
/*
 * Worker that rebuilds the ISHTP client after a firmware reset: the old
 * client is torn down, a fresh one is allocated and re-initialized, and
 * the existing ec_dev is repointed at it. Holds init_lock for write so
 * no TX/RX can race with the rebuild.
 */
static void reset_handler(struct work_struct *work)
{
	int rv;
	struct device *dev;
	struct ishtp_cl *cros_ish_cl;
	struct ishtp_cl_device *cl_device;
	struct ishtp_cl_data *client_data =
		container_of(work, struct ishtp_cl_data, work_ishtp_reset);
	/* Lock for reset to complete */
	down_write(&init_lock);
	cros_ish_cl = client_data->cros_ish_cl;
	cl_device = client_data->cl_device;
	/* Unlink, flush queues & start again */
	ishtp_cl_unlink(cros_ish_cl);
	ishtp_cl_flush_queues(cros_ish_cl);
	ishtp_cl_free(cros_ish_cl);
	cros_ish_cl = ishtp_cl_allocate(cl_device);
	if (!cros_ish_cl) {
		up_write(&init_lock);
		return;
	}
	ishtp_set_drvdata(cl_device, cros_ish_cl);
	ishtp_set_client_data(cros_ish_cl, client_data);
	client_data->cros_ish_cl = cros_ish_cl;
	rv = cros_ish_init(cros_ish_cl);
	if (rv) {
		ishtp_cl_free(cros_ish_cl);
		dev_err(cl_data_to_dev(client_data), "Reset Failed\n");
		up_write(&init_lock);
		return;
	}
	/* Refresh ec_dev device pointers */
	client_data->ec_dev->priv = client_data->cros_ish_cl;
	dev = cl_data_to_dev(client_data);
	dev->driver_data = client_data->ec_dev;
	dev_info(cl_data_to_dev(client_data), "Chrome EC ISH reset done\n");
	up_write(&init_lock);
}
/**
 * cros_ec_ishtp_probe() - ISHTP client driver probe callback
 * @cl_device: ISHTP client device instance
 *
 * Allocates and initializes the ISHTP client under init_lock, then
 * registers the cros_ec device outside the lock (cros_ec_register()
 * triggers host commands that take init_lock for read).
 *
 * Return: 0 for success, negative error code for failure.
 */
static int cros_ec_ishtp_probe(struct ishtp_cl_device *cl_device)
{
	int rv;
	struct ishtp_cl *cros_ish_cl;
	struct ishtp_cl_data *client_data =
		devm_kzalloc(ishtp_device(cl_device),
			     sizeof(*client_data), GFP_KERNEL);
	if (!client_data)
		return -ENOMEM;
	/* Lock for initialization to complete */
	down_write(&init_lock);
	cros_ish_cl = ishtp_cl_allocate(cl_device);
	if (!cros_ish_cl) {
		rv = -ENOMEM;
		goto end_ishtp_cl_alloc_error;
	}
	ishtp_set_drvdata(cl_device, cros_ish_cl);
	ishtp_set_client_data(cros_ish_cl, client_data);
	client_data->cros_ish_cl = cros_ish_cl;
	client_data->cl_device = cl_device;
	init_waitqueue_head(&client_data->response.wait_queue);
	INIT_WORK(&client_data->work_ishtp_reset,
		  reset_handler);
	INIT_WORK(&client_data->work_ec_evt,
		  ish_evt_handler);
	rv = cros_ish_init(cros_ish_cl);
	if (rv)
		goto end_ishtp_cl_init_error;
	ishtp_get_device(cl_device);
	up_write(&init_lock);
	/* Register cros_ec_dev mfd */
	rv = cros_ec_dev_init(client_data);
	if (rv) {
		down_write(&init_lock);
		goto end_cros_ec_dev_init_error;
	}
	return 0;
end_cros_ec_dev_init_error:
	ishtp_set_connection_state(cros_ish_cl, ISHTP_CL_DISCONNECTING);
	ishtp_cl_disconnect(cros_ish_cl);
	ishtp_cl_unlink(cros_ish_cl);
	ishtp_cl_flush_queues(cros_ish_cl);
	ishtp_put_device(cl_device);
end_ishtp_cl_init_error:
	ishtp_cl_free(cros_ish_cl);
end_ishtp_cl_alloc_error:
	up_write(&init_lock);
	return rv;
}
/**
 * cros_ec_ishtp_remove() - ISHTP client driver remove callback
 * @cl_device: ISHTP client device instance
 *
 * Cancels any outstanding work first so the reset/event workers cannot
 * run against a client that is being torn down.
 */
static void cros_ec_ishtp_remove(struct ishtp_cl_device *cl_device)
{
	struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device);
	struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl);
	cancel_work_sync(&client_data->work_ishtp_reset);
	cancel_work_sync(&client_data->work_ec_evt);
	cros_ish_deinit(cros_ish_cl);
	ishtp_put_device(cl_device);
}
/**
 * cros_ec_ishtp_reset() - ISHTP client driver reset callback
 * @cl_device: ISHTP client device instance
 *
 * Defers the heavyweight client rebuild to the reset work item.
 *
 * Return: 0
 */
static int cros_ec_ishtp_reset(struct ishtp_cl_device *cl_device)
{
	struct ishtp_cl *cl = ishtp_get_drvdata(cl_device);
	struct ishtp_cl_data *cl_data = ishtp_get_client_data(cl);

	schedule_work(&cl_data->work_ishtp_reset);

	return 0;
}
/**
 * cros_ec_ishtp_suspend() - ISHTP client driver suspend callback
 * @device: device instance
 *
 * Return: 0 for success, negative error code for failure.
 */
static int __maybe_unused cros_ec_ishtp_suspend(struct device *device)
{
	struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);
	struct ishtp_cl_data *cl_data =
		ishtp_get_client_data(ishtp_get_drvdata(cl_device));

	return cros_ec_suspend(cl_data->ec_dev);
}
/**
 * cros_ec_ishtp_resume() - ISHTP client driver resume callback
 * @device: device instance
 *
 * Return: 0 for success, negative error code for failure.
 */
static int __maybe_unused cros_ec_ishtp_resume(struct device *device)
{
	struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);
	struct ishtp_cl_data *cl_data =
		ishtp_get_client_data(ishtp_get_drvdata(cl_device));

	return cros_ec_resume(cl_data->ec_dev);
}
/* System sleep hooks; compiled out when CONFIG_PM_SLEEP is disabled. */
static SIMPLE_DEV_PM_OPS(cros_ec_ishtp_pm_ops, cros_ec_ishtp_suspend,
			 cros_ec_ishtp_resume);
/* ISHTP client driver descriptor tying the callbacks together. */
static struct ishtp_cl_driver	cros_ec_ishtp_driver = {
	.name = "cros_ec_ishtp",
	.id = cros_ec_ishtp_id_table,
	.probe = cros_ec_ishtp_probe,
	.remove = cros_ec_ishtp_remove,
	.reset = cros_ec_ishtp_reset,
	.driver = {
		.pm = &cros_ec_ishtp_pm_ops,
	},
};
/* Module entry points: register/unregister the ISHTP client driver. */
static int __init cros_ec_ishtp_mod_init(void)
{
	return ishtp_cl_driver_register(&cros_ec_ishtp_driver, THIS_MODULE);
}
static void __exit cros_ec_ishtp_mod_exit(void)
{
	ishtp_cl_driver_unregister(&cros_ec_ishtp_driver);
}
module_init(cros_ec_ishtp_mod_init);
module_exit(cros_ec_ishtp_mod_exit);
MODULE_DESCRIPTION("ChromeOS EC ISHTP Client Driver");
MODULE_AUTHOR("Rushikesh S Kadam <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/platform/chrome/cros_ec_ishtp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Miscellaneous character driver for ChromeOS Embedded Controller
*
* Copyright 2014 Google, Inc.
* Copyright 2019 Google LLC
*
* This file is a rework and part of the code is ported from
* drivers/mfd/cros_ec_dev.c that was originally written by
* Bill Richardson.
*/
#include <linux/init.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/platform_data/cros_ec_chardev.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#define DRV_NAME "cros-ec-chardev"
/* Arbitrary bounded size for the event queue */
#define CROS_MAX_EVENT_LEN PAGE_SIZE
/* Per-miscdevice bookkeeping. */
struct chardev_data {
	struct cros_ec_dev *ec_dev;	/* parent EC device */
	struct miscdevice misc;		/* registered character device */
};
/* Per-open-file state. */
struct chardev_priv {
	struct cros_ec_dev *ec_dev;
	struct notifier_block notifier;	/* MKBP event subscription */
	wait_queue_head_t wait_event;	/* its lock also guards @events */
	unsigned long event_mask;	/* bitmap of event types to queue */
	struct list_head events;	/* queued struct ec_event nodes */
	size_t event_len;		/* total queued bytes, capped at CROS_MAX_EVENT_LEN */
};
/* One queued MKBP event: a type byte followed by the raw payload. */
struct ec_event {
	struct list_head node;
	size_t size;		/* number of bytes in @data */
	u8 event_type;
	u8 data[];
};
/*
 * Format the EC version information (driver version, RO string, RW
 * string, current image name) into @str. On transfer failure an
 * explanatory message is left in @str and the error code is returned.
 */
static int ec_get_version(struct cros_ec_dev *ec, char *str, int maxlen)
{
	static const char * const current_image_name[] = {
		"unknown", "read-only", "read-write", "invalid",
	};
	struct ec_response_get_version *resp;
	struct cros_ec_command *msg;
	int ret;

	msg = kzalloc(sizeof(*msg) + sizeof(*resp), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->command = EC_CMD_GET_VERSION + ec->cmd_offset;
	msg->insize = sizeof(*resp);

	ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
	if (ret < 0) {
		snprintf(str, maxlen,
			 "Unknown EC version, returned error: %d\n",
			 msg->result);
	} else {
		resp = (struct ec_response_get_version *)msg->data;
		if (resp->current_image >= ARRAY_SIZE(current_image_name))
			resp->current_image = 3; /* invalid */

		snprintf(str, maxlen, "%s\n%s\n%s\n%s\n", CROS_EC_DEV_VERSION,
			 resp->version_string_ro, resp->version_string_rw,
			 current_image_name[resp->current_image]);
		ret = 0;
	}

	kfree(msg);
	return ret;
}
/*
 * Notifier callback: queue a copy of the incoming MKBP event for this
 * reader if its type is enabled in the file's event mask and the queue
 * has room.
 */
static int cros_ec_chardev_mkbp_event(struct notifier_block *nb,
				      unsigned long queued_during_suspend,
				      void *_notify)
{
	struct chardev_priv *priv = container_of(nb, struct chardev_priv,
						 notifier);
	struct cros_ec_device *ec_dev = priv->ec_dev->ec_dev;
	struct ec_event *event;
	/*
	 * Shift an unsigned long, not the int constant 1: the mask is an
	 * unsigned long, and event types >= 31 would make the int shift
	 * undefined behavior.
	 */
	unsigned long event_bit = 1UL << ec_dev->event_data.event_type;
	int total_size = sizeof(*event) + ec_dev->event_size;

	if (!(event_bit & priv->event_mask) ||
	    (priv->event_len + total_size) > CROS_MAX_EVENT_LEN)
		return NOTIFY_DONE;

	event = kzalloc(total_size, GFP_KERNEL);
	if (!event)
		return NOTIFY_DONE;

	event->size = ec_dev->event_size;
	event->event_type = ec_dev->event_data.event_type;
	memcpy(event->data, &ec_dev->event_data.data, ec_dev->event_size);

	/* The wait queue's spinlock guards the event list and length. */
	spin_lock(&priv->wait_event.lock);
	list_add_tail(&event->node, &priv->events);
	priv->event_len += total_size;
	wake_up_locked(&priv->wait_event);
	spin_unlock(&priv->wait_event.lock);

	return NOTIFY_OK;
}
/*
 * Dequeue the oldest queued event for this reader.
 *
 * @fetch: when false, only check for error conditions without removing
 *         anything (used by zero-length reads).
 * @block: when false, return -EWOULDBLOCK instead of sleeping on an
 *         empty queue.
 *
 * Returns the event (ownership passes to the caller, who must kfree()
 * it), NULL when @fetch is false, or an ERR_PTR on error.
 */
static struct ec_event *cros_ec_chardev_fetch_event(struct chardev_priv *priv,
						    bool fetch, bool block)
{
	struct ec_event *event;
	int err;
	spin_lock(&priv->wait_event.lock);
	if (!block && list_empty(&priv->events)) {
		event = ERR_PTR(-EWOULDBLOCK);
		goto out;
	}
	if (!fetch) {
		event = NULL;
		goto out;
	}
	/* Drops the spinlock while sleeping, reacquires it on wakeup. */
	err = wait_event_interruptible_locked(priv->wait_event,
					      !list_empty(&priv->events));
	if (err) {
		event = ERR_PTR(err);
		goto out;
	}
	event = list_first_entry(&priv->events, struct ec_event, node);
	list_del(&event->node);
	priv->event_len -= sizeof(*event) + event->size;
out:
	spin_unlock(&priv->wait_event.lock);
	return event;
}
/*
 * Device file ops
 */
/*
 * Allocate per-file state and subscribe it to MKBP event notifications.
 *
 * Fix: the original published priv in filp->private_data before the
 * notifier registration could fail, leaving private_data pointing at
 * the freed allocation on the error path. Publish it only once every
 * failure point has passed.
 */
static int cros_ec_chardev_open(struct inode *inode, struct file *filp)
{
	struct miscdevice *mdev = filp->private_data;
	struct cros_ec_dev *ec_dev = dev_get_drvdata(mdev->parent);
	struct chardev_priv *priv;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->ec_dev = ec_dev;
	INIT_LIST_HEAD(&priv->events);
	init_waitqueue_head(&priv->wait_event);
	nonseekable_open(inode, filp);

	priv->notifier.notifier_call = cros_ec_chardev_mkbp_event;
	ret = blocking_notifier_chain_register(&ec_dev->ec_dev->event_notifier,
					       &priv->notifier);
	if (ret) {
		dev_err(ec_dev->dev, "failed to register event notifier\n");
		kfree(priv);
		return ret;
	}

	filp->private_data = priv;

	return 0;
}
/* Report readability: data is available once the event queue is non-empty. */
static __poll_t cros_ec_chardev_poll(struct file *filp, poll_table *wait)
{
	struct chardev_priv *priv = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &priv->wait_event, wait);

	if (!list_empty(&priv->events))
		mask = EPOLLIN | EPOLLRDNORM;

	return mask;
}
/*
 * Read from the EC character device.
 *
 * Two modes:
 *  - If the reader set an event mask (CROS_EC_DEV_IOCEVENTMASK), each
 *    read returns one queued MKBP event: 1 type byte followed by the
 *    payload.  length == 0 only checks for error conditions.
 *  - Otherwise (legacy), a one-shot read of the EC version string.
 */
static ssize_t cros_ec_chardev_read(struct file *filp, char __user *buffer,
				    size_t length, loff_t *offset)
{
	char msg[sizeof(struct ec_response_get_version) +
		 sizeof(CROS_EC_DEV_VERSION)];
	struct chardev_priv *priv = filp->private_data;
	struct cros_ec_dev *ec_dev = priv->ec_dev;
	size_t count;
	int ret;

	if (priv->event_mask) { /* queued MKBP event */
		struct ec_event *event;

		event = cros_ec_chardev_fetch_event(priv, length != 0,
						!(filp->f_flags & O_NONBLOCK));
		if (IS_ERR(event))
			return PTR_ERR(event);
		/*
		 * length == 0 is special - no IO is done but we check
		 * for error conditions.
		 */
		if (length == 0)
			return 0;

		/* The event is 1 byte of type plus the payload */
		count = min(length, event->size + 1);
		ret = copy_to_user(buffer, &event->event_type, count);
		kfree(event);
		if (ret) /* the copy failed */
			return -EFAULT;
		*offset = count;
		return count;
	}

	/*
	 * Legacy behavior if no event mask is defined
	 */
	if (*offset != 0)
		return 0;

	ret = ec_get_version(ec_dev, msg, sizeof(msg));
	if (ret)
		return ret;

	count = min(length, strlen(msg));
	if (copy_to_user(buffer, msg, count))
		return -EFAULT;

	*offset = count;
	return count;
}
/*
 * Last close: unhook the MKBP notifier, then discard any events still
 * queued on this file before freeing the per-open state.
 */
static int cros_ec_chardev_release(struct inode *inode, struct file *filp)
{
	struct chardev_priv *priv = filp->private_data;
	struct cros_ec_dev *ec_dev = priv->ec_dev;
	struct ec_event *evt, *tmp;

	blocking_notifier_chain_unregister(&ec_dev->ec_dev->event_notifier,
					   &priv->notifier);

	list_for_each_entry_safe(evt, tmp, &priv->events, node) {
		list_del(&evt->node);
		kfree(evt);
	}
	kfree(priv);

	return 0;
}
/*
* Ioctls
*/
/*
 * CROS_EC_DEV_IOCXCMD: forward an arbitrary host command from userspace
 * to the EC and copy the response back.
 *
 * The header is copied twice: once to learn the buffer sizes, then again
 * together with the payload; the second copy is re-checked against the
 * first to defend against a concurrent userspace modification (TOCTOU).
 */
static long cros_ec_chardev_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
{
	struct cros_ec_command *s_cmd;
	struct cros_ec_command u_cmd;
	long ret;

	if (copy_from_user(&u_cmd, arg, sizeof(u_cmd)))
		return -EFAULT;

	if (u_cmd.outsize > EC_MAX_MSG_BYTES ||
	    u_cmd.insize > EC_MAX_MSG_BYTES)
		return -EINVAL;

	s_cmd = kzalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize),
			GFP_KERNEL);
	if (!s_cmd)
		return -ENOMEM;

	if (copy_from_user(s_cmd, arg, sizeof(*s_cmd) + u_cmd.outsize)) {
		ret = -EFAULT;
		goto exit;
	}

	/* Re-validate: sizes must not have changed between the two copies. */
	if (u_cmd.outsize != s_cmd->outsize ||
	    u_cmd.insize != s_cmd->insize) {
		ret = -EINVAL;
		goto exit;
	}

	/* Route to the right EC/PD device via the command offset. */
	s_cmd->command += ec->cmd_offset;
	ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd);
	/* Only copy data to userland if data was received. */
	if (ret < 0)
		goto exit;

	if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + s_cmd->insize))
		ret = -EFAULT;
exit:
	kfree(s_cmd);
	return ret;
}
/*
 * CROS_EC_DEV_IOCRDMEM: read from the EC's shared memory-map region.
 * Returns the number of bytes read, 0/negative from the backend, or an
 * error code.  -ENOTTY when the transport has no direct-read support.
 */
static long cros_ec_chardev_ioctl_readmem(struct cros_ec_dev *ec,
					  void __user *arg)
{
	struct cros_ec_device *ec_dev = ec->ec_dev;
	struct cros_ec_readmem s_mem = { };
	long num;

	/* Not every platform supports direct reads */
	if (!ec_dev->cmd_readmem)
		return -ENOTTY;

	if (copy_from_user(&s_mem, arg, sizeof(s_mem)))
		return -EFAULT;

	/* Bound the request to the userspace-visible buffer size. */
	if (s_mem.bytes > sizeof(s_mem.buffer))
		return -EINVAL;

	num = ec_dev->cmd_readmem(ec_dev, s_mem.offset, s_mem.bytes,
				  s_mem.buffer);
	if (num <= 0)
		return num;

	if (copy_to_user((void __user *)arg, &s_mem, sizeof(s_mem)))
		return -EFAULT;

	return num;
}
static long cros_ec_chardev_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct chardev_priv *priv = filp->private_data;
struct cros_ec_dev *ec = priv->ec_dev;
if (_IOC_TYPE(cmd) != CROS_EC_DEV_IOC)
return -ENOTTY;
switch (cmd) {
case CROS_EC_DEV_IOCXCMD:
return cros_ec_chardev_ioctl_xcmd(ec, (void __user *)arg);
case CROS_EC_DEV_IOCRDMEM:
return cros_ec_chardev_ioctl_readmem(ec, (void __user *)arg);
case CROS_EC_DEV_IOCEVENTMASK:
priv->event_mask = arg;
return 0;
}
return -ENOTTY;
}
/* File operations for the /dev/cros_* miscdevice node. */
static const struct file_operations chardev_fops = {
	.open		= cros_ec_chardev_open,
	.poll		= cros_ec_chardev_poll,
	.read		= cros_ec_chardev_read,
	.release	= cros_ec_chardev_release,
	.unlocked_ioctl	= cros_ec_chardev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= cros_ec_chardev_ioctl,
#endif
};
/*
 * Platform probe: register a dynamic-minor miscdevice named after the
 * EC (ec_name from platform data), parented to the EC device.
 */
static int cros_ec_chardev_probe(struct platform_device *pdev)
{
	struct cros_ec_dev *ec_dev = dev_get_drvdata(pdev->dev.parent);
	struct cros_ec_platform *ec_platform = dev_get_platdata(ec_dev->dev);
	struct chardev_data *data;

	/* Create a char device: we want to create it anew */
	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->ec_dev = ec_dev;
	data->misc.minor = MISC_DYNAMIC_MINOR;
	data->misc.fops = &chardev_fops;
	data->misc.name = ec_platform->ec_name;
	data->misc.parent = pdev->dev.parent;

	dev_set_drvdata(&pdev->dev, data);

	return misc_register(&data->misc);
}
/* Platform remove: tear down the miscdevice (memory is devm-managed). */
static int cros_ec_chardev_remove(struct platform_device *pdev)
{
	struct chardev_data *data = dev_get_drvdata(&pdev->dev);

	misc_deregister(&data->misc);

	return 0;
}
/* Platform driver glue; bound by name to the "cros-ec-chardev" cell. */
static struct platform_driver cros_ec_chardev_driver = {
	.driver = {
		.name = DRV_NAME,
	},
	.probe = cros_ec_chardev_probe,
	.remove = cros_ec_chardev_remove,
};
module_platform_driver(cros_ec_chardev_driver);
MODULE_ALIAS("platform:" DRV_NAME);
MODULE_AUTHOR("Enric Balletbo i Serra <[email protected]>");
MODULE_DESCRIPTION("ChromeOS EC Miscellaneous Character Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/chrome/cros_ec_chardev.c |
// SPDX-License-Identifier: GPL-2.0+
// Debug logs for the ChromeOS EC
//
// Copyright (C) 2015 Google, Inc.
#include <linux/circ_buf.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#define DRV_NAME "cros-ec-debugfs"
#define LOG_SHIFT 14
#define LOG_SIZE (1 << LOG_SHIFT)
#define LOG_POLL_SEC 10
#define CIRC_ADD(idx, size, value) (((idx) + (value)) & ((size) - 1))
/* waitqueue for log readers */
static DECLARE_WAIT_QUEUE_HEAD(cros_ec_debugfs_log_wq);
/**
* struct cros_ec_debugfs - EC debugging information.
*
* @ec: EC device this debugfs information belongs to
* @dir: dentry for debugfs files
* @log_buffer: circular buffer for console log information
* @read_msg: preallocated EC command and buffer to read console log
* @log_mutex: mutex to protect circular buffer
* @log_poll_work: recurring task to poll EC for new console log data
* @panicinfo_blob: panicinfo debugfs blob
* @notifier_panic: notifier_block to let kernel to flush buffered log
* when EC panic
*/
/* Per-EC debugfs state; fields are described in the kernel-doc above. */
struct cros_ec_debugfs {
	struct cros_ec_dev *ec;
	struct dentry *dir;
	/* EC log */
	struct circ_buf log_buffer;
	struct cros_ec_command *read_msg;	/* preallocated CONSOLE_READ msg */
	struct mutex log_mutex;			/* guards log_buffer head/tail */
	struct delayed_work log_poll_work;
	/* EC panicinfo */
	struct debugfs_blob_wrapper panicinfo_blob;
	struct notifier_block notifier_panic;
};
/*
 * We need to make sure that the EC log buffer on the UART is large enough,
 * so that it is unlikely to overflow within LOG_POLL_SEC.
 */
/*
 * Periodic worker: snapshot the EC console, then drain it into the
 * kernel-side circular buffer until the EC has nothing left or our
 * buffer is full.  Always reschedules itself after LOG_POLL_SEC.
 */
static void cros_ec_console_log_work(struct work_struct *__work)
{
	struct cros_ec_debugfs *debug_info =
		container_of(to_delayed_work(__work),
			     struct cros_ec_debugfs,
			     log_poll_work);
	struct cros_ec_dev *ec = debug_info->ec;
	struct circ_buf *cb = &debug_info->log_buffer;
	struct cros_ec_command snapshot_msg = {
		.command = EC_CMD_CONSOLE_SNAPSHOT + ec->cmd_offset,
	};
	/* read_msg->data doubles as request params (out) and response (in). */
	struct ec_params_console_read_v1 *read_params =
		(struct ec_params_console_read_v1 *)debug_info->read_msg->data;
	uint8_t *ec_buffer = (uint8_t *)debug_info->read_msg->data;
	int idx;
	int buf_space;
	int ret;

	/* Ask the EC to freeze a snapshot of its console for reading. */
	ret = cros_ec_cmd_xfer_status(ec->ec_dev, &snapshot_msg);
	if (ret < 0)
		goto resched;

	/* Loop until we have read everything, or there's an error. */
	mutex_lock(&debug_info->log_mutex);
	buf_space = CIRC_SPACE(cb->head, cb->tail, LOG_SIZE);

	while (1) {
		if (!buf_space) {
			dev_info_once(ec->dev,
				      "Some logs may have been dropped...\n");
			break;
		}

		memset(read_params, '\0', sizeof(*read_params));
		read_params->subcmd = CONSOLE_READ_RECENT;
		ret = cros_ec_cmd_xfer_status(ec->ec_dev,
					      debug_info->read_msg);
		if (ret < 0)
			break;

		/* If the buffer is empty, we're done here. */
		if (ret == 0 || ec_buffer[0] == '\0')
			break;

		/* Copy bytes one at a time; stop at NUL or when full. */
		idx = 0;
		while (idx < ret && ec_buffer[idx] != '\0' && buf_space > 0) {
			cb->buf[cb->head] = ec_buffer[idx];
			cb->head = CIRC_ADD(cb->head, LOG_SIZE, 1);
			idx++;
			buf_space--;
		}

		wake_up(&cros_ec_debugfs_log_wq);
	}

	mutex_unlock(&debug_info->log_mutex);

resched:
	schedule_delayed_work(&debug_info->log_poll_work,
			      msecs_to_jiffies(LOG_POLL_SEC * 1000));
}
/* Open console_log: stash the debugfs state and mark the file as a stream. */
static int cros_ec_console_log_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;

	return stream_open(inode, file);
}
/*
 * Read console log bytes from the circular buffer.  Blocks (interruptibly)
 * until data is available unless O_NONBLOCK is set.  Returns at most up
 * to the end of the circular buffer; userspace retries for the rest.
 */
static ssize_t cros_ec_console_log_read(struct file *file, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct cros_ec_debugfs *debug_info = file->private_data;
	struct circ_buf *cb = &debug_info->log_buffer;
	ssize_t ret;

	mutex_lock(&debug_info->log_mutex);

	while (!CIRC_CNT(cb->head, cb->tail, LOG_SIZE)) {
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto error;
		}

		/* Drop the mutex while sleeping so the worker can refill. */
		mutex_unlock(&debug_info->log_mutex);

		ret = wait_event_interruptible(cros_ec_debugfs_log_wq,
					CIRC_CNT(cb->head, cb->tail, LOG_SIZE));
		if (ret < 0)
			return ret;

		/* Re-take the lock and re-check for data. */
		mutex_lock(&debug_info->log_mutex);
	}

	/* Only copy until the end of the circular buffer, and let userspace
	 * retry to get the rest of the data.
	 */
	ret = min_t(size_t, CIRC_CNT_TO_END(cb->head, cb->tail, LOG_SIZE),
		    count);

	if (copy_to_user(buf, cb->buf + cb->tail, ret)) {
		ret = -EFAULT;
		goto error;
	}

	cb->tail = CIRC_ADD(cb->tail, LOG_SIZE, ret);

error:
	mutex_unlock(&debug_info->log_mutex);
	return ret;
}
/* Poll console_log: readable whenever the circular buffer is non-empty. */
static __poll_t cros_ec_console_log_poll(struct file *file,
					 poll_table *wait)
{
	struct cros_ec_debugfs *debug_info = file->private_data;
	struct circ_buf *cb = &debug_info->log_buffer;
	__poll_t mask = 0;

	poll_wait(file, &cros_ec_debugfs_log_wq, wait);

	mutex_lock(&debug_info->log_mutex);
	if (CIRC_CNT(cb->head, cb->tail, LOG_SIZE))
		mask = EPOLLIN | EPOLLRDNORM;
	mutex_unlock(&debug_info->log_mutex);

	return mask;
}
/* Close console_log: no per-open state, nothing to tear down. */
static int cros_ec_console_log_release(struct inode *inode, struct file *file)
{
	return 0;
}
/*
 * debugfs "pdinfo" read: query each USB-PD port via
 * EC_CMD_USB_PD_CONTROL v1 and render one status line per port.
 * Stops at the first failing port (typically one past the last real
 * port) and returns the formatted text.
 */
static ssize_t cros_ec_pdinfo_read(struct file *file,
				   char __user *user_buf,
				   size_t count,
				   loff_t *ppos)
{
	/* 40 bytes per port is enough for one formatted status line. */
	char read_buf[EC_USB_PD_MAX_PORTS * 40], *p = read_buf;
	struct cros_ec_debugfs *debug_info = file->private_data;
	struct cros_ec_device *ec_dev = debug_info->ec->ec_dev;
	struct {
		struct cros_ec_command msg;
		union {
			struct ec_response_usb_pd_control_v1 resp;
			struct ec_params_usb_pd_control params;
		};
	} __packed ec_buf;
	struct cros_ec_command *msg;
	struct ec_response_usb_pd_control_v1 *resp;
	struct ec_params_usb_pd_control *params;
	int i;

	msg = &ec_buf.msg;
	params = (struct ec_params_usb_pd_control *)msg->data;
	resp = (struct ec_response_usb_pd_control_v1 *)msg->data;

	msg->command = EC_CMD_USB_PD_CONTROL;
	msg->version = 1;
	msg->insize = sizeof(*resp);
	msg->outsize = sizeof(*params);

	/*
	 * Read status from all PD ports until failure, typically caused
	 * by attempting to read status on a port that doesn't exist.
	 */
	for (i = 0; i < EC_USB_PD_MAX_PORTS; ++i) {
		params->port = i;
		params->role = 0;
		params->mux = 0;
		params->swap = 0;

		if (cros_ec_cmd_xfer_status(ec_dev, msg) < 0)
			break;

		p += scnprintf(p, sizeof(read_buf) + read_buf - p,
			       "p%d: %s en:%.2x role:%.2x pol:%.2x\n", i,
			       resp->state, resp->enabled, resp->role,
			       resp->polarity);
	}

	return simple_read_from_buffer(user_buf, count, ppos,
				       read_buf, p - read_buf);
}
/*
 * Probe whether the EC implements EC_CMD_GET_UPTIME_INFO.  Only a
 * definitive EC_RES_INVALID_COMMAND reply rules support out.
 */
static bool cros_ec_uptime_is_supported(struct cros_ec_device *ec_dev)
{
	struct {
		struct cros_ec_command cmd;
		struct ec_response_uptime_info resp;
	} __packed msg = {};
	int ret;

	msg.cmd.command = EC_CMD_GET_UPTIME_INFO;
	msg.cmd.insize = sizeof(msg.resp);

	ret = cros_ec_cmd_xfer_status(ec_dev, &msg.cmd);
	if (ret == -EPROTO && msg.cmd.result == EC_RES_INVALID_COMMAND)
		return false;

	/* Other errors may be transient; do not conclude lack of support. */
	return true;
}
/*
 * debugfs "uptime" read: return the EC's milliseconds-since-boot as a
 * decimal string.
 */
static ssize_t cros_ec_uptime_read(struct file *file, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct cros_ec_debugfs *debug_info = file->private_data;
	struct cros_ec_device *ec_dev = debug_info->ec->ec_dev;
	struct {
		struct cros_ec_command cmd;
		struct ec_response_uptime_info resp;
	} __packed msg = {};
	struct ec_response_uptime_info *resp;
	char read_buf[32];
	int ret;

	resp = (struct ec_response_uptime_info *)&msg.resp;

	msg.cmd.command = EC_CMD_GET_UPTIME_INFO;
	msg.cmd.insize = sizeof(*resp);

	ret = cros_ec_cmd_xfer_status(ec_dev, &msg.cmd);
	if (ret < 0)
		return ret;

	ret = scnprintf(read_buf, sizeof(read_buf), "%u\n",
			resp->time_since_ec_boot_ms);

	return simple_read_from_buffer(user_buf, count, ppos, read_buf, ret);
}
/* File operations for the debugfs "console_log" entry (stream, pollable). */
static const struct file_operations cros_ec_console_log_fops = {
	.owner = THIS_MODULE,
	.open = cros_ec_console_log_open,
	.read = cros_ec_console_log_read,
	.llseek = no_llseek,
	.poll = cros_ec_console_log_poll,
	.release = cros_ec_console_log_release,
};

/* File operations for the debugfs "pdinfo" entry. */
static const struct file_operations cros_ec_pdinfo_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = cros_ec_pdinfo_read,
	.llseek = default_llseek,
};

/* File operations for the debugfs "uptime" entry. */
static const struct file_operations cros_ec_uptime_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = cros_ec_uptime_read,
	.llseek = default_llseek,
};
/*
 * Check whether the EC supports version 1 of EC_CMD_CONSOLE_READ by
 * querying its command version mask.  Returns non-zero when supported;
 * allocation failure is treated as "not supported".
 */
static int ec_read_version_supported(struct cros_ec_dev *ec)
{
	struct ec_params_get_cmd_versions_v1 *params;
	struct ec_response_get_cmd_versions *response;
	int ret;

	struct cros_ec_command *msg;

	msg = kzalloc(sizeof(*msg) + max(sizeof(*params), sizeof(*response)),
		GFP_KERNEL);
	if (!msg)
		return 0;

	msg->command = EC_CMD_GET_CMD_VERSIONS + ec->cmd_offset;
	msg->outsize = sizeof(*params);
	msg->insize = sizeof(*response);

	params = (struct ec_params_get_cmd_versions_v1 *)msg->data;
	params->cmd = EC_CMD_CONSOLE_READ;
	response = (struct ec_response_get_cmd_versions *)msg->data;

	ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg) >= 0 &&
	      response->version_mask & EC_VER_MASK(1);

	kfree(msg);
	return ret;
}
/*
 * Set up the "console_log" debugfs file: allocate the circular buffer
 * and the reusable CONSOLE_READ message, then kick off the polling
 * worker.  Silently skipped when the EC lacks CONSOLE_READ v1.
 */
static int cros_ec_create_console_log(struct cros_ec_debugfs *debug_info)
{
	struct cros_ec_dev *ec = debug_info->ec;
	char *buf;
	int read_params_size;
	int read_response_size;

	/*
	 * If the console log feature is not supported return silently and
	 * don't create the console_log entry.
	 */
	if (!ec_read_version_supported(ec))
		return 0;

	buf = devm_kzalloc(ec->dev, LOG_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* One message buffer large enough for both request and response. */
	read_params_size = sizeof(struct ec_params_console_read_v1);
	read_response_size = ec->ec_dev->max_response;
	debug_info->read_msg = devm_kzalloc(ec->dev,
		sizeof(*debug_info->read_msg) +
			max(read_params_size, read_response_size), GFP_KERNEL);
	if (!debug_info->read_msg)
		return -ENOMEM;

	debug_info->read_msg->version = 1;
	debug_info->read_msg->command = EC_CMD_CONSOLE_READ + ec->cmd_offset;
	debug_info->read_msg->outsize = read_params_size;
	debug_info->read_msg->insize = read_response_size;

	debug_info->log_buffer.buf = buf;
	debug_info->log_buffer.head = 0;
	debug_info->log_buffer.tail = 0;

	mutex_init(&debug_info->log_mutex);

	debugfs_create_file("console_log", S_IFREG | 0444, debug_info->dir,
			    debug_info, &cros_ec_console_log_fops);

	INIT_DELAYED_WORK(&debug_info->log_poll_work,
			  cros_ec_console_log_work);
	schedule_delayed_work(&debug_info->log_poll_work, 0);

	return 0;
}
/* Stop the log poller and destroy its mutex, if console_log was set up. */
static void cros_ec_cleanup_console_log(struct cros_ec_debugfs *debug_info)
{
	/* log_buffer.buf is only set when console_log was created. */
	if (!debug_info->log_buffer.buf)
		return;

	cancel_delayed_work_sync(&debug_info->log_poll_work);
	mutex_destroy(&debug_info->log_mutex);
}
/*
 * Fetch panic info from the EC into @data.
 *
 * Returns the size of the panicinfo data (as reported by the transfer),
 * or a negative error code.
 */
static int cros_ec_get_panicinfo(struct cros_ec_device *ec_dev, uint8_t *data,
				 int data_size)
{
	int ret;
	struct cros_ec_command *msg;

	if (!data || data_size <= 0 || data_size > ec_dev->max_response)
		return -EINVAL;

	msg = kzalloc(sizeof(*msg) + data_size, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->command = EC_CMD_GET_PANIC_INFO;
	msg->insize = data_size;

	ret = cros_ec_cmd_xfer_status(ec_dev, msg);
	if (ret < 0)
		goto free;

	memcpy(data, msg->data, data_size);

free:
	kfree(msg);
	return ret;
}
/*
 * Create the "panicinfo" debugfs blob when the EC has panic data.
 * A fetch failure or empty panic data is not an error: the entry is
 * simply not created and the buffer is released.
 */
static int cros_ec_create_panicinfo(struct cros_ec_debugfs *debug_info)
{
	struct cros_ec_device *ec_dev = debug_info->ec->ec_dev;
	int ret;
	void *data;

	data = devm_kzalloc(debug_info->ec->dev, ec_dev->max_response,
			    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	ret = cros_ec_get_panicinfo(ec_dev, data, ec_dev->max_response);
	if (ret < 0) {
		/* Treat fetch failures as "no panic data". */
		ret = 0;
		goto free;
	}

	/* No panic data */
	if (ret == 0)
		goto free;

	debug_info->panicinfo_blob.data = data;
	debug_info->panicinfo_blob.size = ret;

	debugfs_create_blob("panicinfo", S_IFREG | 0444, debug_info->dir,
			    &debug_info->panicinfo_blob);

	return 0;

free:
	devm_kfree(debug_info->ec->dev, data);
	return ret;
}
/*
 * Panic notifier: flush any still-buffered EC console output right away
 * so the final log lines are captured before the EC goes down.
 */
static int cros_ec_debugfs_panic_event(struct notifier_block *nb,
				       unsigned long queued_during_suspend, void *_notify)
{
	struct cros_ec_debugfs *debug_info =
		container_of(nb, struct cros_ec_debugfs, notifier_panic);

	if (debug_info->log_buffer.buf) {
		/* Force log poll work to run immediately */
		mod_delayed_work(debug_info->log_poll_work.wq, &debug_info->log_poll_work, 0);
		/* Block until log poll work finishes */
		flush_delayed_work(&debug_info->log_poll_work);
	}

	return NOTIFY_DONE;
}
/*
 * Platform probe: create the per-EC debugfs directory and populate it
 * with panicinfo, console_log, pdinfo, uptime (if supported), and the
 * last_resume_result / suspend_timeout_ms entries; finally hook the
 * panic notifier.  On any failure the whole directory is removed.
 */
static int cros_ec_debugfs_probe(struct platform_device *pd)
{
	struct cros_ec_dev *ec = dev_get_drvdata(pd->dev.parent);
	struct cros_ec_platform *ec_platform = dev_get_platdata(ec->dev);
	const char *name = ec_platform->ec_name;
	struct cros_ec_debugfs *debug_info;
	int ret;

	debug_info = devm_kzalloc(ec->dev, sizeof(*debug_info), GFP_KERNEL);
	if (!debug_info)
		return -ENOMEM;

	debug_info->ec = ec;
	debug_info->dir = debugfs_create_dir(name, NULL);

	ret = cros_ec_create_panicinfo(debug_info);
	if (ret)
		goto remove_debugfs;

	ret = cros_ec_create_console_log(debug_info);
	if (ret)
		goto remove_debugfs;

	debugfs_create_file("pdinfo", 0444, debug_info->dir, debug_info,
			    &cros_ec_pdinfo_fops);

	if (cros_ec_uptime_is_supported(ec->ec_dev))
		debugfs_create_file("uptime", 0444, debug_info->dir, debug_info,
				    &cros_ec_uptime_fops);

	debugfs_create_x32("last_resume_result", 0444, debug_info->dir,
			   &ec->ec_dev->last_resume_result);

	debugfs_create_u16("suspend_timeout_ms", 0664, debug_info->dir,
			   &ec->ec_dev->suspend_timeout_ms);

	debug_info->notifier_panic.notifier_call = cros_ec_debugfs_panic_event;
	ret = blocking_notifier_chain_register(&ec->ec_dev->panic_notifier,
					       &debug_info->notifier_panic);
	if (ret)
		goto remove_debugfs;

	ec->debug_info = debug_info;

	dev_set_drvdata(&pd->dev, ec);

	return 0;

remove_debugfs:
	debugfs_remove_recursive(debug_info->dir);
	return ret;
}
/* Platform remove: delete the debugfs tree, then stop the log worker. */
static int cros_ec_debugfs_remove(struct platform_device *pd)
{
	struct cros_ec_dev *ec = dev_get_drvdata(pd->dev.parent);

	debugfs_remove_recursive(ec->debug_info->dir);
	cros_ec_cleanup_console_log(ec->debug_info);

	return 0;
}
/* Suspend: stop polling the EC console while the system sleeps. */
static int __maybe_unused cros_ec_debugfs_suspend(struct device *dev)
{
	struct cros_ec_dev *ec = dev_get_drvdata(dev);

	if (ec->debug_info->log_buffer.buf)
		cancel_delayed_work_sync(&ec->debug_info->log_poll_work);

	return 0;
}
/* Resume: restart console polling immediately. */
static int __maybe_unused cros_ec_debugfs_resume(struct device *dev)
{
	struct cros_ec_dev *ec = dev_get_drvdata(dev);

	if (ec->debug_info->log_buffer.buf)
		schedule_delayed_work(&ec->debug_info->log_poll_work, 0);

	return 0;
}
/* PM ops and platform driver glue for the debugfs cell. */
static SIMPLE_DEV_PM_OPS(cros_ec_debugfs_pm_ops,
			 cros_ec_debugfs_suspend, cros_ec_debugfs_resume);

static struct platform_driver cros_ec_debugfs_driver = {
	.driver = {
		.name = DRV_NAME,
		.pm = &cros_ec_debugfs_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe = cros_ec_debugfs_probe,
	.remove = cros_ec_debugfs_remove,
};

module_platform_driver(cros_ec_debugfs_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Debug logs for ChromeOS EC");
MODULE_ALIAS("platform:" DRV_NAME);
| linux-master | drivers/platform/chrome/cros_ec_debugfs.c |
// SPDX-License-Identifier: GPL-2.0
// ChromeOS EC communication protocol helper functions
//
// Copyright (C) 2015 Google, Inc
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include "cros_ec_trace.h"
#define EC_COMMAND_RETRIES 50
/* Map EC_RES_* protocol result codes to negative errno values. */
static const int cros_ec_error_map[] = {
	[EC_RES_INVALID_COMMAND] = -EOPNOTSUPP,
	[EC_RES_ERROR] = -EIO,
	[EC_RES_INVALID_PARAM] = -EINVAL,
	[EC_RES_ACCESS_DENIED] = -EACCES,
	[EC_RES_INVALID_RESPONSE] = -EPROTO,
	[EC_RES_INVALID_VERSION] = -ENOPROTOOPT,
	[EC_RES_INVALID_CHECKSUM] = -EBADMSG,
	[EC_RES_IN_PROGRESS] = -EINPROGRESS,
	[EC_RES_UNAVAILABLE] = -ENODATA,
	[EC_RES_TIMEOUT] = -ETIMEDOUT,
	[EC_RES_OVERFLOW] = -EOVERFLOW,
	[EC_RES_INVALID_HEADER] = -EBADR,
	[EC_RES_REQUEST_TRUNCATED] = -EBADR,
	[EC_RES_RESPONSE_TOO_BIG] = -EFBIG,
	[EC_RES_BUS_ERROR] = -EFAULT,
	[EC_RES_BUSY] = -EBUSY,
	[EC_RES_INVALID_HEADER_VERSION] = -EBADMSG,
	[EC_RES_INVALID_HEADER_CRC] = -EBADMSG,
	[EC_RES_INVALID_DATA_CRC] = -EBADMSG,
	[EC_RES_DUP_UNAVAILABLE] = -ENODATA,
};
/*
 * Translate an EC_RES_* result into a negative errno (0 on success,
 * -EPROTO for codes with no specific mapping).
 */
static int cros_ec_map_error(uint32_t result)
{
	if (result == EC_RES_SUCCESS)
		return 0;

	if (result < ARRAY_SIZE(cros_ec_error_map) && cros_ec_error_map[result])
		return cros_ec_error_map[result];

	return -EPROTO;
}
/*
 * Serialize a v3+ host command into ec_dev->dout: an ec_host_request
 * header followed by the payload, with an 8-bit checksum chosen so the
 * sum of all bytes is zero.  Returns the total byte count or -EINVAL
 * if it will not fit.
 */
static int prepare_tx(struct cros_ec_device *ec_dev,
		      struct cros_ec_command *msg)
{
	struct ec_host_request *request;
	u8 *out;
	int i;
	u8 csum = 0;

	if (msg->outsize + sizeof(*request) > ec_dev->dout_size)
		return -EINVAL;

	out = ec_dev->dout;
	request = (struct ec_host_request *)out;
	request->struct_version = EC_HOST_REQUEST_VERSION;
	request->checksum = 0;
	request->command = msg->command;
	request->command_version = msg->version;
	request->reserved = 0;
	request->data_len = msg->outsize;

	/* Sum the header bytes (checksum field is still 0 here). */
	for (i = 0; i < sizeof(*request); i++)
		csum += out[i];

	/* Copy data and update checksum */
	memcpy(out + sizeof(*request), msg->data, msg->outsize);
	for (i = 0; i < msg->outsize; i++)
		csum += msg->data[i];

	/* Two's complement so the full packet sums to zero. */
	request->checksum = -csum;

	return sizeof(*request) + msg->outsize;
}
/*
 * Serialize a protocol-v2 host command into ec_dev->dout:
 * [version byte][command][outsize][payload...][additive checksum].
 * Returns the total byte count or -EINVAL if the payload is too big.
 */
static int prepare_tx_legacy(struct cros_ec_device *ec_dev,
			     struct cros_ec_command *msg)
{
	u8 *out;
	u8 csum;
	int i;

	if (msg->outsize > EC_PROTO2_MAX_PARAM_SIZE)
		return -EINVAL;

	out = ec_dev->dout;
	out[0] = EC_CMD_VERSION0 + msg->version;
	out[1] = msg->command;
	out[2] = msg->outsize;
	csum = out[0] + out[1] + out[2];
	for (i = 0; i < msg->outsize; i++)
		csum += out[EC_MSG_TX_HEADER_BYTES + i] = msg->data[i];
	out[EC_MSG_TX_HEADER_BYTES + msg->outsize] = csum;

	return EC_MSG_TX_PROTO_BYTES + msg->outsize;
}
/*
 * Dispatch one command via the transport appropriate for the negotiated
 * protocol (pkt_xfer for v3+, cmd_xfer for v2), with tracing around the
 * call.  -EIO if the transport lacks the required callback.
 */
static int cros_ec_xfer_command(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
{
	int ret;
	int (*xfer_fxn)(struct cros_ec_device *ec, struct cros_ec_command *msg);

	if (ec_dev->proto_version > 2)
		xfer_fxn = ec_dev->pkt_xfer;
	else
		xfer_fxn = ec_dev->cmd_xfer;

	if (!xfer_fxn) {
		/*
		 * This error can happen if a communication error happened and
		 * the EC is trying to use protocol v2, on an underlying
		 * communication mechanism that does not support v2.
		 */
		dev_err_once(ec_dev->dev, "missing EC transfer API, cannot send command\n");
		return -EIO;
	}

	trace_cros_ec_request_start(msg);
	ret = (*xfer_fxn)(ec_dev, msg);
	trace_cros_ec_request_done(msg, ret);

	return ret;
}
/*
 * Poll EC_CMD_GET_COMMS_STATUS until the EC stops reporting PROCESSING,
 * up to EC_COMMAND_RETRIES attempts with a ~10 ms delay each.
 * On return, *result holds the last EC result code; -EAGAIN if the EC
 * was still busy after all retries, -EPROTO on an empty response.
 */
static int cros_ec_wait_until_complete(struct cros_ec_device *ec_dev, uint32_t *result)
{
	struct {
		struct cros_ec_command msg;
		struct ec_response_get_comms_status status;
	} __packed buf;
	struct cros_ec_command *msg = &buf.msg;
	struct ec_response_get_comms_status *status = &buf.status;
	int ret = 0, i;

	msg->version = 0;
	msg->command = EC_CMD_GET_COMMS_STATUS;
	msg->insize = sizeof(*status);
	msg->outsize = 0;

	/* Query the EC's status until it's no longer busy or we encounter an error. */
	for (i = 0; i < EC_COMMAND_RETRIES; ++i) {
		usleep_range(10000, 11000);

		ret = cros_ec_xfer_command(ec_dev, msg);
		if (ret == -EAGAIN)
			continue;
		if (ret < 0)
			return ret;

		*result = msg->result;
		if (msg->result != EC_RES_SUCCESS)
			return ret;

		if (ret == 0) {
			/* Zero-length response: protocol error. */
			ret = -EPROTO;
			break;
		}

		if (!(status->flags & EC_COMMS_STATUS_PROCESSING))
			return ret;
	}

	if (i >= EC_COMMAND_RETRIES)
		ret = -EAGAIN;

	return ret;
}
/*
 * Send one host command; if the EC answers IN_PROGRESS, poll until the
 * deferred operation completes and return that final status instead.
 */
static int cros_ec_send_command(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
{
	int ret;

	ret = cros_ec_xfer_command(ec_dev, msg);
	if (msg->result == EC_RES_IN_PROGRESS)
		ret = cros_ec_wait_until_complete(ec_dev, &msg->result);

	return ret;
}
/**
 * cros_ec_prepare_tx() - Prepare an outgoing message in the output buffer.
 * @ec_dev: Device to register.
 * @msg: Message to write.
 *
 * This is used by all ChromeOS EC drivers to prepare the outgoing message
 * according to different protocol versions.
 *
 * Return: number of prepared bytes on success or negative error code.
 */
int cros_ec_prepare_tx(struct cros_ec_device *ec_dev,
		       struct cros_ec_command *msg)
{
	/* v2 and earlier use the legacy framing; v3+ the packet header. */
	if (ec_dev->proto_version <= 2)
		return prepare_tx_legacy(ec_dev, msg);

	return prepare_tx(ec_dev, msg);
}
EXPORT_SYMBOL(cros_ec_prepare_tx);
/**
 * cros_ec_check_result() - Check ec_msg->result.
 * @ec_dev: EC device.
 * @msg: Message to check.
 *
 * This is used by ChromeOS EC drivers to check the ec_msg->result for
 * EC_RES_IN_PROGRESS and to warn about them.
 *
 * The function should not check for furthermore error codes. Otherwise,
 * it would break the ABI.
 *
 * Return: -EAGAIN if ec_msg->result == EC_RES_IN_PROGRESS. Otherwise, 0.
 */
int cros_ec_check_result(struct cros_ec_device *ec_dev,
			 struct cros_ec_command *msg)
{
	if (msg->result == EC_RES_IN_PROGRESS) {
		dev_dbg(ec_dev->dev, "command 0x%02x in progress\n",
			msg->command);
		return -EAGAIN;
	}

	/* Any other non-success result is logged but not treated as fatal. */
	if (msg->result != EC_RES_SUCCESS)
		dev_dbg(ec_dev->dev, "command 0x%02x returned %d\n",
			msg->command, msg->result);

	return 0;
}
EXPORT_SYMBOL(cros_ec_check_result);
/*
 * cros_ec_get_host_event_wake_mask
 *
 * Get the mask of host events that cause wake from suspend.
 *
 * @ec_dev: EC device to call
 * @mask: result when function returns 0.
 *
 * LOCKING:
 * the caller has ec_dev->lock mutex, or the caller knows there is
 * no other command in progress.
 */
static int cros_ec_get_host_event_wake_mask(struct cros_ec_device *ec_dev, uint32_t *mask)
{
	struct cros_ec_command *msg;
	struct ec_response_host_event_mask *r;
	int ret, mapped;

	msg = kzalloc(sizeof(*msg) + sizeof(*r), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->command = EC_CMD_HOST_EVENT_GET_WAKE_MASK;
	msg->insize = sizeof(*r);

	ret = cros_ec_send_command(ec_dev, msg);
	if (ret < 0)
		goto exit;

	mapped = cros_ec_map_error(msg->result);
	if (mapped) {
		ret = mapped;
		goto exit;
	}

	if (ret == 0) {
		/* Empty response: protocol error. */
		ret = -EPROTO;
		goto exit;
	}

	r = (struct ec_response_host_event_mask *)msg->data;
	*mask = r->mask;
	ret = 0;
exit:
	kfree(msg);
	return ret;
}
/*
 * Query EC_CMD_GET_PROTOCOL_INFO for the given passthru device index and
 * record the negotiated protocol limits: max request/response and buffer
 * sizes for the main EC, or max_passthru for a PD chip.  Retries once on
 * timeout to cover FPMCU restarts during boot.
 */
static int cros_ec_get_proto_info(struct cros_ec_device *ec_dev, int devidx)
{
	struct cros_ec_command *msg;
	struct ec_response_get_protocol_info *info;
	int ret, mapped;

	/* Assume v3 until proven otherwise; no passthru unless found. */
	ec_dev->proto_version = 3;
	if (devidx > 0)
		ec_dev->max_passthru = 0;

	msg = kzalloc(sizeof(*msg) + sizeof(*info), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->command = EC_CMD_PASSTHRU_OFFSET(devidx) | EC_CMD_GET_PROTOCOL_INFO;
	msg->insize = sizeof(*info);

	ret = cros_ec_send_command(ec_dev, msg);
	/*
	 * Send command once again when timeout occurred.
	 * Fingerprint MCU (FPMCU) is restarted during system boot which
	 * introduces small window in which FPMCU won't respond for any
	 * messages sent by kernel. There is no need to wait before next
	 * attempt because we waited at least EC_MSG_DEADLINE_MS.
	 */
	if (ret == -ETIMEDOUT)
		ret = cros_ec_send_command(ec_dev, msg);

	if (ret < 0) {
		dev_dbg(ec_dev->dev,
			"failed to check for EC[%d] protocol version: %d\n",
			devidx, ret);
		goto exit;
	}

	mapped = cros_ec_map_error(msg->result);
	if (mapped) {
		ret = mapped;
		goto exit;
	}

	if (ret == 0) {
		ret = -EPROTO;
		goto exit;
	}

	info = (struct ec_response_get_protocol_info *)msg->data;

	switch (devidx) {
	case CROS_EC_DEV_EC_INDEX:
		/* Payload limits exclude the per-packet headers. */
		ec_dev->max_request = info->max_request_packet_size -
							sizeof(struct ec_host_request);
		ec_dev->max_response = info->max_response_packet_size -
							sizeof(struct ec_host_response);
		ec_dev->proto_version = min(EC_HOST_REQUEST_VERSION,
					    fls(info->protocol_versions) - 1);
		ec_dev->din_size = info->max_response_packet_size + EC_MAX_RESPONSE_OVERHEAD;
		ec_dev->dout_size = info->max_request_packet_size + EC_MAX_REQUEST_OVERHEAD;

		dev_dbg(ec_dev->dev, "using proto v%u\n", ec_dev->proto_version);
		break;
	case CROS_EC_DEV_PD_INDEX:
		ec_dev->max_passthru = info->max_request_packet_size -
							sizeof(struct ec_host_request);

		dev_dbg(ec_dev->dev, "found PD chip\n");
		break;
	default:
		dev_dbg(ec_dev->dev, "unknown passthru index: %d\n", devidx);
		break;
	}

	ret = 0;
exit:
	kfree(msg);
	return ret;
}
/*
 * Fall back to protocol v2: send EC_CMD_HELLO with a known pattern and
 * verify the expected transformed reply (0xa0b0c0d0 -> 0xa1b2c3d4).
 * On success, configure the device for the fixed v2 limits.
 */
static int cros_ec_get_proto_info_legacy(struct cros_ec_device *ec_dev)
{
	struct cros_ec_command *msg;
	struct ec_params_hello *params;
	struct ec_response_hello *response;
	int ret, mapped;

	ec_dev->proto_version = 2;

	msg = kzalloc(sizeof(*msg) + max(sizeof(*params), sizeof(*response)), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->command = EC_CMD_HELLO;
	msg->insize = sizeof(*response);
	msg->outsize = sizeof(*params);

	params = (struct ec_params_hello *)msg->data;
	params->in_data = 0xa0b0c0d0;

	ret = cros_ec_send_command(ec_dev, msg);
	if (ret < 0) {
		dev_dbg(ec_dev->dev, "EC failed to respond to v2 hello: %d\n", ret);
		goto exit;
	}

	mapped = cros_ec_map_error(msg->result);
	if (mapped) {
		ret = mapped;
		dev_err(ec_dev->dev, "EC responded to v2 hello with error: %d\n", msg->result);
		goto exit;
	}

	if (ret == 0) {
		ret = -EPROTO;
		goto exit;
	}

	response = (struct ec_response_hello *)msg->data;
	if (response->out_data != 0xa1b2c3d4) {
		dev_err(ec_dev->dev,
			"EC responded to v2 hello with bad result: %u\n",
			response->out_data);
		ret = -EBADMSG;
		goto exit;
	}

	/* Fixed v2 limits; no packet transfer, no passthru devices. */
	ec_dev->max_request = EC_PROTO2_MAX_PARAM_SIZE;
	ec_dev->max_response = EC_PROTO2_MAX_PARAM_SIZE;
	ec_dev->max_passthru = 0;
	ec_dev->pkt_xfer = NULL;
	ec_dev->din_size = EC_PROTO2_MSG_BYTES;
	ec_dev->dout_size = EC_PROTO2_MSG_BYTES;

	dev_dbg(ec_dev->dev, "falling back to proto v2\n");
	ret = 0;
exit:
	kfree(msg);
	return ret;
}
/*
 * cros_ec_get_host_command_version_mask
 *
 * Get the version mask of a given command.
 *
 * @ec_dev: EC device to call
 * @cmd: command to get the version of.
 * @mask: result when function returns 0.
 *
 * @return 0 on success, error code otherwise
 *
 * LOCKING:
 * the caller has ec_dev->lock mutex or the caller knows there is
 * no other command in progress.
 */
static int cros_ec_get_host_command_version_mask(struct cros_ec_device *ec_dev, u16 cmd, u32 *mask)
{
	struct ec_params_get_cmd_versions *pver;
	struct ec_response_get_cmd_versions *rver;
	struct cros_ec_command *msg;
	int ret, mapped;

	msg = kmalloc(sizeof(*msg) + max(sizeof(*rver), sizeof(*pver)),
		      GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->version = 0;
	msg->command = EC_CMD_GET_CMD_VERSIONS;
	msg->insize = sizeof(*rver);
	msg->outsize = sizeof(*pver);

	pver = (struct ec_params_get_cmd_versions *)msg->data;
	pver->cmd = cmd;

	ret = cros_ec_send_command(ec_dev, msg);
	if (ret < 0)
		goto exit;

	mapped = cros_ec_map_error(msg->result);
	if (mapped) {
		ret = mapped;
		goto exit;
	}

	if (ret == 0) {
		/* Empty response: protocol error. */
		ret = -EPROTO;
		goto exit;
	}

	rver = (struct ec_response_get_cmd_versions *)msg->data;
	*mask = rver->version_mask;
	ret = 0;
exit:
	kfree(msg);
	return ret;
}
/**
 * cros_ec_query_all() - Query the protocol version supported by the
 *	ChromeOS EC.
 * @ec_dev: Device to register.
 *
 * Probes protocol v3 first (both EC and PD indexes), falling back to the
 * legacy v2 hello handshake. On success the transfer buffers are resized to
 * the probed sizes and the MKBP event, host-sleep-v1 and host-event wake
 * mask capabilities are cached on @ec_dev.
 *
 * Return: 0 on success or negative error code.
 */
int cros_ec_query_all(struct cros_ec_device *ec_dev)
{
	struct device *dev = ec_dev->dev;
	u32 ver_mask;
	int ret;
	/* First try sending with proto v3. */
	if (!cros_ec_get_proto_info(ec_dev, CROS_EC_DEV_EC_INDEX)) {
		/* Check for PD. */
		cros_ec_get_proto_info(ec_dev, CROS_EC_DEV_PD_INDEX);
	} else {
		/* Try querying with a v2 hello message. */
		ret = cros_ec_get_proto_info_legacy(ec_dev);
		if (ret) {
			/*
			 * It's possible for a test to occur too early when
			 * the EC isn't listening. If this happens, we'll
			 * test later when the first command is run.
			 */
			ec_dev->proto_version = EC_PROTO_VERSION_UNKNOWN;
			dev_dbg(ec_dev->dev, "EC query failed: %d\n", ret);
			return ret;
		}
	}
	/* Re-size the transfer buffers to match the probed protocol sizes. */
	devm_kfree(dev, ec_dev->din);
	devm_kfree(dev, ec_dev->dout);
	ec_dev->din = devm_kzalloc(dev, ec_dev->din_size, GFP_KERNEL);
	if (!ec_dev->din) {
		ret = -ENOMEM;
		goto exit;
	}
	ec_dev->dout = devm_kzalloc(dev, ec_dev->dout_size, GFP_KERNEL);
	if (!ec_dev->dout) {
		devm_kfree(dev, ec_dev->din);
		ret = -ENOMEM;
		goto exit;
	}
	/* Probe if MKBP event is supported */
	ret = cros_ec_get_host_command_version_mask(ec_dev, EC_CMD_GET_NEXT_EVENT, &ver_mask);
	if (ret < 0 || ver_mask == 0) {
		ec_dev->mkbp_event_supported = 0;
	} else {
		/* mkbp_event_supported stores highest supported version + 1 */
		ec_dev->mkbp_event_supported = fls(ver_mask);
		dev_dbg(ec_dev->dev, "MKBP support version %u\n", ec_dev->mkbp_event_supported - 1);
	}
	/* Probe if host sleep v1 is supported for S0ix failure detection. */
	ret = cros_ec_get_host_command_version_mask(ec_dev, EC_CMD_HOST_SLEEP_EVENT, &ver_mask);
	ec_dev->host_sleep_v1 = (ret == 0 && (ver_mask & EC_VER_MASK(1)));
	/* Get host event wake mask. */
	ret = cros_ec_get_host_event_wake_mask(ec_dev, &ec_dev->host_event_wake_mask);
	if (ret < 0) {
		/*
		 * If the EC doesn't support EC_CMD_HOST_EVENT_GET_WAKE_MASK,
		 * use a reasonable default. Note that we ignore various
		 * battery, AC status, and power-state events, because (a)
		 * those can be quite common (e.g., when sitting at full
		 * charge, on AC) and (b) these are not actionable wake events;
		 * if anything, we'd like to continue suspending (to save
		 * power), not wake up.
		 */
		ec_dev->host_event_wake_mask = U32_MAX &
			~(EC_HOST_EVENT_MASK(EC_HOST_EVENT_LID_CLOSED) |
			  EC_HOST_EVENT_MASK(EC_HOST_EVENT_AC_DISCONNECTED) |
			  EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_LOW) |
			  EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_CRITICAL) |
			  EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY) |
			  EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU) |
			  EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_STATUS));
		/*
		 * Old ECs may not support this command. Complain about all
		 * other errors.
		 */
		if (ret != -EOPNOTSUPP)
			dev_err(ec_dev->dev,
				"failed to retrieve wake mask: %d\n", ret);
	}
	ret = 0;
exit:
	return ret;
}
EXPORT_SYMBOL(cros_ec_query_all);
/**
 * cros_ec_cmd_xfer() - Send a command to the ChromeOS EC.
 * @ec_dev: EC device.
 * @msg: Message to write.
 *
 * Serialized transport entry point for EC host commands; use this instead
 * of the EC's cmd_xfer() callback directly. If the protocol version has not
 * been probed yet it is queried first. The receive size is clamped to what
 * the EC can return, and over-size requests are rejected before touching
 * the wire. EC command execution error codes are NOT converted to Linux
 * error codes; most in-kernel users will want cros_ec_cmd_xfer_status()
 * instead since that function implements the conversion.
 *
 * Return:
 * >0 - EC command was executed successfully. The return value is the number
 *      of bytes returned by the EC (excluding the header).
 * =0 - EC communication was successful. EC command execution results are
 *      reported in msg->result. The result will be EC_RES_SUCCESS if the
 *      command was executed successfully or report an EC command execution
 *      error.
 * <0 - EC communication error. Return value is the Linux error code.
 */
int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, struct cros_ec_command *msg)
{
	int ret = 0;

	mutex_lock(&ec_dev->lock);

	/* Lazily probe the protocol if it is still unknown. */
	if (ec_dev->proto_version == EC_PROTO_VERSION_UNKNOWN) {
		ret = cros_ec_query_all(ec_dev);
		if (ret) {
			dev_err(ec_dev->dev,
				"EC version unknown and query failed; aborting command\n");
			goto unlock;
		}
	}

	if (msg->insize > ec_dev->max_response) {
		dev_dbg(ec_dev->dev, "clamping message receive buffer\n");
		msg->insize = ec_dev->max_response;
	}

	/* Passthru (e.g. PD) commands have their own, usually smaller, limit. */
	if (msg->command < EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX)) {
		if (msg->outsize > ec_dev->max_request) {
			dev_err(ec_dev->dev,
				"request of size %u is too big (max: %u)\n",
				msg->outsize,
				ec_dev->max_request);
			ret = -EMSGSIZE;
			goto unlock;
		}
	} else if (msg->outsize > ec_dev->max_passthru) {
		dev_err(ec_dev->dev,
			"passthru rq of size %u is too big (max: %u)\n",
			msg->outsize,
			ec_dev->max_passthru);
		ret = -EMSGSIZE;
		goto unlock;
	}

	ret = cros_ec_send_command(ec_dev, msg);
unlock:
	mutex_unlock(&ec_dev->lock);
	return ret;
}
EXPORT_SYMBOL(cros_ec_cmd_xfer);
/**
 * cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC.
 * @ec_dev: EC device.
 * @msg: Message to write.
 *
 * Like cros_ec_cmd_xfer(), but additionally maps an EC-reported execution
 * error (msg->result) onto a Linux error code. It therefore only returns a
 * non-negative value when both the transfer succeeded and the EC reported
 * success.
 *
 * Return:
 * >=0 - The number of bytes transferred.
 * <0 - Linux error code
 */
int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
			    struct cros_ec_command *msg)
{
	int err;
	int ret = cros_ec_cmd_xfer(ec_dev, msg);

	if (ret < 0)
		return ret;

	/* Transfer worked; now fold the EC's own status into the return. */
	err = cros_ec_map_error(msg->result);
	if (!err)
		return ret;

	dev_dbg(ec_dev->dev, "Command result (err: %d [%d])\n",
		msg->result, err);
	return err;
}
EXPORT_SYMBOL(cros_ec_cmd_xfer_status);
/* Issue one EC_CMD_GET_NEXT_EVENT transfer and latch the result on @ec_dev. */
static int get_next_event_xfer(struct cros_ec_device *ec_dev,
			       struct cros_ec_command *msg,
			       struct ec_response_get_next_event_v1 *event,
			       int version, uint32_t size)
{
	int rc;

	msg->command = EC_CMD_GET_NEXT_EVENT;
	msg->version = version;
	msg->outsize = 0;
	msg->insize = size;

	rc = cros_ec_cmd_xfer_status(ec_dev, msg);
	if (rc > 0) {
		/* First received byte is the event type, the rest is payload. */
		ec_dev->event_size = rc - 1;
		ec_dev->event_data = *event;
	}

	return rc;
}
static int get_next_event(struct cros_ec_device *ec_dev)
{
struct {
struct cros_ec_command msg;
struct ec_response_get_next_event_v1 event;
} __packed buf;
struct cros_ec_command *msg = &buf.msg;
struct ec_response_get_next_event_v1 *event = &buf.event;
const int cmd_version = ec_dev->mkbp_event_supported - 1;
memset(msg, 0, sizeof(*msg));
if (ec_dev->suspended) {
dev_dbg(ec_dev->dev, "Device suspended.\n");
return -EHOSTDOWN;
}
if (cmd_version == 0)
return get_next_event_xfer(ec_dev, msg, event, 0,
sizeof(struct ec_response_get_next_event));
return get_next_event_xfer(ec_dev, msg, event, cmd_version,
sizeof(struct ec_response_get_next_event_v1));
}
/*
 * Legacy (no-MKBP) event fetch: read the raw key matrix with
 * EC_CMD_MKBP_STATE and present it as an EC_MKBP_EVENT_KEY_MATRIX event.
 */
static int get_keyboard_state_event(struct cros_ec_device *ec_dev)
{
	u8 buffer[sizeof(struct cros_ec_command) +
		  sizeof(ec_dev->event_data.data)];
	struct cros_ec_command *msg = (struct cros_ec_command *)&buffer;
	msg->version = 0;
	msg->command = EC_CMD_MKBP_STATE;
	msg->insize = sizeof(ec_dev->event_data.data);
	msg->outsize = 0;
	/*
	 * NOTE(review): on transfer failure event_size is stored negative and
	 * stale msg->data is still copied below; callers appear to treat the
	 * negative return as the error signal — confirm.
	 */
	ec_dev->event_size = cros_ec_cmd_xfer_status(ec_dev, msg);
	ec_dev->event_data.event_type = EC_MKBP_EVENT_KEY_MATRIX;
	memcpy(&ec_dev->event_data.data, msg->data,
	       sizeof(ec_dev->event_data.data));
	return ec_dev->event_size;
}
/**
 * cros_ec_get_next_event() - Fetch next event from the ChromeOS EC.
 * @ec_dev: Device to fetch event from.
 * @wake_event: Pointer to a bool set to true upon return if the event might be
 *              treated as a wake event. Ignored if null.
 * @has_more_events: Pointer to bool set to true if more than one event is
 *              pending.
 *              Some EC will set this flag to indicate cros_ec_get_next_event()
 *              can be called multiple times in a row.
 *              It is an optimization to prevent issuing a EC command for
 *              nothing or wait for another interrupt from the EC to process
 *              the next message.
 *              Ignored if null.
 *
 * Return: negative error code on errors; 0 for no data; or else number of
 * bytes received (i.e., an event was retrieved successfully). Event types are
 * written out to @ec_dev->event_data.event_type on success.
 */
int cros_ec_get_next_event(struct cros_ec_device *ec_dev,
			   bool *wake_event,
			   bool *has_more_events)
{
	u8 event_type;
	u32 host_event;
	int ret;
	u32 ver_mask;
	/*
	 * Default value for wake_event.
	 * Wake up on keyboard event, wake up for spurious interrupt or link
	 * error to the EC.
	 */
	if (wake_event)
		*wake_event = true;
	/*
	 * Default value for has_more_events.
	 * EC will raise another interrupt if AP does not process all events
	 * anyway.
	 */
	if (has_more_events)
		*has_more_events = false;
	/* Without MKBP the only event source is the raw keyboard matrix. */
	if (!ec_dev->mkbp_event_supported)
		return get_keyboard_state_event(ec_dev);
	ret = get_next_event(ec_dev);
	/*
	 * -ENOPROTOOPT is returned when EC returns EC_RES_INVALID_VERSION.
	 * This can occur when EC based device (e.g. Fingerprint MCU) jumps to
	 * the RO image which doesn't support newer version of the command. In
	 * this case we will attempt to update maximum supported version of the
	 * EC_CMD_GET_NEXT_EVENT.
	 */
	if (ret == -ENOPROTOOPT) {
		dev_dbg(ec_dev->dev,
			"GET_NEXT_EVENT returned invalid version error.\n");
		ret = cros_ec_get_host_command_version_mask(ec_dev,
							EC_CMD_GET_NEXT_EVENT,
							&ver_mask);
		if (ret < 0 || ver_mask == 0)
			/*
			 * Do not change the MKBP supported version if we can't
			 * obtain supported version correctly. Please note that
			 * calling EC_CMD_GET_NEXT_EVENT returned
			 * EC_RES_INVALID_VERSION which means that the command
			 * is present.
			 */
			return -ENOPROTOOPT;
		ec_dev->mkbp_event_supported = fls(ver_mask);
		dev_dbg(ec_dev->dev, "MKBP support version changed to %u\n",
			ec_dev->mkbp_event_supported - 1);
		/* Try to get next event with new MKBP support version set. */
		ret = get_next_event(ec_dev);
	}
	if (ret <= 0)
		return ret;
	/* The "more events" flag is piggy-backed on the event type byte. */
	if (has_more_events)
		*has_more_events = ec_dev->event_data.event_type &
			EC_MKBP_HAS_MORE_EVENTS;
	ec_dev->event_data.event_type &= EC_MKBP_EVENT_TYPE_MASK;
	if (wake_event) {
		event_type = ec_dev->event_data.event_type;
		host_event = cros_ec_get_host_event(ec_dev);
		/*
		 * Sensor events need to be parsed by the sensor sub-device.
		 * Defer them, and don't report the wakeup here.
		 */
		if (event_type == EC_MKBP_EVENT_SENSOR_FIFO) {
			*wake_event = false;
		} else if (host_event) {
			/* rtc_update_irq() already handles wakeup events. */
			if (host_event & EC_HOST_EVENT_MASK(EC_HOST_EVENT_RTC))
				*wake_event = false;
			/* Masked host-events should not count as wake events. */
			if (!(host_event & ec_dev->host_event_wake_mask))
				*wake_event = false;
		}
	}
	return ret;
}
EXPORT_SYMBOL(cros_ec_get_next_event);
/**
* cros_ec_get_host_event() - Return a mask of event set by the ChromeOS EC.
* @ec_dev: Device to fetch event from.
*
* When MKBP is supported, when the EC raises an interrupt, we collect the
* events raised and call the functions in the ec notifier. This function
* is a helper to know which events are raised.
*
* Return: 0 on error or non-zero bitmask of one or more EC_HOST_EVENT_*.
*/
u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev)
{
u32 host_event;
if (!ec_dev->mkbp_event_supported)
return 0;
if (ec_dev->event_data.event_type != EC_MKBP_EVENT_HOST_EVENT)
return 0;
if (ec_dev->event_size != sizeof(host_event)) {
dev_warn(ec_dev->dev, "Invalid host event size\n");
return 0;
}
host_event = get_unaligned_le32(&ec_dev->event_data.data.host_event);
return host_event;
}
EXPORT_SYMBOL(cros_ec_get_host_event);
/**
 * cros_ec_check_features() - Test for the presence of EC features
 *
 * @ec: EC device, does not have to be connected directly to the AP,
 *      can be daisy chained through another device.
 * @feature: One of ec_feature_code bit.
 *
 * Call this function to test whether the ChromeOS EC supports a feature.
 * The feature bitmap is fetched from the EC on first use and cached in
 * @ec->features for subsequent calls.
 *
 * Return: true if supported, false if not (or if an error was encountered).
 */
bool cros_ec_check_features(struct cros_ec_dev *ec, int feature)
{
	struct ec_response_get_features *features = &ec->features;
	int ret;
	/* An all-ones bitmap is the "not fetched yet" sentinel. */
	if (features->flags[0] == -1U && features->flags[1] == -1U) {
		/* features bitmap not read yet */
		ret = cros_ec_cmd(ec->ec_dev, 0, EC_CMD_GET_FEATURES + ec->cmd_offset,
				  NULL, 0, features, sizeof(*features));
		if (ret < 0) {
			/* On failure, report "no features" instead of erroring out. */
			dev_warn(ec->dev, "cannot get EC features: %d\n", ret);
			memset(features, 0, sizeof(*features));
		}
		dev_dbg(ec->dev, "EC features %08x %08x\n",
			features->flags[0], features->flags[1]);
	}
	/*
	 * NOTE(review): assumes EC_FEATURE_MASK_0() reduces @feature modulo
	 * 32, making it valid for both flag words — confirm in ec_commands.h.
	 */
	return !!(features->flags[feature / 32] & EC_FEATURE_MASK_0(feature));
}
EXPORT_SYMBOL_GPL(cros_ec_check_features);
/**
 * cros_ec_get_sensor_count() - Return the number of MEMS sensors supported.
 *
 * @ec: EC device, does not have to be connected directly to the AP,
 *      can be daisy chained through another device.
 * Return: < 0 in case of error.
 */
int cros_ec_get_sensor_count(struct cros_ec_dev *ec)
{
	/*
	 * Issue a command to get the number of sensor reported.
	 * If not supported, check for legacy mode.
	 */
	int ret, sensor_count;
	struct ec_params_motion_sense *params;
	struct ec_response_motion_sense *resp;
	struct cros_ec_command *msg;
	struct cros_ec_device *ec_dev = ec->ec_dev;
	u8 status;
	msg = kzalloc(sizeof(*msg) + max(sizeof(*params), sizeof(*resp)),
		      GFP_KERNEL);
	if (!msg)
		return -ENOMEM;
	msg->version = 1;
	msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
	msg->outsize = sizeof(*params);
	msg->insize = sizeof(*resp);
	params = (struct ec_params_motion_sense *)msg->data;
	params->cmd = MOTIONSENSE_CMD_DUMP;
	ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
	if (ret < 0) {
		/* Keep the error; the legacy path below may still rescue it. */
		sensor_count = ret;
	} else {
		resp = (struct ec_response_motion_sense *)msg->data;
		sensor_count = resp->dump.sensor_count;
	}
	kfree(msg);
	/*
	 * Check legacy mode: Let's find out if sensors are accessible
	 * via LPC interface.
	 */
	if (sensor_count < 0 && ec->cmd_offset == 0 && ec_dev->cmd_readmem) {
		ret = ec_dev->cmd_readmem(ec_dev, EC_MEMMAP_ACC_STATUS,
					  1, &status);
		if (ret >= 0 &&
		    (status & EC_MEMMAP_ACC_STATUS_PRESENCE_BIT)) {
			/*
			 * We have 2 sensors, one in the lid, one in the base.
			 */
			sensor_count = 2;
		} else {
			/*
			 * EC uses LPC interface and no sensors are presented.
			 */
			sensor_count = 0;
		}
	}
	return sensor_count;
}
EXPORT_SYMBOL_GPL(cros_ec_get_sensor_count);
/**
 * cros_ec_cmd - Send a command to the EC.
 *
 * @ec_dev: EC device
 * @version: EC command version
 * @command: EC command
 * @outdata: EC command output data
 * @outsize: Size of outdata
 * @indata: EC command input data
 * @insize: Size of indata
 *
 * Convenience wrapper that allocates a message buffer, copies @outdata in,
 * performs the transfer with status mapping, and copies the reply back into
 * @indata on success.
 *
 * Return: >= 0 on success, negative error number on failure.
 */
int cros_ec_cmd(struct cros_ec_device *ec_dev,
		unsigned int version,
		int command,
		void *outdata,
		size_t outsize,
		void *indata,
		size_t insize)
{
	int status;
	struct cros_ec_command *msg;

	/* Single buffer is reused for request and reply. */
	msg = kzalloc(sizeof(*msg) + max(insize, outsize), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->version = version;
	msg->command = command;
	msg->outsize = outsize;
	msg->insize = insize;

	if (outsize)
		memcpy(msg->data, outdata, outsize);

	status = cros_ec_cmd_xfer_status(ec_dev, msg);
	if (status >= 0 && insize)
		memcpy(indata, msg->data, insize);

	kfree(msg);
	return status;
}
EXPORT_SYMBOL_GPL(cros_ec_cmd);
| linux-master | drivers/platform/chrome/cros_ec_proto.c |
// SPDX-License-Identifier: GPL-2.0
// LPC interface for ChromeOS Embedded Controller
//
// Copyright (C) 2012-2015 Google, Inc
//
// This driver uses the ChromeOS EC byte-level message-based protocol for
// communicating the keyboard state (which keys are pressed) from a keyboard EC
// to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing,
// but everything else (including deghosting) is done here. The main
// motivation for this is to keep the EC firmware as simple as possible, since
// it cannot be easily upgraded and EC flash/IRAM space is relatively
// expensive.
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include "cros_ec.h"
#include "cros_ec_lpc_mec.h"
#define DRV_NAME "cros_ec_lpcs"
#define ACPI_DRV_NAME "GOOG0004"
/* True if ACPI device is present */
static bool cros_ec_lpc_acpi_device_found;
/**
 * struct lpc_driver_ops - LPC driver operations
 * @read: Copy length bytes from EC address offset into buffer dest. Returns
 *        the 8-bit checksum of all bytes read.
 * @write: Copy length bytes from buffer msg into EC address offset. Returns
 *         the 8-bit checksum of all bytes written.
 */
struct lpc_driver_ops {
	u8 (*read)(unsigned int offset, unsigned int length, u8 *dest);
	u8 (*write)(unsigned int offset, unsigned int length, const u8 *msg);
};
/* Bound at probe time to either the MEC or the plain port-I/O accessors. */
static struct lpc_driver_ops cros_ec_lpc_ops = { };
/*
 * A generic instance of the read function of struct lpc_driver_ops, used for
 * the LPC EC: reads @length bytes of port I/O starting at @offset.
 */
static u8 cros_ec_lpc_read_bytes(unsigned int offset, unsigned int length,
				 u8 *dest)
{
	unsigned int i;
	int checksum = 0;

	for (i = 0; i < length; i++) {
		dest[i] = inb(offset + i);
		checksum += dest[i];
	}

	/* 8-bit checksum of everything that was read */
	return checksum;
}
/*
 * A generic instance of the write function of struct lpc_driver_ops, used for
 * the LPC EC: writes @length bytes of @msg to port I/O starting at @offset.
 */
static u8 cros_ec_lpc_write_bytes(unsigned int offset, unsigned int length,
				  const u8 *msg)
{
	unsigned int i;
	int checksum = 0;

	for (i = 0; i < length; i++) {
		outb(msg[i], offset + i);
		checksum += msg[i];
	}

	/* 8-bit checksum of everything that was written */
	return checksum;
}
/*
 * An instance of the read function of struct lpc_driver_ops, used for the
 * MEC variant of LPC EC. Accesses inside the MEC window go through the
 * MEC EMI protocol; everything else falls back to plain port I/O.
 */
static u8 cros_ec_lpc_mec_read_bytes(unsigned int offset, unsigned int length,
				     u8 *dest)
{
	int in_range = cros_ec_lpc_mec_in_range(offset, length);

	if (in_range < 0)
		return 0;

	if (in_range)
		return cros_ec_lpc_io_bytes_mec(MEC_IO_READ,
						offset - EC_HOST_CMD_REGION0,
						length, dest);

	return cros_ec_lpc_read_bytes(offset, length, dest);
}
/*
 * An instance of the write function of struct lpc_driver_ops, used for the
 * MEC variant of LPC EC. Accesses inside the MEC window go through the
 * MEC EMI protocol; everything else falls back to plain port I/O.
 */
static u8 cros_ec_lpc_mec_write_bytes(unsigned int offset, unsigned int length,
				      const u8 *msg)
{
	int in_range = cros_ec_lpc_mec_in_range(offset, length);

	if (in_range < 0)
		return 0;

	if (in_range)
		return cros_ec_lpc_io_bytes_mec(MEC_IO_WRITE,
						offset - EC_HOST_CMD_REGION0,
						length, (u8 *)msg);

	return cros_ec_lpc_write_bytes(offset, length, msg);
}
/*
 * Poll the EC status byte until the BUSY flag clears or roughly one second
 * has elapsed. Returns 0 once the EC is ready, 1 on timeout.
 */
static int ec_response_timed_out(void)
{
	unsigned long one_second = jiffies + HZ;
	u8 data;
	usleep_range(200, 300);
	do {
		/*
		 * For a single-byte read the returned checksum equals the
		 * byte itself, so the status bits can be tested directly on
		 * the return value.
		 */
		if (!(cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_CMD, 1, &data) &
		    EC_LPC_STATUS_BUSY_MASK))
			return 0;
		usleep_range(100, 200);
	} while (time_before(jiffies, one_second));
	return 1;
}
/*
 * Protocol v3 transfer: write the prepared host packet to the EC's packet
 * window, ring the doorbell, then read back and checksum-verify the reply.
 */
static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec,
				struct cros_ec_command *msg)
{
	struct ec_host_response response;
	u8 sum;
	int ret = 0;
	u8 *dout;
	/* Serialize msg into ec->dout; ret is the number of bytes to send. */
	ret = cros_ec_prepare_tx(ec, msg);
	if (ret < 0)
		goto done;
	/* Write buffer */
	cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_PACKET, ret, ec->dout);
	/* Here we go */
	sum = EC_COMMAND_PROTOCOL_3;
	cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_CMD, 1, &sum);
	if (ec_response_timed_out()) {
		dev_warn(ec->dev, "EC response timed out\n");
		ret = -EIO;
		goto done;
	}
	/* Check result */
	msg->result = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_DATA, 1, &sum);
	ret = cros_ec_check_result(ec, msg);
	if (ret)
		goto done;
	/* Read back response */
	dout = (u8 *)&response;
	sum = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_PACKET, sizeof(response),
				   dout);
	msg->result = response.result;
	if (response.data_len > msg->insize) {
		dev_err(ec->dev,
			"packet too long (%d bytes, expected %d)",
			response.data_len, msg->insize);
		ret = -EMSGSIZE;
		goto done;
	}
	/* Read response and process checksum */
	sum += cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_PACKET +
				    sizeof(response), response.data_len,
				    msg->data);
	/*
	 * The 8-bit sum over the whole response — header, payload and the
	 * embedded checksum byte — must come out to zero.
	 */
	if (sum) {
		dev_err(ec->dev,
			"bad packet checksum %02x\n",
			response.checksum);
		ret = -EBADMSG;
		goto done;
	}
	/* Return actual amount of data received */
	ret = response.data_len;
done:
	return ret;
}
/*
 * Protocol v2 transfer: parameters and the args header go through separate
 * I/O windows, each side covered by an 8-bit checksum.
 */
static int cros_ec_cmd_xfer_lpc(struct cros_ec_device *ec,
				struct cros_ec_command *msg)
{
	struct ec_lpc_host_args args;
	u8 sum;
	int ret = 0;
	if (msg->outsize > EC_PROTO2_MAX_PARAM_SIZE ||
	    msg->insize > EC_PROTO2_MAX_PARAM_SIZE) {
		dev_err(ec->dev,
			"invalid buffer sizes (out %d, in %d)\n",
			msg->outsize, msg->insize);
		return -EINVAL;
	}
	/* Now actually send the command to the EC and get the result */
	args.flags = EC_HOST_ARGS_FLAG_FROM_HOST;
	args.command_version = msg->version;
	args.data_size = msg->outsize;
	/* Initialize checksum */
	sum = msg->command + args.flags + args.command_version + args.data_size;
	/* Copy data and update checksum */
	sum += cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_PARAM, msg->outsize,
				     msg->data);
	/* Finalize checksum and write args */
	args.checksum = sum;
	cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_ARGS, sizeof(args),
			      (u8 *)&args);
	/* Here we go */
	sum = msg->command;
	cros_ec_lpc_ops.write(EC_LPC_ADDR_HOST_CMD, 1, &sum);
	if (ec_response_timed_out()) {
		dev_warn(ec->dev, "EC response timed out\n");
		ret = -EIO;
		goto done;
	}
	/* Check result */
	msg->result = cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_DATA, 1, &sum);
	ret = cros_ec_check_result(ec, msg);
	if (ret)
		goto done;
	/* Read back args */
	cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_ARGS, sizeof(args), (u8 *)&args);
	if (args.data_size > msg->insize) {
		dev_err(ec->dev,
			"packet too long (%d bytes, expected %d)",
			args.data_size, msg->insize);
		ret = -ENOSPC;
		goto done;
	}
	/* Start calculating response checksum */
	sum = msg->command + args.flags + args.command_version + args.data_size;
	/* Read response and update checksum */
	sum += cros_ec_lpc_ops.read(EC_LPC_ADDR_HOST_PARAM, args.data_size,
				    msg->data);
	/* Verify checksum */
	if (args.checksum != sum) {
		dev_err(ec->dev,
			"bad packet checksum, expected %02x, got %02x\n",
			args.checksum, sum);
		ret = -EBADMSG;
		goto done;
	}
	/* Return actual amount of data received */
	ret = args.data_size;
done:
	return ret;
}
/*
 * Read from the EC's memory-mapped region. With @bytes != 0 this is a
 * fixed-length read; with @bytes == 0 it reads a NUL-terminated string
 * (terminator included in the returned count).
 *
 * Returns num bytes read, or negative on error. Doesn't need locking.
 */
static int cros_ec_lpc_readmem(struct cros_ec_device *ec, unsigned int offset,
			       unsigned int bytes, void *dest)
{
	int i = offset;
	char *s = dest;
	int cnt = 0;

	/*
	 * Bounds check written to avoid the unsigned-underflow hazard of
	 * "offset >= EC_MEMMAP_SIZE - bytes", which silently accepted
	 * bytes > EC_MEMMAP_SIZE and also rejected the last valid window.
	 */
	if (offset >= EC_MEMMAP_SIZE || bytes > EC_MEMMAP_SIZE - offset)
		return -EINVAL;

	/* fixed length */
	if (bytes) {
		cros_ec_lpc_ops.read(EC_LPC_ADDR_MEMMAP + offset, bytes, s);
		return bytes;
	}

	/* string */
	for (; i < EC_MEMMAP_SIZE; i++, s++) {
		cros_ec_lpc_ops.read(EC_LPC_ADDR_MEMMAP + i, 1, s);
		cnt++;
		if (!*s)
			break;
	}

	return cnt;
}
/* ACPI notify handler: services EC panic, MKBP-event and wake notifications. */
static void cros_ec_lpc_acpi_notify(acpi_handle device, u32 value, void *data)
{
	static const char *env[] = { "ERROR=PANIC", NULL };
	struct cros_ec_device *ec_dev = data;
	bool ec_has_more_events;
	int ret;
	/* Timestamp the notification for event-latency bookkeeping. */
	ec_dev->last_event_time = cros_ec_get_time_ns();
	if (value == ACPI_NOTIFY_CROS_EC_PANIC) {
		dev_emerg(ec_dev->dev, "CrOS EC Panic Reported. Shutdown is imminent!");
		blocking_notifier_call_chain(&ec_dev->panic_notifier, 0, ec_dev);
		kobject_uevent_env(&ec_dev->dev->kobj, KOBJ_CHANGE, (char **)env);
		/* Begin orderly shutdown. EC will force reset after a short period. */
		hw_protection_shutdown("CrOS EC Panic", -1);
		/* Do not query for other events after a panic is reported */
		return;
	}
	/* Drain every queued MKBP event before returning. */
	if (ec_dev->mkbp_event_supported)
		do {
			ret = cros_ec_get_next_event(ec_dev, NULL,
						     &ec_has_more_events);
			if (ret > 0)
				blocking_notifier_call_chain(
						&ec_dev->event_notifier, 0,
						ec_dev);
		} while (ec_has_more_events);
	if (value == ACPI_NOTIFY_DEVICE_WAKE)
		pm_system_wakeup();
}
/*
 * Probe: reserve the EC's I/O windows, detect MEC vs. plain LPC variant,
 * register the EC device and hook up the companion ACPI notify handler.
 */
static int cros_ec_lpc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct acpi_device *adev;
	acpi_status status;
	struct cros_ec_device *ec_dev;
	u8 buf[2] = {};
	int irq, ret;
	/*
	 * The Framework Laptop (and possibly other non-ChromeOS devices)
	 * only exposes the eight I/O ports that are required for the Microchip EC.
	 * Requesting a larger reservation will fail.
	 */
	if (!devm_request_region(dev, EC_HOST_CMD_REGION0,
				 EC_HOST_CMD_MEC_REGION_SIZE, dev_name(dev))) {
		dev_err(dev, "couldn't reserve MEC region\n");
		return -EBUSY;
	}
	cros_ec_lpc_mec_init(EC_HOST_CMD_REGION0,
			     EC_LPC_ADDR_MEMMAP + EC_MEMMAP_SIZE);
	/*
	 * Read the mapped ID twice, the first one is assuming the
	 * EC is a Microchip Embedded Controller (MEC) variant, if the
	 * protocol fails, fallback to the non MEC variant and try to
	 * read again the ID.
	 */
	cros_ec_lpc_ops.read = cros_ec_lpc_mec_read_bytes;
	cros_ec_lpc_ops.write = cros_ec_lpc_mec_write_bytes;
	cros_ec_lpc_ops.read(EC_LPC_ADDR_MEMMAP + EC_MEMMAP_ID, 2, buf);
	if (buf[0] != 'E' || buf[1] != 'C') {
		if (!devm_request_region(dev, EC_LPC_ADDR_MEMMAP, EC_MEMMAP_SIZE,
					 dev_name(dev))) {
			dev_err(dev, "couldn't reserve memmap region\n");
			return -EBUSY;
		}
		/* Re-assign read/write operations for the non MEC variant */
		cros_ec_lpc_ops.read = cros_ec_lpc_read_bytes;
		cros_ec_lpc_ops.write = cros_ec_lpc_write_bytes;
		cros_ec_lpc_ops.read(EC_LPC_ADDR_MEMMAP + EC_MEMMAP_ID, 2,
				     buf);
		if (buf[0] != 'E' || buf[1] != 'C') {
			dev_err(dev, "EC ID not detected\n");
			return -ENODEV;
		}
		/* Reserve the remaining I/O ports required by the non-MEC protocol. */
		if (!devm_request_region(dev, EC_HOST_CMD_REGION0 + EC_HOST_CMD_MEC_REGION_SIZE,
					 EC_HOST_CMD_REGION_SIZE - EC_HOST_CMD_MEC_REGION_SIZE,
					 dev_name(dev))) {
			dev_err(dev, "couldn't reserve remainder of region0\n");
			return -EBUSY;
		}
		if (!devm_request_region(dev, EC_HOST_CMD_REGION1,
					 EC_HOST_CMD_REGION_SIZE, dev_name(dev))) {
			dev_err(dev, "couldn't reserve region1\n");
			return -EBUSY;
		}
	}
	ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL);
	if (!ec_dev)
		return -ENOMEM;
	platform_set_drvdata(pdev, ec_dev);
	ec_dev->dev = dev;
	ec_dev->phys_name = dev_name(dev);
	ec_dev->cmd_xfer = cros_ec_cmd_xfer_lpc;
	ec_dev->pkt_xfer = cros_ec_pkt_xfer_lpc;
	ec_dev->cmd_readmem = cros_ec_lpc_readmem;
	/* Initial buffer sizes: large enough for the protocol-info probe. */
	ec_dev->din_size = sizeof(struct ec_host_response) +
			   sizeof(struct ec_response_get_protocol_info);
	ec_dev->dout_size = sizeof(struct ec_host_request);
	/*
	 * Some boards do not have an IRQ allotted for cros_ec_lpc,
	 * which makes ENXIO an expected (and safe) scenario.
	 */
	irq = platform_get_irq_optional(pdev, 0);
	if (irq > 0)
		ec_dev->irq = irq;
	else if (irq != -ENXIO) {
		dev_err(dev, "couldn't retrieve IRQ number (%d)\n", irq);
		return irq;
	}
	ret = cros_ec_register(ec_dev);
	if (ret) {
		dev_err(dev, "couldn't register ec_dev (%d)\n", ret);
		return ret;
	}
	/*
	 * Connect a notify handler to process MKBP messages if we have a
	 * companion ACPI device.
	 */
	adev = ACPI_COMPANION(dev);
	if (adev) {
		status = acpi_install_notify_handler(adev->handle,
						     ACPI_ALL_NOTIFY,
						     cros_ec_lpc_acpi_notify,
						     ec_dev);
		if (ACPI_FAILURE(status))
			dev_warn(dev, "Failed to register notifier %08x\n",
				 status);
	}
	return 0;
}
/* Tear-down: detach the ACPI notify handler and unregister the EC device. */
static int cros_ec_lpc_remove(struct platform_device *pdev)
{
	struct cros_ec_device *ec_dev = platform_get_drvdata(pdev);
	struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);

	if (adev)
		acpi_remove_notify_handler(adev->handle, ACPI_ALL_NOTIFY,
					   cros_ec_lpc_acpi_notify);

	cros_ec_unregister(ec_dev);

	return 0;
}
/* ACPI_DRV_NAME (GOOG0004) is the HID of the ChromeOS EC host interface. */
static const struct acpi_device_id cros_ec_lpc_acpi_device_ids[] = {
	{ ACPI_DRV_NAME, 0 },
	{ }
};
MODULE_DEVICE_TABLE(acpi, cros_ec_lpc_acpi_device_ids);
/* DMI allow-list used when the EC is not enumerated through ACPI. */
static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = {
	{
		/*
		 * Today all Chromebooks/boxes ship with Google_* as version and
		 * coreboot as bios vendor. No other systems with this
		 * combination are known to date.
		 */
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
			DMI_MATCH(DMI_BIOS_VERSION, "Google_"),
		},
	},
	{
		/*
		 * If the box is running custom coreboot firmware then the
		 * DMI BIOS version string will not be matched by "Google_",
		 * but the system vendor string will still be matched by
		 * "GOOGLE".
		 */
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
		},
	},
	{
		/* x86-link, the Chromebook Pixel. */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
		},
	},
	{
		/* x86-samus, the Chromebook Pixel 2. */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Samus"),
		},
	},
	{
		/* x86-peppy, the Acer C720 Chromebook. */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Peppy"),
		},
	},
	{
		/* x86-glimmer, the Lenovo Thinkpad Yoga 11e. */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Glimmer"),
		},
	},
	/* A small number of non-Chromebook/box machines also use the ChromeOS EC */
	{
		/* the Framework Laptop */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Laptop"),
		},
	},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(dmi, cros_ec_lpc_dmi_table);
#ifdef CONFIG_PM_SLEEP
/* ->prepare() PM hook: tell the EC core we are about to suspend. */
static int cros_ec_lpc_prepare(struct device *dev)
{
	struct cros_ec_device *ec_dev = dev_get_drvdata(dev);
	return cros_ec_suspend(ec_dev);
}
/* ->complete() PM hook: resume processing of EC events after suspend. */
static void cros_ec_lpc_complete(struct device *dev)
{
	struct cros_ec_device *ec_dev = dev_get_drvdata(dev);
	cros_ec_resume(ec_dev);
}
#endif
/* prepare/complete are only wired up when sleep support is compiled in. */
static const struct dev_pm_ops cros_ec_lpc_pm_ops = {
#ifdef CONFIG_PM_SLEEP
	.prepare = cros_ec_lpc_prepare,
	.complete = cros_ec_lpc_complete
#endif
};
static struct platform_driver cros_ec_lpc_driver = {
	.driver = {
		.name = DRV_NAME,
		.acpi_match_table = cros_ec_lpc_acpi_device_ids,
		.pm = &cros_ec_lpc_pm_ops,
		/*
		 * ACPI child devices may probe before us, and they racily
		 * check our drvdata pointer. Force synchronous probe until
		 * those races are resolved.
		 */
		.probe_type = PROBE_FORCE_SYNCHRONOUS,
	},
	.probe = cros_ec_lpc_probe,
	.remove = cros_ec_lpc_remove,
};
/* Manually instantiated device, used when no ACPI node advertises the EC. */
static struct platform_device cros_ec_lpc_device = {
	.name = DRV_NAME
};
/*
 * acpi_get_devices() callback: any match means a GOOG0004 node exists, so
 * record that and stop walking the namespace.
 */
static acpi_status cros_ec_lpc_parse_device(acpi_handle handle, u32 level,
					    void *context, void **retval)
{
	*(bool *)context = true;
	return AE_CTRL_TERMINATE;
}
/* Module init: prefer ACPI enumeration, fall back to the DMI allow-list. */
static int __init cros_ec_lpc_init(void)
{
	int ret;
	acpi_status status;
	status = acpi_get_devices(ACPI_DRV_NAME, cros_ec_lpc_parse_device,
				  &cros_ec_lpc_acpi_device_found, NULL);
	if (ACPI_FAILURE(status))
		pr_warn(DRV_NAME ": Looking for %s failed\n", ACPI_DRV_NAME);
	if (!cros_ec_lpc_acpi_device_found &&
	    !dmi_check_system(cros_ec_lpc_dmi_table)) {
		pr_err(DRV_NAME ": unsupported system.\n");
		return -ENODEV;
	}
	/* Register the driver */
	ret = platform_driver_register(&cros_ec_lpc_driver);
	if (ret) {
		pr_err(DRV_NAME ": can't register driver: %d\n", ret);
		return ret;
	}
	if (!cros_ec_lpc_acpi_device_found) {
		/* Register the device, and it'll get hooked up automatically */
		ret = platform_device_register(&cros_ec_lpc_device);
		if (ret) {
			pr_err(DRV_NAME ": can't register device: %d\n", ret);
			platform_driver_unregister(&cros_ec_lpc_driver);
		}
	}
	return ret;
}
/* Module exit: only tear down the platform device we created in init. */
static void __exit cros_ec_lpc_exit(void)
{
	if (!cros_ec_lpc_acpi_device_found)
		platform_device_unregister(&cros_ec_lpc_device);
	platform_driver_unregister(&cros_ec_lpc_driver);
}
module_init(cros_ec_lpc_init);
module_exit(cros_ec_lpc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ChromeOS EC LPC driver");
| linux-master | drivers/platform/chrome/cros_ec_lpc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* USB Power Delivery Vendor Defined Message (VDM) support code.
*
* Copyright 2023 Google LLC
* Author: Prashant Malani <[email protected]>
*/
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/usb/pd_vdo.h>
#include "cros_ec_typec.h"
#include "cros_typec_vdm.h"
/*
 * Retrieves pending VDM attention messages from the EC and forwards them to
 * the altmode driver based on SVID. Loops until the EC reports no further
 * attention messages are queued.
 */
void cros_typec_handle_vdm_attention(struct cros_typec_data *typec, int port_num)
{
	struct ec_response_typec_vdm_response resp;
	struct ec_params_typec_vdm_response req = {
		.port = port_num,
	};
	struct typec_altmode *amode;
	u16 svid;
	u32 hdr;
	int ret;

	do {
		ret = cros_ec_cmd(typec->ec, 0, EC_CMD_TYPEC_VDM_RESPONSE, &req,
				  sizeof(req), &resp, sizeof(resp));
		if (ret < 0) {
			dev_warn(typec->dev, "Failed VDM response fetch, port: %d\n", port_num);
			return;
		}

		/*
		 * Attention data is delivered in vdm_attention[]; the
		 * vdm_response[] buffer belongs to regular VDM replies (see
		 * cros_typec_handle_vdm_response()). The header therefore
		 * comes from vdm_attention[0], matching the payload read
		 * from vdm_attention[1] below.
		 */
		hdr = resp.vdm_attention[0];
		svid = PD_VDO_VID(hdr);
		dev_dbg(typec->dev, "Received VDM Attention header: %x, port: %d\n", hdr, port_num);

		amode = typec_match_altmode(typec->ports[port_num]->port_altmode,
					    CROS_EC_ALTMODE_MAX, svid, PD_VDO_OPOS(hdr));
		if (!amode) {
			dev_err(typec->dev,
				"Received VDM for unregistered altmode (SVID:%x), port: %d\n",
				svid, port_num);
			return;
		}

		typec_altmode_attention(amode, resp.vdm_attention[1]);
	} while (resp.vdm_attention_left);
}
/*
 * Retrieves a VDM response from the EC and forwards it to the altmode driver based on SVID.
 */
void cros_typec_handle_vdm_response(struct cros_typec_data *typec, int port_num)
{
	struct ec_response_typec_vdm_response resp;
	struct ec_params_typec_vdm_response req = {
		.port = port_num,
	};
	struct typec_altmode *amode;
	u16 svid;
	u32 hdr;
	int ret;
	ret = cros_ec_cmd(typec->ec, 0, EC_CMD_TYPEC_VDM_RESPONSE, &req,
			  sizeof(req), &resp, sizeof(resp));
	if (ret < 0) {
		dev_warn(typec->dev, "Failed VDM response fetch, port: %d\n", port_num);
		return;
	}
	/* The first data object is the VDO header carrying SVID and position. */
	hdr = resp.vdm_response[0];
	svid = PD_VDO_VID(hdr);
	dev_dbg(typec->dev, "Received VDM header: %x, port: %d\n", hdr, port_num);
	amode = typec_match_altmode(typec->ports[port_num]->port_altmode, CROS_EC_ALTMODE_MAX,
				    svid, PD_VDO_OPOS(hdr));
	if (!amode) {
		dev_err(typec->dev, "Received VDM for unregistered altmode (SVID:%x), port: %d\n",
			svid, port_num);
		return;
	}
	/* Hand the remaining data objects to the matched altmode driver. */
	ret = typec_altmode_vdm(amode, hdr, &resp.vdm_response[1], resp.vdm_data_objects);
	if (ret)
		dev_err(typec->dev, "Failed to forward VDM to altmode (SVID:%x), port: %d\n",
			svid, port_num);
}
/*
 * typec_altmode_ops.enter handler: ask the EC to transmit an Enter Mode
 * VDM (SOP) for this altmode's SVID at the altmode's object position.
 */
static int cros_typec_port_amode_enter(struct typec_altmode *amode, u32 *vdo)
{
	struct cros_typec_port *port = typec_altmode_get_drvdata(amode);
	u32 hdr = VDO(amode->svid, 1, SVDM_VER_2_0, CMD_ENTER_MODE) |
		  VDO_OPOS(amode->mode);
	struct typec_vdm_req vdm = {
		/* Single data object: the VDM header itself. */
		.vdm_data = { hdr },
		.vdm_data_objects = 1,
		.partner_type = TYPEC_PARTNER_SOP,
	};
	struct ec_params_typec_control req = {
		.port = port->port_num,
		.command = TYPEC_CONTROL_COMMAND_SEND_VDM_REQ,
		.vdm_req_params = vdm,
	};

	dev_dbg(port->typec_data->dev, "Sending EnterMode VDM, hdr: %x, port: %d\n",
		hdr, port->port_num);

	return cros_ec_cmd(port->typec_data->ec, 0, EC_CMD_TYPEC_CONTROL, &req,
			   sizeof(req), NULL, 0);
}
/*
 * typec_altmode_ops.vdm handler: ask the EC to transmit an arbitrary VDM.
 * vdm_data[0] carries the header @hdr; vdo[] supplies the remaining
 * (cnt - 1) data objects.
 * NOTE(review): assumes cnt fits within vdm_data[] — bounded by the typec
 * core / EC interface; confirm against struct typec_vdm_req.
 */
static int cros_typec_port_amode_vdm(struct typec_altmode *amode, const u32 hdr,
				     const u32 *vdo, int cnt)
{
	struct cros_typec_port *port = typec_altmode_get_drvdata(amode);
	struct ec_params_typec_control req = {
		.port = port->port_num,
		.command = TYPEC_CONTROL_COMMAND_SEND_VDM_REQ,
	};
	struct typec_vdm_req vdm_req = {};
	int i;

	/* Slot 0 is the header; the caller's VDOs follow it. */
	vdm_req.vdm_data[0] = hdr;
	vdm_req.vdm_data_objects = cnt;
	for (i = 1; i < cnt; i++)
		vdm_req.vdm_data[i] = vdo[i-1];
	vdm_req.partner_type = TYPEC_PARTNER_SOP;
	req.vdm_req_params = vdm_req;

	dev_dbg(port->typec_data->dev, "Sending VDM, hdr: %x, num_objects: %d, port: %d\n",
		hdr, cnt, port->port_num);

	return cros_ec_cmd(port->typec_data->ec, 0, EC_CMD_TYPEC_CONTROL, &req,
			   sizeof(req), NULL, 0);
}
/*
 * VDM operations for the port-registered altmodes; non-static, presumably
 * declared in cros_typec_vdm.h and used by cros_ec_typec — confirm.
 */
struct typec_altmode_ops port_amode_ops = {
	.enter = cros_typec_port_amode_enter,
	.vdm = cros_typec_port_amode_vdm,
};
| linux-master | drivers/platform/chrome/cros_typec_vdm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Logging driver for ChromeOS EC based USBPD Charger.
*
* Copyright 2018 Google LLC.
*/
#include <linux/ktime.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#define DRV_NAME "cros-usbpd-logger"
#define CROS_USBPD_MAX_LOG_ENTRIES 30
#define CROS_USBPD_LOG_UPDATE_DELAY msecs_to_jiffies(60000)
#define CROS_USBPD_DATA_SIZE 16
#define CROS_USBPD_LOG_RESP_SIZE (sizeof(struct ec_response_pd_log) + \
CROS_USBPD_DATA_SIZE)
#define CROS_USBPD_BUFFER_SIZE (sizeof(struct cros_ec_command) + \
CROS_USBPD_LOG_RESP_SIZE)
/* Buffer for building the PDLOG string */
#define BUF_SIZE 80
struct logger_data {
struct device *dev;
struct cros_ec_dev *ec_dev;
u8 ec_buffer[CROS_USBPD_BUFFER_SIZE];
struct delayed_work log_work;
struct workqueue_struct *log_workqueue;
};
static const char * const chg_type_names[] = {
"None", "PD", "Type-C", "Proprietary", "DCP", "CDP", "SDP",
"Other", "VBUS"
};
static const char * const role_names[] = {
"Disconnected", "SRC", "SNK", "SNK (not charging)"
};
static const char * const fault_names[] = {
"---", "OCP", "fast OCP", "OVP", "Discharge"
};
/*
 * Append a formatted string at offset @pos within @buf (a BUF_SIZE byte
 * PDLOG line buffer) and return the number of characters actually written.
 *
 * vsnprintf() returns the length the output *would* have had, which can
 * exceed the remaining space.  Clamp both the offset and the return value
 * so callers that accumulate the offset can never move it past the end of
 * the buffer — previously that would have made the size argument of a
 * subsequent call negative (huge once converted to size_t) and allowed an
 * out-of-bounds write.
 */
__printf(3, 4)
static int append_str(char *buf, int pos, const char *fmt, ...)
{
	va_list args;
	int i;

	/* Buffer already full (or bogus offset): nothing can be appended. */
	if (pos < 0 || pos >= BUF_SIZE)
		return 0;

	va_start(args, fmt);
	i = vsnprintf(buf + pos, BUF_SIZE - pos, fmt, args);
	va_end(args);

	if (i < 0)
		return 0;

	/* Truncated: report only what fits (excluding the trailing NUL). */
	if (i >= BUF_SIZE - pos)
		i = BUF_SIZE - pos - 1;

	return i;
}
/*
 * Fetch the next PD log entry from the EC.  The request/response reuse the
 * preallocated logger->ec_buffer, so the returned pointer is only valid
 * until the next call.  Returns an ERR_PTR() on transfer failure.
 */
static struct ec_response_pd_log *ec_get_log_entry(struct logger_data *logger)
{
	struct cros_ec_dev *ec_dev = logger->ec_dev;
	struct cros_ec_command *msg;
	int ret;

	msg = (struct cros_ec_command *)logger->ec_buffer;
	msg->command = ec_dev->cmd_offset + EC_CMD_PD_GET_LOG_ENTRY;
	msg->insize = CROS_USBPD_LOG_RESP_SIZE;

	ret = cros_ec_cmd_xfer_status(ec_dev->ec_dev, msg);
	if (ret < 0)
		return ERR_PTR(ret);

	/* The EC wrote the log entry into the message data area. */
	return (struct ec_response_pd_log *)msg->data;
}
/*
 * Format one PD log entry and print it to the kernel log, prefixed with the
 * wall-clock time of the event (derived from the entry's relative timestamp
 * and @tstamp, the time at which the entry was fetched).
 */
static void cros_usbpd_print_log_entry(struct ec_response_pd_log *r,
				       ktime_t tstamp)
{
	const char *fault, *role, *chg_type;
	struct usb_chg_measures *meas;
	struct mcdp_info *minfo;
	int role_idx, type_idx;
	char buf[BUF_SIZE + 1];
	struct rtc_time rt;
	int len = 0;
	s32 rem;
	int i;

	/* The timestamp is the number of 1024th of seconds in the past */
	tstamp = ktime_sub_us(tstamp, r->timestamp << PD_LOG_TIMESTAMP_SHIFT);
	rt = rtc_ktime_to_tm(tstamp);

	switch (r->type) {
	case PD_EVENT_MCU_CHARGE:
		if (r->data & CHARGE_FLAGS_OVERRIDE)
			len += append_str(buf, len, "override ");
		if (r->data & CHARGE_FLAGS_DELAYED_OVERRIDE)
			len += append_str(buf, len, "pending_override ");
		/* Out-of-range indices fall back to placeholder strings. */
		role_idx = r->data & CHARGE_FLAGS_ROLE_MASK;
		role = role_idx < ARRAY_SIZE(role_names) ?
			role_names[role_idx] : "Unknown";
		type_idx = (r->data & CHARGE_FLAGS_TYPE_MASK)
			>> CHARGE_FLAGS_TYPE_SHIFT;
		chg_type = type_idx < ARRAY_SIZE(chg_type_names) ?
			chg_type_names[type_idx] : "???";
		/* No measurements to report when disconnected or sourcing. */
		if (role_idx == USB_PD_PORT_POWER_DISCONNECTED ||
		    role_idx == USB_PD_PORT_POWER_SOURCE) {
			len += append_str(buf, len, "%s", role);
			break;
		}
		meas = (struct usb_chg_measures *)r->payload;
		len += append_str(buf, len, "%s %s %s %dmV max %dmV / %dmA",
				  role, r->data & CHARGE_FLAGS_DUAL_ROLE ?
				  "DRP" : "Charger",
				  chg_type, meas->voltage_now,
				  meas->voltage_max, meas->current_max);
		break;
	case PD_EVENT_ACC_RW_FAIL:
		len += append_str(buf, len, "RW signature check failed");
		break;
	case PD_EVENT_PS_FAULT:
		fault = r->data < ARRAY_SIZE(fault_names) ? fault_names[r->data]
							  : "???";
		len += append_str(buf, len, "Power supply fault: %s", fault);
		break;
	case PD_EVENT_VIDEO_DP_MODE:
		len += append_str(buf, len, "DP mode %sabled", r->data == 1 ?
				  "en" : "dis");
		break;
	case PD_EVENT_VIDEO_CODEC:
		minfo = (struct mcdp_info *)r->payload;
		len += append_str(buf, len, "HDMI info: family:%04x chipid:%04x ",
				  MCDP_FAMILY(minfo->family),
				  MCDP_CHIPID(minfo->chipid));
		len += append_str(buf, len, "irom:%d.%d.%d fw:%d.%d.%d",
				  minfo->irom.major, minfo->irom.minor,
				  minfo->irom.build, minfo->fw.major,
				  minfo->fw.minor, minfo->fw.build);
		break;
	default:
		/* Unknown event type: dump the raw payload bytes. */
		len += append_str(buf, len, "Event %02x (%04x) [", r->type,
				  r->data);
		for (i = 0; i < PD_LOG_SIZE(r->size_port); i++)
			len += append_str(buf, len, "%02x ", r->payload[i]);
		len += append_str(buf, len, "]");
		break;
	}
	/* Millisecond remainder for the printed timestamp. */
	div_s64_rem(ktime_to_ms(tstamp), MSEC_PER_SEC, &rem);
	pr_info("PDLOG %d/%02d/%02d %02d:%02d:%02d.%03d P%d %s\n",
		rt.tm_year + 1900, rt.tm_mon + 1, rt.tm_mday,
		rt.tm_hour, rt.tm_min, rt.tm_sec, rem,
		PD_LOG_PORT(r->size_port), buf);
}
/*
 * Periodic worker: drain up to CROS_USBPD_MAX_LOG_ENTRIES entries from the
 * EC log, print each one, then re-arm itself after
 * CROS_USBPD_LOG_UPDATE_DELAY.
 */
static void cros_usbpd_log_check(struct work_struct *work)
{
	struct logger_data *logger = container_of(to_delayed_work(work),
						  struct logger_data,
						  log_work);
	struct device *dev = logger->dev;
	struct ec_response_pd_log *r;
	int entries = 0;
	ktime_t now;

	while (entries++ < CROS_USBPD_MAX_LOG_ENTRIES) {
		r = ec_get_log_entry(logger);
		now = ktime_get_real();
		if (IS_ERR(r)) {
			dev_dbg(dev, "Cannot get PD log %ld\n", PTR_ERR(r));
			break;
		}
		/* The EC signals an empty log with a NO_ENTRY record. */
		if (r->type == PD_EVENT_NO_ENTRY)
			break;

		cros_usbpd_print_log_entry(r, now);
	}

	queue_delayed_work(logger->log_workqueue, &logger->log_work,
			   CROS_USBPD_LOG_UPDATE_DELAY);
}
/*
 * Probe: allocate per-device state and start the periodic log-draining
 * worker on a dedicated single-threaded workqueue.
 */
static int cros_usbpd_logger_probe(struct platform_device *pd)
{
	struct cros_ec_dev *ec_dev = dev_get_drvdata(pd->dev.parent);
	struct device *dev = &pd->dev;
	struct logger_data *logger;

	logger = devm_kzalloc(dev, sizeof(*logger), GFP_KERNEL);
	if (!logger)
		return -ENOMEM;

	logger->dev = dev;
	logger->ec_dev = ec_dev;

	platform_set_drvdata(pd, logger);

	/* Retrieve PD event logs periodically */
	INIT_DELAYED_WORK(&logger->log_work, cros_usbpd_log_check);
	logger->log_workqueue =	create_singlethread_workqueue("cros_usbpd_log");
	if (!logger->log_workqueue)
		return -ENOMEM;

	queue_delayed_work(logger->log_workqueue, &logger->log_work,
			   CROS_USBPD_LOG_UPDATE_DELAY);

	return 0;
}
/* Remove: stop the worker before tearing down its workqueue. */
static int cros_usbpd_logger_remove(struct platform_device *pd)
{
	struct logger_data *logger = platform_get_drvdata(pd);

	cancel_delayed_work_sync(&logger->log_work);
	destroy_workqueue(logger->log_workqueue);

	return 0;
}
/* System resume: restart the periodic log-draining worker. */
static int __maybe_unused cros_usbpd_logger_resume(struct device *dev)
{
	struct logger_data *logger = dev_get_drvdata(dev);

	queue_delayed_work(logger->log_workqueue, &logger->log_work,
			   CROS_USBPD_LOG_UPDATE_DELAY);

	return 0;
}
/* System suspend: make sure no log fetch is in flight while suspended. */
static int __maybe_unused cros_usbpd_logger_suspend(struct device *dev)
{
	struct logger_data *logger = dev_get_drvdata(dev);

	cancel_delayed_work_sync(&logger->log_work);

	return 0;
}
static SIMPLE_DEV_PM_OPS(cros_usbpd_logger_pm_ops, cros_usbpd_logger_suspend,
cros_usbpd_logger_resume);
static struct platform_driver cros_usbpd_logger_driver = {
.driver = {
.name = DRV_NAME,
.pm = &cros_usbpd_logger_pm_ops,
},
.probe = cros_usbpd_logger_probe,
.remove = cros_usbpd_logger_remove,
};
module_platform_driver(cros_usbpd_logger_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Logging driver for ChromeOS EC USBPD Charger.");
MODULE_ALIAS("platform:" DRV_NAME);
| linux-master | drivers/platform/chrome/cros_usbpd_logger.c |
// SPDX-License-Identifier: GPL-2.0+
// Driver to instantiate Chromebook i2c/smbus devices.
//
// Copyright (C) 2012 Google, Inc.
// Author: Benson Leung <[email protected]>
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#define ATMEL_TP_I2C_ADDR 0x4b
#define ATMEL_TP_I2C_BL_ADDR 0x25
#define ATMEL_TS_I2C_ADDR 0x4a
#define ATMEL_TS_I2C_BL_ADDR 0x26
#define CYAPA_TP_I2C_ADDR 0x67
#define ELAN_TP_I2C_ADDR 0x15
#define ISL_ALS_I2C_ADDR 0x44
#define TAOS_ALS_I2C_ADDR 0x29
static const char *i2c_adapter_names[] = {
"SMBus I801 adapter",
"i915 gmbus vga",
"i915 gmbus panel",
"Synopsys DesignWare I2C adapter",
};
/* Keep this enum consistent with i2c_adapter_names */
enum i2c_adapter_type {
I2C_ADAPTER_SMBUS = 0,
I2C_ADAPTER_VGADDC,
I2C_ADAPTER_PANEL,
I2C_ADAPTER_DESIGNWARE,
};
struct i2c_peripheral {
struct i2c_board_info board_info;
unsigned short alt_addr;
const char *dmi_name;
unsigned long irqflags;
struct resource irq_resource;
enum i2c_adapter_type type;
u32 pci_devid;
const struct property_entry *properties;
struct i2c_client *client;
};
struct acpi_peripheral {
char hid[ACPI_ID_LEN];
struct software_node swnode;
struct i2c_client *client;
};
struct chromeos_laptop {
/*
* Note that we can't mark this pointer as const because
* i2c_new_scanned_device() changes passed in I2C board info, so.
*/
struct i2c_peripheral *i2c_peripherals;
unsigned int num_i2c_peripherals;
struct acpi_peripheral *acpi_peripherals;
unsigned int num_acpi_peripherals;
};
static const struct chromeos_laptop *cros_laptop;
/*
 * Instantiate an I2C device, probing the primary address first and, when
 * that fails and an alternate (bootloader) address is given, probing the
 * alternate.  Returns the new client, or NULL on failure (callers treat
 * NULL as "device not present").
 */
static struct i2c_client *
chromes_laptop_instantiate_i2c_device(struct i2c_adapter *adapter,
				      struct i2c_board_info *info,
				      unsigned short alt_addr)
{
	const unsigned short addr_list[] = { info->addr, I2C_CLIENT_END };
	struct i2c_client *client;

	/*
	 * Add the i2c device. If we can't detect it at the primary
	 * address we scan secondary addresses. In any case the client
	 * structure gets assigned primary address.
	 */
	client = i2c_new_scanned_device(adapter, info, addr_list, NULL);
	if (IS_ERR(client) && alt_addr) {
		struct i2c_board_info dummy_info = {
			I2C_BOARD_INFO("dummy", info->addr),
		};
		const unsigned short alt_addr_list[] = {
			alt_addr, I2C_CLIENT_END
		};
		struct i2c_client *dummy;

		/* Probe at the alternate address with a throwaway client. */
		dummy = i2c_new_scanned_device(adapter, &dummy_info,
					       alt_addr_list, NULL);
		if (!IS_ERR(dummy)) {
			pr_debug("%d-%02x is probed at %02x\n",
				 adapter->nr, info->addr, dummy->addr);
			i2c_unregister_device(dummy);
			/* Device responded: register it at the primary address. */
			client = i2c_new_client_device(adapter, info);
		}
	}

	if (IS_ERR(client)) {
		client = NULL;
		pr_debug("failed to register device %d-%02x\n",
			 adapter->nr, info->addr);
	} else {
		pr_debug("added i2c device %d-%02x\n",
			 adapter->nr, info->addr);
	}

	return client;
}
/* True iff @dev is a PCI device whose bus/devfn ID equals @devid. */
static bool chromeos_laptop_match_adapter_devid(struct device *dev, u32 devid)
{
	return dev_is_pci(dev) && pci_dev_id(to_pci_dev(dev)) == devid;
}
/*
 * Called for each I2C adapter that appears: instantiate any not-yet-created
 * board peripherals whose adapter name prefix (and, if set, parent PCI
 * device ID) matches this adapter.
 */
static void chromeos_laptop_check_adapter(struct i2c_adapter *adapter)
{
	struct i2c_peripheral *i2c_dev;
	int i;

	for (i = 0; i < cros_laptop->num_i2c_peripherals; i++) {
		i2c_dev = &cros_laptop->i2c_peripherals[i];

		/* Skip devices already created */
		if (i2c_dev->client)
			continue;

		/* Match on the adapter-name prefix for the peripheral's bus type. */
		if (strncmp(adapter->name, i2c_adapter_names[i2c_dev->type],
			    strlen(i2c_adapter_names[i2c_dev->type])))
			continue;

		/* Optionally disambiguate identical names by parent PCI devid. */
		if (i2c_dev->pci_devid &&
		    !chromeos_laptop_match_adapter_devid(adapter->dev.parent,
							 i2c_dev->pci_devid)) {
			continue;
		}

		i2c_dev->client =
			chromes_laptop_instantiate_i2c_device(adapter,
							&i2c_dev->board_info,
							i2c_dev->alt_addr);
	}
}
/*
 * If @client matches one of the expected ACPI peripherals by HID, attach
 * the extra software-node properties the ACPI tables do not provide.
 * Returns true when properties were added (callers may then re-trigger
 * driver probing).
 */
static bool chromeos_laptop_adjust_client(struct i2c_client *client)
{
	struct acpi_peripheral *acpi_dev;
	struct acpi_device_id acpi_ids[2] = { };
	int i;
	int error;

	if (!has_acpi_companion(&client->dev))
		return false;

	for (i = 0; i < cros_laptop->num_acpi_peripherals; i++) {
		acpi_dev = &cros_laptop->acpi_peripherals[i];

		/* Build a one-entry match table for this peripheral's HID. */
		memcpy(acpi_ids[0].id, acpi_dev->hid, ACPI_ID_LEN);

		if (acpi_match_device(acpi_ids, &client->dev)) {
			error = device_add_software_node(&client->dev, &acpi_dev->swnode);
			if (error) {
				dev_err(&client->dev,
					"failed to add properties: %d\n",
					error);
				break;
			}

			acpi_dev->client = client;

			return true;
		}
	}

	return false;
}
/*
 * An I2C client is going away: clear the matching cached client pointer so
 * we do not keep a stale reference (ACPI-described peripherals and
 * board-table peripherals are tracked in separate arrays).
 */
static void chromeos_laptop_detach_i2c_client(struct i2c_client *client)
{
	int idx;

	if (has_acpi_companion(&client->dev)) {
		for (idx = 0; idx < cros_laptop->num_acpi_peripherals; idx++) {
			struct acpi_peripheral *adev =
				&cros_laptop->acpi_peripherals[idx];

			if (adev->client == client) {
				adev->client = NULL;
				return;
			}
		}
	} else {
		for (idx = 0; idx < cros_laptop->num_i2c_peripherals; idx++) {
			struct i2c_peripheral *idev =
				&cros_laptop->i2c_peripherals[idx];

			if (idev->client == client) {
				idev->client = NULL;
				return;
			}
		}
	}
}
/*
 * I2C bus notifier: instantiate peripherals when their adapter appears,
 * attach software-node properties to matching new clients, and drop cached
 * client pointers when clients are removed.
 */
static int chromeos_laptop_i2c_notifier_call(struct notifier_block *nb,
					     unsigned long action, void *data)
{
	struct device *dev = data;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		if (dev->type == &i2c_adapter_type)
			chromeos_laptop_check_adapter(to_i2c_adapter(dev));
		else if (dev->type == &i2c_client_type)
			chromeos_laptop_adjust_client(to_i2c_client(dev));
		break;

	case BUS_NOTIFY_REMOVED_DEVICE:
		if (dev->type == &i2c_client_type)
			chromeos_laptop_detach_i2c_client(to_i2c_client(dev));
		break;
	}

	return 0;
}
static struct notifier_block chromeos_laptop_i2c_notifier = {
.notifier_call = chromeos_laptop_i2c_notifier_call,
};
#define DECLARE_CROS_LAPTOP(_name) \
static const struct chromeos_laptop _name __initconst = { \
.i2c_peripherals = _name##_peripherals, \
.num_i2c_peripherals = ARRAY_SIZE(_name##_peripherals), \
}
#define DECLARE_ACPI_CROS_LAPTOP(_name) \
static const struct chromeos_laptop _name __initconst = { \
.acpi_peripherals = _name##_peripherals, \
.num_acpi_peripherals = ARRAY_SIZE(_name##_peripherals), \
}
static struct i2c_peripheral samsung_series_5_550_peripherals[] __initdata = {
/* Touchpad. */
{
.board_info = {
I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
.flags = I2C_CLIENT_WAKE,
},
.dmi_name = "trackpad",
.type = I2C_ADAPTER_SMBUS,
},
/* Light Sensor. */
{
.board_info = {
I2C_BOARD_INFO("isl29018", ISL_ALS_I2C_ADDR),
},
.dmi_name = "lightsensor",
.type = I2C_ADAPTER_SMBUS,
},
};
DECLARE_CROS_LAPTOP(samsung_series_5_550);
static struct i2c_peripheral samsung_series_5_peripherals[] __initdata = {
/* Light Sensor. */
{
.board_info = {
I2C_BOARD_INFO("tsl2583", TAOS_ALS_I2C_ADDR),
},
.type = I2C_ADAPTER_SMBUS,
},
};
DECLARE_CROS_LAPTOP(samsung_series_5);
static const int chromebook_pixel_tp_keys[] __initconst = {
KEY_RESERVED,
KEY_RESERVED,
KEY_RESERVED,
KEY_RESERVED,
KEY_RESERVED,
BTN_LEFT
};
static const struct property_entry
chromebook_pixel_trackpad_props[] __initconst = {
PROPERTY_ENTRY_STRING("compatible", "atmel,maxtouch"),
PROPERTY_ENTRY_U32_ARRAY("linux,gpio-keymap", chromebook_pixel_tp_keys),
{ }
};
static const struct property_entry
chromebook_atmel_touchscreen_props[] __initconst = {
PROPERTY_ENTRY_STRING("compatible", "atmel,maxtouch"),
{ }
};
static struct i2c_peripheral chromebook_pixel_peripherals[] __initdata = {
/* Touch Screen. */
{
.board_info = {
I2C_BOARD_INFO("atmel_mxt_ts",
ATMEL_TS_I2C_ADDR),
.flags = I2C_CLIENT_WAKE,
},
.dmi_name = "touchscreen",
.irqflags = IRQF_TRIGGER_FALLING,
.type = I2C_ADAPTER_PANEL,
.alt_addr = ATMEL_TS_I2C_BL_ADDR,
.properties = chromebook_atmel_touchscreen_props,
},
/* Touchpad. */
{
.board_info = {
I2C_BOARD_INFO("atmel_mxt_tp",
ATMEL_TP_I2C_ADDR),
.flags = I2C_CLIENT_WAKE,
},
.dmi_name = "trackpad",
.irqflags = IRQF_TRIGGER_FALLING,
.type = I2C_ADAPTER_VGADDC,
.alt_addr = ATMEL_TP_I2C_BL_ADDR,
.properties = chromebook_pixel_trackpad_props,
},
/* Light Sensor. */
{
.board_info = {
I2C_BOARD_INFO("isl29018", ISL_ALS_I2C_ADDR),
},
.dmi_name = "lightsensor",
.type = I2C_ADAPTER_PANEL,
},
};
DECLARE_CROS_LAPTOP(chromebook_pixel);
static struct i2c_peripheral hp_chromebook_14_peripherals[] __initdata = {
/* Touchpad. */
{
.board_info = {
I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
.flags = I2C_CLIENT_WAKE,
},
.dmi_name = "trackpad",
.type = I2C_ADAPTER_DESIGNWARE,
},
};
DECLARE_CROS_LAPTOP(hp_chromebook_14);
static struct i2c_peripheral dell_chromebook_11_peripherals[] __initdata = {
/* Touchpad. */
{
.board_info = {
I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
.flags = I2C_CLIENT_WAKE,
},
.dmi_name = "trackpad",
.type = I2C_ADAPTER_DESIGNWARE,
},
/* Elan Touchpad option. */
{
.board_info = {
I2C_BOARD_INFO("elan_i2c", ELAN_TP_I2C_ADDR),
.flags = I2C_CLIENT_WAKE,
},
.dmi_name = "trackpad",
.type = I2C_ADAPTER_DESIGNWARE,
},
};
DECLARE_CROS_LAPTOP(dell_chromebook_11);
static struct i2c_peripheral toshiba_cb35_peripherals[] __initdata = {
/* Touchpad. */
{
.board_info = {
I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
.flags = I2C_CLIENT_WAKE,
},
.dmi_name = "trackpad",
.type = I2C_ADAPTER_DESIGNWARE,
},
};
DECLARE_CROS_LAPTOP(toshiba_cb35);
static struct i2c_peripheral acer_c7_chromebook_peripherals[] __initdata = {
/* Touchpad. */
{
.board_info = {
I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
.flags = I2C_CLIENT_WAKE,
},
.dmi_name = "trackpad",
.type = I2C_ADAPTER_SMBUS,
},
};
DECLARE_CROS_LAPTOP(acer_c7_chromebook);
static struct i2c_peripheral acer_ac700_peripherals[] __initdata = {
/* Light Sensor. */
{
.board_info = {
I2C_BOARD_INFO("tsl2583", TAOS_ALS_I2C_ADDR),
},
.type = I2C_ADAPTER_SMBUS,
},
};
DECLARE_CROS_LAPTOP(acer_ac700);
static struct i2c_peripheral acer_c720_peripherals[] __initdata = {
/* Touchscreen. */
{
.board_info = {
I2C_BOARD_INFO("atmel_mxt_ts",
ATMEL_TS_I2C_ADDR),
.flags = I2C_CLIENT_WAKE,
},
.dmi_name = "touchscreen",
.irqflags = IRQF_TRIGGER_FALLING,
.type = I2C_ADAPTER_DESIGNWARE,
.pci_devid = PCI_DEVID(0, PCI_DEVFN(0x15, 0x2)),
.alt_addr = ATMEL_TS_I2C_BL_ADDR,
.properties = chromebook_atmel_touchscreen_props,
},
/* Touchpad. */
{
.board_info = {
I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
.flags = I2C_CLIENT_WAKE,
},
.dmi_name = "trackpad",
.type = I2C_ADAPTER_DESIGNWARE,
.pci_devid = PCI_DEVID(0, PCI_DEVFN(0x15, 0x1)),
},
/* Elan Touchpad option. */
{
.board_info = {
I2C_BOARD_INFO("elan_i2c", ELAN_TP_I2C_ADDR),
.flags = I2C_CLIENT_WAKE,
},
.dmi_name = "trackpad",
.type = I2C_ADAPTER_DESIGNWARE,
.pci_devid = PCI_DEVID(0, PCI_DEVFN(0x15, 0x1)),
},
/* Light Sensor. */
{
.board_info = {
I2C_BOARD_INFO("isl29018", ISL_ALS_I2C_ADDR),
},
.dmi_name = "lightsensor",
.type = I2C_ADAPTER_DESIGNWARE,
.pci_devid = PCI_DEVID(0, PCI_DEVFN(0x15, 0x2)),
},
};
DECLARE_CROS_LAPTOP(acer_c720);
static struct i2c_peripheral
hp_pavilion_14_chromebook_peripherals[] __initdata = {
/* Touchpad. */
{
.board_info = {
I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
.flags = I2C_CLIENT_WAKE,
},
.dmi_name = "trackpad",
.type = I2C_ADAPTER_SMBUS,
},
};
DECLARE_CROS_LAPTOP(hp_pavilion_14_chromebook);
static struct i2c_peripheral cr48_peripherals[] __initdata = {
/* Light Sensor. */
{
.board_info = {
I2C_BOARD_INFO("tsl2563", TAOS_ALS_I2C_ADDR),
},
.type = I2C_ADAPTER_SMBUS,
},
};
DECLARE_CROS_LAPTOP(cr48);
static const u32 samus_touchpad_buttons[] __initconst = {
KEY_RESERVED,
KEY_RESERVED,
KEY_RESERVED,
BTN_LEFT
};
static const struct property_entry samus_trackpad_props[] __initconst = {
PROPERTY_ENTRY_STRING("compatible", "atmel,maxtouch"),
PROPERTY_ENTRY_U32_ARRAY("linux,gpio-keymap", samus_touchpad_buttons),
{ }
};
static struct acpi_peripheral samus_peripherals[] __initdata = {
/* Touchpad */
{
.hid = "ATML0000",
.swnode = {
.properties = samus_trackpad_props,
},
},
/* Touchsceen */
{
.hid = "ATML0001",
.swnode = {
.properties = chromebook_atmel_touchscreen_props,
},
},
};
DECLARE_ACPI_CROS_LAPTOP(samus);
static struct acpi_peripheral generic_atmel_peripherals[] __initdata = {
/* Touchpad */
{
.hid = "ATML0000",
.swnode = {
.properties = chromebook_pixel_trackpad_props,
},
},
/* Touchsceen */
{
.hid = "ATML0001",
.swnode = {
.properties = chromebook_atmel_touchscreen_props,
},
},
};
DECLARE_ACPI_CROS_LAPTOP(generic_atmel);
static const struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
{
.ident = "Samsung Series 5 550",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG"),
DMI_MATCH(DMI_PRODUCT_NAME, "Lumpy"),
},
.driver_data = (void *)&samsung_series_5_550,
},
{
.ident = "Samsung Series 5",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Alex"),
},
.driver_data = (void *)&samsung_series_5,
},
{
.ident = "Chromebook Pixel",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
},
.driver_data = (void *)&chromebook_pixel,
},
{
.ident = "Wolf",
.matches = {
DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
DMI_MATCH(DMI_PRODUCT_NAME, "Wolf"),
},
.driver_data = (void *)&dell_chromebook_11,
},
{
.ident = "HP Chromebook 14",
.matches = {
DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
DMI_MATCH(DMI_PRODUCT_NAME, "Falco"),
},
.driver_data = (void *)&hp_chromebook_14,
},
{
.ident = "Toshiba CB35",
.matches = {
DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
DMI_MATCH(DMI_PRODUCT_NAME, "Leon"),
},
.driver_data = (void *)&toshiba_cb35,
},
{
.ident = "Acer C7 Chromebook",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Parrot"),
},
.driver_data = (void *)&acer_c7_chromebook,
},
{
.ident = "Acer AC700",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
},
.driver_data = (void *)&acer_ac700,
},
{
.ident = "Acer C720",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Peppy"),
},
.driver_data = (void *)&acer_c720,
},
{
.ident = "HP Pavilion 14 Chromebook",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Butterfly"),
},
.driver_data = (void *)&hp_pavilion_14_chromebook,
},
{
.ident = "Cr-48",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Mario"),
},
.driver_data = (void *)&cr48,
},
/* Devices with peripherals incompletely described in ACPI */
{
.ident = "Chromebook Pro",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Google"),
DMI_MATCH(DMI_PRODUCT_NAME, "Caroline"),
},
.driver_data = (void *)&samus,
},
{
.ident = "Google Pixel 2 (2015)",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
DMI_MATCH(DMI_PRODUCT_NAME, "Samus"),
},
.driver_data = (void *)&samus,
},
{
.ident = "Samsung Chromebook 3",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
},
.driver_data = (void *)&samus,
},
{
/*
* Other Chromebooks with Atmel touch controllers:
* - Winky (touchpad)
* - Clapper, Expresso, Rambi, Glimmer (touchscreen)
*/
.ident = "Other Chromebook",
.matches = {
/*
* This will match all Google devices, not only devices
* with Atmel, but we will validate that the device
* actually has matching peripherals.
*/
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
},
.driver_data = (void *)&generic_atmel,
},
{ }
};
MODULE_DEVICE_TABLE(dmi, chromeos_laptop_dmi_table);
/*
 * One-shot scan at init time over adapters/clients registered before our
 * bus notifier was installed, so no device is missed.
 */
static int __init chromeos_laptop_scan_peripherals(struct device *dev, void *data)
{
	int error;

	if (dev->type == &i2c_adapter_type) {
		chromeos_laptop_check_adapter(to_i2c_adapter(dev));
	} else if (dev->type == &i2c_client_type) {
		if (chromeos_laptop_adjust_client(to_i2c_client(dev))) {
			/*
			 * Now that we have needed properties re-trigger
			 * driver probe in case driver was initialized
			 * earlier and probe failed.
			 */
			error = device_attach(dev);
			if (error < 0)
				dev_warn(dev,
					 "%s: device_attach() failed: %d\n",
					 __func__, error);
		}
	}

	return 0;
}
/*
 * Look up a named onboard DMI device and return the IRQ number the
 * firmware stores in its "instance" field, or a negative errno.
 */
static int __init chromeos_laptop_get_irq_from_dmi(const char *dmi_name)
{
	const struct dmi_device *dmi_dev;
	const struct dmi_dev_onboard *dev_data;

	dmi_dev = dmi_find_device(DMI_DEV_TYPE_DEV_ONBOARD, dmi_name, NULL);
	if (!dmi_dev) {
		pr_err("failed to find DMI device '%s'\n", dmi_name);
		return -ENOENT;
	}

	dev_data = dmi_dev->device_data;
	if (!dev_data) {
		pr_err("failed to get data from DMI for '%s'\n", dmi_name);
		return -EINVAL;
	}

	return dev_data->instance;
}
/*
 * If the peripheral advertises a DMI name, resolve its IRQ from DMI and
 * attach it to the board info as an IRQ resource; peripherals without a
 * DMI name are left without one.
 */
static int __init chromeos_laptop_setup_irq(struct i2c_peripheral *i2c_dev)
{
	int irq;

	if (i2c_dev->dmi_name) {
		irq = chromeos_laptop_get_irq_from_dmi(i2c_dev->dmi_name);
		if (irq < 0)
			return irq;

		i2c_dev->irq_resource = (struct resource)
			DEFINE_RES_NAMED(irq, 1, NULL,
					 IORESOURCE_IRQ | i2c_dev->irqflags);
		i2c_dev->board_info.resources = &i2c_dev->irq_resource;
		i2c_dev->board_info.num_resources = 1;
	}

	return 0;
}
/*
 * Deep-copy the board's static I2C peripheral table into a mutable runtime
 * copy: resolve DMI IRQs and turn static property lists into software-node
 * fwnodes.  On failure, unwind the fwnodes created so far and free the copy.
 */
static int __init
chromeos_laptop_prepare_i2c_peripherals(struct chromeos_laptop *cros_laptop,
					const struct chromeos_laptop *src)
{
	struct i2c_peripheral *i2c_peripherals;
	struct i2c_peripheral *i2c_dev;
	struct i2c_board_info *info;
	int i;
	int error;

	if (!src->num_i2c_peripherals)
		return 0;

	i2c_peripherals = kmemdup(src->i2c_peripherals,
				  src->num_i2c_peripherals *
					sizeof(*src->i2c_peripherals),
				  GFP_KERNEL);
	if (!i2c_peripherals)
		return -ENOMEM;

	for (i = 0; i < src->num_i2c_peripherals; i++) {
		i2c_dev = &i2c_peripherals[i];
		info = &i2c_dev->board_info;

		error = chromeos_laptop_setup_irq(i2c_dev);
		if (error)
			goto err_out;

		/* Create primary fwnode for the device - copies everything */
		if (i2c_dev->properties) {
			info->fwnode = fwnode_create_software_node(i2c_dev->properties, NULL);
			if (IS_ERR(info->fwnode)) {
				error = PTR_ERR(info->fwnode);
				goto err_out;
			}
		}
	}

	cros_laptop->i2c_peripherals = i2c_peripherals;
	cros_laptop->num_i2c_peripherals = src->num_i2c_peripherals;

	return 0;

err_out:
	/* Free the fwnodes of all fully-prepared predecessors. */
	while (--i >= 0) {
		i2c_dev = &i2c_peripherals[i];
		info = &i2c_dev->board_info;
		if (!IS_ERR_OR_NULL(info->fwnode))
			fwnode_remove_software_node(info->fwnode);
	}
	kfree(i2c_peripherals);
	return error;
}
/*
 * Build a compacted runtime copy of the board's ACPI peripheral table,
 * keeping only the devices actually present on this system, and deep-copy
 * each entry's software-node properties.
 */
static int __init
chromeos_laptop_prepare_acpi_peripherals(struct chromeos_laptop *cros_laptop,
					const struct chromeos_laptop *src)
{
	struct acpi_peripheral *acpi_peripherals;
	struct acpi_peripheral *acpi_dev;
	const struct acpi_peripheral *src_dev;
	int n_peripherals = 0;
	int i;
	int error;

	/* Count only the peripherals whose ACPI device is present. */
	for (i = 0; i < src->num_acpi_peripherals; i++) {
		if (acpi_dev_present(src->acpi_peripherals[i].hid, NULL, -1))
			n_peripherals++;
	}

	if (!n_peripherals)
		return 0;

	acpi_peripherals = kcalloc(n_peripherals,
				   sizeof(*src->acpi_peripherals),
				   GFP_KERNEL);
	if (!acpi_peripherals)
		return -ENOMEM;

	acpi_dev = acpi_peripherals;
	for (i = 0; i < src->num_acpi_peripherals; i++) {
		src_dev = &src->acpi_peripherals[i];
		if (!acpi_dev_present(src_dev->hid, NULL, -1))
			continue;

		*acpi_dev = *src_dev;

		/* We need to deep-copy properties */
		if (src_dev->swnode.properties) {
			acpi_dev->swnode.properties =
				property_entries_dup(src_dev->swnode.properties);
			if (IS_ERR(acpi_dev->swnode.properties)) {
				error = PTR_ERR(acpi_dev->swnode.properties);
				goto err_out;
			}
		}

		acpi_dev++;
	}

	cros_laptop->acpi_peripherals = acpi_peripherals;
	cros_laptop->num_acpi_peripherals = n_peripherals;

	return 0;

err_out:
	/*
	 * Walk back over the entries that were actually filled in.  The
	 * destination array is compacted (absent devices are skipped), so
	 * it must not be indexed with the source index "i", which can
	 * exceed n_peripherals - 1 and run past the allocation.  The entry
	 * at "acpi_dev" holds the failing ERR_PTR and is deliberately
	 * excluded.
	 */
	while (acpi_dev > acpi_peripherals) {
		acpi_dev--;
		if (!IS_ERR_OR_NULL(acpi_dev->swnode.properties))
			property_entries_free(acpi_dev->swnode.properties);
	}

	kfree(acpi_peripherals);
	return error;
}
/*
 * Release everything chromeos_laptop_prepare() built: unregister the I2C
 * clients we created, detach software nodes from ACPI clients, free the
 * duplicated property lists and the peripheral tables themselves.
 */
static void chromeos_laptop_destroy(const struct chromeos_laptop *cros_laptop)
{
	const struct acpi_peripheral *acpi_dev;
	struct i2c_peripheral *i2c_dev;
	int i;

	for (i = 0; i < cros_laptop->num_i2c_peripherals; i++) {
		i2c_dev = &cros_laptop->i2c_peripherals[i];
		/* i2c_unregister_device() tolerates NULL clients. */
		i2c_unregister_device(i2c_dev->client);
	}

	for (i = 0; i < cros_laptop->num_acpi_peripherals; i++) {
		acpi_dev = &cros_laptop->acpi_peripherals[i];

		if (acpi_dev->client)
			device_remove_software_node(&acpi_dev->client->dev);

		property_entries_free(acpi_dev->swnode.properties);
	}

	kfree(cros_laptop->i2c_peripherals);
	kfree(cros_laptop->acpi_peripherals);
	kfree(cros_laptop);
}
/*
 * Build the runtime chromeos_laptop instance from the DMI-matched static
 * description.  Returns an ERR_PTR() on failure.
 */
static struct chromeos_laptop * __init
chromeos_laptop_prepare(const struct chromeos_laptop *src)
{
	struct chromeos_laptop *cros_laptop;
	int error;

	cros_laptop = kzalloc(sizeof(*cros_laptop), GFP_KERNEL);
	if (!cros_laptop)
		return ERR_PTR(-ENOMEM);

	error = chromeos_laptop_prepare_i2c_peripherals(cros_laptop, src);
	if (!error)
		error = chromeos_laptop_prepare_acpi_peripherals(cros_laptop,
								 src);

	if (error) {
		chromeos_laptop_destroy(cros_laptop);
		return ERR_PTR(error);
	}

	return cros_laptop;
}
/*
 * Module init: identify the machine via DMI, prepare its peripheral
 * tables, install the I2C bus notifier, then scan devices that already
 * exist.
 */
static int __init chromeos_laptop_init(void)
{
	const struct dmi_system_id *dmi_id;
	int error;

	dmi_id = dmi_first_match(chromeos_laptop_dmi_table);
	if (!dmi_id) {
		pr_debug("unsupported system\n");
		return -ENODEV;
	}

	pr_debug("DMI Matched %s\n", dmi_id->ident);

	cros_laptop = chromeos_laptop_prepare((void *)dmi_id->driver_data);
	if (IS_ERR(cros_laptop))
		return PTR_ERR(cros_laptop);

	/* The generic DMI entry may match systems with none of our devices. */
	if (!cros_laptop->num_i2c_peripherals &&
	    !cros_laptop->num_acpi_peripherals) {
		pr_debug("no relevant devices detected\n");
		error = -ENODEV;
		goto err_destroy_cros_laptop;
	}

	error = bus_register_notifier(&i2c_bus_type,
				      &chromeos_laptop_i2c_notifier);
	if (error) {
		pr_err("failed to register i2c bus notifier: %d\n",
		       error);
		goto err_destroy_cros_laptop;
	}

	/*
	 * Scan adapters that have been registered and clients that have
	 * been created before we installed the notifier to make sure
	 * we do not miss any devices.
	 */
	i2c_for_each_dev(NULL, chromeos_laptop_scan_peripherals);

	return 0;

err_destroy_cros_laptop:
	chromeos_laptop_destroy(cros_laptop);
	return error;
}
/* Module exit: stop listening for I2C devices, then free all state. */
static void __exit chromeos_laptop_exit(void)
{
	bus_unregister_notifier(&i2c_bus_type, &chromeos_laptop_i2c_notifier);
	chromeos_laptop_destroy(cros_laptop);
}
module_init(chromeos_laptop_init);
module_exit(chromeos_laptop_exit);
MODULE_DESCRIPTION("Chrome OS Laptop driver");
MODULE_AUTHOR("Benson Leung <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/chrome/chromeos_laptop.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 Google LLC
*
* Sysfs properties to view and modify EC-controlled features on Wilco devices.
* The entries will appear under /sys/bus/platform/devices/GOOG000C:00/
*
* See Documentation/ABI/testing/sysfs-platform-wilco-ec for more information.
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/platform_data/wilco-ec.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#define CMD_KB_CMOS 0x7C
#define SUB_CMD_KB_CMOS_AUTO_ON 0x03
struct boot_on_ac_request {
u8 cmd; /* Always CMD_KB_CMOS */
u8 reserved1;
u8 sub_cmd; /* Always SUB_CMD_KB_CMOS_AUTO_ON */
u8 reserved3to5[3];
u8 val; /* Either 0 or 1 */
u8 reserved7;
} __packed;
#define CMD_USB_CHARGE 0x39
enum usb_charge_op {
USB_CHARGE_GET = 0,
USB_CHARGE_SET = 1,
};
struct usb_charge_request {
u8 cmd; /* Always CMD_USB_CHARGE */
u8 reserved;
u8 op; /* One of enum usb_charge_op */
u8 val; /* When setting, either 0 or 1 */
} __packed;
struct usb_charge_response {
u8 reserved;
u8 status; /* Set by EC to 0 on success, other value on failure */
u8 val; /* When getting, set by EC to either 0 or 1 */
} __packed;
#define CMD_EC_INFO 0x38
enum get_ec_info_op {
CMD_GET_EC_LABEL = 0,
CMD_GET_EC_REV = 1,
CMD_GET_EC_MODEL = 2,
CMD_GET_EC_BUILD_DATE = 3,
};
struct get_ec_info_req {
u8 cmd; /* Always CMD_EC_INFO */
u8 reserved;
u8 op; /* One of enum get_ec_info_op */
} __packed;
struct get_ec_info_resp {
u8 reserved[2];
char value[9]; /* __nonstring: might not be null terminated */
} __packed;
/*
 * Write "0" or "1" to enable/disable automatic power-on when AC power
 * is connected. Any other value is rejected with -EINVAL.
 */
static ssize_t boot_on_ac_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct wilco_ec_device *ec = dev_get_drvdata(dev);
	struct boot_on_ac_request rq = {
		.cmd = CMD_KB_CMOS,
		.sub_cmd = SUB_CMD_KB_CMOS_AUTO_ON,
	};
	struct wilco_ec_message msg = {
		.type = WILCO_EC_MSG_LEGACY,
		.request_data = &rq,
		.request_size = sizeof(rq),
	};
	u8 enable;
	int err;

	err = kstrtou8(buf, 10, &enable);
	if (err < 0)
		return err;
	if (enable > 1)
		return -EINVAL;

	rq.val = enable;

	err = wilco_ec_mailbox(ec, &msg);
	if (err < 0)
		return err;

	return count;
}
static DEVICE_ATTR_WO(boot_on_ac);
/*
 * Query one string-valued piece of EC information (selected by @op) and
 * print it into the sysfs output buffer @buf.
 */
static ssize_t get_info(struct device *dev, char *buf, enum get_ec_info_op op)
{
	struct wilco_ec_device *ec = dev_get_drvdata(dev);
	struct get_ec_info_req req;
	struct get_ec_info_resp resp;
	struct wilco_ec_message msg;
	int err;

	memset(&req, 0, sizeof(req));
	req.cmd = CMD_EC_INFO;
	req.op = op;

	memset(&msg, 0, sizeof(msg));
	msg.type = WILCO_EC_MSG_LEGACY;
	msg.request_data = &req;
	msg.request_size = sizeof(req);
	msg.response_data = &resp;
	msg.response_size = sizeof(resp);

	err = wilco_ec_mailbox(ec, &msg);
	if (err < 0)
		return err;

	/* resp.value may not be NUL terminated; bound the print by its size. */
	return sysfs_emit(buf, "%.*s\n", (int)sizeof(resp.value),
			  (char *)&resp.value);
}
/* Show the EC firmware version label. */
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return get_info(dev, buf, CMD_GET_EC_LABEL);
}
static DEVICE_ATTR_RO(version);
/* Show the EC firmware build revision. */
static ssize_t build_revision_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return get_info(dev, buf, CMD_GET_EC_REV);
}
static DEVICE_ATTR_RO(build_revision);
/* Show the EC firmware build date. */
static ssize_t build_date_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return get_info(dev, buf, CMD_GET_EC_BUILD_DATE);
}
static DEVICE_ATTR_RO(build_date);
/* Show the EC model number. */
static ssize_t model_number_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	return get_info(dev, buf, CMD_GET_EC_MODEL);
}
static DEVICE_ATTR_RO(model_number);
static int send_usb_charge(struct wilco_ec_device *ec,
struct usb_charge_request *rq,
struct usb_charge_response *rs)
{
struct wilco_ec_message msg;
int ret;
memset(&msg, 0, sizeof(msg));
msg.type = WILCO_EC_MSG_LEGACY;
msg.request_data = rq;
msg.request_size = sizeof(*rq);
msg.response_data = rs;
msg.response_size = sizeof(*rs);
ret = wilco_ec_mailbox(ec, &msg);
if (ret < 0)
return ret;
if (rs->status)
return -EIO;
return 0;
}
/* Show whether USB charging (while off/suspended) is enabled: "0" or "1". */
static ssize_t usb_charge_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct wilco_ec_device *ec = dev_get_drvdata(dev);
	struct usb_charge_request rq;
	struct usb_charge_response rs;
	int ret;

	memset(&rq, 0, sizeof(rq));
	rq.cmd = CMD_USB_CHARGE;
	rq.op = USB_CHARGE_GET;

	ret = send_usb_charge(ec, &rq, &rs);
	if (ret < 0)
		return ret;

	/*
	 * sysfs_emit() is the required helper for sysfs show() methods
	 * (it bounds the write to PAGE_SIZE); matches get_info() above.
	 */
	return sysfs_emit(buf, "%d\n", rs.val);
}
/* Write "0" or "1" to disable/enable the USB charging feature. */
static ssize_t usb_charge_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct wilco_ec_device *ec = dev_get_drvdata(dev);
	struct usb_charge_request rq = {
		.cmd = CMD_USB_CHARGE,
		.op = USB_CHARGE_SET,
	};
	struct usb_charge_response rs;
	u8 enable;
	int err;

	err = kstrtou8(buf, 10, &enable);
	if (err < 0)
		return err;
	if (enable > 1)
		return -EINVAL;

	rq.val = enable;

	err = send_usb_charge(ec, &rq, &rs);
	if (err < 0)
		return err;

	return count;
}
static DEVICE_ATTR_RW(usb_charge);
static struct attribute *wilco_dev_attrs[] = {
&dev_attr_boot_on_ac.attr,
&dev_attr_build_date.attr,
&dev_attr_build_revision.attr,
&dev_attr_model_number.attr,
&dev_attr_usb_charge.attr,
&dev_attr_version.attr,
NULL,
};
static const struct attribute_group wilco_dev_attr_group = {
.attrs = wilco_dev_attrs,
};
/* Create the EC's sysfs attribute group under the parent platform device. */
int wilco_ec_add_sysfs(struct wilco_ec_device *ec)
{
	return sysfs_create_group(&ec->dev->kobj, &wilco_dev_attr_group);
}
/* Remove the attribute group created by wilco_ec_add_sysfs(). */
void wilco_ec_remove_sysfs(struct wilco_ec_device *ec)
{
	sysfs_remove_group(&ec->dev->kobj, &wilco_dev_attr_group);
}
| linux-master | drivers/platform/chrome/wilco_ec/sysfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* debugfs attributes for Wilco EC
*
* Copyright 2019 Google LLC
*
* See Documentation/ABI/testing/debugfs-wilco-ec for usage.
*/
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/platform_data/wilco-ec.h>
#include <linux/platform_device.h>
#define DRV_NAME "wilco-ec-debugfs"
/* The raw bytes will take up more space when represented as a hex string */
#define FORMATTED_BUFFER_SIZE (EC_MAILBOX_DATA_SIZE * 4)
struct wilco_ec_debugfs {
struct wilco_ec_device *ec;
struct dentry *dir;
size_t response_size;
u8 raw_data[EC_MAILBOX_DATA_SIZE];
u8 formatted_data[FORMATTED_BUFFER_SIZE];
};
static struct wilco_ec_debugfs *debug_info;
/**
* parse_hex_sentence() - Convert a ascii hex representation into byte array.
* @in: Input buffer of ascii.
* @isize: Length of input buffer.
* @out: Output buffer.
* @osize: Length of output buffer, e.g. max number of bytes to parse.
*
* An valid input is a series of ascii hexadecimal numbers, separated by spaces.
* An example valid input is
* " 00 f2 0 000076 6 0 ff"
*
* If an individual "word" within the hex sentence is longer than MAX_WORD_SIZE,
* then the sentence is illegal, and parsing will fail.
*
* Return: Number of bytes parsed, or negative error code on failure.
*/
static int parse_hex_sentence(const char *in, int isize, u8 *out, int osize)
{
/* Longest run of chars allowed to represent a single byte */
#define MAX_WORD_SIZE 16
	char word[MAX_WORD_SIZE + 1];
	int n_out = 0;
	int start = 0;
	u8 parsed;

	while (start < isize && n_out < osize) {
		int end, len;

		/* Advance past whitespace to the start of the next word */
		while (start < isize && isspace(in[start]))
			start++;

		/* Nothing but trailing whitespace left? */
		if (start >= isize)
			break;

		/* Scan to the end of this word */
		end = start;
		while (end < isize && !isspace(in[end]))
			end++;

		len = end - start;
		if (len > MAX_WORD_SIZE)
			return -EINVAL;

		/* NUL-terminate a private copy so kstrtou8() can parse it */
		memcpy(word, in + start, len);
		word[len] = '\0';

		/*
		 * On parse failure just return -EINVAL; a word-specific
		 * error code would only be confusing to the caller.
		 */
		if (kstrtou8(word, 16, &parsed))
			return -EINVAL;
		out[n_out++] = parsed;

		start = end;
	}

	return n_out;
}
/* The message type takes up two bytes*/
#define TYPE_AND_DATA_SIZE ((EC_MAILBOX_DATA_SIZE) + 2)
/*
 * Parse an ASCII hex sentence from userspace into raw bytes, send them to
 * the EC as a mailbox request, and stash the response for a later raw_read().
 * The first two parsed bytes form the big-endian message type; the rest is
 * request payload.
 */
static ssize_t raw_write(struct file *file, const char __user *user_buf,
			 size_t count, loff_t *ppos)
{
	char *buf = debug_info->formatted_data;
	struct wilco_ec_message msg;
	u8 request_data[TYPE_AND_DATA_SIZE];
	ssize_t kcount;
	int ret;

	if (count > FORMATTED_BUFFER_SIZE)
		return -EINVAL;

	kcount = simple_write_to_buffer(buf, FORMATTED_BUFFER_SIZE, ppos,
					user_buf, count);
	if (kcount < 0)
		return kcount;

	ret = parse_hex_sentence(buf, kcount, request_data, TYPE_AND_DATA_SIZE);
	if (ret < 0)
		return ret;
	/* Need at least two bytes for message type and one byte of data */
	if (ret < 3)
		return -EINVAL;

	msg.type = request_data[0] << 8 | request_data[1];
	msg.flags = 0;
	msg.request_data = request_data + 2;
	msg.request_size = ret - 2;
	/* Zero the response buffer so a short reply isn't mixed with stale data */
	memset(debug_info->raw_data, 0, sizeof(debug_info->raw_data));
	msg.response_data = debug_info->raw_data;
	msg.response_size = EC_MAILBOX_DATA_SIZE;

	ret = wilco_ec_mailbox(debug_info->ec, &msg);
	if (ret < 0)
		return ret;
	/* Remember how many bytes are available for the next raw_read() */
	debug_info->response_size = ret;

	return count;
}
/*
 * Return the hex-formatted response captured by the preceding raw_write().
 * The response is consumed on first read; subsequent reads see EOF until
 * another command is written.
 */
static ssize_t raw_read(struct file *file, char __user *user_buf, size_t count,
			loff_t *ppos)
{
	int fmt_len = 0;

	if (debug_info->response_size) {
		fmt_len = hex_dump_to_buffer(debug_info->raw_data,
					     debug_info->response_size,
					     16, 1, debug_info->formatted_data,
					     sizeof(debug_info->formatted_data),
					     true);
		/* Only return response the first time it is read */
		debug_info->response_size = 0;
	}

	return simple_read_from_buffer(user_buf, count, ppos,
				       debug_info->formatted_data, fmt_len);
}
static const struct file_operations fops_raw = {
.owner = THIS_MODULE,
.read = raw_read,
.write = raw_write,
.llseek = no_llseek,
};
#define CMD_KB_CHROME 0x88
#define SUB_CMD_H1_GPIO 0x0A
#define SUB_CMD_TEST_EVENT 0x0B
struct ec_request {
u8 cmd; /* Always CMD_KB_CHROME */
u8 reserved;
u8 sub_cmd;
} __packed;
struct ec_response {
u8 status; /* 0 if allowed */
u8 val;
} __packed;
/*
 * Send one CMD_KB_CHROME sub-command to the EC and return its value byte
 * through @out_val.
 *
 * Return: 0 on success, -EIO if the EC refused the command, or a negative
 * errno from the mailbox layer.
 */
static int send_ec_cmd(struct wilco_ec_device *ec, u8 sub_cmd, u8 *out_val)
{
	struct ec_request rq = {
		.cmd = CMD_KB_CHROME,
		.sub_cmd = sub_cmd,
	};
	struct ec_response rs;
	struct wilco_ec_message msg = {
		.type = WILCO_EC_MSG_LEGACY,
		.request_data = &rq,
		.request_size = sizeof(rq),
		.response_data = &rs,
		.response_size = sizeof(rs),
	};
	int err;

	err = wilco_ec_mailbox(ec, &msg);
	if (err < 0)
		return err;
	if (rs.status)
		return -EIO;

	*out_val = rs.val;

	return 0;
}
/**
* h1_gpio_get() - Gets h1 gpio status.
* @arg: The wilco EC device.
* @val: BIT(0)=ENTRY_TO_FACT_MODE, BIT(1)=SPI_CHROME_SEL
*/
static int h1_gpio_get(void *arg, u64 *val)
{
	u8 gpio_status;
	int ret;

	/*
	 * send_ec_cmd() writes exactly one byte, so go through a u8 local
	 * rather than aliasing the u64 result pointer as a u8 *: the old
	 * cast targeted the wrong byte on big-endian and relied on masking
	 * away whatever the upper bytes happened to contain.
	 */
	ret = send_ec_cmd(arg, SUB_CMD_H1_GPIO, &gpio_status);
	if (ret == 0)
		*val = gpio_status;

	return ret;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_h1_gpio, h1_gpio_get, NULL, "0x%02llx\n");
/**
* test_event_set() - Sends command to EC to cause an EC test event.
* @arg: The wilco EC device.
* @val: unused.
*/
static int test_event_set(void *arg, u64 val)
{
	u8 unused;

	/* The EC returns a value byte we have no use for; only status matters. */
	return send_ec_cmd(arg, SUB_CMD_TEST_EVENT, &unused);
}
/* Format is unused since it is only required for get method which is NULL */
DEFINE_DEBUGFS_ATTRIBUTE(fops_test_event, NULL, test_event_set, "%llu\n");
/**
* wilco_ec_debugfs_probe() - Create the debugfs node
* @pdev: The platform device, probably created in core.c
*
* Try to create a debugfs node. If it fails, then we don't want to change
* behavior at all, this is for debugging after all. Just fail silently.
*
* Return: 0 always.
*/
static int wilco_ec_debugfs_probe(struct platform_device *pdev)
{
	struct wilco_ec_device *ec = dev_get_drvdata(pdev->dev.parent);

	debug_info = devm_kzalloc(&pdev->dev, sizeof(*debug_info), GFP_KERNEL);
	if (!debug_info)
		/* debugfs is best-effort: fail silently, per function doc */
		return 0;
	debug_info->ec = ec;
	/* debugfs_create_* tolerate an error-valued dir, so no checks needed */
	debug_info->dir = debugfs_create_dir("wilco_ec", NULL);

	debugfs_create_file("raw", 0644, debug_info->dir, NULL, &fops_raw);
	debugfs_create_file("h1_gpio", 0444, debug_info->dir, ec,
			    &fops_h1_gpio);
	debugfs_create_file("test_event", 0200, debug_info->dir, ec,
			    &fops_test_event);

	return 0;
}
/* Tear down the entire wilco_ec debugfs directory created at probe time. */
static int wilco_ec_debugfs_remove(struct platform_device *pdev)
{
	debugfs_remove_recursive(debug_info->dir);
	return 0;
}
static struct platform_driver wilco_ec_debugfs_driver = {
.driver = {
.name = DRV_NAME,
},
.probe = wilco_ec_debugfs_probe,
.remove = wilco_ec_debugfs_remove,
};
module_platform_driver(wilco_ec_debugfs_driver);
MODULE_ALIAS("platform:" DRV_NAME);
MODULE_AUTHOR("Nick Crews <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Wilco EC debugfs driver");
| linux-master | drivers/platform/chrome/wilco_ec/debugfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 Google LLC
*/
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/platform_data/wilco-ec.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/unaligned.h>
/* Operation code; what the EC should do with the property */
enum ec_property_op {
EC_OP_GET = 0,
EC_OP_SET = 1,
};
struct ec_property_request {
u8 op; /* One of enum ec_property_op */
u8 property_id[4]; /* The 32 bit PID is stored Little Endian */
u8 length;
u8 data[WILCO_EC_PROPERTY_MAX_SIZE];
} __packed;
struct ec_property_response {
u8 reserved[2];
u8 op; /* One of enum ec_property_op */
u8 property_id[4]; /* The 32 bit PID is stored Little Endian */
u8 length;
u8 data[WILCO_EC_PROPERTY_MAX_SIZE];
} __packed;
static int send_property_msg(struct wilco_ec_device *ec,
struct ec_property_request *rq,
struct ec_property_response *rs)
{
struct wilco_ec_message ec_msg;
int ret;
memset(&ec_msg, 0, sizeof(ec_msg));
ec_msg.type = WILCO_EC_MSG_PROPERTY;
ec_msg.request_data = rq;
ec_msg.request_size = sizeof(*rq);
ec_msg.response_data = rs;
ec_msg.response_size = sizeof(*rs);
ret = wilco_ec_mailbox(ec, &ec_msg);
if (ret < 0)
return ret;
if (rs->op != rq->op)
return -EBADMSG;
if (memcmp(rq->property_id, rs->property_id, sizeof(rs->property_id)))
return -EBADMSG;
return 0;
}
/**
 * wilco_ec_get_property() - Read a property from the EC.
 * @ec: EC device to query.
 * @prop_msg: On entry contains the property_id to read; on success the
 *            length and data fields are filled from the EC's response.
 *
 * Return: 0 on success, negative error code on failure.
 */
int wilco_ec_get_property(struct wilco_ec_device *ec,
			  struct wilco_ec_property_msg *prop_msg)
{
	struct ec_property_request rq;
	struct ec_property_response rs;
	int ret;

	memset(&rq, 0, sizeof(rq));
	rq.op = EC_OP_GET;
	/* The 32 bit property id is transmitted little-endian */
	put_unaligned_le32(prop_msg->property_id, rq.property_id);

	ret = send_property_msg(ec, &rq, &rs);
	if (ret < 0)
		return ret;

	prop_msg->length = rs.length;
	memcpy(prop_msg->data, rs.data, rs.length);

	return 0;
}
EXPORT_SYMBOL_GPL(wilco_ec_get_property);
/**
 * wilco_ec_set_property() - Write a property to the EC.
 * @ec: EC device to write to.
 * @prop_msg: Contains the property_id, length, and data to store.
 *
 * Return: 0 on success, -EBADMSG if the EC does not acknowledge the full
 * length, or another negative error code on failure.
 */
int wilco_ec_set_property(struct wilco_ec_device *ec,
			  struct wilco_ec_property_msg *prop_msg)
{
	struct ec_property_request rq;
	struct ec_property_response rs;
	int ret;

	memset(&rq, 0, sizeof(rq));
	rq.op = EC_OP_SET;
	/* The 32 bit property id is transmitted little-endian */
	put_unaligned_le32(prop_msg->property_id, rq.property_id);
	rq.length = prop_msg->length;
	memcpy(rq.data, prop_msg->data, prop_msg->length);

	ret = send_property_msg(ec, &rq, &rs);
	if (ret < 0)
		return ret;
	if (rs.length != prop_msg->length)
		return -EBADMSG;

	return 0;
}
EXPORT_SYMBOL_GPL(wilco_ec_set_property);
/**
 * wilco_ec_get_byte_property() - Read a single-byte property from the EC.
 * @ec: EC device to query.
 * @property_id: Which property to read.
 * @val: Filled with the property's value on success.
 *
 * Return: 0 on success, -EBADMSG if the property is not exactly one byte,
 * or another negative error code on failure.
 */
int wilco_ec_get_byte_property(struct wilco_ec_device *ec, u32 property_id,
			       u8 *val)
{
	struct wilco_ec_property_msg msg;
	int ret;

	msg.property_id = property_id;

	ret = wilco_ec_get_property(ec, &msg);
	if (ret < 0)
		return ret;

	if (msg.length != 1)
		return -EBADMSG;

	*val = msg.data[0];

	return 0;
}
EXPORT_SYMBOL_GPL(wilco_ec_get_byte_property);
/**
 * wilco_ec_set_byte_property() - Write a single-byte property to the EC.
 * @ec: EC device to write to.
 * @property_id: Which property to set.
 * @val: The byte value to store.
 *
 * Return: 0 on success, negative error code on failure.
 */
int wilco_ec_set_byte_property(struct wilco_ec_device *ec, u32 property_id,
			       u8 val)
{
	struct wilco_ec_property_msg msg;

	msg.property_id = property_id;
	msg.data[0] = val;
	msg.length = 1;

	return wilco_ec_set_property(ec, &msg);
}
EXPORT_SYMBOL_GPL(wilco_ec_set_byte_property);
| linux-master | drivers/platform/chrome/wilco_ec/properties.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Telemetry communication for Wilco EC
*
* Copyright 2019 Google LLC
*
* The Wilco Embedded Controller is able to send telemetry data
* which is useful for enterprise applications. A daemon running on
* the OS sends a command to the EC via a write() to a char device,
* and can read the response with a read(). The write() request is
* verified by the driver to ensure that it is performing only one
* of the allowlisted commands, and that no extraneous data is
* being transmitted to the EC. The response is passed directly
* back to the reader with no modification.
*
* The character device will appear as /dev/wilco_telemN, where N
* is some small non-negative integer, starting with 0. Only one
* process may have the file descriptor open at a time. The calling
* userspace program needs to keep the device file descriptor open
* between the calls to write() and read() in order to preserve the
* response. Up to 32 bytes will be available for reading.
*
* For testing purposes, try requesting the EC's firmware build
* date, by sending the WILCO_EC_TELEM_GET_VERSION command with
* argument index=3. i.e. write [0x38, 0x00, 0x03]
* to the device node. An ASCII string of the build date is
* returned.
*/
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/platform_data/wilco-ec.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#define TELEM_DEV_NAME "wilco_telem"
#define TELEM_CLASS_NAME TELEM_DEV_NAME
#define DRV_NAME TELEM_DEV_NAME
#define TELEM_DEV_NAME_FMT (TELEM_DEV_NAME "%d")
static struct class telem_class = {
.name = TELEM_CLASS_NAME,
};
/* Keep track of all the device numbers used. */
#define TELEM_MAX_DEV 128
static int telem_major;
static DEFINE_IDA(telem_ida);
/* EC telemetry command codes */
#define WILCO_EC_TELEM_GET_LOG 0x99
#define WILCO_EC_TELEM_GET_VERSION 0x38
#define WILCO_EC_TELEM_GET_FAN_INFO 0x2E
#define WILCO_EC_TELEM_GET_DIAG_INFO 0xFA
#define WILCO_EC_TELEM_GET_TEMP_INFO 0x95
#define WILCO_EC_TELEM_GET_TEMP_READ 0x2C
#define WILCO_EC_TELEM_GET_BATT_EXT_INFO 0x07
#define WILCO_EC_TELEM_GET_BATT_PPID_INFO 0x8A
#define TELEM_ARGS_SIZE_MAX 30
/*
* The following telem_args_get_* structs are embedded within the |args| field
* of wilco_ec_telem_request.
*/
struct telem_args_get_log {
u8 log_type;
u8 log_index;
} __packed;
/*
* Get a piece of info about the EC firmware version:
* 0 = label
* 1 = svn_rev
* 2 = model_no
* 3 = build_date
* 4 = frio_version
*/
struct telem_args_get_version {
u8 index;
} __packed;
struct telem_args_get_fan_info {
u8 command;
u8 fan_number;
u8 arg;
} __packed;
struct telem_args_get_diag_info {
u8 type;
u8 sub_type;
} __packed;
struct telem_args_get_temp_info {
u8 command;
u8 index;
u8 field;
u8 zone;
} __packed;
struct telem_args_get_temp_read {
u8 sensor_index;
} __packed;
struct telem_args_get_batt_ext_info {
u8 var_args[5];
} __packed;
struct telem_args_get_batt_ppid_info {
u8 always1; /* Should always be 1 */
} __packed;
/**
* struct wilco_ec_telem_request - Telemetry command and arguments sent to EC.
* @command: One of WILCO_EC_TELEM_GET_* command codes.
* @reserved: Must be 0.
* @args: The first N bytes are one of telem_args_get_* structs, the rest is 0.
*/
struct wilco_ec_telem_request {
u8 command;
u8 reserved;
union {
u8 buf[TELEM_ARGS_SIZE_MAX];
struct telem_args_get_log get_log;
struct telem_args_get_version get_version;
struct telem_args_get_fan_info get_fan_info;
struct telem_args_get_diag_info get_diag_info;
struct telem_args_get_temp_info get_temp_info;
struct telem_args_get_temp_read get_temp_read;
struct telem_args_get_batt_ext_info get_batt_ext_info;
struct telem_args_get_batt_ppid_info get_batt_ppid_info;
} args;
} __packed;
/**
* check_telem_request() - Ensure that a request from userspace is valid.
* @rq: Request buffer copied from userspace.
* @size: Number of bytes copied from userspace.
*
* Return: 0 if valid, -EINVAL if bad command or reserved byte is non-zero,
* -EMSGSIZE if the request is too long.
*
* We do not want to allow userspace to send arbitrary telemetry commands to
* the EC. Therefore we check to ensure that
* 1. The request follows the format of struct wilco_ec_telem_request.
* 2. The supplied command code is one of the allowlisted commands.
* 3. The request only contains the necessary data for the header and arguments.
*/
static int check_telem_request(struct wilco_ec_telem_request *rq,
			       size_t size)
{
	/* Start with the header size; each allowed command adds its args size */
	size_t max_size = offsetof(struct wilco_ec_telem_request, args);

	if (rq->reserved)
		return -EINVAL;

	switch (rq->command) {
	case WILCO_EC_TELEM_GET_LOG:
		max_size += sizeof(rq->args.get_log);
		break;
	case WILCO_EC_TELEM_GET_VERSION:
		max_size += sizeof(rq->args.get_version);
		break;
	case WILCO_EC_TELEM_GET_FAN_INFO:
		max_size += sizeof(rq->args.get_fan_info);
		break;
	case WILCO_EC_TELEM_GET_DIAG_INFO:
		max_size += sizeof(rq->args.get_diag_info);
		break;
	case WILCO_EC_TELEM_GET_TEMP_INFO:
		max_size += sizeof(rq->args.get_temp_info);
		break;
	case WILCO_EC_TELEM_GET_TEMP_READ:
		max_size += sizeof(rq->args.get_temp_read);
		break;
	case WILCO_EC_TELEM_GET_BATT_EXT_INFO:
		max_size += sizeof(rq->args.get_batt_ext_info);
		break;
	case WILCO_EC_TELEM_GET_BATT_PPID_INFO:
		if (rq->args.get_batt_ppid_info.always1 != 1)
			return -EINVAL;

		max_size += sizeof(rq->args.get_batt_ppid_info);
		break;
	default:
		/* Any command not on the allowlist is rejected */
		return -EINVAL;
	}

	/* Reject requests carrying more data than the command's arguments */
	return (size <= max_size) ? 0 : -EMSGSIZE;
}
/**
* struct telem_device_data - Data for a Wilco EC device that queries telemetry.
* @cdev: Char dev that userspace reads and polls from.
* @dev: Device associated with the %cdev.
* @ec: Wilco EC that we will be communicating with using the mailbox interface.
* @available: Boolean of if the device can be opened.
*/
struct telem_device_data {
struct device dev;
struct cdev cdev;
struct wilco_ec_device *ec;
atomic_t available;
};
#define TELEM_RESPONSE_SIZE EC_MAILBOX_DATA_SIZE
/**
* struct telem_session_data - Data that exists between open() and release().
* @dev_data: Pointer to get back to the device data and EC.
* @request: Command and arguments sent to EC.
* @response: Response buffer of data from EC.
* @has_msg: Is there data available to read from a previous write?
*/
struct telem_session_data {
struct telem_device_data *dev_data;
struct wilco_ec_telem_request request;
u8 response[TELEM_RESPONSE_SIZE];
bool has_msg;
};
/**
* telem_open() - Callback for when the device node is opened.
* @inode: inode for this char device node.
* @filp: file for this char device node.
*
* We need to ensure that after writing a command to the device,
* the same userspace process reads the corresponding result.
* Therefore, we increment a refcount on opening the device, so that
* only one process can communicate with the EC at a time.
*
* Return: 0 on success, or negative error code on failure.
*/
static int telem_open(struct inode *inode, struct file *filp)
{
	struct telem_device_data *dev_data;
	struct telem_session_data *sess_data;

	/* Ensure device isn't already open */
	dev_data = container_of(inode->i_cdev, struct telem_device_data, cdev);
	if (atomic_cmpxchg(&dev_data->available, 1, 0) == 0)
		return -EBUSY;

	/* Hold a ref so the device data outlives a concurrent remove() */
	get_device(&dev_data->dev);

	sess_data = kzalloc(sizeof(*sess_data), GFP_KERNEL);
	if (!sess_data) {
		/* Undo the claim so another process can open the device */
		atomic_set(&dev_data->available, 1);
		return -ENOMEM;
	}
	sess_data->dev_data = dev_data;
	sess_data->has_msg = false;

	/* read/write only; seeking makes no sense on this device */
	stream_open(inode, filp);
	filp->private_data = sess_data;

	return 0;
}
/*
 * Copy a telemetry command from userspace, validate it against the
 * allowlist, forward it to the EC, and stash the response so the next
 * read() on this session can return it.
 */
static ssize_t telem_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct telem_session_data *sess_data = filp->private_data;
	struct wilco_ec_message msg;
	int err;

	if (count > sizeof(sess_data->request))
		return -EMSGSIZE;

	memset(&sess_data->request, 0, sizeof(sess_data->request));
	if (copy_from_user(&sess_data->request, buf, count))
		return -EFAULT;

	/* Refuse anything that is not an allowlisted, well-formed command */
	err = check_telem_request(&sess_data->request, count);
	if (err < 0)
		return err;

	memset(sess_data->response, 0, sizeof(sess_data->response));

	memset(&msg, 0, sizeof(msg));
	msg.type = WILCO_EC_MSG_TELEMETRY;
	msg.request_data = &sess_data->request;
	msg.request_size = sizeof(sess_data->request);
	msg.response_data = sess_data->response;
	msg.response_size = sizeof(sess_data->response);

	err = wilco_ec_mailbox(sess_data->dev_data->ec, &msg);
	if (err < 0)
		return err;
	if (err != sizeof(sess_data->response))
		return -EMSGSIZE;

	sess_data->has_msg = true;

	return count;
}
/*
 * Return the EC response saved by the preceding write(). Each response may
 * be consumed exactly once; a second read without a new write gets -ENODATA.
 */
static ssize_t telem_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct telem_session_data *sess_data = filp->private_data;

	if (!sess_data->has_msg)
		return -ENODATA;
	if (count > sizeof(sess_data->response))
		return -EINVAL;

	if (copy_to_user(buf, sess_data->response, count))
		return -EFAULT;
	sess_data->has_msg = false;

	return count;
}
/* Release the single-open claim and the device ref taken in telem_open(). */
static int telem_release(struct inode *inode, struct file *filp)
{
	struct telem_session_data *sess_data = filp->private_data;

	atomic_set(&sess_data->dev_data->available, 1);
	put_device(&sess_data->dev_data->dev);
	kfree(sess_data);

	return 0;
}
static const struct file_operations telem_fops = {
.open = telem_open,
.write = telem_write,
.read = telem_read,
.release = telem_release,
.llseek = no_llseek,
.owner = THIS_MODULE,
};
/**
* telem_device_free() - Callback to free the telem_device_data structure.
* @d: The device embedded in our device data, which we have been ref counting.
*
* Once all open file descriptors are closed and the device has been removed,
* the refcount of the device will fall to 0 and this will be called.
*/
static void telem_device_free(struct device *d)
{
	struct telem_device_data *dev_data;

	/* Recover our container from the embedded struct device */
	dev_data = container_of(d, struct telem_device_data, dev);
	kfree(dev_data);
}
/**
* telem_device_probe() - Callback when creating a new device.
* @pdev: platform device that we will be receiving telems from.
*
* This finds a free minor number for the device, allocates and initializes
* some device data, and creates a new device and char dev node.
*
* Return: 0 on success, negative error code on failure.
*/
static int telem_device_probe(struct platform_device *pdev)
{
	struct telem_device_data *dev_data;
	int error, minor;

	/* Get the next available device number */
	minor = ida_alloc_max(&telem_ida, TELEM_MAX_DEV-1, GFP_KERNEL);
	if (minor < 0) {
		error = minor;
		dev_err(&pdev->dev, "Failed to find minor number: %d\n", error);
		return error;
	}

	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data) {
		/* ida_free() is the pair of ida_alloc_max(); the old
		 * ida_simple_remove() is the deprecated spelling. */
		ida_free(&telem_ida, minor);
		return -ENOMEM;
	}

	/* Initialize the device data */
	dev_data->ec = dev_get_platdata(&pdev->dev);
	atomic_set(&dev_data->available, 1);
	platform_set_drvdata(pdev, dev_data);

	/* Initialize the device */
	dev_data->dev.devt = MKDEV(telem_major, minor);
	dev_data->dev.class = &telem_class;
	dev_data->dev.release = telem_device_free;
	dev_set_name(&dev_data->dev, TELEM_DEV_NAME_FMT, minor);
	device_initialize(&dev_data->dev);

	/* Initialize the character device and add it to userspace */
	cdev_init(&dev_data->cdev, &telem_fops);
	error = cdev_device_add(&dev_data->cdev, &dev_data->dev);
	if (error) {
		/* put_device() triggers telem_device_free() via the release cb */
		put_device(&dev_data->dev);
		ida_free(&telem_ida, minor);
		return error;
	}

	return 0;
}
/* Remove the char device; the final put_device() frees the device data. */
static int telem_device_remove(struct platform_device *pdev)
{
	struct telem_device_data *dev_data = platform_get_drvdata(pdev);

	cdev_device_del(&dev_data->cdev, &dev_data->dev);
	/* ida_free() pairs with ida_alloc_max() in probe; ida_simple_remove()
	 * is the deprecated name for the same operation. */
	ida_free(&telem_ida, MINOR(dev_data->dev.devt));
	put_device(&dev_data->dev);

	return 0;
}
static struct platform_driver telem_driver = {
.probe = telem_device_probe,
.remove = telem_device_remove,
.driver = {
.name = DRV_NAME,
},
};
/*
 * Module init: register the device class, reserve a char-dev major with
 * TELEM_MAX_DEV minors, then register the platform driver. Unwinds in
 * reverse order on failure.
 */
static int __init telem_module_init(void)
{
	dev_t dev_num = 0;
	int ret;

	ret = class_register(&telem_class);
	if (ret) {
		pr_err(DRV_NAME ": Failed registering class: %d\n", ret);
		return ret;
	}

	/* Request the kernel for device numbers, starting with minor=0 */
	ret = alloc_chrdev_region(&dev_num, 0, TELEM_MAX_DEV, TELEM_DEV_NAME);
	if (ret) {
		pr_err(DRV_NAME ": Failed allocating dev numbers: %d\n", ret);
		goto destroy_class;
	}
	telem_major = MAJOR(dev_num);

	ret = platform_driver_register(&telem_driver);
	if (ret < 0) {
		pr_err(DRV_NAME ": Failed registering driver: %d\n", ret);
		goto unregister_region;
	}

	return 0;

unregister_region:
	unregister_chrdev_region(MKDEV(telem_major, 0), TELEM_MAX_DEV);
destroy_class:
	class_unregister(&telem_class);
	ida_destroy(&telem_ida);
	return ret;
}
/* Module exit: tear down in reverse order of telem_module_init(). */
static void __exit telem_module_exit(void)
{
	platform_driver_unregister(&telem_driver);
	unregister_chrdev_region(MKDEV(telem_major, 0), TELEM_MAX_DEV);
	class_unregister(&telem_class);
	ida_destroy(&telem_ida);
}
module_init(telem_module_init);
module_exit(telem_module_exit);
MODULE_AUTHOR("Nick Crews <[email protected]>");
MODULE_DESCRIPTION("Wilco EC telemetry driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
| linux-master | drivers/platform/chrome/wilco_ec/telemetry.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Core driver for Wilco Embedded Controller
*
* Copyright 2018 Google LLC
*
* This is the entry point for the drivers that control the Wilco EC.
*/
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_data/wilco-ec.h>
#include <linux/platform_device.h>
#include "../cros_ec_lpc_mec.h"
#define DRV_NAME "wilco-ec"
static struct resource *wilco_get_resource(struct platform_device *pdev,
int index)
{
struct device *dev = &pdev->dev;
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_IO, index);
if (!res) {
dev_dbg(dev, "Couldn't find IO resource %d\n", index);
return res;
}
return devm_request_region(dev, res->start, resource_size(res),
dev_name(dev));
}
/*
 * Probe: set up mailbox state and IO regions, then register one child
 * platform device per EC feature (debugfs, RTC, LEDs, sysfs, charger,
 * telemetry). Children are unwound in reverse order on any failure.
 */
static int wilco_ec_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct wilco_ec_device *ec;
	int ret;

	ec = devm_kzalloc(dev, sizeof(*ec), GFP_KERNEL);
	if (!ec)
		return -ENOMEM;

	platform_set_drvdata(pdev, ec);
	ec->dev = dev;
	mutex_init(&ec->mailbox_lock);

	/* Shared buffer sized for the largest possible mailbox response */
	ec->data_size = sizeof(struct wilco_ec_response) + EC_MAILBOX_DATA_SIZE;
	ec->data_buffer = devm_kzalloc(dev, ec->data_size, GFP_KERNEL);
	if (!ec->data_buffer)
		return -ENOMEM;

	/* Prepare access to IO regions provided by ACPI */
	ec->io_data = wilco_get_resource(pdev, 0); /* Host Data */
	ec->io_command = wilco_get_resource(pdev, 1); /* Host Command */
	ec->io_packet = wilco_get_resource(pdev, 2); /* MEC EMI */
	if (!ec->io_data || !ec->io_command || !ec->io_packet)
		return -ENODEV;

	/* Initialize cros_ec register interface for communication */
	cros_ec_lpc_mec_init(ec->io_packet->start,
			     ec->io_packet->start + EC_MAILBOX_DATA_SIZE);

	/*
	 * Register a child device that will be found by the debugfs driver.
	 * Ignore failure.
	 */
	ec->debugfs_pdev = platform_device_register_data(dev,
							 "wilco-ec-debugfs",
							 PLATFORM_DEVID_AUTO,
							 NULL, 0);

	/* Register a child device that will be found by the RTC driver. */
	ec->rtc_pdev = platform_device_register_data(dev, "rtc-wilco-ec",
						     PLATFORM_DEVID_AUTO,
						     NULL, 0);
	if (IS_ERR(ec->rtc_pdev)) {
		dev_err(dev, "Failed to create RTC platform device\n");
		ret = PTR_ERR(ec->rtc_pdev);
		goto unregister_debugfs;
	}

	/* Set up the keyboard backlight LEDs. */
	ret = wilco_keyboard_leds_init(ec);
	if (ret < 0) {
		dev_err(dev,
			"Failed to initialize keyboard LEDs: %d\n",
			ret);
		goto unregister_rtc;
	}

	ret = wilco_ec_add_sysfs(ec);
	if (ret < 0) {
		dev_err(dev, "Failed to create sysfs entries: %d\n", ret);
		goto unregister_rtc;
	}

	/* Register child device to be found by charger config driver. */
	ec->charger_pdev = platform_device_register_data(dev, "wilco-charger",
							 PLATFORM_DEVID_AUTO,
							 NULL, 0);
	if (IS_ERR(ec->charger_pdev)) {
		dev_err(dev, "Failed to create charger platform device\n");
		ret = PTR_ERR(ec->charger_pdev);
		goto remove_sysfs;
	}

	/* Register child device that will be found by the telemetry driver. */
	ec->telem_pdev = platform_device_register_data(dev, "wilco_telem",
						       PLATFORM_DEVID_AUTO,
						       ec, sizeof(*ec));
	if (IS_ERR(ec->telem_pdev)) {
		dev_err(dev, "Failed to create telemetry platform device\n");
		ret = PTR_ERR(ec->telem_pdev);
		goto unregister_charge_config;
	}

	return 0;

unregister_charge_config:
	platform_device_unregister(ec->charger_pdev);
remove_sysfs:
	wilco_ec_remove_sysfs(ec);
unregister_rtc:
	platform_device_unregister(ec->rtc_pdev);
unregister_debugfs:
	/* debugfs child may have failed to register; only unregister if valid */
	if (ec->debugfs_pdev)
		platform_device_unregister(ec->debugfs_pdev);
	return ret;
}
/* Remove: unregister child devices in reverse order of their creation. */
static int wilco_ec_remove(struct platform_device *pdev)
{
	struct wilco_ec_device *ec = platform_get_drvdata(pdev);

	platform_device_unregister(ec->telem_pdev);
	platform_device_unregister(ec->charger_pdev);
	wilco_ec_remove_sysfs(ec);
	platform_device_unregister(ec->rtc_pdev);
	/* debugfs child may never have been created (probe ignores failure) */
	if (ec->debugfs_pdev)
		platform_device_unregister(ec->debugfs_pdev);
	return 0;
}
static const struct acpi_device_id wilco_ec_acpi_device_ids[] = {
{ "GOOG000C", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, wilco_ec_acpi_device_ids);
static struct platform_driver wilco_ec_driver = {
.driver = {
.name = DRV_NAME,
.acpi_match_table = wilco_ec_acpi_device_ids,
},
.probe = wilco_ec_probe,
.remove = wilco_ec_remove,
};
module_platform_driver(wilco_ec_driver);
MODULE_AUTHOR("Nick Crews <[email protected]>");
MODULE_AUTHOR("Duncan Laurie <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("ChromeOS Wilco Embedded Controller driver");
MODULE_ALIAS("platform:" DRV_NAME);
| linux-master | drivers/platform/chrome/wilco_ec/core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ACPI event handling for Wilco Embedded Controller
*
* Copyright 2019 Google LLC
*
* The Wilco Embedded Controller can create custom events that
* are not handled as standard ACPI objects. These events can
* contain information about changes in EC controlled features,
* such as errors and events in the dock or display. For example,
* an event is triggered if the dock is plugged into a display
* incorrectly. These events are needed for telemetry and
* diagnostics reasons, and for possibly alerting the user.
* These events are triggered by the EC with an ACPI Notify(0x90),
* and then the BIOS reads the event buffer from EC RAM via an
* ACPI method. When the OS receives these events via ACPI,
* it passes them along to this driver. The events are put into
* a queue which can be read by a userspace daemon via a char device
* that implements read() and poll(). The event queue acts as a
* circular buffer of size 64, so if there are no userspace consumers
* the kernel will not run out of memory. The char device will appear at
* /dev/wilco_event{n}, where n is some small non-negative integer,
* starting from 0. Standard ACPI events such as the battery getting
* plugged/unplugged can also come through this path, but they are
* dealt with via other paths, and are ignored here.
* To test, you can tail the binary data with
* $ cat /dev/wilco_event0 | hexdump -ve '1/1 "%x\n"'
* and then create an event by plugging/unplugging the battery.
*/
#include <linux/acpi.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
/* ACPI Notify event code indicating event data is available. */
#define EC_ACPI_NOTIFY_EVENT 0x90
/* ACPI Method to execute to retrieve event data buffer from the EC. */
#define EC_ACPI_GET_EVENT "QSET"
/* Maximum number of words in event data returned by the EC. */
#define EC_ACPI_MAX_EVENT_WORDS 6
#define EC_ACPI_MAX_EVENT_SIZE \
(sizeof(struct ec_event) + (EC_ACPI_MAX_EVENT_WORDS) * sizeof(u16))
/* Node will appear in /dev/EVENT_DEV_NAME */
#define EVENT_DEV_NAME "wilco_event"
#define EVENT_CLASS_NAME EVENT_DEV_NAME
#define DRV_NAME EVENT_DEV_NAME
#define EVENT_DEV_NAME_FMT (EVENT_DEV_NAME "%d")
static struct class event_class = {
.name = EVENT_CLASS_NAME,
};
/* Keep track of all the device numbers used. */
#define EVENT_MAX_DEV 128
static int event_major;
static DEFINE_IDA(event_ida);
/* Size of circular queue of events. */
#define MAX_NUM_EVENTS 64
/**
 * struct ec_event - Extended event returned by the EC.
 * @size: Number of 16bit words in structure after the size word.
 * @type: Extended event type, meaningless for us.
 * @event: Event data words. Max count is %EC_ACPI_MAX_EVENT_WORDS.
 */
struct ec_event {
	u16 size;
	u16 type;
	u16 event[];
} __packed;

/* Number of data words in @ev; @ev->size also counts the type word. */
#define ec_event_num_words(ev) (ev->size - 1)
/* Total size in bytes of @ev, header included. */
#define ec_event_size(ev) (sizeof(*ev) + (ec_event_num_words(ev) * sizeof(u16)))
/**
 * struct ec_event_queue - Circular queue for events.
 * @capacity: Number of elements the queue can hold.
 * @head: Next index to write to.
 * @tail: Next index to read from.
 * @entries: Array of event pointers, allocated inline with the struct.
 *
 * Empty vs full is disambiguated by @entries: both states have
 * head == tail, but the slot at @head is only non-NULL when full.
 */
struct ec_event_queue {
	int capacity;
	int head;
	int tail;
	struct ec_event *entries[];
};

/* Maximum number of events to store in ec_event_queue */
static int queue_size = 64;
module_param(queue_size, int, 0644);
/* Allocate a zeroed circular queue able to hold @capacity events. */
static struct ec_event_queue *event_queue_new(int capacity)
{
	struct ec_event_queue *queue;

	queue = kzalloc(struct_size(queue, entries, capacity), GFP_KERNEL);
	if (queue)
		queue->capacity = capacity;

	return queue;
}
/*
 * The queue is empty when head meets tail AND the head slot holds no
 * event (head == tail alone is ambiguous: it also happens when full).
 */
static inline bool event_queue_empty(struct ec_event_queue *q)
{
	return !q->entries[q->head] && q->head == q->tail;
}
/*
 * The queue is full when head meets tail AND the head slot is occupied
 * (head == tail alone is ambiguous: it also happens when empty).
 */
static inline bool event_queue_full(struct ec_event_queue *q)
{
	return q->entries[q->head] && q->head == q->tail;
}
/* Detach and return the oldest event, or NULL if the queue is empty. */
static struct ec_event *event_queue_pop(struct ec_event_queue *q)
{
	struct ec_event *oldest;

	if (event_queue_empty(q))
		return NULL;

	oldest = q->entries[q->tail];
	q->entries[q->tail] = NULL;
	/* Advance tail with wrap-around; tail is always < capacity. */
	if (++q->tail == q->capacity)
		q->tail = 0;

	return oldest;
}
/*
 * Append @ev to the queue. When the queue is already full, the oldest
 * event is evicted and returned so the caller can kfree() it; otherwise
 * NULL is returned.
 */
static struct ec_event *event_queue_push(struct ec_event_queue *q,
					 struct ec_event *ev)
{
	struct ec_event *evicted;

	evicted = event_queue_full(q) ? event_queue_pop(q) : NULL;
	q->entries[q->head] = ev;
	/* Advance head with wrap-around; head is always < capacity. */
	if (++q->head == q->capacity)
		q->head = 0;

	return evicted;
}
/* Release every queued event and then the queue structure itself. */
static void event_queue_free(struct ec_event_queue *q)
{
	struct ec_event *ev;

	for (ev = event_queue_pop(q); ev; ev = event_queue_pop(q))
		kfree(ev);

	kfree(q);
}
/**
 * struct event_device_data - Data for a Wilco EC device that responds to ACPI.
 * @events: Circular queue of EC events to be provided to userspace.
 * @queue_lock: Protect the queue from simultaneous read/writes.
 * @wq: Wait queue to notify processes when events are available or the
 *      device has been removed.
 * @dev: Device associated with the %cdev; its refcount keeps this struct
 *       alive until free_device_data() runs on the final put_device().
 * @cdev: Char dev that userspace reads() and polls() from.
 * @exist: Has the device not been removed? Once a device has been removed,
 *         writes, reads, and new opens will fail.
 * @available: Guarantee only one client can open() file and read from queue.
 *
 * There will be one of these structs for each ACPI device registered. This data
 * is the queue of events received from ACPI that still need to be read from
 * userspace, the device and char device that userspace is using, a wait queue
 * used to notify different threads when something has changed, plus a flag
 * on whether the ACPI device has been removed.
 */
struct event_device_data {
	struct ec_event_queue *events;
	spinlock_t queue_lock;
	wait_queue_head_t wq;
	struct device dev;
	struct cdev cdev;
	bool exist;
	atomic_t available;
};
/**
 * enqueue_events() - Place EC events in queue to be read by userspace.
 * @adev: Device the events came from.
 * @buf: Buffer of event data.
 * @length: Length of event data buffer.
 *
 * %buf contains a number of ec_event's, packed one after the other.
 * Each ec_event is of variable length. Start with the first event, copy it
 * into a persistent ec_event, store that entry in the queue, move on
 * to the next ec_event in buf, and repeat.
 *
 * Return: 0 on success or negative error code on failure.
 */
static int enqueue_events(struct acpi_device *adev, const u8 *buf, u32 length)
{
	struct event_device_data *dev_data = adev->driver_data;
	struct ec_event *event, *queue_event, *old_event;
	size_t num_words, event_size;
	u32 offset = 0;

	while (offset < length) {
		/*
		 * Ensure a complete header remains before dereferencing it;
		 * the caller only validated the first event's header.
		 */
		if (length - offset < sizeof(*event)) {
			dev_err(&adev->dev,
				"Truncated event header at offset %u\n",
				offset);
			return -EOVERFLOW;
		}
		event = (struct ec_event *)(buf + offset);
		num_words = ec_event_num_words(event);
		event_size = ec_event_size(event);
		/* Also rejects size==0, which wraps num_words to 0xFFFF. */
		if (num_words > EC_ACPI_MAX_EVENT_WORDS) {
			dev_err(&adev->dev, "Too many event words: %zu > %d\n",
				num_words, EC_ACPI_MAX_EVENT_WORDS);
			return -EOVERFLOW;
		}
		/* Ensure event does not overflow the available buffer */
		if ((offset + event_size) > length) {
			dev_err(&adev->dev, "Event exceeds buffer: %zu > %d\n",
				offset + event_size, length);
			return -EOVERFLOW;
		}
		/* Point to the next event in the buffer */
		offset += event_size;
		/* Copy event into the queue */
		queue_event = kmemdup(event, event_size, GFP_KERNEL);
		if (!queue_event)
			return -ENOMEM;
		spin_lock(&dev_data->queue_lock);
		/* Push may evict the oldest event when the queue is full. */
		old_event = event_queue_push(dev_data->events, queue_event);
		spin_unlock(&dev_data->queue_lock);
		kfree(old_event);
		wake_up_interruptible(&dev_data->wq);
	}
	return 0;
}
/**
 * event_device_notify() - Callback when EC generates an event over ACPI.
 * @adev: The device that the event is coming from.
 * @value: Value passed to Notify() in ACPI.
 *
 * This function will read the events from the device and enqueue them.
 */
static void event_device_notify(struct acpi_device *adev, u32 value)
{
	struct acpi_buffer event_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;

	/* Anything other than the EC's custom notify code is ignored. */
	if (value != EC_ACPI_NOTIFY_EVENT) {
		dev_err(&adev->dev, "Invalid event: 0x%08x\n", value);
		return;
	}

	/* Execute ACPI method to get event data buffer. */
	status = acpi_evaluate_object(adev->handle, EC_ACPI_GET_EVENT,
				      NULL, &event_buffer);
	if (ACPI_FAILURE(status)) {
		dev_err(&adev->dev, "Error executing ACPI method %s()\n",
			EC_ACPI_GET_EVENT);
		return;
	}

	/* ACPICA allocated the buffer for us; we own it and must kfree(). */
	obj = (union acpi_object *)event_buffer.pointer;
	if (!obj) {
		dev_err(&adev->dev, "Nothing returned from %s()\n",
			EC_ACPI_GET_EVENT);
		return;
	}
	if (obj->type != ACPI_TYPE_BUFFER) {
		dev_err(&adev->dev, "Invalid object returned from %s()\n",
			EC_ACPI_GET_EVENT);
		kfree(obj);
		return;
	}
	/* Require at least one complete event header before parsing. */
	if (obj->buffer.length < sizeof(struct ec_event)) {
		dev_err(&adev->dev, "Invalid buffer length %d from %s()\n",
			obj->buffer.length, EC_ACPI_GET_EVENT);
		kfree(obj);
		return;
	}

	enqueue_events(adev, obj->buffer.pointer, obj->buffer.length);
	kfree(obj);
}
/* Open the char device; only one concurrent reader is allowed. */
static int event_open(struct inode *inode, struct file *filp)
{
	struct event_device_data *dev_data;

	dev_data = container_of(inode->i_cdev, struct event_device_data, cdev);
	if (!dev_data->exist)
		return -ENODEV;

	/* Claim the single-reader slot, or fail if someone holds it. */
	if (atomic_cmpxchg(&dev_data->available, 1, 0) == 0)
		return -EBUSY;

	/* Increase refcount on device so dev_data is not freed */
	get_device(&dev_data->dev);
	/* Mark the fd as a non-seekable stream. */
	stream_open(inode, filp);
	filp->private_data = dev_data;

	return 0;
}
/* poll() support: readable when the queue is non-empty, HUP when removed. */
static __poll_t event_poll(struct file *filp, poll_table *wait)
{
	struct event_device_data *dev_data = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &dev_data->wq, wait);
	if (!dev_data->exist)
		return EPOLLHUP;
	/*
	 * Lockless peek at the queue: a race here only costs a spurious
	 * wakeup or a short delay, which event_read() copes with.
	 */
	if (!event_queue_empty(dev_data->events))
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLPRI;
	return mask;
}
/**
 * event_read() - Callback for passing event data to userspace via read().
 * @filp: The file we are reading from.
 * @buf: Pointer to userspace buffer to fill with one event.
 * @count: Number of bytes requested. Must be at least EC_ACPI_MAX_EVENT_SIZE.
 * @pos: File position pointer, irrelevant since we don't support seeking.
 *
 * Removes the first event from the queue, places it in the passed buffer.
 *
 * If there are no events in the queue, then one of two things happens,
 * depending on if the file was opened in nonblocking mode: If in nonblocking
 * mode, then return -EAGAIN to say there's no data. If in blocking mode, then
 * block until an event is available.
 *
 * Return: Number of bytes placed in buffer, negative error code on failure.
 */
static ssize_t event_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct event_device_data *dev_data = filp->private_data;
	struct ec_event *event;
	ssize_t n_bytes_written = 0;
	int err;

	/* We only will give them the entire event at once */
	if (count != 0 && count < EC_ACPI_MAX_EVENT_SIZE)
		return -EINVAL;

	spin_lock(&dev_data->queue_lock);
	while (event_queue_empty(dev_data->events)) {
		/* Never sleep or return to userspace with the lock held. */
		spin_unlock(&dev_data->queue_lock);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		err = wait_event_interruptible(dev_data->wq,
					!event_queue_empty(dev_data->events) ||
					!dev_data->exist);
		if (err)
			return err;
		/* Device was removed as we waited? */
		if (!dev_data->exist)
			return -ENODEV;
		/* Retake the lock and re-check the queue before popping. */
		spin_lock(&dev_data->queue_lock);
	}
	event = event_queue_pop(dev_data->events);
	spin_unlock(&dev_data->queue_lock);

	/* Copy the whole variable-length event out, then drop our copy. */
	n_bytes_written = ec_event_size(event);
	if (copy_to_user(buf, event, n_bytes_written))
		n_bytes_written = -EFAULT;
	kfree(event);

	return n_bytes_written;
}
/* Close: return the single-reader slot and drop the device reference. */
static int event_release(struct inode *inode, struct file *filp)
{
	struct event_device_data *dev_data = filp->private_data;

	atomic_set(&dev_data->available, 1);
	/* May trigger free_device_data() if the device was removed. */
	put_device(&dev_data->dev);

	return 0;
}
/* The event device is a read-only, non-seekable stream of EC events. */
static const struct file_operations event_fops = {
	.open = event_open,
	.poll = event_poll,
	.read = event_read,
	.release = event_release,
	.llseek = no_llseek,
	.owner = THIS_MODULE,
};
/**
 * free_device_data() - Callback to free the event_device_data structure.
 * @d: The device embedded in our device data, which we have been ref counting.
 *
 * This is called only after event_device_remove() has been called and all
 * userspace programs have called event_release() on all the open file
 * descriptors.
 */
static void free_device_data(struct device *d)
{
	struct event_device_data *dev_data;

	dev_data = container_of(d, struct event_device_data, dev);
	/* No readers remain at this point, so no queue locking is needed. */
	event_queue_free(dev_data->events);
	kfree(dev_data);
}
/* Mark the device gone and drop the initial reference taken at creation. */
static void hangup_device(struct event_device_data *dev_data)
{
	dev_data->exist = false;
	/* Wake up the waiting processes so they can close. */
	wake_up_interruptible(&dev_data->wq);
	/* free_device_data() runs once the last open fd is released. */
	put_device(&dev_data->dev);
}
/**
* event_device_add() - Callback when creating a new device.
* @adev: ACPI device that we will be receiving events from.
*
* This finds a free minor number for the device, allocates and initializes
* some device data, and creates a new device and char dev node.
*
* The device data is freed in free_device_data(), which is called when
* %dev_data->dev is release()ed. This happens after all references to
* %dev_data->dev are dropped, which happens once both event_device_remove()
* has been called and every open()ed file descriptor has been release()ed.
*
* Return: 0 on success, negative error code on failure.
*/
static int event_device_add(struct acpi_device *adev)
{
struct event_device_data *dev_data;
int error, minor;
minor = ida_alloc_max(&event_ida, EVENT_MAX_DEV-1, GFP_KERNEL);
if (minor < 0) {
error = minor;
dev_err(&adev->dev, "Failed to find minor number: %d\n", error);
return error;
}
dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
if (!dev_data) {
error = -ENOMEM;
goto free_minor;
}
/* Initialize the device data. */
adev->driver_data = dev_data;
dev_data->events = event_queue_new(queue_size);
if (!dev_data->events) {
kfree(dev_data);
error = -ENOMEM;
goto free_minor;
}
spin_lock_init(&dev_data->queue_lock);
init_waitqueue_head(&dev_data->wq);
dev_data->exist = true;
atomic_set(&dev_data->available, 1);
/* Initialize the device. */
dev_data->dev.devt = MKDEV(event_major, minor);
dev_data->dev.class = &event_class;
dev_data->dev.release = free_device_data;
dev_set_name(&dev_data->dev, EVENT_DEV_NAME_FMT, minor);
device_initialize(&dev_data->dev);
/* Initialize the character device, and add it to userspace. */
cdev_init(&dev_data->cdev, &event_fops);
error = cdev_device_add(&dev_data->cdev, &dev_data->dev);
if (error)
goto free_dev_data;
return 0;
free_dev_data:
hangup_device(dev_data);
free_minor:
ida_simple_remove(&event_ida, minor);
return error;
}
/* ACPI removal callback: tear down the char dev and hang up readers. */
static void event_device_remove(struct acpi_device *adev)
{
	struct event_device_data *dev_data = adev->driver_data;

	cdev_device_del(&dev_data->cdev, &dev_data->dev);
	/* Pair with ida_alloc_max(); ida_simple_remove() is deprecated. */
	ida_free(&event_ida, MINOR(dev_data->dev.devt));
	hangup_device(dev_data);
}
/* ACPI device ID this driver binds to. */
static const struct acpi_device_id event_acpi_ids[] = {
	{ "GOOG000D", 0 },
	{ }
};
MODULE_DEVICE_TABLE(acpi, event_acpi_ids);

static struct acpi_driver event_driver = {
	.name = DRV_NAME,
	.class = DRV_NAME,
	.ids = event_acpi_ids,
	.ops = {
		.add = event_device_add,
		.notify = event_device_notify,
		.remove = event_device_remove,
	},
	.owner = THIS_MODULE,
};
/* Register class, then chrdev region, then the ACPI driver; unwind on error. */
static int __init event_module_init(void)
{
	dev_t dev_num = 0;
	int ret;

	ret = class_register(&event_class);
	if (ret) {
		pr_err(DRV_NAME ": Failed registering class: %d\n", ret);
		return ret;
	}

	/* Request device numbers, starting with minor=0. Save the major num. */
	ret = alloc_chrdev_region(&dev_num, 0, EVENT_MAX_DEV, EVENT_DEV_NAME);
	if (ret) {
		pr_err(DRV_NAME ": Failed allocating dev numbers: %d\n", ret);
		goto destroy_class;
	}
	event_major = MAJOR(dev_num);

	ret = acpi_bus_register_driver(&event_driver);
	if (ret < 0) {
		pr_err(DRV_NAME ": Failed registering driver: %d\n", ret);
		goto unregister_region;
	}

	return 0;

unregister_region:
	unregister_chrdev_region(MKDEV(event_major, 0), EVENT_MAX_DEV);
destroy_class:
	class_unregister(&event_class);
	ida_destroy(&event_ida);
	return ret;
}
/* Unwind event_module_init() in reverse order. */
static void __exit event_module_exit(void)
{
	acpi_bus_unregister_driver(&event_driver);
	unregister_chrdev_region(MKDEV(event_major, 0), EVENT_MAX_DEV);
	class_unregister(&event_class);
	/* All minors were freed on device removal; release the IDA itself. */
	ida_destroy(&event_ida);
}

module_init(event_module_init);
module_exit(event_module_exit);

MODULE_AUTHOR("Nick Crews <[email protected]>");
MODULE_DESCRIPTION("Wilco EC ACPI event driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/platform/chrome/wilco_ec/event.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Keyboard backlight LED driver for the Wilco Embedded Controller
*
* Copyright 2019 Google LLC
*
* Since the EC will never change the backlight level of its own accord,
* we don't need to implement a brightness_get() method.
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/platform_data/wilco-ec.h>
#include <linux/slab.h>
#define WILCO_EC_COMMAND_KBBL 0x75
#define WILCO_KBBL_MODE_FLAG_PWM BIT(1) /* Set brightness by percent. */
#define WILCO_KBBL_DEFAULT_BRIGHTNESS 0
/* Per-EC context: the EC handle plus the registered LED class device. */
struct wilco_keyboard_leds {
	struct wilco_ec_device *ec;
	struct led_classdev keyboard;
};

/* Subcommands of WILCO_EC_COMMAND_KBBL understood by the EC. */
enum wilco_kbbl_subcommand {
	WILCO_KBBL_SUBCMD_GET_FEATURES = 0x00,
	WILCO_KBBL_SUBCMD_GET_STATE = 0x01,
	WILCO_KBBL_SUBCMD_SET_STATE = 0x02,
};

/**
 * struct wilco_keyboard_leds_msg - Message to/from EC for keyboard LED control.
 * @command: Always WILCO_EC_COMMAND_KBBL.
 * @status: Set by EC to 0 on success, 0xFF on failure.
 * @subcmd: One of enum wilco_kbbl_subcommand.
 * @reserved3: Should be 0.
 * @mode: Bit flags for used mode, we want to use WILCO_KBBL_MODE_FLAG_PWM.
 * @reserved5to8: Should be 0.
 * @percent: Brightness in 0-100. Only meaningful in PWM mode.
 * @reserved10to15: Should be 0.
 *
 * The same 16-byte layout is used for both the request and the response.
 */
struct wilco_keyboard_leds_msg {
	u8 command;
	u8 status;
	u8 subcmd;
	u8 reserved3;
	u8 mode;
	u8 reserved5to8[4];
	u8 percent;
	u8 reserved10to15[6];
} __packed;
/*
 * Send a request, get a response, and check that the response is good.
 * A negative return means the mailbox transport itself failed; EC-level
 * status must still be checked in @response->status by the caller.
 */
static int send_kbbl_msg(struct wilco_ec_device *ec,
			 struct wilco_keyboard_leds_msg *request,
			 struct wilco_keyboard_leds_msg *response)
{
	struct wilco_ec_message msg;
	int ret;

	memset(&msg, 0, sizeof(msg));
	msg.type = WILCO_EC_MSG_LEGACY;
	msg.request_data = request;
	msg.request_size = sizeof(*request);
	msg.response_data = response;
	msg.response_size = sizeof(*response);

	ret = wilco_ec_mailbox(ec, &msg);
	if (ret < 0) {
		dev_err(ec->dev,
			"Failed sending keyboard LEDs command: %d\n", ret);
		return ret;
	}

	return 0;
}
/* Set the backlight to @brightness percent (0-100) using PWM mode. */
static int set_kbbl(struct wilco_ec_device *ec, enum led_brightness brightness)
{
	struct wilco_keyboard_leds_msg request;
	struct wilco_keyboard_leds_msg response;
	int ret;

	memset(&request, 0, sizeof(request));
	request.command = WILCO_EC_COMMAND_KBBL;
	request.subcmd = WILCO_KBBL_SUBCMD_SET_STATE;
	request.mode = WILCO_KBBL_MODE_FLAG_PWM;
	request.percent = brightness;

	ret = send_kbbl_msg(ec, &request, &response);
	if (ret < 0)
		return ret;
	/* Non-zero status is an EC-level failure even if transport succeeded. */
	if (response.status) {
		dev_err(ec->dev,
			"EC reported failure sending keyboard LEDs command: %d\n",
			response.status);
		return -EIO;
	}

	return 0;
}
/*
 * Query whether the EC has a keyboard backlight at all. A status of 0xFF
 * from the GET_FEATURES subcommand means the feature is absent.
 */
static int kbbl_exist(struct wilco_ec_device *ec, bool *exists)
{
	struct wilco_keyboard_leds_msg request;
	struct wilco_keyboard_leds_msg response;
	int ret;

	memset(&request, 0, sizeof(request));
	request.command = WILCO_EC_COMMAND_KBBL;
	request.subcmd = WILCO_KBBL_SUBCMD_GET_FEATURES;

	ret = send_kbbl_msg(ec, &request, &response);
	if (ret < 0)
		return ret;

	*exists = response.status != 0xFF;

	return 0;
}
/**
 * kbbl_init() - Initialize the state of the keyboard backlight.
 * @ec: EC device to talk to.
 *
 * Gets the current brightness, ensuring that the BIOS already initialized the
 * backlight to PWM mode. If not in PWM mode, then the current brightness is
 * meaningless, so set the brightness to WILCO_KBBL_DEFAULT_BRIGHTNESS.
 *
 * Return: Final brightness of the keyboard, or negative error code on failure.
 */
static int kbbl_init(struct wilco_ec_device *ec)
{
	struct wilco_keyboard_leds_msg request;
	struct wilco_keyboard_leds_msg response;
	int ret;

	memset(&request, 0, sizeof(request));
	request.command = WILCO_EC_COMMAND_KBBL;
	request.subcmd = WILCO_KBBL_SUBCMD_GET_STATE;

	ret = send_kbbl_msg(ec, &request, &response);
	if (ret < 0)
		return ret;
	if (response.status) {
		dev_err(ec->dev,
			"EC reported failure sending keyboard LEDs command: %d\n",
			response.status);
		return -EIO;
	}

	/* Already in PWM mode: the reported percentage is trustworthy. */
	if (response.mode & WILCO_KBBL_MODE_FLAG_PWM)
		return response.percent;

	/* Not in PWM mode: force a known default brightness (and PWM mode). */
	ret = set_kbbl(ec, WILCO_KBBL_DEFAULT_BRIGHTNESS);
	if (ret < 0)
		return ret;

	return WILCO_KBBL_DEFAULT_BRIGHTNESS;
}
/* LED-class brightness_set_blocking callback; forwards to the EC. */
static int wilco_keyboard_leds_set(struct led_classdev *cdev,
				   enum led_brightness brightness)
{
	struct wilco_keyboard_leds *wkl =
		container_of(cdev, struct wilco_keyboard_leds, keyboard);
	return set_kbbl(wkl->ec, brightness);
}
/*
 * Probe for a keyboard backlight and, if present, register it as a
 * standard LED class device. Returns 0 (and registers nothing) when the
 * EC reports no backlight; devm-managed, so no explicit teardown needed.
 */
int wilco_keyboard_leds_init(struct wilco_ec_device *ec)
{
	struct wilco_keyboard_leds *wkl;
	bool leds_exist;
	int ret;

	ret = kbbl_exist(ec, &leds_exist);
	if (ret < 0) {
		dev_err(ec->dev,
			"Failed checking keyboard LEDs support: %d\n", ret);
		return ret;
	}
	if (!leds_exist)
		return 0;

	wkl = devm_kzalloc(ec->dev, sizeof(*wkl), GFP_KERNEL);
	if (!wkl)
		return -ENOMEM;

	wkl->ec = ec;
	wkl->keyboard.name = "platform::kbd_backlight";
	wkl->keyboard.max_brightness = 100;
	wkl->keyboard.flags = LED_CORE_SUSPENDRESUME;
	wkl->keyboard.brightness_set_blocking = wilco_keyboard_leds_set;
	ret = kbbl_init(ec);
	if (ret < 0)
		return ret;
	wkl->keyboard.brightness = ret;

	return devm_led_classdev_register(ec->dev, &wkl->keyboard);
}
| linux-master | drivers/platform/chrome/wilco_ec/keyboard_leds.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Mailbox interface for Wilco Embedded Controller
*
* Copyright 2018 Google LLC
*
* The Wilco EC is similar to a typical ChromeOS embedded controller.
* It uses the same MEC based low-level communication and a similar
* protocol, but with some important differences. The EC firmware does
* not support the same mailbox commands so it is not registered as a
* cros_ec device type.
*
* Most messages follow a standard format, but there are some exceptions
* and an interface is provided to do direct/raw transactions that do not
* make assumptions about byte placement.
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/platform_data/wilco-ec.h>
#include <linux/platform_device.h>
#include "../cros_ec_lpc_mec.h"
/* Version of mailbox interface */
#define EC_MAILBOX_VERSION 0
/* Command to start mailbox transaction */
#define EC_MAILBOX_START_COMMAND 0xda
/* Version of EC protocol */
#define EC_MAILBOX_PROTO_VERSION 3
/* Number of header bytes to be counted as data bytes */
#define EC_MAILBOX_DATA_EXTRA 2
/* Maximum timeout */
#define EC_MAILBOX_TIMEOUT HZ
/* EC response flags */
#define EC_CMDR_DATA BIT(0) /* Data ready for host to read */
#define EC_CMDR_PENDING BIT(1) /* Write pending to EC */
#define EC_CMDR_BUSY BIT(2) /* EC is busy processing a command */
#define EC_CMDR_CMD BIT(3) /* Last host write was a command */
/**
 * wilco_ec_response_timed_out() - Wait for EC response.
 * @ec: EC device.
 *
 * Polls the command/status port until both the PENDING and BUSY flags
 * clear, for at most %EC_MAILBOX_TIMEOUT jiffies.
 *
 * Return: true if EC timed out, false if EC did not time out.
 */
static bool wilco_ec_response_timed_out(struct wilco_ec_device *ec)
{
	unsigned long timeout = jiffies + EC_MAILBOX_TIMEOUT;

	do {
		if (!(inb(ec->io_command->start) &
		      (EC_CMDR_PENDING | EC_CMDR_BUSY)))
			return false;
		usleep_range(100, 200);
	} while (time_before(jiffies, timeout));

	return true;
}
/**
 * wilco_ec_checksum() - Compute 8-bit checksum over data range.
 * @data: Data to checksum.
 * @size: Number of bytes to checksum.
 *
 * Return: 8-bit sum (mod 256) of the @size bytes at @data.
 */
static u8 wilco_ec_checksum(const void *data, size_t size)
{
	/* Keep const: the data is only read (don't cast away the qualifier). */
	const u8 *data_bytes = data;
	u8 checksum = 0;
	size_t i;

	for (i = 0; i < size; i++)
		checksum += data_bytes[i];

	return checksum;
}
/**
 * wilco_ec_prepare() - Prepare the request structure for the EC.
 * @msg: EC message with request information.
 * @rq: EC request structure to fill.
 */
static void wilco_ec_prepare(struct wilco_ec_message *msg,
			     struct wilco_ec_request *rq)
{
	memset(rq, 0, sizeof(*rq));
	rq->struct_version = EC_MAILBOX_PROTO_VERSION;
	rq->mailbox_id = msg->type;
	rq->mailbox_version = EC_MAILBOX_VERSION;
	rq->data_size = msg->request_size;

	/* Checksum header and data */
	rq->checksum = wilco_ec_checksum(rq, sizeof(*rq));
	rq->checksum += wilco_ec_checksum(msg->request_data, msg->request_size);
	/* Negate so that summing header + data + checksum yields zero. */
	rq->checksum = -rq->checksum;
}
/**
 * wilco_ec_transfer() - Perform actual data transfer.
 * @ec: EC device.
 * @msg: EC message data for request and response.
 * @rq: Filled in request structure
 *
 * Context: ec->mailbox_lock should be held while using this function.
 * Return: number of bytes received or negative error code on failure.
 */
static int wilco_ec_transfer(struct wilco_ec_device *ec,
			     struct wilco_ec_message *msg,
			     struct wilco_ec_request *rq)
{
	struct wilco_ec_response *rs;
	u8 checksum;
	u8 flag;

	/* Write request header, then data */
	cros_ec_lpc_io_bytes_mec(MEC_IO_WRITE, 0, sizeof(*rq), (u8 *)rq);
	cros_ec_lpc_io_bytes_mec(MEC_IO_WRITE, sizeof(*rq), msg->request_size,
				 msg->request_data);

	/* Start the command */
	outb(EC_MAILBOX_START_COMMAND, ec->io_command->start);

	/* For some commands (eg shutdown) the EC will not respond, that's OK */
	if (msg->flags & WILCO_EC_FLAG_NO_RESPONSE) {
		dev_dbg(ec->dev, "EC does not respond to this command\n");
		return 0;
	}

	/* Wait for it to complete */
	if (wilco_ec_response_timed_out(ec)) {
		dev_dbg(ec->dev, "response timed out\n");
		return -ETIMEDOUT;
	}

	/* Check result: a non-zero flag byte on the data port is an error. */
	flag = inb(ec->io_data->start);
	if (flag) {
		dev_dbg(ec->dev, "bad response: 0x%02x\n", flag);
		return -EIO;
	}

	/*
	 * Read back response into ec->data_buffer. The MEC read helper
	 * returns a running 8-bit checksum; non-zero means corruption.
	 */
	rs = ec->data_buffer;
	checksum = cros_ec_lpc_io_bytes_mec(MEC_IO_READ, 0,
					    sizeof(*rs) + EC_MAILBOX_DATA_SIZE,
					    (u8 *)rs);
	if (checksum) {
		dev_dbg(ec->dev, "bad packet checksum 0x%02x\n", rs->checksum);
		return -EBADMSG;
	}
	if (rs->result) {
		dev_dbg(ec->dev, "EC reported failure: 0x%02x\n", rs->result);
		return -EBADMSG;
	}
	/* The EC always reports a full fixed-size payload. */
	if (rs->data_size != EC_MAILBOX_DATA_SIZE) {
		dev_dbg(ec->dev, "unexpected packet size (%u != %u)\n",
			rs->data_size, EC_MAILBOX_DATA_SIZE);
		return -EMSGSIZE;
	}
	if (rs->data_size < msg->response_size) {
		dev_dbg(ec->dev, "EC didn't return enough data (%u < %zu)\n",
			rs->data_size, msg->response_size);
		return -EMSGSIZE;
	}

	memcpy(msg->response_data, rs->data, msg->response_size);

	return rs->data_size;
}
/**
 * wilco_ec_mailbox() - Send EC request and receive EC response.
 * @ec: EC device.
 * @msg: EC message data for request and response.
 *
 * On entry msg->type, msg->request_size, and msg->request_data should all be
 * filled in. If desired, msg->flags can be set.
 *
 * If a response is expected, msg->response_size should be set, and
 * msg->response_data should point to a buffer with enough space. On exit
 * msg->response_data will be filled.
 *
 * Return: number of bytes received or negative error code on failure.
 */
int wilco_ec_mailbox(struct wilco_ec_device *ec, struct wilco_ec_message *msg)
{
	struct wilco_ec_request *rq;
	int ret;

	dev_dbg(ec->dev, "type=%04x flags=%02x rslen=%zu rqlen=%zu\n",
		msg->type, msg->flags, msg->response_size, msg->request_size);

	/* Serialize mailbox access; data_buffer is shared per-EC state. */
	mutex_lock(&ec->mailbox_lock);
	/* Prepare request packet */
	rq = ec->data_buffer;
	wilco_ec_prepare(msg, rq);

	ret = wilco_ec_transfer(ec, msg, rq);
	mutex_unlock(&ec->mailbox_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(wilco_ec_mailbox);
| linux-master | drivers/platform/chrome/wilco_ec/mailbox.c |
/*
* Broadcom specific AMBA
* System on Chip (SoC) Host
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include "bcma_private.h"
#include "scan.h"
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/bcma/bcma.h>
#include <linux/bcma/bcma_soc.h>
/* Plain MMIO accessors into a core's register window (core->io_addr). */

static u8 bcma_host_soc_read8(struct bcma_device *core, u16 offset)
{
	return readb(core->io_addr + offset);
}

static u16 bcma_host_soc_read16(struct bcma_device *core, u16 offset)
{
	return readw(core->io_addr + offset);
}

static u32 bcma_host_soc_read32(struct bcma_device *core, u16 offset)
{
	return readl(core->io_addr + offset);
}

static void bcma_host_soc_write8(struct bcma_device *core, u16 offset,
				 u8 value)
{
	writeb(value, core->io_addr + offset);
}

static void bcma_host_soc_write16(struct bcma_device *core, u16 offset,
				  u16 value)
{
	writew(value, core->io_addr + offset);
}

static void bcma_host_soc_write32(struct bcma_device *core, u16 offset,
				  u32 value)
{
	writel(value, core->io_addr + offset);
}
#ifdef CONFIG_BCMA_BLOCKIO
/*
 * Bulk read of @count bytes from a single register at @offset into
 * @buffer, @reg_width bytes per access. Note @addr is deliberately not
 * advanced: every access hits the same register. 16/32-bit data is
 * stored as little-endian raw values (no byte swapping).
 */
static void bcma_host_soc_block_read(struct bcma_device *core, void *buffer,
				     size_t count, u16 offset, u8 reg_width)
{
	void __iomem *addr = core->io_addr + offset;

	switch (reg_width) {
	case sizeof(u8): {
		u8 *buf = buffer;

		while (count) {
			*buf = __raw_readb(addr);
			buf++;
			count--;
		}
		break;
	}
	case sizeof(u16): {
		__le16 *buf = buffer;

		WARN_ON(count & 1);
		while (count) {
			*buf = (__force __le16)__raw_readw(addr);
			buf++;
			count -= 2;
		}
		break;
	}
	case sizeof(u32): {
		__le32 *buf = buffer;

		WARN_ON(count & 3);
		while (count) {
			*buf = (__force __le32)__raw_readl(addr);
			buf++;
			count -= 4;
		}
		break;
	}
	default:
		WARN_ON(1);
	}
}
/*
 * Bulk write counterpart of bcma_host_soc_block_read(): @count bytes
 * from @buffer to the single register at @offset, @reg_width bytes per
 * access, little-endian raw values with no byte swapping.
 */
static void bcma_host_soc_block_write(struct bcma_device *core,
				      const void *buffer,
				      size_t count, u16 offset, u8 reg_width)
{
	void __iomem *addr = core->io_addr + offset;

	switch (reg_width) {
	case sizeof(u8): {
		const u8 *buf = buffer;

		while (count) {
			__raw_writeb(*buf, addr);
			buf++;
			count--;
		}
		break;
	}
	case sizeof(u16): {
		const __le16 *buf = buffer;

		WARN_ON(count & 1);
		while (count) {
			__raw_writew((__force u16)(*buf), addr);
			buf++;
			count -= 2;
		}
		break;
	}
	case sizeof(u32): {
		const __le32 *buf = buffer;

		WARN_ON(count & 3);
		while (count) {
			__raw_writel((__force u32)(*buf), addr);
			buf++;
			count -= 4;
		}
		break;
	}
	default:
		WARN_ON(1);
	}
}
#endif /* CONFIG_BCMA_BLOCKIO */
/* Accessors for the core's wrapper/agent window (core->io_wrap), which
 * some cores lack; reads of a missing wrapper return all-ones.
 */
static u32 bcma_host_soc_aread32(struct bcma_device *core, u16 offset)
{
	if (WARN_ONCE(!core->io_wrap, "Accessed core has no wrapper/agent\n"))
		return ~0;
	return readl(core->io_wrap + offset);
}

static void bcma_host_soc_awrite32(struct bcma_device *core, u16 offset,
				   u32 value)
{
	if (WARN_ONCE(!core->io_wrap, "Accessed core has no wrapper/agent\n"))
		return;
	writel(value, core->io_wrap + offset);
}
/* Host-ops vtable used by the bcma core for SoC-attached buses. */
static const struct bcma_host_ops bcma_host_soc_ops = {
	.read8 = bcma_host_soc_read8,
	.read16 = bcma_host_soc_read16,
	.read32 = bcma_host_soc_read32,
	.write8 = bcma_host_soc_write8,
	.write16 = bcma_host_soc_write16,
	.write32 = bcma_host_soc_write32,
#ifdef CONFIG_BCMA_BLOCKIO
	.block_read = bcma_host_soc_block_read,
	.block_write = bcma_host_soc_block_write,
#endif
	.aread32 = bcma_host_soc_aread32,
	.awrite32 = bcma_host_soc_awrite32,
};
/* Map the first core's window and initialize the bus structure. */
int __init bcma_host_soc_register(struct bcma_soc *soc)
{
	struct bcma_bus *bus = &soc->bus;

	/* iomap only first core. We have to read some register on this core
	 * to scan the bus.
	 */
	bus->mmio = ioremap(BCMA_ADDR_BASE, BCMA_CORE_SIZE * 1);
	if (!bus->mmio)
		return -ENOMEM;

	/* Host specific */
	bus->hosttype = BCMA_HOSTTYPE_SOC;
	bus->ops = &bcma_host_soc_ops;

	/* Initialize struct, detect chip */
	bcma_init_bus(bus);

	return 0;
}

/* Scan and early-register the bus set up by bcma_host_soc_register(). */
int __init bcma_host_soc_init(struct bcma_soc *soc)
{
	struct bcma_bus *bus = &soc->bus;
	int err;

	/* Scan bus and initialize it */
	err = bcma_bus_early_register(bus);
	if (err)
		iounmap(bus->mmio);

	return err;
}
#ifdef CONFIG_OF
/* Device-tree probe path: map the bus from the "brcm,bus-axi" node. */
static int bcma_host_soc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct bcma_bus *bus;
	int err;

	/* Alloc */
	bus = devm_kzalloc(dev, sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return -ENOMEM;

	bus->dev = dev;

	/* Map MMIO */
	bus->mmio = of_iomap(np, 0);
	if (!bus->mmio)
		return -ENOMEM;

	/* Host specific */
	bus->hosttype = BCMA_HOSTTYPE_SOC;
	bus->ops = &bcma_host_soc_ops;

	/* Initialize struct, detect chip */
	bcma_init_bus(bus);

	/* Register */
	err = bcma_bus_register(bus);
	if (err)
		goto err_unmap_mmio;

	platform_set_drvdata(pdev, bus);

	return err;

err_unmap_mmio:
	/* of_iomap() is not devm-managed, so unmap by hand on failure. */
	iounmap(bus->mmio);
	return err;
}

static int bcma_host_soc_remove(struct platform_device *pdev)
{
	struct bcma_bus *bus = platform_get_drvdata(pdev);

	bcma_bus_unregister(bus);
	iounmap(bus->mmio);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
/* DT compatible strings this platform driver binds to. */
static const struct of_device_id bcma_host_soc_of_match[] = {
	{ .compatible = "brcm,bus-axi", },
	{},
};
MODULE_DEVICE_TABLE(of, bcma_host_soc_of_match);

static struct platform_driver bcma_host_soc_driver = {
	.driver = {
		.name = "bcma-host-soc",
		.of_match_table = bcma_host_soc_of_match,
	},
	.probe = bcma_host_soc_probe,
	.remove = bcma_host_soc_remove,
};

/* Registration entry points called from the bcma core module code. */
int __init bcma_host_soc_register_driver(void)
{
	return platform_driver_register(&bcma_host_soc_driver);
}

void __exit bcma_host_soc_unregister_driver(void)
{
	platform_driver_unregister(&bcma_host_soc_driver);
}
#endif /* CONFIG_OF */
| linux-master | drivers/bcma/host_soc.c |
/*
* Broadcom specific AMBA
* ChipCommon serial flash interface
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include "bcma_private.h"
#include <linux/platform_device.h>
#include <linux/bcma/bcma.h>
/*
 * MMIO window of the serial flash plus the platform device describing it;
 * registration of the device happens elsewhere in the bus code.
 */
static struct resource bcma_sflash_resource = {
	.name = "bcma_sflash",
	.start = BCMA_SOC_FLASH2,
	.end = 0,
	.flags = IORESOURCE_MEM | IORESOURCE_READONLY,
};

struct platform_device bcma_sflash_dev = {
	.name = "bcma_sflash",
	.resource = &bcma_sflash_resource,
	.num_resources = 1,
};
/* One table entry: chip name, device id, erase-block size and count. */
struct bcma_sflash_tbl_e {
	char *name;
	u32 id;
	u32 blocksize;
	u16 numblocks;
};

/* Known ST-compatible parts, matched by the id byte read from the chip. */
static const struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = {
	{ "M25P20", 0x11, 0x10000, 4, },
	{ "M25P40", 0x12, 0x10000, 8, },
	{ "M25P16", 0x14, 0x10000, 32, },
	{ "M25P32", 0x15, 0x10000, 64, },
	{ "M25P64", 0x16, 0x10000, 128, },
	{ "M25FL128", 0x17, 0x10000, 256, },
	{ "MX25L25635F", 0x18, 0x10000, 512, },
	{ NULL },
};

/* Known SST parts, matched when the first id byte reads 0xbf. */
static const struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = {
	{ "SST25WF512", 1, 0x1000, 16, },
	{ "SST25VF512", 0x48, 0x1000, 16, },
	{ "SST25WF010", 2, 0x1000, 32, },
	{ "SST25VF010", 0x49, 0x1000, 32, },
	{ "SST25WF020", 3, 0x1000, 64, },
	{ "SST25VF020", 0x43, 0x1000, 64, },
	{ "SST25WF040", 4, 0x1000, 128, },
	{ "SST25VF040", 0x44, 0x1000, 128, },
	{ "SST25VF040B", 0x8d, 0x1000, 128, },
	{ "SST25WF080", 5, 0x1000, 256, },
	{ "SST25VF080B", 0x8e, 0x1000, 256, },
	{ "SST25VF016", 0x41, 0x1000, 512, },
	{ "SST25VF032", 0x4a, 0x1000, 1024, },
	{ "SST25VF064", 0x4b, 0x1000, 2048, },
	{ NULL },
};

/* Known Atmel AT45DB parts. */
static const struct bcma_sflash_tbl_e bcma_sflash_at_tbl[] = {
	{ "AT45DB011", 0xc, 256, 512, },
	{ "AT45DB021", 0x14, 256, 1024, },
	{ "AT45DB041", 0x1c, 256, 2048, },
	{ "AT45DB081", 0x24, 256, 4096, },
	{ "AT45DB161", 0x2c, 512, 4096, },
	{ "AT45DB321", 0x34, 512, 8192, },
	{ "AT45DB642", 0x3c, 1024, 8192, },
	{ NULL },
};
/* Issue a flash control opcode and busy-wait (up to 1000 iterations)
 * for the BUSY bit to clear. Logs an error on timeout; the caller is
 * not informed of the failure.
 */
static void bcma_sflash_cmd(struct bcma_drv_cc *cc, u32 opcode)
{
	int i;
	bcma_cc_write32(cc, BCMA_CC_FLASHCTL,
			BCMA_CC_FLASHCTL_START | opcode);
	for (i = 0; i < 1000; i++) {
		if (!(bcma_cc_read32(cc, BCMA_CC_FLASHCTL) &
		      BCMA_CC_FLASHCTL_BUSY))
			return;
		cpu_relax();
	}
	bcma_err(cc->core->bus, "SFLASH control command failed (timeout)!\n");
}
/* Initialize serial flash access */
/* Detect the attached serial flash chip via the ChipCommon flash
 * interface, fill in cc->sflash (blocksize/numblocks/size) and prepare
 * (but do not register) the bcma_sflash platform device.
 * Returns 0 on success, -ENOTSUPP for unknown chips or flash types.
 */
int bcma_sflash_init(struct bcma_drv_cc *cc)
{
	struct bcma_bus *bus = cc->core->bus;
	struct bcma_sflash *sflash = &cc->sflash;
	const struct bcma_sflash_tbl_e *e;
	u32 id, id2;
	switch (cc->capabilities & BCMA_CC_CAP_FLASHT) {
	case BCMA_CC_FLASHT_STSER:
		/* Wake the chip, then read two RES id bytes (addr 0 and 1) */
		bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_DP);
		bcma_cc_write32(cc, BCMA_CC_FLASHADDR, 0);
		bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_RES);
		id = bcma_cc_read32(cc, BCMA_CC_FLASHDATA);
		bcma_cc_write32(cc, BCMA_CC_FLASHADDR, 1);
		bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_RES);
		id2 = bcma_cc_read32(cc, BCMA_CC_FLASHDATA);
		switch (id) {
		case 0xbf:
			/* SST manufacturer id: second byte selects chip */
			for (e = bcma_sflash_sst_tbl; e->name; e++) {
				if (e->id == id2)
					break;
			}
			break;
		case 0x13:
			return -ENOTSUPP;
		default:
			for (e = bcma_sflash_st_tbl; e->name; e++) {
				if (e->id == id)
					break;
			}
			break;
		}
		if (!e->name) {
			bcma_err(bus, "Unsupported ST serial flash (id: 0x%X, id2: 0x%X)\n", id, id2);
			return -ENOTSUPP;
		}
		break;
	case BCMA_CC_FLASHT_ATSER:
		/* Atmel: chip id lives in status bits 5:2 */
		bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_AT_STATUS);
		id = bcma_cc_read32(cc, BCMA_CC_FLASHDATA) & 0x3c;
		for (e = bcma_sflash_at_tbl; e->name; e++) {
			if (e->id == id)
				break;
		}
		if (!e->name) {
			bcma_err(bus, "Unsupported Atmel serial flash (id: 0x%X)\n", id);
			return -ENOTSUPP;
		}
		break;
	default:
		bcma_err(bus, "Unsupported flash type\n");
		return -ENOTSUPP;
	}
	sflash->blocksize = e->blocksize;
	sflash->numblocks = e->numblocks;
	sflash->size = sflash->blocksize * sflash->numblocks;
	sflash->present = true;
	bcma_info(bus, "Found %s serial flash (size: %dKiB, blocksize: 0x%X, blocks: %d)\n",
		  e->name, sflash->size / 1024, sflash->blocksize,
		  sflash->numblocks);
	/* Prepare platform device, but don't register it yet. It's too early,
	 * malloc (required by device_private_init) is not available yet. */
	/* NOTE(review): struct resource ends are inclusive, so this looks
	 * one byte too large (start + size rather than start + size - 1) —
	 * confirm against consumers before changing. */
	bcma_sflash_dev.resource[0].end = bcma_sflash_dev.resource[0].start +
					  sflash->size;
	bcma_sflash_dev.dev.platform_data = sflash;
	return 0;
}
| linux-master | drivers/bcma/driver_chipcommon_sflash.c |
/*
* Broadcom specific AMBA
* PCI Core in hostmode
*
* Copyright 2005 - 2011, Broadcom Corporation
* Copyright 2006, 2007, Michael Buesch <[email protected]>
* Copyright 2011, 2012, Hauke Mehrtens <[email protected]>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include "bcma_private.h"
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bcma/bcma.h>
#include <asm/paccess.h>
/* Probe a 32bit value on the bus and catch bus exceptions.
 * Returns nonzero on a bus exception.
 * This is MIPS specific */
#define mips_busprobe32(val, addr)	get_dbe((val), ((u32 *)(addr)))
/* Assume one-hot slot wiring */
#define BCMA_PCI_SLOT_MAX	16
/* Size of the standard (non-extended) PCI config space in bytes;
 * larger offsets need indirect access. */
#define	PCI_CONFIG_SPACE_SIZE	256
/* Decide whether the PCI(e) core runs in host mode. Only chip families
 * 0x47xx and 0x53xx can host; for those, enable the core and probe its
 * I/O space — a bus exception means "not a host".
 */
bool bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
{
	struct bcma_bus *bus = pc->core->bus;
	u16 chipid_top;
	u32 tmp;
	chipid_top = (bus->chipinfo.id & 0xFF00);
	if (chipid_top != 0x4700 &&
	    chipid_top != 0x5300)
		return false;
	/* Core must be up before its io_addr can be probed */
	bcma_core_enable(pc->core, 0);
	return !mips_busprobe32(tmp, pc->core->io_addr);
}
/* Indirect config-space read via the CONFIG_ADDR/CONFIG_DATA register
 * pair. The read-back of CONFIG_ADDR presumably flushes the address
 * write before the data access — TODO confirm with PCIe core docs.
 */
static u32 bcma_pcie_read_config(struct bcma_drv_pci *pc, u32 address)
{
	pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_ADDR, address);
	pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_ADDR);
	return pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_DATA);
}

/* Indirect config-space write, mirror of bcma_pcie_read_config(). */
static void bcma_pcie_write_config(struct bcma_drv_pci *pc, u32 address,
				   u32 data)
{
	pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_ADDR, address);
	pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_ADDR);
	pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_DATA, data);
}
/* Build the host address of a type-0 config cycle for dev/func/off and
 * slide the SBTOPCI1 window there as a side effect.
 * Returns 0 if no access is possible (slot >= 2 or PCIe link down).
 */
static u32 bcma_get_cfgspace_addr(struct bcma_drv_pci *pc, unsigned int dev,
				  unsigned int func, unsigned int off)
{
	u32 addr = 0;
	/* Issue config commands only when the data link is up (at least
	 * one external pcie device is present).
	 */
	if (dev >= 2 || !(bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_LSREG)
			  & BCMA_CORE_PCI_DLLP_LSREG_LINKUP))
		goto out;
	/* Type 0 transaction */
	/* Slide the PCI window to the appropriate slot */
	pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI1, BCMA_CORE_PCI_SBTOPCI_CFG0);
	/* Calculate the address */
	addr = pc->host_controller->host_cfg_addr;
	addr |= (dev << BCMA_CORE_PCI_CFG_SLOT_SHIFT);
	addr |= (func << BCMA_CORE_PCI_CFG_FUN_SHIFT);
	addr |= (off & ~3);
out:
	return addr;
}
static int bcma_extpci_read_config(struct bcma_drv_pci *pc, unsigned int dev,
unsigned int func, unsigned int off,
void *buf, int len)
{
int err = -EINVAL;
u32 addr, val;
void __iomem *mmio = 0;
WARN_ON(!pc->hostmode);
if (unlikely(len != 1 && len != 2 && len != 4))
goto out;
if (dev == 0) {
/* we support only two functions on device 0 */
if (func > 1)
goto out;
/* accesses to config registers with offsets >= 256
* requires indirect access.
*/
if (off >= PCI_CONFIG_SPACE_SIZE) {
addr = (func << 12);
addr |= (off & 0x0FFC);
val = bcma_pcie_read_config(pc, addr);
} else {
addr = BCMA_CORE_PCI_PCICFG0;
addr |= (func << 8);
addr |= (off & 0xFC);
val = pcicore_read32(pc, addr);
}
} else {
addr = bcma_get_cfgspace_addr(pc, dev, func, off);
if (unlikely(!addr))
goto out;
err = -ENOMEM;
mmio = ioremap(addr, sizeof(val));
if (!mmio)
goto out;
if (mips_busprobe32(val, mmio)) {
val = 0xFFFFFFFF;
goto unmap;
}
}
val >>= (8 * (off & 3));
switch (len) {
case 1:
*((u8 *)buf) = (u8)val;
break;
case 2:
*((u16 *)buf) = (u16)val;
break;
case 4:
*((u32 *)buf) = (u32)val;
break;
}
err = 0;
unmap:
if (mmio)
iounmap(mmio);
out:
return err;
}
static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
unsigned int func, unsigned int off,
const void *buf, int len)
{
int err = -EINVAL;
u32 addr, val;
void __iomem *mmio = 0;
u16 chipid = pc->core->bus->chipinfo.id;
WARN_ON(!pc->hostmode);
if (unlikely(len != 1 && len != 2 && len != 4))
goto out;
if (dev == 0) {
/* we support only two functions on device 0 */
if (func > 1)
goto out;
/* accesses to config registers with offsets >= 256
* requires indirect access.
*/
if (off >= PCI_CONFIG_SPACE_SIZE) {
addr = (func << 12);
addr |= (off & 0x0FFC);
val = bcma_pcie_read_config(pc, addr);
} else {
addr = BCMA_CORE_PCI_PCICFG0;
addr |= (func << 8);
addr |= (off & 0xFC);
val = pcicore_read32(pc, addr);
}
} else {
addr = bcma_get_cfgspace_addr(pc, dev, func, off);
if (unlikely(!addr))
goto out;
err = -ENOMEM;
mmio = ioremap(addr, sizeof(val));
if (!mmio)
goto out;
if (mips_busprobe32(val, mmio)) {
val = 0xFFFFFFFF;
goto unmap;
}
}
switch (len) {
case 1:
val &= ~(0xFF << (8 * (off & 3)));
val |= *((const u8 *)buf) << (8 * (off & 3));
break;
case 2:
val &= ~(0xFFFF << (8 * (off & 3)));
val |= *((const u16 *)buf) << (8 * (off & 3));
break;
case 4:
val = *((const u32 *)buf);
break;
}
if (dev == 0) {
/* accesses to config registers with offsets >= 256
* requires indirect access.
*/
if (off >= PCI_CONFIG_SPACE_SIZE)
bcma_pcie_write_config(pc, addr, val);
else
pcicore_write32(pc, addr, val);
} else {
writel(val, mmio);
if (chipid == BCMA_CHIP_ID_BCM4716 ||
chipid == BCMA_CHIP_ID_BCM4748)
readl(mmio);
}
err = 0;
unmap:
if (mmio)
iounmap(mmio);
out:
return err;
}
/* pci_ops.read callback: serialize config-space reads with the
 * cfgspace spinlock and translate errors for the PCI core.
 */
static int bcma_core_pci_hostmode_read_config(struct pci_bus *bus,
					      unsigned int devfn,
					      int reg, int size, u32 *val)
{
	unsigned long flags;
	int err;
	struct bcma_drv_pci *pc;
	struct bcma_drv_pci_host *pc_host;
	pc_host = container_of(bus->ops, struct bcma_drv_pci_host, pci_ops);
	pc = pc_host->pdev;
	spin_lock_irqsave(&pc_host->cfgspace_lock, flags);
	err = bcma_extpci_read_config(pc, PCI_SLOT(devfn),
				      PCI_FUNC(devfn), reg, val, size);
	spin_unlock_irqrestore(&pc_host->cfgspace_lock, flags);
	return err ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
}
/* pci_ops.write callback: mirror of the read callback above. */
static int bcma_core_pci_hostmode_write_config(struct pci_bus *bus,
					       unsigned int devfn,
					       int reg, int size, u32 val)
{
	unsigned long flags;
	int err;
	struct bcma_drv_pci *pc;
	struct bcma_drv_pci_host *pc_host;
	pc_host = container_of(bus->ops, struct bcma_drv_pci_host, pci_ops);
	pc = pc_host->pdev;
	spin_lock_irqsave(&pc_host->cfgspace_lock, flags);
	err = bcma_extpci_write_config(pc, PCI_SLOT(devfn),
				       PCI_FUNC(devfn), reg, &val, size);
	spin_unlock_irqrestore(&pc_host->cfgspace_lock, flags);
	return err ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
}
/* return cap_offset if requested capability exists in the PCI config space */
/* Walk the capability list of dev/func looking for req_cap_id.
 * Returns the capability offset, or 0 if the header type is wrong, the
 * device has no capability list, or the id is not found. If buf/buflen
 * are non-NULL, copies up to *buflen bytes of the capability body
 * (excluding the id and next-pointer) into buf and updates *buflen with
 * the number of bytes actually copied.
 */
static u8 bcma_find_pci_capability(struct bcma_drv_pci *pc, unsigned int dev,
				   unsigned int func, u8 req_cap_id,
				   unsigned char *buf, u32 *buflen)
{
	u8 cap_id;
	u8 cap_ptr = 0;
	u32 bufsize;
	u8 byte_val;
	/* check for Header type 0 */
	bcma_extpci_read_config(pc, dev, func, PCI_HEADER_TYPE, &byte_val,
				sizeof(u8));
	if ((byte_val & 0x7F) != PCI_HEADER_TYPE_NORMAL)
		return cap_ptr;
	/* check if the capability pointer field exists */
	bcma_extpci_read_config(pc, dev, func, PCI_STATUS, &byte_val,
				sizeof(u8));
	if (!(byte_val & PCI_STATUS_CAP_LIST))
		return cap_ptr;
	/* check if the capability pointer is 0x00 */
	bcma_extpci_read_config(pc, dev, func, PCI_CAPABILITY_LIST, &cap_ptr,
				sizeof(u8));
	if (cap_ptr == 0x00)
		return cap_ptr;
	/* loop through the capability list and see if the requested capability
	 * exists */
	bcma_extpci_read_config(pc, dev, func, cap_ptr, &cap_id, sizeof(u8));
	while (cap_id != req_cap_id) {
		/* byte after the id is the next-capability pointer */
		bcma_extpci_read_config(pc, dev, func, cap_ptr + 1, &cap_ptr,
					sizeof(u8));
		if (cap_ptr == 0x00)
			return cap_ptr;
		bcma_extpci_read_config(pc, dev, func, cap_ptr, &cap_id,
					sizeof(u8));
	}
	/* found the caller requested capability */
	if ((buf != NULL) && (buflen != NULL)) {
		u8 cap_data;
		bufsize = *buflen;
		if (!bufsize)
			return cap_ptr;
		*buflen = 0;
		/* copy the capability data excluding cap ID and next ptr */
		cap_data = cap_ptr + 2;
		if ((bufsize + cap_data) > PCI_CONFIG_SPACE_SIZE)
			bufsize = PCI_CONFIG_SPACE_SIZE - cap_data;
		*buflen = bufsize;
		while (bufsize--) {
			bcma_extpci_read_config(pc, dev, func, cap_data, buf,
						sizeof(u8));
			cap_data++;
			buf++;
		}
	}
	return cap_ptr;
}
/* If the root port is capable of returning Config Request
 * Retry Status (CRS) Completion Status to software then
 * enable the feature.
 *
 * After enabling CRS visibility, poll the Vendor ID of every slot for
 * up to 1 second: a CRS completion is reported as the special value
 * 0x0001, and a device still returning it after the timeout is logged
 * as broken.
 */
static void bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
{
	struct bcma_bus *bus = pc->core->bus;
	u8 cap_ptr, root_ctrl, root_cap, dev;
	u16 val16;
	int i;
	/* Locate the PCIe capability of the root port (device 0) */
	cap_ptr = bcma_find_pci_capability(pc, 0, 0, PCI_CAP_ID_EXP, NULL,
					   NULL);
	root_cap = cap_ptr + PCI_EXP_RTCAP;
	bcma_extpci_read_config(pc, 0, 0, root_cap, &val16, sizeof(u16));
	if (val16 & BCMA_CORE_PCI_RC_CRS_VISIBILITY) {
		/* Enable CRS software visibility */
		root_ctrl = cap_ptr + PCI_EXP_RTCTL;
		val16 = PCI_EXP_RTCTL_CRSSVE;
		/* Fix: CRSSVE must be *written* to the Root Control
		 * register. The previous code issued a read here, which
		 * overwrote the just-assigned enable value and left CRS
		 * visibility disabled.
		 */
		bcma_extpci_write_config(pc, 0, 0, root_ctrl, &val16,
					 sizeof(u16));
		/* Initiate a configuration request to read the vendor id
		 * field of the device function's config space header after
		 * 100 ms wait time from the end of Reset. If the device is
		 * not done with its internal initialization, it must at
		 * least return a completion TLP, with a completion status
		 * of "Configuration Request Retry Status (CRS)". The root
		 * complex must complete the request to the host by returning
		 * a read-data value of 0001h for the Vendor ID field and
		 * all 1s for any additional bytes included in the request.
		 * Poll using the config reads for max wait time of 1 sec or
		 * until we receive the successful completion status. Repeat
		 * the procedure for all the devices.
		 */
		for (dev = 1; dev < BCMA_PCI_SLOT_MAX; dev++) {
			for (i = 0; i < 100000; i++) {
				bcma_extpci_read_config(pc, dev, 0,
							PCI_VENDOR_ID, &val16,
							sizeof(val16));
				if (val16 != 0x1)
					break;
				udelay(10);
			}
			if (val16 == 0x1)
				bcma_err(bus, "PCI: Broken device in slot %d\n",
					 dev);
		}
	}
}
/* One-time setup of the PCI(e) core as a host controller: allocate the
 * controller state, reset the root complex, program the SBTOPCI memory
 * and config windows (with per-chip quirks for BCM4716/4748/4706),
 * enable CRS visibility and interrupts, then register the controller
 * with the (MIPS) PCI layer.
 */
void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
{
	struct bcma_bus *bus = pc->core->bus;
	struct bcma_drv_pci_host *pc_host;
	u32 tmp;
	u32 pci_membase_1G;
	unsigned long io_map_base;
	bcma_info(bus, "PCIEcore in host mode found\n");
	if (bus->sprom.boardflags_lo & BCMA_CORE_PCI_BFL_NOPCI) {
		bcma_info(bus, "This PCIE core is disabled and not working\n");
		return;
	}
	pc_host = kzalloc(sizeof(*pc_host), GFP_KERNEL);
	if (!pc_host)  {
		bcma_err(bus, "can not allocate memory");
		return;
	}
	spin_lock_init(&pc_host->cfgspace_lock);
	pc->host_controller = pc_host;
	pc_host->pci_controller.io_resource = &pc_host->io_resource;
	pc_host->pci_controller.mem_resource = &pc_host->mem_resource;
	pc_host->pci_controller.pci_ops = &pc_host->pci_ops;
	pc_host->pdev = pc;
	/* Defaults; several are overridden in the per-chip cases below */
	pci_membase_1G = BCMA_SOC_PCI_DMA;
	pc_host->host_cfg_addr = BCMA_SOC_PCI_CFG;
	pc_host->pci_ops.read = bcma_core_pci_hostmode_read_config;
	pc_host->pci_ops.write = bcma_core_pci_hostmode_write_config;
	pc_host->mem_resource.name = "BCMA PCIcore external memory";
	pc_host->mem_resource.start = BCMA_SOC_PCI_DMA;
	pc_host->mem_resource.end = BCMA_SOC_PCI_DMA + BCMA_SOC_PCI_DMA_SZ - 1;
	pc_host->mem_resource.flags = IORESOURCE_MEM | IORESOURCE_PCI_FIXED;
	pc_host->io_resource.name = "BCMA PCIcore external I/O";
	pc_host->io_resource.start = 0x100;
	pc_host->io_resource.end = 0x7FF;
	pc_host->io_resource.flags = IORESOURCE_IO | IORESOURCE_PCI_FIXED;
	/* Reset RC */
	usleep_range(3000, 5000);
	pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST_OE);
	msleep(50);
	pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST |
			BCMA_CORE_PCI_CTL_RST_OE);
	/* 64 MB I/O access window. On 4716, use
	 * sbtopcie0 to access the device registers. We
	 * can't use address match 2 (1 GB window) region
	 * as mips can't generate 64-bit address on the
	 * backplane.
	 */
	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4716 ||
	    bus->chipinfo.id == BCMA_CHIP_ID_BCM4748) {
		pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
		pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
					    BCMA_SOC_PCI_MEM_SZ - 1;
		pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
				BCMA_CORE_PCI_SBTOPCI_MEM | BCMA_SOC_PCI_MEM);
	} else if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
		/* BCM4706 has two PCIe cores; each gets its own memory,
		 * I/O range and config address.
		 */
		tmp = BCMA_CORE_PCI_SBTOPCI_MEM;
		tmp |= BCMA_CORE_PCI_SBTOPCI_PREF;
		tmp |= BCMA_CORE_PCI_SBTOPCI_BURST;
		if (pc->core->core_unit == 0) {
			pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
			pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
						    BCMA_SOC_PCI_MEM_SZ - 1;
			pc_host->io_resource.start = 0x100;
			pc_host->io_resource.end = 0x47F;
			pci_membase_1G = BCMA_SOC_PCIE_DMA_H32;
			pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
					tmp | BCMA_SOC_PCI_MEM);
		} else if (pc->core->core_unit == 1) {
			pc_host->mem_resource.start = BCMA_SOC_PCI1_MEM;
			pc_host->mem_resource.end = BCMA_SOC_PCI1_MEM +
						    BCMA_SOC_PCI_MEM_SZ - 1;
			pc_host->io_resource.start = 0x480;
			pc_host->io_resource.end = 0x7FF;
			pci_membase_1G = BCMA_SOC_PCIE1_DMA_H32;
			pc_host->host_cfg_addr = BCMA_SOC_PCI1_CFG;
			pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
					tmp | BCMA_SOC_PCI1_MEM);
		}
	} else
		pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
				BCMA_CORE_PCI_SBTOPCI_IO);
	/* 64 MB configuration access window */
	pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI1, BCMA_CORE_PCI_SBTOPCI_CFG0);
	/* 1 GB memory access window */
	pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI2,
			BCMA_CORE_PCI_SBTOPCI_MEM | pci_membase_1G);
	/* As per PCI Express Base Spec 1.1 we need to wait for
	 * at least 100 ms from the end of a reset (cold/warm/hot)
	 * before issuing configuration requests to PCI Express
	 * devices.
	 */
	msleep(100);
	bcma_core_pci_enable_crs(pc);
	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706 ||
	    bus->chipinfo.id == BCMA_CHIP_ID_BCM4716) {
		u16 val16;
		bcma_extpci_read_config(pc, 0, 0, BCMA_CORE_PCI_CFG_DEVCTRL,
					&val16, sizeof(val16));
		val16 |= (2 << 5);	/* Max payload size of 512 */
		val16 |= (2 << 12);	/* MRRS 512 */
		bcma_extpci_write_config(pc, 0, 0, BCMA_CORE_PCI_CFG_DEVCTRL,
					 &val16, sizeof(val16));
	}
	/* Enable PCI bridge BAR0 memory & master access */
	tmp = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
	bcma_extpci_write_config(pc, 0, 0, PCI_COMMAND, &tmp, sizeof(tmp));
	/* Enable PCI interrupts */
	pcicore_write32(pc, BCMA_CORE_PCI_IMASK, BCMA_CORE_PCI_IMASK_INTA);
	/* Ok, ready to run, register it to the system.
	 * The following needs change, if we want to port hostmode
	 * to non-MIPS platform. */
	io_map_base = (unsigned long)ioremap(pc_host->mem_resource.start,
					     resource_size(&pc_host->mem_resource));
	pc_host->pci_controller.io_map_base = io_map_base;
	set_io_port_base(pc_host->pci_controller.io_map_base);
	/* Give some time to the PCI controller to configure itself with the new
	 * values. Not waiting at this point causes crashes of the machine. */
	usleep_range(10000, 15000);
	register_pci_controller(&pc_host->pci_controller);
	return;
}
/* Early PCI fixup for a device on the PCI-core bridge. */
/* Enables bus mastering, memory space and BAR1 prefetch/burst on the
 * bridge device (slot 0) only; runs for every PCI device, so it first
 * checks that the bus really is ours by comparing the read op.
 */
static void bcma_core_pci_fixup_pcibridge(struct pci_dev *dev)
{
	if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
		/* This is not a device on the PCI-core bridge. */
		return;
	}
	if (PCI_SLOT(dev->devfn) != 0)
		return;
	pr_info("PCI: Fixing up bridge %s\n", pci_name(dev));
	/* Enable PCI bridge bus mastering and memory space */
	pci_set_master(dev);
	if (pcibios_enable_device(dev, ~0) < 0) {
		pr_err("PCI: BCMA bridge enable failed\n");
		return;
	}
	/* Enable PCI bridge BAR1 prefetch and burst */
	pci_write_config_dword(dev, BCMA_PCI_BAR1_CONTROL, 3);
}
DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_pcibridge);
/* Early PCI fixup for all PCI-cores to set the correct memory address. */
/* For non-bridge devices (slot != 0) behind our host bridge, assign
 * any I/O or memory BAR resources the firmware did not set up.
 */
static void bcma_core_pci_fixup_addresses(struct pci_dev *dev)
{
	struct resource *res;
	int pos, err;
	if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
		/* This is not a device on the PCI-core bridge. */
		return;
	}
	if (PCI_SLOT(dev->devfn) == 0)
		return;
	pr_info("PCI: Fixing up addresses %s\n", pci_name(dev));
	/* Walk the six standard BARs */
	for (pos = 0; pos < 6; pos++) {
		res = &dev->resource[pos];
		if (res->flags & (IORESOURCE_IO | IORESOURCE_MEM)) {
			err = pci_assign_resource(dev, pos);
			if (err)
				pr_err("PCI: Problem fixing up the addresses on %s\n",
				       pci_name(dev));
		}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_addresses);
/* This function is called when doing a pci_enable_device().
 * We must first check if the device is a device on the PCI-core bridge. */
/* Wires up the device's IRQ line from the bcma core and clamps the
 * PCIe max read request size to 128 bytes. Returns 0, or -ENODEV for
 * devices not behind our bridge.
 */
int bcma_core_pci_plat_dev_init(struct pci_dev *dev)
{
	struct bcma_drv_pci_host *pc_host;
	int readrq;
	if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
		/* This is not a device on the PCI-core bridge. */
		return -ENODEV;
	}
	pc_host = container_of(dev->bus->ops, struct bcma_drv_pci_host,
			       pci_ops);
	pr_info("PCI: Fixing up device %s\n", pci_name(dev));
	/* Fix up interrupt lines */
	dev->irq = bcma_core_irq(pc_host->pdev->core, 0);
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
	readrq = pcie_get_readrq(dev);
	if (readrq > 128) {
		pr_info("change PCIe max read request size from %i to 128\n", readrq);
		pcie_set_readrq(dev, 128);
	}
	return 0;
}
EXPORT_SYMBOL(bcma_core_pci_plat_dev_init);
/* PCI device IRQ mapping. */
/* Return the bcma core IRQ for a device behind our host bridge, or
 * -ENODEV when the device belongs to a different bus.
 */
int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev)
{
	struct bcma_drv_pci_host *pc_host;
	if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
		/* This is not a device on the PCI-core bridge. */
		return -ENODEV;
	}
	pc_host = container_of(dev->bus->ops, struct bcma_drv_pci_host,
			       pci_ops);
	return bcma_core_irq(pc_host->pdev->core, 0);
}
EXPORT_SYMBOL(bcma_core_pci_pcibios_map_irq);
| linux-master | drivers/bcma/driver_pci_host.c |
/*
* Broadcom specific AMBA
* Broadcom MIPS32 74K core driver
*
* Copyright 2009, Broadcom Corporation
* Copyright 2006, 2007, Michael Buesch <[email protected]>
* Copyright 2010, Bernhard Loos <[email protected]>
* Copyright 2011, Hauke Mehrtens <[email protected]>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include "bcma_private.h"
#include <linux/bcma/bcma.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/time.h>
#ifdef CONFIG_BCM47XX
#include <linux/bcm47xx_nvram.h>
#endif
/* Flash/ROM device a SoC boots from, as detected by bcma_boot_dev(). */
enum bcma_boot_dev {
	BCMA_BOOT_DEV_UNK = 0,
	BCMA_BOOT_DEV_ROM,
	BCMA_BOOT_DEV_PARALLEL,
	BCMA_BOOT_DEV_SERIAL,
	BCMA_BOOT_DEV_NAND,
};
/* The 47162a0 hangs when reading MIPS DMP registers */
static inline bool bcma_core_mips_bcm47162a0_quirk(struct bcma_device *dev)
{
return dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM47162 &&
dev->bus->chipinfo.rev == 0 && dev->id.id == BCMA_CORE_MIPS_74K;
}
/* The 5357b0 hangs when reading USB20H DMP registers */
static inline bool bcma_core_mips_bcm5357b0_quirk(struct bcma_device *dev)
{
return (dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM5357 ||
dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM4749) &&
dev->bus->chipinfo.pkg == 11 &&
dev->id.id == BCMA_CORE_USB20_HOST;
}
/* Read a core's out-of-band interrupt flag number from OOBSELOUTA30.
 * For cores hit by the DMP-read hangs (see quirks above) the core
 * index is used instead. Returns 0x3f when the core has no flag.
 */
static u32 bcma_core_mips_irqflag(struct bcma_device *dev)
{
	u32 flag;
	if (bcma_core_mips_bcm47162a0_quirk(dev))
		return dev->core_index;
	if (bcma_core_mips_bcm5357b0_quirk(dev))
		return dev->core_index;
	flag = bcma_aread32(dev, BCMA_MIPS_OOBSELOUTA30);
	if (flag)
		return flag & 0x1F;
	else
		return 0x3f;
}
/* Get the MIPS IRQ assignment for a specified device.
 * If unassigned, 0 is returned.
 * If disabled, 5 is returned.
 * If not supported, 6 is returned.
 */
unsigned int bcma_core_mips_irq(struct bcma_device *dev)
{
	struct bcma_device *mdev = dev->bus->drv_mips.core;
	u32 irqflag;
	unsigned int irq;
	irqflag = bcma_core_mips_irqflag(dev);
	if (irqflag == 0x3f)
		return 6;
	/* Find which of the backplane IRQ masks has this flag enabled */
	for (irq = 0; irq <= 4; irq++)
		if (bcma_read32(mdev, BCMA_MIPS_MIPS74K_INTMASK(irq)) &
		    (1 << irqflag))
			return irq;
	return 5;
}
/* Route a core's interrupt flag to backplane IRQ line "irq" (0..4) and
 * record irq + 2 in dev->irq. The previous routing is cleared first;
 * if the target line is already in use, its current user is demoted to
 * the shared line 0.
 */
static void bcma_core_mips_set_irq(struct bcma_device *dev, unsigned int irq)
{
	unsigned int oldirq = bcma_core_mips_irq(dev);
	struct bcma_bus *bus = dev->bus;
	struct bcma_device *mdev = bus->drv_mips.core;
	u32 irqflag;
	irqflag = bcma_core_mips_irqflag(dev);
	BUG_ON(oldirq == 6);
	dev->irq = irq + 2;
	/* clear the old irq */
	if (oldirq == 0)
		bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0),
			    bcma_read32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0)) &
			    ~(1 << irqflag))			;
	else if (oldirq != 5)
		bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(oldirq), 0);
	/* assign the new one */
	if (irq == 0) {
		bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0),
			    bcma_read32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0)) |
			    (1 << irqflag));
	} else {
		u32 irqinitmask = bcma_read32(mdev,
					      BCMA_MIPS_MIPS74K_INTMASK(irq));
		if (irqinitmask) {
			struct bcma_device *core;
			/* backplane irq line is in use, find out who uses
			 * it and set user to irq 0
			 */
			list_for_each_entry(core, &bus->cores, list) {
				if ((1 << bcma_core_mips_irqflag(core)) ==
				    irqinitmask) {
					bcma_core_mips_set_irq(core, 0);
					break;
				}
			}
		}
		bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(irq),
			     1 << irqflag);
	}
	bcma_debug(bus, "set_irq: core 0x%04x, irq %d => %d\n",
		   dev->id.id, oldirq <= 4 ? oldirq + 2 : 0, irq + 2);
}
/* Look up a core by (id, unit) and route its interrupt to "irq";
 * warns and bails out if the core is not present on this bus.
 */
static void bcma_core_mips_set_irq_name(struct bcma_bus *bus, unsigned int irq,
					u16 coreid, u8 unit)
{
	struct bcma_device *core;
	core = bcma_find_core_unit(bus, coreid, unit);
	if (!core) {
		bcma_warn(bus,
			  "Can not find core (id: 0x%x, unit %i) for IRQ configuration.\n",
			  coreid, unit);
		return;
	}
	bcma_core_mips_set_irq(core, irq);
}
/* Debug-print one core's IRQ assignment, marking the active line with
 * '*'. The buffer is sized exactly for the 7 entries below (24 chars
 * plus the terminating NUL).
 */
static void bcma_core_mips_print_irq(struct bcma_device *dev, unsigned int irq)
{
	int i;
	static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"};
	char interrupts[25];
	char *ints = interrupts;
	for (i = 0; i < ARRAY_SIZE(irq_name); i++)
		ints += sprintf(ints, " %s%c",
				irq_name[i], i == irq ? '*' : ' ');
	bcma_debug(dev->bus, "core 0x%04x, irq:%s\n", dev->id.id, interrupts);
}
/* Debug-print the IRQ assignment of every core on the bus. */
static void bcma_core_mips_dump_irq(struct bcma_bus *bus)
{
	struct bcma_device *core;
	list_for_each_entry(core, &bus->cores, list) {
		bcma_core_mips_print_irq(core, bcma_core_mips_irq(core));
	}
}
/* Return the CPU clock as reported by the PMU, or 0 (with an error
 * message) when the chip has no PMU.
 */
u32 bcma_cpu_clock(struct bcma_drv_mips *mcore)
{
	struct bcma_bus *bus = mcore->core->bus;
	if (bus->drv_cc.capabilities & BCMA_CC_CAP_PMU)
		return bcma_pmu_get_cpu_clock(&bus->drv_cc);
	bcma_err(bus, "No PMU available, need this to get the cpu clock\n");
	return 0;
}
EXPORT_SYMBOL(bcma_cpu_clock);
/* Determine which flash/ROM device the SoC booted from. ChipCommon
 * rev 42 chips expose it in the NS ROM core's IOST register; rev 38
 * chips in the ChipCommon status register; otherwise it is inferred
 * from the flash-type capability bits.
 */
static enum bcma_boot_dev bcma_boot_dev(struct bcma_bus *bus)
{
	struct bcma_drv_cc *cc = &bus->drv_cc;
	u8 cc_rev = cc->core->id.rev;
	if (cc_rev == 42) {
		struct bcma_device *core;
		core = bcma_find_core(bus, BCMA_CORE_NS_ROM);
		if (core) {
			switch (bcma_aread32(core, BCMA_IOST) &
				BCMA_NS_ROM_IOST_BOOT_DEV_MASK) {
			case BCMA_NS_ROM_IOST_BOOT_DEV_NOR:
				return BCMA_BOOT_DEV_SERIAL;
			case BCMA_NS_ROM_IOST_BOOT_DEV_NAND:
				return BCMA_BOOT_DEV_NAND;
			case BCMA_NS_ROM_IOST_BOOT_DEV_ROM:
			default:
				return BCMA_BOOT_DEV_ROM;
			}
		}
	} else {
		if (cc_rev == 38) {
			if (cc->status & BCMA_CC_CHIPST_5357_NAND_BOOT)
				return BCMA_BOOT_DEV_NAND;
			else if (cc->status & BIT(5))
				return BCMA_BOOT_DEV_ROM;
		}
		if ((cc->capabilities & BCMA_CC_CAP_FLASHT) ==
		    BCMA_CC_FLASHT_PARA)
			return BCMA_BOOT_DEV_PARALLEL;
		else
			return BCMA_BOOT_DEV_SERIAL;
	}
	/* Fallback when the rev-42 NS ROM core is missing */
	return BCMA_BOOT_DEV_SERIAL;
}
/* Initialize NVRAM parsing from the boot flash. Only effective when
 * CONFIG_BCM47XX is set; on other configs this just probes the boot
 * device and does nothing.
 */
static void bcma_core_mips_nvram_init(struct bcma_drv_mips *mcore)
{
	struct bcma_bus *bus = mcore->core->bus;
	enum bcma_boot_dev boot_dev;
	/* Determine flash type this SoC boots from */
	boot_dev = bcma_boot_dev(bus);
	switch (boot_dev) {
	case BCMA_BOOT_DEV_PARALLEL:
	case BCMA_BOOT_DEV_SERIAL:
#ifdef CONFIG_BCM47XX
		bcm47xx_nvram_init_from_mem(BCMA_SOC_FLASH2,
					    BCMA_SOC_FLASH2_SZ);
#endif
		break;
	case BCMA_BOOT_DEV_NAND:
#ifdef CONFIG_BCM47XX
		bcm47xx_nvram_init_from_mem(BCMA_SOC_FLASH1,
					    BCMA_SOC_FLASH1_SZ);
#endif
		break;
	default:
		break;
	}
}
/* Early, one-shot MIPS core setup: bring up the ChipCommon serial
 * console and NVRAM access. Safe to call repeatedly; guarded by
 * early_setup_done.
 */
void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
{
	struct bcma_bus *bus = mcore->core->bus;
	if (mcore->early_setup_done)
		return;
	bcma_chipco_serial_init(&bus->drv_cc);
	bcma_core_mips_nvram_init(mcore);
	mcore->early_setup_done = true;
}
/* Move the I2S core's OOB interrupt from line 8 to line 7 on
 * BCM4716/BCM4748, since flags above 7 are ignored by the IRQ masks.
 * Only applied when the registers still hold the known stock values.
 */
static void bcma_fix_i2s_irqflag(struct bcma_bus *bus)
{
	struct bcma_device *cpu, *pcie, *i2s;
	/* Fixup the interrupts in 4716/4748 for i2s core (2010 Broadcom SDK)
	 * (IRQ flags > 7 are ignored when setting the interrupt masks)
	 */
	if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4716 &&
	    bus->chipinfo.id != BCMA_CHIP_ID_BCM4748)
		return;
	cpu = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
	pcie = bcma_find_core(bus, BCMA_CORE_PCIE);
	i2s = bcma_find_core(bus, BCMA_CORE_I2S);
	if (cpu && pcie && i2s &&
	    bcma_aread32(cpu, BCMA_MIPS_OOBSELINA74) == 0x08060504 &&
	    bcma_aread32(pcie, BCMA_MIPS_OOBSELINA74) == 0x08060504 &&
	    bcma_aread32(i2s, BCMA_MIPS_OOBSELOUTA30) == 0x88) {
		bcma_awrite32(cpu, BCMA_MIPS_OOBSELINA74, 0x07060504);
		bcma_awrite32(pcie, BCMA_MIPS_OOBSELINA74, 0x07060504);
		bcma_awrite32(i2s, BCMA_MIPS_OOBSELOUTA30, 0x87);
		bcma_debug(bus,
			   "Moved i2s interrupt to oob line 7 instead of 8\n");
	}
}
/* Full MIPS core initialization: early setup, the I2S quirk, then a
 * per-chip static IRQ routing table. Unknown chips keep their default
 * routing and are reported with an error. Guarded by setup_done.
 */
void bcma_core_mips_init(struct bcma_drv_mips *mcore)
{
	struct bcma_bus *bus;
	struct bcma_device *core;
	bus = mcore->core->bus;
	if (mcore->setup_done)
		return;
	bcma_debug(bus, "Initializing MIPS core...\n");
	bcma_core_mips_early_init(mcore);
	bcma_fix_i2s_irqflag(bus);
	switch (bus->chipinfo.id) {
	case BCMA_CHIP_ID_BCM4716:
	case BCMA_CHIP_ID_BCM4748:
		bcma_core_mips_set_irq_name(bus, 1, BCMA_CORE_80211, 0);
		bcma_core_mips_set_irq_name(bus, 2, BCMA_CORE_MAC_GBIT, 0);
		bcma_core_mips_set_irq_name(bus, 3, BCMA_CORE_USB20_HOST, 0);
		bcma_core_mips_set_irq_name(bus, 4, BCMA_CORE_PCIE, 0);
		bcma_core_mips_set_irq_name(bus, 0, BCMA_CORE_CHIPCOMMON, 0);
		bcma_core_mips_set_irq_name(bus, 0, BCMA_CORE_I2S, 0);
		break;
	case BCMA_CHIP_ID_BCM5356:
	case BCMA_CHIP_ID_BCM47162:
	case BCMA_CHIP_ID_BCM53572:
		bcma_core_mips_set_irq_name(bus, 1, BCMA_CORE_80211, 0);
		bcma_core_mips_set_irq_name(bus, 2, BCMA_CORE_MAC_GBIT, 0);
		bcma_core_mips_set_irq_name(bus, 0, BCMA_CORE_CHIPCOMMON, 0);
		break;
	case BCMA_CHIP_ID_BCM5357:
	case BCMA_CHIP_ID_BCM4749:
		bcma_core_mips_set_irq_name(bus, 1, BCMA_CORE_80211, 0);
		bcma_core_mips_set_irq_name(bus, 2, BCMA_CORE_MAC_GBIT, 0);
		bcma_core_mips_set_irq_name(bus, 3, BCMA_CORE_USB20_HOST, 0);
		bcma_core_mips_set_irq_name(bus, 0, BCMA_CORE_CHIPCOMMON, 0);
		bcma_core_mips_set_irq_name(bus, 0, BCMA_CORE_I2S, 0);
		break;
	case BCMA_CHIP_ID_BCM4706:
		bcma_core_mips_set_irq_name(bus, 1, BCMA_CORE_PCIE, 0);
		bcma_core_mips_set_irq_name(bus, 2, BCMA_CORE_4706_MAC_GBIT,
					    0);
		bcma_core_mips_set_irq_name(bus, 3, BCMA_CORE_PCIE, 1);
		bcma_core_mips_set_irq_name(bus, 4, BCMA_CORE_USB20_HOST, 0);
		bcma_core_mips_set_irq_name(bus, 0, BCMA_CORE_4706_CHIPCOMMON,
					    0);
		break;
	default:
		list_for_each_entry(core, &bus->cores, list) {
			core->irq = bcma_core_irq(core, 0);
		}
		bcma_err(bus,
			 "Unknown device (0x%x) found, can not configure IRQs\n",
			 bus->chipinfo.id);
	}
	bcma_debug(bus, "IRQ reconfiguration done\n");
	bcma_core_mips_dump_irq(bus);
	mcore->setup_done = true;
}
| linux-master | drivers/bcma/driver_mips.c |
/*
* Broadcom specific AMBA
* Bus scanning
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include "scan.h"
#include "bcma_private.h"
#include <linux/bcma/bcma.h>
#include <linux/bcma/bcma_regs.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
struct bcma_device_id_name {
u16 id;
const char *name;
};
static const struct bcma_device_id_name bcma_arm_device_names[] = {
{ BCMA_CORE_4706_MAC_GBIT_COMMON, "BCM4706 GBit MAC Common" },
{ BCMA_CORE_ARM_1176, "ARM 1176" },
{ BCMA_CORE_ARM_7TDMI, "ARM 7TDMI" },
{ BCMA_CORE_ARM_CM3, "ARM CM3" },
};
static const struct bcma_device_id_name bcma_bcm_device_names[] = {
{ BCMA_CORE_OOB_ROUTER, "OOB Router" },
{ BCMA_CORE_4706_CHIPCOMMON, "BCM4706 ChipCommon" },
{ BCMA_CORE_4706_SOC_RAM, "BCM4706 SOC RAM" },
{ BCMA_CORE_4706_MAC_GBIT, "BCM4706 GBit MAC" },
{ BCMA_CORE_NS_PCIEG2, "PCIe Gen 2" },
{ BCMA_CORE_NS_DMA, "DMA" },
{ BCMA_CORE_NS_SDIO3, "SDIO3" },
{ BCMA_CORE_NS_USB20, "USB 2.0" },
{ BCMA_CORE_NS_USB30, "USB 3.0" },
{ BCMA_CORE_NS_A9JTAG, "ARM Cortex A9 JTAG" },
{ BCMA_CORE_NS_DDR23, "Denali DDR2/DDR3 memory controller" },
{ BCMA_CORE_NS_ROM, "ROM" },
{ BCMA_CORE_NS_NAND, "NAND flash controller" },
{ BCMA_CORE_NS_QSPI, "SPI flash controller" },
{ BCMA_CORE_NS_CHIPCOMMON_B, "Chipcommon B" },
{ BCMA_CORE_ARMCA9, "ARM Cortex A9 core (ihost)" },
{ BCMA_CORE_AMEMC, "AMEMC (DDR)" },
{ BCMA_CORE_ALTA, "ALTA (I2S)" },
{ BCMA_CORE_INVALID, "Invalid" },
{ BCMA_CORE_CHIPCOMMON, "ChipCommon" },
{ BCMA_CORE_ILINE20, "ILine 20" },
{ BCMA_CORE_SRAM, "SRAM" },
{ BCMA_CORE_SDRAM, "SDRAM" },
{ BCMA_CORE_PCI, "PCI" },
{ BCMA_CORE_ETHERNET, "Fast Ethernet" },
{ BCMA_CORE_V90, "V90" },
{ BCMA_CORE_USB11_HOSTDEV, "USB 1.1 Hostdev" },
{ BCMA_CORE_ADSL, "ADSL" },
{ BCMA_CORE_ILINE100, "ILine 100" },
{ BCMA_CORE_IPSEC, "IPSEC" },
{ BCMA_CORE_UTOPIA, "UTOPIA" },
{ BCMA_CORE_PCMCIA, "PCMCIA" },
{ BCMA_CORE_INTERNAL_MEM, "Internal Memory" },
{ BCMA_CORE_MEMC_SDRAM, "MEMC SDRAM" },
{ BCMA_CORE_OFDM, "OFDM" },
{ BCMA_CORE_EXTIF, "EXTIF" },
{ BCMA_CORE_80211, "IEEE 802.11" },
{ BCMA_CORE_PHY_A, "PHY A" },
{ BCMA_CORE_PHY_B, "PHY B" },
{ BCMA_CORE_PHY_G, "PHY G" },
{ BCMA_CORE_USB11_HOST, "USB 1.1 Host" },
{ BCMA_CORE_USB11_DEV, "USB 1.1 Device" },
{ BCMA_CORE_USB20_HOST, "USB 2.0 Host" },
{ BCMA_CORE_USB20_DEV, "USB 2.0 Device" },
{ BCMA_CORE_SDIO_HOST, "SDIO Host" },
{ BCMA_CORE_ROBOSWITCH, "Roboswitch" },
{ BCMA_CORE_PARA_ATA, "PATA" },
{ BCMA_CORE_SATA_XORDMA, "SATA XOR-DMA" },
{ BCMA_CORE_ETHERNET_GBIT, "GBit Ethernet" },
{ BCMA_CORE_PCIE, "PCIe" },
{ BCMA_CORE_PHY_N, "PHY N" },
{ BCMA_CORE_SRAM_CTL, "SRAM Controller" },
{ BCMA_CORE_MINI_MACPHY, "Mini MACPHY" },
{ BCMA_CORE_PHY_LP, "PHY LP" },
{ BCMA_CORE_PMU, "PMU" },
{ BCMA_CORE_PHY_SSN, "PHY SSN" },
{ BCMA_CORE_SDIO_DEV, "SDIO Device" },
{ BCMA_CORE_PHY_HT, "PHY HT" },
{ BCMA_CORE_MAC_GBIT, "GBit MAC" },
{ BCMA_CORE_DDR12_MEM_CTL, "DDR1/DDR2 Memory Controller" },
{ BCMA_CORE_PCIE_RC, "PCIe Root Complex" },
{ BCMA_CORE_OCP_OCP_BRIDGE, "OCP to OCP Bridge" },
{ BCMA_CORE_SHARED_COMMON, "Common Shared" },
{ BCMA_CORE_OCP_AHB_BRIDGE, "OCP to AHB Bridge" },
{ BCMA_CORE_SPI_HOST, "SPI Host" },
{ BCMA_CORE_I2S, "I2S" },
{ BCMA_CORE_SDR_DDR1_MEM_CTL, "SDR/DDR1 Memory Controller" },
{ BCMA_CORE_SHIM, "SHIM" },
{ BCMA_CORE_PCIE2, "PCIe Gen2" },
{ BCMA_CORE_ARM_CR4, "ARM CR4" },
{ BCMA_CORE_GCI, "GCI" },
{ BCMA_CORE_CMEM, "CNDS DDR2/3 memory controller" },
{ BCMA_CORE_ARM_CA7, "ARM CA7" },
{ BCMA_CORE_DEFAULT, "Default" },
};
/* Core-ID -> human readable name table for cores made by MIPS */
static const struct bcma_device_id_name bcma_mips_device_names[] = {
	{ BCMA_CORE_MIPS, "MIPS" },
	{ BCMA_CORE_MIPS_3302, "MIPS 3302" },
	{ BCMA_CORE_MIPS_74K, "MIPS 74K" },
};
/* Translate a core ID into a human readable name by looking it up in the
 * table for the core's manufacturer. Returns "UNKNOWN" for unlisted
 * manufacturers or core IDs.
 */
static const char *bcma_device_name(const struct bcma_device_id *id)
{
	const struct bcma_device_id_name *table;
	int count, idx;

	/* Pick the name table matching the core's manufacturer */
	switch (id->manuf) {
	case BCMA_MANUF_ARM:
		table = bcma_arm_device_names;
		count = ARRAY_SIZE(bcma_arm_device_names);
		break;
	case BCMA_MANUF_BCM:
		table = bcma_bcm_device_names;
		count = ARRAY_SIZE(bcma_bcm_device_names);
		break;
	case BCMA_MANUF_MIPS:
		table = bcma_mips_device_names;
		count = ARRAY_SIZE(bcma_mips_device_names);
		break;
	default:
		return "UNKNOWN";
	}

	for (idx = 0; idx < count; idx++) {
		if (table[idx].id == id->id)
			return table[idx].name;
	}

	return "UNKNOWN";
}
/* Read a 32-bit register at @offset inside the currently mapped scan window. */
static u32 bcma_scan_read32(struct bcma_bus *bus, u16 offset)
{
	return readl(bus->mmio + offset);
}
/* Point the PCI BAR0 window at backplane address @addr so that the core at
 * that address becomes accessible through bus->mmio. Does nothing for
 * non-PCI host types.
 */
static void bcma_scan_switch_core(struct bcma_bus *bus, u32 addr)
{
	if (bus->hosttype == BCMA_HOSTTYPE_PCI)
		pci_write_config_dword(bus->host_pci, BCMA_PCI_BAR0_WIN,
				       addr);
}
/* Read the next EROM entry and advance the cursor past it. */
static u32 bcma_erom_get_ent(struct bcma_bus *bus, u32 __iomem **eromptr)
{
	u32 __iomem *cur = *eromptr;

	*eromptr = cur + 1;
	return readl(cur);
}
/* Step the EROM cursor back by one entry (undo the last get_ent). */
static void bcma_erom_push_ent(u32 __iomem **eromptr)
{
	(*eromptr)--;
}
/* Consume one EROM entry and interpret it as a Component Identifier.
 * Returns the entry value, or -ENOENT if it is not a valid CI descriptor.
 */
static s32 bcma_erom_get_ci(struct bcma_bus *bus, u32 __iomem **eromptr)
{
	u32 ent = bcma_erom_get_ent(bus, eromptr);

	if (!(ent & SCAN_ER_VALID) || (ent & SCAN_ER_TAG) != SCAN_ER_TAG_CI)
		return -ENOENT;

	return ent;
}
/* Peek at the next entry (cursor is restored) and report whether it is the
 * end-of-table marker.
 */
static bool bcma_erom_is_end(struct bcma_bus *bus, u32 __iomem **eromptr)
{
	bool at_end;

	at_end = bcma_erom_get_ent(bus, eromptr) ==
		 (SCAN_ER_TAG_END | SCAN_ER_VALID);
	bcma_erom_push_ent(eromptr);
	return at_end;
}
/* Peek at the next entry (cursor is restored) and report whether it is a
 * bridge address descriptor.
 */
static bool bcma_erom_is_bridge(struct bcma_bus *bus, u32 __iomem **eromptr)
{
	u32 ent = bcma_erom_get_ent(bus, eromptr);
	bool bridge;

	bridge = (ent & SCAN_ER_VALID) &&
		 (ent & SCAN_ER_TAGX) == SCAN_ER_TAG_ADDR &&
		 (ent & SCAN_ADDR_TYPE) == SCAN_ADDR_TYPE_BRIDGE;
	bcma_erom_push_ent(eromptr);
	return bridge;
}
/* Advance past the rest of the current component's descriptors, leaving the
 * cursor just before the next CI entry or the end-of-table marker.
 */
static void bcma_erom_skip_component(struct bcma_bus *bus, u32 __iomem **eromptr)
{
	u32 ent;

	do {
		ent = bcma_erom_get_ent(bus, eromptr);
	} while (!((ent & SCAN_ER_VALID) &&
		   (ent & SCAN_ER_TAG) == SCAN_ER_TAG_CI) &&
		 ent != (SCAN_ER_TAG_END | SCAN_ER_VALID));

	/* Leave the terminating entry for the caller to re-read */
	bcma_erom_push_ent(eromptr);
}
/* Consume one EROM entry and interpret it as a Master Port descriptor.
 * Returns the entry value, or -ENOENT if it is not a valid MP descriptor.
 */
static s32 bcma_erom_get_mst_port(struct bcma_bus *bus, u32 __iomem **eromptr)
{
	u32 ent = bcma_erom_get_ent(bus, eromptr);

	if (!(ent & SCAN_ER_VALID) || (ent & SCAN_ER_TAG) != SCAN_ER_TAG_MP)
		return -ENOENT;

	return ent;
}
/* Consume one address descriptor of the given @type for port @port.
 * Follow-up entries carrying the upper 32 address bits (AG32) and a sized
 * region descriptor (SZD, possibly with its own 32-bit upper half) are
 * consumed too, but their values are discarded.
 *
 * On a mismatch the entry is pushed back and (u32)-EINVAL is returned;
 * otherwise the low 32 bits of the region's base address are returned.
 */
static u32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr,
				   u32 type, u8 port)
{
	u32 addrl;
	u32 size;

	u32 ent = bcma_erom_get_ent(bus, eromptr);
	if ((!(ent & SCAN_ER_VALID)) ||
	    ((ent & SCAN_ER_TAGX) != SCAN_ER_TAG_ADDR) ||
	    ((ent & SCAN_ADDR_TYPE) != type) ||
	    (((ent & SCAN_ADDR_PORT) >> SCAN_ADDR_PORT_SHIFT) != port)) {
		bcma_erom_push_ent(eromptr);
		return (u32)-EINVAL;
	}

	addrl = ent & SCAN_ADDR_ADDR;
	if (ent & SCAN_ADDR_AG32)
		bcma_erom_get_ent(bus, eromptr);	/* skip upper 32 address bits */

	if ((ent & SCAN_ADDR_SZ) == SCAN_ADDR_SZ_SZD) {
		size = bcma_erom_get_ent(bus, eromptr);
		if (size & SCAN_SIZE_SG32)
			bcma_erom_get_ent(bus, eromptr);	/* skip upper 32 size bits */
	}

	return addrl;
}
/* Look up an already-registered core by its scan index; NULL if absent. */
static struct bcma_device *bcma_find_core_by_index(struct bcma_bus *bus,
						   u16 index)
{
	struct bcma_device *core;

	list_for_each_entry(core, &bus->cores, list) {
		if (core->core_index == index)
			return core;
	}
	return NULL;
}
/* Find the most recently added core with the given core id (used by the
 * scanner to number multiple instances via core_unit). NULL if none.
 */
static struct bcma_device *bcma_find_core_reverse(struct bcma_bus *bus, u16 coreid)
{
	struct bcma_device *core;

	list_for_each_entry_reverse(core, &bus->cores, list) {
		if (core->id.id == coreid)
			return core;
	}
	return NULL;
}
/* Like IS_ERR_VALUE(), but for u32 values returned by the EROM parser. */
#define IS_ERR_VALUE_U32(x) ((x) >= (u32)-MAX_ERRNO)
/* Parse the next component from the EROM table into @core.
 *
 * Returns 0 on success, or a negative errno which the caller
 * (bcma_bus_scan) uses as control flow:
 *  -ESPIPE: end of the EROM table was reached
 *  -EILSEQ: the table contents are malformed
 *  -ENXIO:  component is valid but not a usable core (bridge, dummy, ...)
 *  -ENODEV: core was filtered out (index already taken, or @match miss)
 */
static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
			      struct bcma_device_id *match, int core_num,
			      struct bcma_device *core)
{
	u32 tmp;
	u8 i, j, k;
	s32 cia, cib;
	u8 ports[2], wrappers[2];

	/* get CIs */
	cia = bcma_erom_get_ci(bus, eromptr);
	if (cia < 0) {
		bcma_erom_push_ent(eromptr);
		if (bcma_erom_is_end(bus, eromptr))
			return -ESPIPE;
		return -EILSEQ;
	}
	cib = bcma_erom_get_ci(bus, eromptr);
	if (cib < 0)
		return -EILSEQ;

	/* parse CIs: CIA carries class/id/manuf, CIB carries port/wrapper
	 * counts and the core revision
	 */
	core->id.class = (cia & SCAN_CIA_CLASS) >> SCAN_CIA_CLASS_SHIFT;
	core->id.id = (cia & SCAN_CIA_ID) >> SCAN_CIA_ID_SHIFT;
	core->id.manuf = (cia & SCAN_CIA_MANUF) >> SCAN_CIA_MANUF_SHIFT;
	ports[0] = (cib & SCAN_CIB_NMP) >> SCAN_CIB_NMP_SHIFT;
	ports[1] = (cib & SCAN_CIB_NSP) >> SCAN_CIB_NSP_SHIFT;
	wrappers[0] = (cib & SCAN_CIB_NMW) >> SCAN_CIB_NMW_SHIFT;
	wrappers[1] = (cib & SCAN_CIB_NSW) >> SCAN_CIB_NSW_SHIFT;
	core->id.rev = (cib & SCAN_CIB_REV) >> SCAN_CIB_REV_SHIFT;

	/* Skip ARM id-0xFFF components and anything without slave ports */
	if (((core->id.manuf == BCMA_MANUF_ARM) &&
	     (core->id.id == 0xFFF)) ||
	    (ports[1] == 0)) {
		bcma_erom_skip_component(bus, eromptr);
		return -ENXIO;
	}

	/* check if component is a core at all */
	if (wrappers[0] + wrappers[1] == 0) {
		/* Some specific cores don't need wrappers */
		switch (core->id.id) {
		case BCMA_CORE_4706_MAC_GBIT_COMMON:
		case BCMA_CORE_NS_CHIPCOMMON_B:
		case BCMA_CORE_PMU:
		case BCMA_CORE_GCI:
		/* Not used yet: case BCMA_CORE_OOB_ROUTER: */
			break;
		default:
			bcma_erom_skip_component(bus, eromptr);
			return -ENXIO;
		}
	}

	if (bcma_erom_is_bridge(bus, eromptr)) {
		bcma_erom_skip_component(bus, eromptr);
		return -ENXIO;
	}

	/* This index was already claimed by an earlier scan pass */
	if (bcma_find_core_by_index(bus, core_num)) {
		bcma_erom_skip_component(bus, eromptr);
		return -ENODEV;
	}

	/* Apply the optional match filter; BCMA_ANY_* acts as a wildcard */
	if (match && ((match->manuf != BCMA_ANY_MANUF &&
	      match->manuf != core->id.manuf) ||
	     (match->id != BCMA_ANY_ID && match->id != core->id.id) ||
	     (match->rev != BCMA_ANY_REV && match->rev != core->id.rev) ||
	     (match->class != BCMA_ANY_CLASS && match->class != core->id.class)
	    )) {
		bcma_erom_skip_component(bus, eromptr);
		return -ENODEV;
	}

	/* get & parse master ports (values are not needed, but the entries
	 * must be consumed to keep the cursor in sync)
	 */
	for (i = 0; i < ports[0]; i++) {
		s32 mst_port_d = bcma_erom_get_mst_port(bus, eromptr);
		if (mst_port_d < 0)
			return -EILSEQ;
	}

	/* First Slave Address Descriptor should be port 0:
	 * the main register space for the core
	 */
	tmp = bcma_erom_get_addr_desc(bus, eromptr, SCAN_ADDR_TYPE_SLAVE, 0);
	if (tmp == 0 || IS_ERR_VALUE_U32(tmp)) {
		/* Try again to see if it is a bridge */
		tmp = bcma_erom_get_addr_desc(bus, eromptr,
					      SCAN_ADDR_TYPE_BRIDGE, 0);
		if (tmp == 0 || IS_ERR_VALUE_U32(tmp)) {
			return -EILSEQ;
		} else {
			bcma_info(bus, "Bridge found\n");
			return -ENXIO;
		}
	}
	core->addr = tmp;

	/* get & parse slave ports; only the first addr_s entries are kept */
	k = 0;
	for (i = 0; i < ports[1]; i++) {
		for (j = 0; ; j++) {
			tmp = bcma_erom_get_addr_desc(bus, eromptr,
				SCAN_ADDR_TYPE_SLAVE, i);
			if (IS_ERR_VALUE_U32(tmp)) {
				/* no more entries for port _i_ */
				/* pr_debug("erom: slave port %d "
				 * "has %d descriptors\n", i, j); */
				break;
			} else if (k < ARRAY_SIZE(core->addr_s)) {
				core->addr_s[k] = tmp;
				k++;
			}
		}
	}

	/* get & parse master wrappers; only the first one is stored */
	for (i = 0; i < wrappers[0]; i++) {
		for (j = 0; ; j++) {
			tmp = bcma_erom_get_addr_desc(bus, eromptr,
				SCAN_ADDR_TYPE_MWRAP, i);
			if (IS_ERR_VALUE_U32(tmp)) {
				/* no more entries for port _i_ */
				/* pr_debug("erom: master wrapper %d "
				 * "has %d descriptors\n", i, j); */
				break;
			} else {
				if (i == 0 && j == 0)
					core->wrap = tmp;
			}
		}
	}

	/* get & parse slave wrappers; used as the wrap address only when
	 * there is no master wrapper
	 */
	for (i = 0; i < wrappers[1]; i++) {
		u8 hack = (ports[1] == 1) ? 0 : 1;
		for (j = 0; ; j++) {
			tmp = bcma_erom_get_addr_desc(bus, eromptr,
				SCAN_ADDR_TYPE_SWRAP, i + hack);
			if (IS_ERR_VALUE_U32(tmp)) {
				/* no more entries for port _i_ */
				/* pr_debug("erom: master wrapper %d "
				 * has %d descriptors\n", i, j); */
				break;
			} else {
				if (wrappers[0] == 0 && !i && !j)
					core->wrap = tmp;
			}
		}
	}

	/* SoC hosts have no PCI window: map the register spaces directly */
	if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
		core->io_addr = ioremap(core->addr, BCMA_CORE_SIZE);
		if (!core->io_addr)
			return -ENOMEM;
		if (core->wrap) {
			core->io_wrap = ioremap(core->wrap,
						BCMA_CORE_SIZE);
			if (!core->io_wrap) {
				iounmap(core->io_addr);
				return -ENOMEM;
			}
		}
	}
	return 0;
}
/* Read and decode the chip identification register into bus->chipinfo and
 * log the result.
 */
void bcma_detect_chip(struct bcma_bus *bus)
{
	struct bcma_chipinfo *chipinfo = &bus->chipinfo;
	char name[8];
	s32 reg;

	bcma_scan_switch_core(bus, BCMA_ADDR_BASE);

	reg = bcma_scan_read32(bus, BCMA_CC_ID);
	chipinfo->id = (reg & BCMA_CC_ID_ID) >> BCMA_CC_ID_ID_SHIFT;
	chipinfo->rev = (reg & BCMA_CC_ID_REV) >> BCMA_CC_ID_REV_SHIFT;
	chipinfo->pkg = (reg & BCMA_CC_ID_PKG) >> BCMA_CC_ID_PKG_SHIFT;

	/* IDs above 0x9999 are printed as decimal chip numbers, others hex */
	snprintf(name, ARRAY_SIZE(name),
		 (chipinfo->id > 0x9999) ? "%d" : "0x%04X", chipinfo->id);

	bcma_info(bus, "Found chip with id %s, rev 0x%02X and package 0x%02X\n",
		  name, chipinfo->rev, chipinfo->pkg);
}
int bcma_bus_scan(struct bcma_bus *bus)
{
u32 erombase;
u32 __iomem *eromptr, *eromend;
int err, core_num = 0;
/* Skip if bus was already scanned (e.g. during early register) */
if (bus->nr_cores)
return 0;
erombase = bcma_scan_read32(bus, BCMA_CC_EROM);
if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
eromptr = ioremap(erombase, BCMA_CORE_SIZE);
if (!eromptr)
return -ENOMEM;
} else {
eromptr = bus->mmio;
}
eromend = eromptr + BCMA_CORE_SIZE / sizeof(u32);
bcma_scan_switch_core(bus, erombase);
while (eromptr < eromend) {
struct bcma_device *other_core;
struct bcma_device *core = kzalloc(sizeof(*core), GFP_KERNEL);
if (!core) {
err = -ENOMEM;
goto out;
}
INIT_LIST_HEAD(&core->list);
core->bus = bus;
err = bcma_get_next_core(bus, &eromptr, NULL, core_num, core);
if (err < 0) {
kfree(core);
if (err == -ENODEV) {
core_num++;
continue;
} else if (err == -ENXIO) {
continue;
} else if (err == -ESPIPE) {
break;
}
goto out;
}
core->core_index = core_num++;
bus->nr_cores++;
other_core = bcma_find_core_reverse(bus, core->id.id);
core->core_unit = (other_core == NULL) ? 0 : other_core->core_unit + 1;
bcma_prepare_core(bus, core);
bcma_info(bus, "Core %d found: %s (manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n",
core->core_index, bcma_device_name(&core->id),
core->id.manuf, core->id.id, core->id.rev,
core->id.class);
list_add_tail(&core->list, &bus->cores);
}
err = 0;
out:
if (bus->hosttype == BCMA_HOSTTYPE_SOC)
iounmap(eromptr);
return err;
}
| linux-master | drivers/bcma/scan.c |
/*
* Broadcom specific AMBA
* PCIe Gen 2 Core
*
* Copyright 2014, Broadcom Corporation
* Copyright 2014, Rafał Miłecki <[email protected]>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include "bcma_private.h"
#include <linux/bcma/bcma.h>
#include <linux/pci.h>
/**************************************************
* R/W ops.
**************************************************/
#if 0
/* Indirect config-space read via the CONFIGIND window; currently unused
 * (compiled out) but kept as the read counterpart of
 * bcma_core_pcie2_cfg_write(). The dummy read of CONFIGINDADDR flushes the
 * address write before the data is fetched.
 */
static u32 bcma_core_pcie2_cfg_read(struct bcma_drv_pcie2 *pcie2, u32 addr)
{
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, addr);
	pcie2_read32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR);
	return pcie2_read32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA);
}
#endif
/* Write @val to the config-space register @addr via the indirect
 * CONFIGINDADDR/CONFIGINDDATA window.
 */
static void bcma_core_pcie2_cfg_write(struct bcma_drv_pcie2 *pcie2, u32 addr,
				      u32 val)
{
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, addr);
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, val);
}
/**************************************************
* Init.
**************************************************/
/* Workaround: toggle the DLYPERST/DISSPROMLD bits in the clock control
 * register. @enable selects the workaround state; disabling restores the
 * default (DLYPERST set, DISSPROMLD clear). Returns the register value
 * read back, which also flushes the write.
 */
static u32 bcma_core_pcie2_war_delay_perst_enab(struct bcma_drv_pcie2 *pcie2,
						bool enable)
{
	u32 clkc = pcie2_read32(pcie2, BCMA_CORE_PCIE2_CLK_CONTROL);

	if (enable) {
		clkc &= ~PCIE2_CLKC_DLYPERST;
		clkc |= PCIE2_CLKC_DISSPROMLD;
	} else {
		/* restore back to default */
		clkc |= PCIE2_CLKC_DLYPERST;
		clkc &= ~PCIE2_CLKC_DISSPROMLD;
	}
	pcie2_write32(pcie2, (BCMA_CORE_PCIE2_CLK_CONTROL), clkc);

	/* flush */
	return pcie2_read32(pcie2, BCMA_CORE_PCIE2_CLK_CONTROL);
}
static void bcma_core_pcie2_set_ltr_vals(struct bcma_drv_pcie2 *pcie2)
{
/* LTR0 */
pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, 0x844);
pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, 0x883c883c);
/* LTR1 */
pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, 0x848);
pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, 0x88648864);
/* LTR2 */
pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, 0x84C);
pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, 0x90039003);
}
/* Workaround for core revisions 2..13 (except 10): if LTR is enabled in the
 * device status/control 2 capability, reprogram the LTR values and cycle
 * the LTR state machine active -> sleep.
 */
static void bcma_core_pcie2_hw_ltr_war(struct bcma_drv_pcie2 *pcie2)
{
	u8 core_rev = pcie2->core->id.rev;
	u32 devstsctr2;

	if (core_rev < 2 || core_rev == 10 || core_rev > 13)
		return;

	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR,
		      PCIE2_CAP_DEVSTSCTRL2_OFFSET);
	devstsctr2 = pcie2_read32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA);
	if (devstsctr2 & PCIE2_CAP_DEVSTSCTRL2_LTRENAB) {
		/* force the right LTR values */
		bcma_core_pcie2_set_ltr_vals(pcie2);

		/* TODO:
		 *si_core_wrapperreg(pcie2, 3, 0x60, 0x8080, 0);
		 */

		/* enable the LTR */
		devstsctr2 |= PCIE2_CAP_DEVSTSCTRL2_LTRENAB;
		pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR,
			      PCIE2_CAP_DEVSTSCTRL2_OFFSET);
		pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, devstsctr2);

		/* set the LTR state to be active */
		pcie2_write32(pcie2, BCMA_CORE_PCIE2_LTR_STATE,
			      PCIE2_LTR_ACTIVE);
		usleep_range(1000, 2000);

		/* set the LTR state to be sleep */
		pcie2_write32(pcie2, BCMA_CORE_PCIE2_LTR_STATE,
			      PCIE2_LTR_SLEEP);
		usleep_range(1000, 2000);
	}
}
/* Workaround stub for core revisions affected by the issues the variables
 * name "160" and "162". Only the revision check is implemented; the actual
 * register pokes are still TODO (kept below under #if 0).
 */
static void pciedev_crwlpciegen2(struct bcma_drv_pcie2 *pcie2)
{
	u8 core_rev = pcie2->core->id.rev;
	bool pciewar160, pciewar162;

	pciewar160 = core_rev == 7 || core_rev == 9 || core_rev == 11;
	pciewar162 = core_rev == 5 || core_rev == 7 || core_rev == 8 ||
		     core_rev == 9 || core_rev == 11;

	if (!pciewar160 && !pciewar162)
		return;

	/* TODO */
#if 0
	pcie2_set32(pcie2, BCMA_CORE_PCIE2_CLK_CONTROL,
		    PCIE_DISABLE_L1CLK_GATING);
#if 0
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR,
		      PCIEGEN2_COE_PVT_TL_CTRL_0);
	pcie2_mask32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA,
		     ~(1 << COE_PVT_TL_CTRL_0_PM_DIS_L1_REENTRY_BIT));
#endif
#endif
}
/* Workaround "180": set the low five bits of the PMCR_REFUP register. */
static void pciedev_crwlpciegen2_180(struct bcma_drv_pcie2 *pcie2)
{
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, PCIE2_PMCR_REFUP);
	pcie2_set32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, 0x1f);
}
/* Workaround "182": write bit 0 of the sideband mailbox (SBMBX) register. */
static void pciedev_crwlpciegen2_182(struct bcma_drv_pcie2 *pcie2)
{
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, PCIE2_SBMBX);
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, 1 << 0);
}
/* Program the PM clock period register on core revisions <= 13. The value
 * written is the duration of two ALP clock cycles in nanoseconds
 * (2 * 10^6 / alp_khz).
 */
static void pciedev_reg_pm_clk_period(struct bcma_drv_pcie2 *pcie2)
{
	struct bcma_drv_cc *drv_cc = &pcie2->core->bus->drv_cc;
	u32 alp_khz, pm_value;

	/* Later core revisions do not need this programmed */
	if (pcie2->core->id.rev > 13)
		return;

	alp_khz = bcma_pmu_get_alp_clock(drv_cc) / 1000;
	pm_value = (1000000 * 2) / alp_khz;

	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR,
		      PCIE2_PVT_REG_PM_CLK_PERIOD);
	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, pm_value);
}
/* One-time initialization of the PCIe Gen2 core: apply chip/revision
 * specific workarounds and pick the PCIe max read request size that
 * bcma_core_pcie2_up() will later program on the host bridge.
 */
void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2)
{
	struct bcma_bus *bus = pcie2->core->bus;
	struct bcma_chipinfo *ci = &bus->chipinfo;
	u32 tmp;

	/* Conditional config write keyed off a field in SPROM word 54 */
	tmp = pcie2_read32(pcie2, BCMA_CORE_PCIE2_SPROM(54));
	if ((tmp & 0xe) >> 1 == 2)
		bcma_core_pcie2_cfg_write(pcie2, 0x4e0, 0x17);

	/* Per-chip max read request size, applied later in
	 * bcma_core_pcie2_up()
	 */
	switch (bus->chipinfo.id) {
	case BCMA_CHIP_ID_BCM4360:
	case BCMA_CHIP_ID_BCM4352:
		pcie2->reqsize = 1024;
		break;
	default:
		pcie2->reqsize = 128;
		break;
	}

	if (ci->id == BCMA_CHIP_ID_BCM4360 && ci->rev > 3)
		bcma_core_pcie2_war_delay_perst_enab(pcie2, true);
	bcma_core_pcie2_hw_ltr_war(pcie2);
	pciedev_crwlpciegen2(pcie2);
	pciedev_reg_pm_clk_period(pcie2);
	pciedev_crwlpciegen2_180(pcie2);
	pciedev_crwlpciegen2_182(pcie2);
}
/**************************************************
* Runtime ops.
**************************************************/
/* Bring-up hook: program the host's PCIe max read request size with the
 * value selected in bcma_core_pcie2_init(). Failure is only logged.
 */
void bcma_core_pcie2_up(struct bcma_drv_pcie2 *pcie2)
{
	struct bcma_bus *bus = pcie2->core->bus;
	struct pci_dev *dev = bus->host_pci;
	int err;

	err = pcie_set_readrq(dev, pcie2->reqsize);
	if (err)
		bcma_err(bus, "Error setting PCI_EXP_DEVCTL_READRQ: %d\n", err);
}
| linux-master | drivers/bcma/driver_pcie2.c |
/*
* Broadcom specific AMBA
* SPROM reading
*
* Copyright 2011, 2012, Hauke Mehrtens <[email protected]>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include "bcma_private.h"
#include <linux/bcma/bcma.h>
#include <linux/bcma/bcma_regs.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
/* Platform-registered callback providing fallback SPROM contents, if any;
 * set once via bcma_arch_register_fallback_sprom().
 */
static int(*get_fallback_sprom)(struct bcma_bus *dev, struct ssb_sprom *out);
/**
* bcma_arch_register_fallback_sprom - Registers a method providing a
* fallback SPROM if no SPROM is found.
*
* @sprom_callback: The callback function.
*
* With this function the architecture implementation may register a
* callback handler which fills the SPROM data structure. The fallback is
* used for PCI based BCMA devices, where no valid SPROM can be found
* in the shadow registers and to provide the SPROM for SoCs where BCMA is
* to control the system bus.
*
* This function is useful for weird architectures that have a half-assed
* BCMA device hardwired to their PCI bus.
*
* This function is available for architecture code, only. So it is not
* exported.
*/
int bcma_arch_register_fallback_sprom(int (*sprom_callback)(struct bcma_bus *bus,
				     struct ssb_sprom *out))
{
	int err = -EEXIST;

	/* Only a single fallback provider may be registered */
	if (!get_fallback_sprom) {
		get_fallback_sprom = sprom_callback;
		err = 0;
	}

	return err;
}
/* Fill @out from the platform-registered fallback provider. Returns 0 on
 * success, -ENOENT if no provider was registered, or the provider's error;
 * a warning is logged on any failure.
 */
static int bcma_fill_sprom_with_fallback(struct bcma_bus *bus,
					 struct ssb_sprom *out)
{
	int err = -ENOENT;

	if (get_fallback_sprom)
		err = get_fallback_sprom(bus, out);

	if (err) {
		bcma_warn(bus, "Using fallback SPROM failed (err %d)\n", err);
		return err;
	}

	bcma_debug(bus, "Using SPROM revision %d provided by platform.\n",
		   bus->sprom.revision);
	return 0;
}
/**************************************************
* R/W ops.
**************************************************/
/* Copy @words 16-bit words starting at byte offset @offset from the SPROM
 * shadow registers into @sprom.
 */
static void bcma_sprom_read(struct bcma_bus *bus, u16 offset, u16 *sprom,
			    size_t words)
{
	size_t word;

	for (word = 0; word < words; word++)
		sprom[word] = bcma_read16(bus->drv_cc.core,
					  offset + (word * 2));
}
/**************************************************
* Validation.
**************************************************/
/* Fold one data byte into a running CRC-8 using a 256-entry lookup table. */
static inline u8 bcma_crc8(u8 crc, u8 data)
{
	/* Polynomial: x^8 + x^7 + x^6 + x^4 + x^2 + 1 */
	static const u8 t[] = {
		0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B,
		0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21,
		0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF,
		0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5,
		0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14,
		0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E,
		0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80,
		0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA,
		0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95,
		0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF,
		0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01,
		0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B,
		0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA,
		0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0,
		0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E,
		0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34,
		0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0,
		0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A,
		0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54,
		0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E,
		0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF,
		0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5,
		0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B,
		0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61,
		0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E,
		0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74,
		0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA,
		0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0,
		0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41,
		0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B,
		0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5,
		0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F,
	};
	return t[crc ^ data];
}
/* Compute the CRC-8 over the SPROM image. Every word contributes its low
 * byte then its high byte, except the final word whose high byte holds the
 * stored CRC and is therefore skipped. The result is inverted.
 */
static u8 bcma_sprom_crc(const u16 *sprom, size_t words)
{
	u8 crc = 0xFF;
	size_t i;

	for (i = 0; i < words - 1; i++) {
		crc = bcma_crc8(crc, sprom[i] & 0x00FF);
		crc = bcma_crc8(crc, (sprom[i] & 0xFF00) >> 8);
	}
	/* Final word: low byte only, high byte is the CRC itself */
	crc = bcma_crc8(crc, sprom[words - 1] & 0x00FF);

	return crc ^ 0xFF;
}
/* Verify the SPROM image against the CRC stored in its final word.
 * Returns 0 on match, -EPROTO on mismatch.
 */
static int bcma_sprom_check_crc(const u16 *sprom, size_t words)
{
	u8 computed = bcma_sprom_crc(sprom, words);
	u8 expected = (sprom[words - 1] & SSB_SPROM_REVISION_CRC) >>
		      SSB_SPROM_REVISION_CRC_SHIFT;

	return (computed == expected) ? 0 : -EPROTO;
}
/* Validate the SPROM image: check its CRC and make sure the revision is one
 * this driver can parse (8..11). On success the revision is stored in
 * bus->sprom.revision and 0 is returned.
 */
static int bcma_sprom_valid(struct bcma_bus *bus, const u16 *sprom,
			    size_t words)
{
	u16 revision;
	int err;

	err = bcma_sprom_check_crc(sprom, words);
	if (err)
		return err;

	revision = sprom[words - 1] & SSB_SPROM_REVISION_REV;
	if (revision < 8 || revision > 11) {
		pr_err("Unsupported SPROM revision: %d\n", revision);
		return -ENOENT;
	}

	bus->sprom.revision = revision;
	bcma_debug(bus, "Found SPROM revision %d\n", revision);
	return 0;
}
/**************************************************
* SPROM extraction.
**************************************************/
/* Convert a byte offset within the SPROM image to a u16 word index. */
#define SPOFF(offset)	((offset) / sizeof(u16))

/* Extract a masked and shifted u16 field into bus->sprom._field. */
#define SPEX(_field, _offset, _mask, _shift)	\
	bus->sprom._field = ((sprom[SPOFF(_offset)] & (_mask)) >> (_shift))

/* As SPEX, but for a 32-bit field stored as two consecutive words, with
 * the upper half at _offset + 2.
 */
#define SPEX32(_field, _offset, _mask, _shift)	\
	bus->sprom._field = ((((u32)sprom[SPOFF((_offset)+2)] << 16 | \
				sprom[SPOFF(_offset)]) & (_mask)) >> (_shift))

/* Extract eight consecutive u16 fields into an array member. */
#define SPEX_ARRAY8(_field, _offset, _mask, _shift)	\
	do {	\
		SPEX(_field[0], _offset + 0, _mask, _shift);	\
		SPEX(_field[1], _offset + 2, _mask, _shift);	\
		SPEX(_field[2], _offset + 4, _mask, _shift);	\
		SPEX(_field[3], _offset + 6, _mask, _shift);	\
		SPEX(_field[4], _offset + 8, _mask, _shift);	\
		SPEX(_field[5], _offset + 10, _mask, _shift);	\
		SPEX(_field[6], _offset + 12, _mask, _shift);	\
		SPEX(_field[7], _offset + 14, _mask, _shift);	\
	} while (0)
/* Decode one per-antenna gain byte from the SPROM word at @offset.
 * 0xFF means "not programmed" and falls back to 8 (the 2 dBm default per
 * the original comment); otherwise the byte is Q5.2 fixed point with the
 * fractional bits in 0xC0, rearranged here into quarter-dB units.
 */
static s8 sprom_extract_antgain(const u16 *in, u16 offset, u16 mask, u16 shift)
{
	u16 v;
	u8 gain;

	v = in[SPOFF(offset)];
	gain = (v & mask) >> shift;
	if (gain == 0xFF) {
		gain = 8; /* If unset use 2dBm */
	} else {
		/* Q5.2 Fractional part is stored in 0xC0 */
		gain = ((gain & 0xC0) >> 6) | ((gain & 0x3F) << 2);
	}

	return (s8)gain;
}
/* Decode a revision 8..11 SPROM image (@sprom, already CRC-checked) into
 * bus->sprom. Purely mechanical field extraction via the SPEX macros.
 */
static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)
{
	u16 v, o;
	int i;
	static const u16 pwr_info_offset[] = {
		SSB_SROM8_PWR_INFO_CORE0, SSB_SROM8_PWR_INFO_CORE1,
		SSB_SROM8_PWR_INFO_CORE2, SSB_SROM8_PWR_INFO_CORE3
	};
	BUILD_BUG_ON(ARRAY_SIZE(pwr_info_offset) !=
		     ARRAY_SIZE(bus->sprom.core_pwr_info));

	/* MAC address: three big-endian 16-bit words */
	for (i = 0; i < 3; i++) {
		v = sprom[SPOFF(SSB_SPROM8_IL0MAC) + i];
		*(((__be16 *)bus->sprom.il0mac) + i) = cpu_to_be16(v);
	}

	SPEX(board_rev, SSB_SPROM8_BOARDREV, ~0, 0);
	SPEX(board_type, SSB_SPROM1_SPID, ~0, 0);

	/* TX power indices for the 2 GHz and 5 GHz (low/mid/high) bands */
	SPEX(txpid2g[0], SSB_SPROM4_TXPID2G01, SSB_SPROM4_TXPID2G0,
	     SSB_SPROM4_TXPID2G0_SHIFT);
	SPEX(txpid2g[1], SSB_SPROM4_TXPID2G01, SSB_SPROM4_TXPID2G1,
	     SSB_SPROM4_TXPID2G1_SHIFT);
	SPEX(txpid2g[2], SSB_SPROM4_TXPID2G23, SSB_SPROM4_TXPID2G2,
	     SSB_SPROM4_TXPID2G2_SHIFT);
	SPEX(txpid2g[3], SSB_SPROM4_TXPID2G23, SSB_SPROM4_TXPID2G3,
	     SSB_SPROM4_TXPID2G3_SHIFT);

	SPEX(txpid5gl[0], SSB_SPROM4_TXPID5GL01, SSB_SPROM4_TXPID5GL0,
	     SSB_SPROM4_TXPID5GL0_SHIFT);
	SPEX(txpid5gl[1], SSB_SPROM4_TXPID5GL01, SSB_SPROM4_TXPID5GL1,
	     SSB_SPROM4_TXPID5GL1_SHIFT);
	SPEX(txpid5gl[2], SSB_SPROM4_TXPID5GL23, SSB_SPROM4_TXPID5GL2,
	     SSB_SPROM4_TXPID5GL2_SHIFT);
	SPEX(txpid5gl[3], SSB_SPROM4_TXPID5GL23, SSB_SPROM4_TXPID5GL3,
	     SSB_SPROM4_TXPID5GL3_SHIFT);

	SPEX(txpid5g[0], SSB_SPROM4_TXPID5G01, SSB_SPROM4_TXPID5G0,
	     SSB_SPROM4_TXPID5G0_SHIFT);
	SPEX(txpid5g[1], SSB_SPROM4_TXPID5G01, SSB_SPROM4_TXPID5G1,
	     SSB_SPROM4_TXPID5G1_SHIFT);
	SPEX(txpid5g[2], SSB_SPROM4_TXPID5G23, SSB_SPROM4_TXPID5G2,
	     SSB_SPROM4_TXPID5G2_SHIFT);
	SPEX(txpid5g[3], SSB_SPROM4_TXPID5G23, SSB_SPROM4_TXPID5G3,
	     SSB_SPROM4_TXPID5G3_SHIFT);

	SPEX(txpid5gh[0], SSB_SPROM4_TXPID5GH01, SSB_SPROM4_TXPID5GH0,
	     SSB_SPROM4_TXPID5GH0_SHIFT);
	SPEX(txpid5gh[1], SSB_SPROM4_TXPID5GH01, SSB_SPROM4_TXPID5GH1,
	     SSB_SPROM4_TXPID5GH1_SHIFT);
	SPEX(txpid5gh[2], SSB_SPROM4_TXPID5GH23, SSB_SPROM4_TXPID5GH2,
	     SSB_SPROM4_TXPID5GH2_SHIFT);
	SPEX(txpid5gh[3], SSB_SPROM4_TXPID5GH23, SSB_SPROM4_TXPID5GH3,
	     SSB_SPROM4_TXPID5GH3_SHIFT);

	SPEX(boardflags_lo, SSB_SPROM8_BFLLO, ~0, 0);
	SPEX(boardflags_hi, SSB_SPROM8_BFLHI, ~0, 0);
	SPEX(boardflags2_lo, SSB_SPROM8_BFL2LO, ~0, 0);
	SPEX(boardflags2_hi, SSB_SPROM8_BFL2HI, ~0, 0);

	/* Two-letter country code */
	SPEX(alpha2[0], SSB_SPROM8_CCODE, 0xff00, 8);
	SPEX(alpha2[1], SSB_SPROM8_CCODE, 0x00ff, 0);

	/* Extract core's power info */
	for (i = 0; i < ARRAY_SIZE(pwr_info_offset); i++) {
		o = pwr_info_offset[i];
		SPEX(core_pwr_info[i].itssi_2g, o + SSB_SROM8_2G_MAXP_ITSSI,
			SSB_SPROM8_2G_ITSSI, SSB_SPROM8_2G_ITSSI_SHIFT);
		SPEX(core_pwr_info[i].maxpwr_2g, o + SSB_SROM8_2G_MAXP_ITSSI,
			SSB_SPROM8_2G_MAXP, 0);

		SPEX(core_pwr_info[i].pa_2g[0], o + SSB_SROM8_2G_PA_0, ~0, 0);
		SPEX(core_pwr_info[i].pa_2g[1], o + SSB_SROM8_2G_PA_1, ~0, 0);
		SPEX(core_pwr_info[i].pa_2g[2], o + SSB_SROM8_2G_PA_2, ~0, 0);

		SPEX(core_pwr_info[i].itssi_5g, o + SSB_SROM8_5G_MAXP_ITSSI,
			SSB_SPROM8_5G_ITSSI, SSB_SPROM8_5G_ITSSI_SHIFT);
		SPEX(core_pwr_info[i].maxpwr_5g, o + SSB_SROM8_5G_MAXP_ITSSI,
			SSB_SPROM8_5G_MAXP, 0);
		SPEX(core_pwr_info[i].maxpwr_5gh, o + SSB_SPROM8_5GHL_MAXP,
			SSB_SPROM8_5GH_MAXP, 0);
		SPEX(core_pwr_info[i].maxpwr_5gl, o + SSB_SPROM8_5GHL_MAXP,
			SSB_SPROM8_5GL_MAXP, SSB_SPROM8_5GL_MAXP_SHIFT);

		SPEX(core_pwr_info[i].pa_5gl[0], o + SSB_SROM8_5GL_PA_0, ~0, 0);
		SPEX(core_pwr_info[i].pa_5gl[1], o + SSB_SROM8_5GL_PA_1, ~0, 0);
		SPEX(core_pwr_info[i].pa_5gl[2], o + SSB_SROM8_5GL_PA_2, ~0, 0);
		SPEX(core_pwr_info[i].pa_5g[0], o + SSB_SROM8_5G_PA_0, ~0, 0);
		SPEX(core_pwr_info[i].pa_5g[1], o + SSB_SROM8_5G_PA_1, ~0, 0);
		SPEX(core_pwr_info[i].pa_5g[2], o + SSB_SROM8_5G_PA_2, ~0, 0);
		SPEX(core_pwr_info[i].pa_5gh[0], o + SSB_SROM8_5GH_PA_0, ~0, 0);
		SPEX(core_pwr_info[i].pa_5gh[1], o + SSB_SROM8_5GH_PA_1, ~0, 0);
		SPEX(core_pwr_info[i].pa_5gh[2], o + SSB_SROM8_5GH_PA_2, ~0, 0);
	}

	/* Front-end module parameters for both bands */
	SPEX(fem.ghz2.tssipos, SSB_SPROM8_FEM2G, SSB_SROM8_FEM_TSSIPOS,
	     SSB_SROM8_FEM_TSSIPOS_SHIFT);
	SPEX(fem.ghz2.extpa_gain, SSB_SPROM8_FEM2G, SSB_SROM8_FEM_EXTPA_GAIN,
	     SSB_SROM8_FEM_EXTPA_GAIN_SHIFT);
	SPEX(fem.ghz2.pdet_range, SSB_SPROM8_FEM2G, SSB_SROM8_FEM_PDET_RANGE,
	     SSB_SROM8_FEM_PDET_RANGE_SHIFT);
	SPEX(fem.ghz2.tr_iso, SSB_SPROM8_FEM2G, SSB_SROM8_FEM_TR_ISO,
	     SSB_SROM8_FEM_TR_ISO_SHIFT);
	SPEX(fem.ghz2.antswlut, SSB_SPROM8_FEM2G, SSB_SROM8_FEM_ANTSWLUT,
	     SSB_SROM8_FEM_ANTSWLUT_SHIFT);

	SPEX(fem.ghz5.tssipos, SSB_SPROM8_FEM5G, SSB_SROM8_FEM_TSSIPOS,
	     SSB_SROM8_FEM_TSSIPOS_SHIFT);
	SPEX(fem.ghz5.extpa_gain, SSB_SPROM8_FEM5G, SSB_SROM8_FEM_EXTPA_GAIN,
	     SSB_SROM8_FEM_EXTPA_GAIN_SHIFT);
	SPEX(fem.ghz5.pdet_range, SSB_SPROM8_FEM5G, SSB_SROM8_FEM_PDET_RANGE,
	     SSB_SROM8_FEM_PDET_RANGE_SHIFT);
	SPEX(fem.ghz5.tr_iso, SSB_SPROM8_FEM5G, SSB_SROM8_FEM_TR_ISO,
	     SSB_SROM8_FEM_TR_ISO_SHIFT);
	SPEX(fem.ghz5.antswlut, SSB_SPROM8_FEM5G, SSB_SROM8_FEM_ANTSWLUT,
	     SSB_SROM8_FEM_ANTSWLUT_SHIFT);

	SPEX(ant_available_a, SSB_SPROM8_ANTAVAIL, SSB_SPROM8_ANTAVAIL_A,
	     SSB_SPROM8_ANTAVAIL_A_SHIFT);
	SPEX(ant_available_bg, SSB_SPROM8_ANTAVAIL, SSB_SPROM8_ANTAVAIL_BG,
	     SSB_SPROM8_ANTAVAIL_BG_SHIFT);
	SPEX(maxpwr_bg, SSB_SPROM8_MAXP_BG, SSB_SPROM8_MAXP_BG_MASK, 0);
	SPEX(itssi_bg, SSB_SPROM8_MAXP_BG, SSB_SPROM8_ITSSI_BG,
	     SSB_SPROM8_ITSSI_BG_SHIFT);
	SPEX(maxpwr_a, SSB_SPROM8_MAXP_A, SSB_SPROM8_MAXP_A_MASK, 0);
	SPEX(itssi_a, SSB_SPROM8_MAXP_A, SSB_SPROM8_ITSSI_A,
	     SSB_SPROM8_ITSSI_A_SHIFT);
	SPEX(maxpwr_ah, SSB_SPROM8_MAXP_AHL, SSB_SPROM8_MAXP_AH_MASK, 0);
	SPEX(maxpwr_al, SSB_SPROM8_MAXP_AHL, SSB_SPROM8_MAXP_AL_MASK,
	     SSB_SPROM8_MAXP_AL_SHIFT);
	SPEX(gpio0, SSB_SPROM8_GPIOA, SSB_SPROM8_GPIOA_P0, 0);
	SPEX(gpio1, SSB_SPROM8_GPIOA, SSB_SPROM8_GPIOA_P1,
	     SSB_SPROM8_GPIOA_P1_SHIFT);
	SPEX(gpio2, SSB_SPROM8_GPIOB, SSB_SPROM8_GPIOB_P2, 0);
	SPEX(gpio3, SSB_SPROM8_GPIOB, SSB_SPROM8_GPIOB_P3,
	     SSB_SPROM8_GPIOB_P3_SHIFT);
	SPEX(tri2g, SSB_SPROM8_TRI25G, SSB_SPROM8_TRI2G, 0);
	SPEX(tri5g, SSB_SPROM8_TRI25G, SSB_SPROM8_TRI5G,
	     SSB_SPROM8_TRI5G_SHIFT);
	SPEX(tri5gl, SSB_SPROM8_TRI5GHL, SSB_SPROM8_TRI5GL, 0);
	SPEX(tri5gh, SSB_SPROM8_TRI5GHL, SSB_SPROM8_TRI5GH,
	     SSB_SPROM8_TRI5GH_SHIFT);
	SPEX(rxpo2g, SSB_SPROM8_RXPO, SSB_SPROM8_RXPO2G,
	     SSB_SPROM8_RXPO2G_SHIFT);
	SPEX(rxpo5g, SSB_SPROM8_RXPO, SSB_SPROM8_RXPO5G,
	     SSB_SPROM8_RXPO5G_SHIFT);
	SPEX(rssismf2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_RSSISMF2G, 0);
	SPEX(rssismc2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_RSSISMC2G,
	     SSB_SPROM8_RSSISMC2G_SHIFT);
	SPEX(rssisav2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_RSSISAV2G,
	     SSB_SPROM8_RSSISAV2G_SHIFT);
	SPEX(bxa2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_BXA2G,
	     SSB_SPROM8_BXA2G_SHIFT);
	SPEX(rssismf5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_RSSISMF5G, 0);
	SPEX(rssismc5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_RSSISMC5G,
	     SSB_SPROM8_RSSISMC5G_SHIFT);
	SPEX(rssisav5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_RSSISAV5G,
	     SSB_SPROM8_RSSISAV5G_SHIFT);
	SPEX(bxa5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_BXA5G,
	     SSB_SPROM8_BXA5G_SHIFT);

	SPEX(pa0b0, SSB_SPROM8_PA0B0, ~0, 0);
	SPEX(pa0b1, SSB_SPROM8_PA0B1, ~0, 0);
	SPEX(pa0b2, SSB_SPROM8_PA0B2, ~0, 0);
	SPEX(pa1b0, SSB_SPROM8_PA1B0, ~0, 0);
	SPEX(pa1b1, SSB_SPROM8_PA1B1, ~0, 0);
	SPEX(pa1b2, SSB_SPROM8_PA1B2, ~0, 0);
	SPEX(pa1lob0, SSB_SPROM8_PA1LOB0, ~0, 0);
	SPEX(pa1lob1, SSB_SPROM8_PA1LOB1, ~0, 0);
	SPEX(pa1lob2, SSB_SPROM8_PA1LOB2, ~0, 0);
	SPEX(pa1hib0, SSB_SPROM8_PA1HIB0, ~0, 0);
	SPEX(pa1hib1, SSB_SPROM8_PA1HIB1, ~0, 0);
	SPEX(pa1hib2, SSB_SPROM8_PA1HIB2, ~0, 0);
	SPEX(cck2gpo, SSB_SPROM8_CCK2GPO, ~0, 0);
	SPEX32(ofdm2gpo, SSB_SPROM8_OFDM2GPO, ~0, 0);
	SPEX32(ofdm5glpo, SSB_SPROM8_OFDM5GLPO, ~0, 0);
	SPEX32(ofdm5gpo, SSB_SPROM8_OFDM5GPO, ~0, 0);
	SPEX32(ofdm5ghpo, SSB_SPROM8_OFDM5GHPO, ~0, 0);

	/* Extract the antenna gain values. */
	bus->sprom.antenna_gain.a0 = sprom_extract_antgain(sprom,
							   SSB_SPROM8_AGAIN01,
							   SSB_SPROM8_AGAIN0,
							   SSB_SPROM8_AGAIN0_SHIFT);
	bus->sprom.antenna_gain.a1 = sprom_extract_antgain(sprom,
							   SSB_SPROM8_AGAIN01,
							   SSB_SPROM8_AGAIN1,
							   SSB_SPROM8_AGAIN1_SHIFT);
	bus->sprom.antenna_gain.a2 = sprom_extract_antgain(sprom,
							   SSB_SPROM8_AGAIN23,
							   SSB_SPROM8_AGAIN2,
							   SSB_SPROM8_AGAIN2_SHIFT);
	bus->sprom.antenna_gain.a3 = sprom_extract_antgain(sprom,
							   SSB_SPROM8_AGAIN23,
							   SSB_SPROM8_AGAIN3,
							   SSB_SPROM8_AGAIN3_SHIFT);

	SPEX(leddc_on_time, SSB_SPROM8_LEDDC, SSB_SPROM8_LEDDC_ON,
	     SSB_SPROM8_LEDDC_ON_SHIFT);
	SPEX(leddc_off_time, SSB_SPROM8_LEDDC, SSB_SPROM8_LEDDC_OFF,
	     SSB_SPROM8_LEDDC_OFF_SHIFT);

	SPEX(txchain, SSB_SPROM8_TXRXC, SSB_SPROM8_TXRXC_TXCHAIN,
	     SSB_SPROM8_TXRXC_TXCHAIN_SHIFT);
	SPEX(rxchain, SSB_SPROM8_TXRXC, SSB_SPROM8_TXRXC_RXCHAIN,
	     SSB_SPROM8_TXRXC_RXCHAIN_SHIFT);
	SPEX(antswitch, SSB_SPROM8_TXRXC, SSB_SPROM8_TXRXC_SWITCH,
	     SSB_SPROM8_TXRXC_SWITCH_SHIFT);

	SPEX(opo, SSB_SPROM8_OFDM2GPO, 0x00ff, 0);

	SPEX_ARRAY8(mcs2gpo, SSB_SPROM8_2G_MCSPO, ~0, 0);
	SPEX_ARRAY8(mcs5gpo, SSB_SPROM8_5G_MCSPO, ~0, 0);
	SPEX_ARRAY8(mcs5glpo, SSB_SPROM8_5GL_MCSPO, ~0, 0);
	SPEX_ARRAY8(mcs5ghpo, SSB_SPROM8_5GH_MCSPO, ~0, 0);

	SPEX(rawtempsense, SSB_SPROM8_RAWTS, SSB_SPROM8_RAWTS_RAWTEMP,
	     SSB_SPROM8_RAWTS_RAWTEMP_SHIFT);
	SPEX(measpower, SSB_SPROM8_RAWTS, SSB_SPROM8_RAWTS_MEASPOWER,
	     SSB_SPROM8_RAWTS_MEASPOWER_SHIFT);
	SPEX(tempsense_slope, SSB_SPROM8_OPT_CORRX,
	     SSB_SPROM8_OPT_CORRX_TEMP_SLOPE,
	     SSB_SPROM8_OPT_CORRX_TEMP_SLOPE_SHIFT);
	SPEX(tempcorrx, SSB_SPROM8_OPT_CORRX, SSB_SPROM8_OPT_CORRX_TEMPCORRX,
	     SSB_SPROM8_OPT_CORRX_TEMPCORRX_SHIFT);
	SPEX(tempsense_option, SSB_SPROM8_OPT_CORRX,
	     SSB_SPROM8_OPT_CORRX_TEMP_OPTION,
	     SSB_SPROM8_OPT_CORRX_TEMP_OPTION_SHIFT);
	SPEX(freqoffset_corr, SSB_SPROM8_HWIQ_IQSWP,
	     SSB_SPROM8_HWIQ_IQSWP_FREQ_CORR,
	     SSB_SPROM8_HWIQ_IQSWP_FREQ_CORR_SHIFT);
	SPEX(iqcal_swp_dis, SSB_SPROM8_HWIQ_IQSWP,
	     SSB_SPROM8_HWIQ_IQSWP_IQCAL_SWP,
	     SSB_SPROM8_HWIQ_IQSWP_IQCAL_SWP_SHIFT);
	SPEX(hw_iqcal_en, SSB_SPROM8_HWIQ_IQSWP, SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL,
	     SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL_SHIFT);

	SPEX(bw40po, SSB_SPROM8_BW40PO, ~0, 0);
	SPEX(cddpo, SSB_SPROM8_CDDPO, ~0, 0);
	SPEX(stbcpo, SSB_SPROM8_STBCPO, ~0, 0);
	SPEX(bwduppo, SSB_SPROM8_BWDUPPO, ~0, 0);

	SPEX(tempthresh, SSB_SPROM8_THERMAL, SSB_SPROM8_THERMAL_TRESH,
	     SSB_SPROM8_THERMAL_TRESH_SHIFT);
	SPEX(tempoffset, SSB_SPROM8_THERMAL, SSB_SPROM8_THERMAL_OFFSET,
	     SSB_SPROM8_THERMAL_OFFSET_SHIFT);
	SPEX(phycal_tempdelta, SSB_SPROM8_TEMPDELTA,
	     SSB_SPROM8_TEMPDELTA_PHYCAL,
	     SSB_SPROM8_TEMPDELTA_PHYCAL_SHIFT);
	SPEX(temps_period, SSB_SPROM8_TEMPDELTA, SSB_SPROM8_TEMPDELTA_PERIOD,
	     SSB_SPROM8_TEMPDELTA_PERIOD_SHIFT);
	SPEX(temps_hysteresis, SSB_SPROM8_TEMPDELTA,
	     SSB_SPROM8_TEMPDELTA_HYSTERESIS,
	     SSB_SPROM8_TEMPDELTA_HYSTERESIS_SHIFT);
}
/*
 * Indicates the presence of external SPROM.
 */
static bool bcma_sprom_ext_available(struct bcma_bus *bus)
{
	u32 chip_status;
	u32 srom_control;
	u32 present_mask;

	/* ChipCommon rev >= 31 reports presence via the SROM_CONTROL
	 * register, valid only when the SPROM capability bit is set.
	 */
	if (bus->drv_cc.core->id.rev >= 31) {
		if (!(bus->drv_cc.capabilities & BCMA_CC_CAP_SPROM))
			return false;

		srom_control = bcma_read32(bus->drv_cc.core,
					   BCMA_CC_SROM_CONTROL);
		return srom_control & BCMA_CC_SROM_CONTROL_PRESENT;
	}

	/* older chipcommon revisions use chip status register */
	chip_status = bcma_read32(bus->drv_cc.core, BCMA_CC_CHIPSTAT);
	switch (bus->chipinfo.id) {
	case BCMA_CHIP_ID_BCM4313:
		present_mask = BCMA_CC_CHIPST_4313_SPROM_PRESENT;
		break;

	case BCMA_CHIP_ID_BCM4331:
		present_mask = BCMA_CC_CHIPST_4331_SPROM_PRESENT;
		break;

	default:
		/* Chips without a known presence bit are assumed to have it */
		return true;
	}

	return chip_status & present_mask;
}
/*
 * Indicates that on-chip OTP memory is present and enabled.
 */
static bool bcma_sprom_onchip_available(struct bcma_bus *bus)
{
	u32 chip_status;
	u32 otpsize = 0;
	bool present;
	/* First decide per-chip whether OTP hardware exists at all. */
	chip_status = bcma_read32(bus->drv_cc.core, BCMA_CC_CHIPSTAT);
	switch (bus->chipinfo.id) {
	case BCMA_CHIP_ID_BCM4313:
		present = chip_status & BCMA_CC_CHIPST_4313_OTP_PRESENT;
		break;
	case BCMA_CHIP_ID_BCM4331:
		present = chip_status & BCMA_CC_CHIPST_4331_OTP_PRESENT;
		break;
	case BCMA_CHIP_ID_BCM43142:
	case BCMA_CHIP_ID_BCM43224:
	case BCMA_CHIP_ID_BCM43225:
		/* for these chips OTP is always available */
		present = true;
		break;
	case BCMA_CHIP_ID_BCM43131:
	case BCMA_CHIP_ID_BCM43217:
	case BCMA_CHIP_ID_BCM43227:
	case BCMA_CHIP_ID_BCM43228:
	case BCMA_CHIP_ID_BCM43428:
		present = chip_status & BCMA_CC_CHIPST_43228_OTP_PRESENT;
		break;
	default:
		present = false;
		break;
	}
	/* OTP only counts as available when its reported size is non-zero. */
	if (present) {
		otpsize = bus->drv_cc.capabilities & BCMA_CC_CAP_OTPS;
		otpsize >>= BCMA_CC_CAP_OTPS_SHIFT;
	}
	return otpsize != 0;
}
/*
 * Verify OTP is filled and determine the byte
 * offset where SPROM data is located.
 *
 * On error, returns 0; byte offset otherwise.
 */
static int bcma_sprom_onchip_offset(struct bcma_bus *bus)
{
	struct bcma_device *cc = bus->drv_cc.core;
	u32 offset;
	/* verify OTP status: HW general-use region must be programmed */
	if ((bcma_read32(cc, BCMA_CC_OTPS) & BCMA_CC_OTPS_GU_PROG_HW) == 0)
		return 0;
	/* obtain bit offset from otplayout register */
	offset = (bcma_read32(cc, BCMA_CC_OTPL) & BCMA_CC_OTPL_GURGN_OFFSET);
	/* convert the bit offset to a byte offset within the SPROM window */
	return BCMA_CC_SPROM + (offset >> 3);
}
/*
 * Read and parse the SPROM (or on-chip OTP) content into bus->sprom,
 * falling back to an arch/platform-provided SPROM image when no valid
 * on-device data can be found.
 *
 * Returns 0 on success or a negative error code.
 */
int bcma_sprom_get(struct bcma_bus *bus)
{
	u16 offset = BCMA_CC_SPROM;
	u16 *sprom;
	/* Candidate SPROM sizes (in 16-bit words), tried smallest first. */
	static const size_t sprom_sizes[] = {
		SSB_SPROMSIZE_WORDS_R4,
		SSB_SPROMSIZE_WORDS_R10,
		SSB_SPROMSIZE_WORDS_R11,
	};
	int i, err = 0;
	if (!bus->drv_cc.core)
		return -EOPNOTSUPP;
	if (!bcma_sprom_ext_available(bus)) {
		bool sprom_onchip;
		/*
		 * External SPROM takes precedence so check
		 * on-chip OTP only when no external SPROM
		 * is present.
		 */
		sprom_onchip = bcma_sprom_onchip_available(bus);
		if (sprom_onchip) {
			/* determine offset */
			offset = bcma_sprom_onchip_offset(bus);
		}
		if (!offset || !sprom_onchip) {
			/*
			 * Maybe there is no SPROM on the device?
			 * Now we ask the arch code if there is some sprom
			 * available for this device in some other storage.
			 */
			err = bcma_fill_sprom_with_fallback(bus, &bus->sprom);
			return err;
		}
	}
	/* On BCM4331 the external PA lines must be disabled while reading
	 * the SPROM; they are re-enabled after the read below. */
	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4331 ||
	    bus->chipinfo.id == BCMA_CHIP_ID_BCM43431)
		bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false);
	bcma_debug(bus, "SPROM offset 0x%x\n", offset);
	for (i = 0; i < ARRAY_SIZE(sprom_sizes); i++) {
		size_t words = sprom_sizes[i];
		sprom = kcalloc(words, sizeof(u16), GFP_KERNEL);
		if (!sprom)
			return -ENOMEM;
		bcma_sprom_read(bus, offset, sprom, words);
		err = bcma_sprom_valid(bus, sprom, words);
		if (!err)
			break;
		/* invalid for this size: free and retry with the next one */
		kfree(sprom);
	}
	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4331 ||
	    bus->chipinfo.id == BCMA_CHIP_ID_BCM43431)
		bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true);
	if (err) {
		bcma_warn(bus, "Invalid SPROM read from the PCIe card, trying to use fallback SPROM\n");
		err = bcma_fill_sprom_with_fallback(bus, &bus->sprom);
	} else {
		bcma_sprom_extract_r8(bus, sprom);
		kfree(sprom);
	}
	return err;
}
| linux-master | drivers/bcma/sprom.c |
/*
* Broadcom specific AMBA
* Core ops
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include "bcma_private.h"
#include <linux/export.h>
#include <linux/bcma/bcma.h>
/*
 * Poll an agent (wrapper) register until (value & mask) == value or the
 * timeout (in jiffies) expires. Returns true on match, false on timeout.
 */
static bool bcma_core_wait_value(struct bcma_device *core, u16 reg, u32 mask,
				 u32 value, int timeout)
{
	unsigned long deadline = jiffies + timeout;
	u32 val;
	do {
		val = bcma_aread32(core, reg);
		if ((val & mask) == value)
			return true;
		cpu_relax();
		udelay(10);
	} while (!time_after_eq(jiffies, deadline));
	bcma_warn(core->bus, "Timeout waiting for register 0x%04X!\n", reg);
	return false;
}
bool bcma_core_is_enabled(struct bcma_device *core)
{
if ((bcma_aread32(core, BCMA_IOCTL) & (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC))
!= BCMA_IOCTL_CLK)
return false;
if (bcma_aread32(core, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET)
return false;
return true;
}
EXPORT_SYMBOL_GPL(bcma_core_is_enabled);
/*
 * Put a core into reset with the given IOCTL flags. A no-op if the core
 * is already in reset. The read-backs after each write flush the posted
 * write before the delay.
 */
void bcma_core_disable(struct bcma_device *core, u32 flags)
{
	if (bcma_aread32(core, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET)
		return;
	/* wait until any pending reset-state activity has settled */
	bcma_core_wait_value(core, BCMA_RESET_ST, ~0, 0, 300);
	bcma_awrite32(core, BCMA_RESET_CTL, BCMA_RESET_CTL_RESET);
	bcma_aread32(core, BCMA_RESET_CTL);
	udelay(1);
	bcma_awrite32(core, BCMA_IOCTL, flags);
	bcma_aread32(core, BCMA_IOCTL);
	udelay(10);
}
EXPORT_SYMBOL_GPL(bcma_core_disable);
/*
 * Reset and enable a core: force the clock on (CLK | FGC) while taking it
 * out of reset, then drop the force-gate bit. Always returns 0.
 */
int bcma_core_enable(struct bcma_device *core, u32 flags)
{
	bcma_core_disable(core, flags);
	bcma_awrite32(core, BCMA_IOCTL, (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC | flags));
	bcma_aread32(core, BCMA_IOCTL);
	bcma_awrite32(core, BCMA_RESET_CTL, 0);
	bcma_aread32(core, BCMA_RESET_CTL);
	udelay(1);
	bcma_awrite32(core, BCMA_IOCTL, (BCMA_IOCTL_CLK | flags));
	bcma_aread32(core, BCMA_IOCTL);
	udelay(1);
	return 0;
}
EXPORT_SYMBOL_GPL(bcma_core_enable);
/*
 * Switch a core between forced high-throughput clock and dynamic
 * (power-saving) clocking. Only valid for the ChipCommon, PCIe and
 * 802.11 cores.
 */
void bcma_core_set_clockmode(struct bcma_device *core,
			     enum bcma_clkmode clkmode)
{
	u16 i;
	WARN_ON(core->id.id != BCMA_CORE_CHIPCOMMON &&
		core->id.id != BCMA_CORE_PCIE &&
		core->id.id != BCMA_CORE_80211);
	switch (clkmode) {
	case BCMA_CLKMODE_FAST:
		bcma_set32(core, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
		usleep_range(64, 300);
		/* poll up to 15 ms for the HT clock to become available;
		 * i == 0 afterwards signals success */
		for (i = 0; i < 1500; i++) {
			if (bcma_read32(core, BCMA_CLKCTLST) &
			    BCMA_CLKCTLST_HAVEHT) {
				i = 0;
				break;
			}
			udelay(10);
		}
		if (i)
			bcma_err(core->bus, "HT force timeout\n");
		break;
	case BCMA_CLKMODE_DYNAMIC:
		/* NOTE(review): bcma_set32() with ~FORCEHT sets every bit
		 * except FORCEHT; bcma_mask32() (clearing only FORCEHT)
		 * looks like the intent — confirm against hardware docs
		 * before changing. */
		bcma_set32(core, BCMA_CLKCTLST, ~BCMA_CLKCTLST_FORCEHT);
		break;
	}
}
EXPORT_SYMBOL_GPL(bcma_core_set_clockmode);
/*
 * Request (on) or release (off) external PLL resources via the clock
 * control/status register. @req are the request bits, @status the
 * corresponding status bits to wait for when enabling.
 */
void bcma_core_pll_ctl(struct bcma_device *core, u32 req, u32 status, bool on)
{
	u16 i;
	WARN_ON(req & ~BCMA_CLKCTLST_EXTRESREQ);
	WARN_ON(status & ~BCMA_CLKCTLST_EXTRESST);
	if (on) {
		bcma_set32(core, BCMA_CLKCTLST, req);
		/* poll up to 100 ms; i == 0 afterwards signals success */
		for (i = 0; i < 10000; i++) {
			if ((bcma_read32(core, BCMA_CLKCTLST) & status) ==
			    status) {
				i = 0;
				break;
			}
			udelay(10);
		}
		if (i)
			bcma_err(core->bus, "PLL enable timeout\n");
	} else {
		/*
		 * Mask the PLL but don't wait for it to be disabled. PLL may be
		 * shared between cores and will be still up if there is another
		 * core using it.
		 */
		bcma_mask32(core, BCMA_CLKCTLST, ~req);
		bcma_read32(core, BCMA_CLKCTLST);
	}
}
EXPORT_SYMBOL_GPL(bcma_core_pll_ctl);
u32 bcma_core_dma_translation(struct bcma_device *core)
{
switch (core->bus->hosttype) {
case BCMA_HOSTTYPE_SOC:
return 0;
case BCMA_HOSTTYPE_PCI:
if (bcma_aread32(core, BCMA_IOST) & BCMA_IOST_DMA64)
return BCMA_DMA_TRANSLATION_DMA64_CMT;
else
return BCMA_DMA_TRANSLATION_DMA32_CMT;
default:
bcma_err(core->bus, "DMA translation unknown for host %d\n",
core->bus->hosttype);
}
return BCMA_DMA_TRANSLATION_NONE;
}
EXPORT_SYMBOL(bcma_core_dma_translation);
| linux-master | drivers/bcma/core.c |
/*
* Broadcom specific AMBA
* ChipCommon B Unit driver
*
* Copyright 2014, Hauke Mehrtens <[email protected]>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include "bcma_private.h"
#include <linux/export.h>
#include <linux/bcma/bcma.h>
/*
 * Poll a memory-mapped register until (value & mask) == value or the
 * timeout (in jiffies) expires. Returns true on match, false on timeout.
 */
static bool bcma_wait_reg(struct bcma_bus *bus, void __iomem *addr, u32 mask,
			  u32 value, int timeout)
{
	unsigned long deadline = jiffies + timeout;
	u32 val;
	do {
		val = readl(addr);
		if ((val & mask) == value)
			return true;
		cpu_relax();
		udelay(10);
	} while (!time_after_eq(jiffies, deadline));
	bcma_err(bus, "Timeout waiting for register %p\n", addr);
	return false;
}
/*
 * Write a value to an MII management register of ChipCommon B.
 * Each step waits for the busy bit (0x0100) in the management control
 * register to clear before proceeding.
 */
void bcma_chipco_b_mii_write(struct bcma_drv_cc_b *ccb, u32 offset, u32 value)
{
	struct bcma_bus *bus = ccb->core->bus;
	void __iomem *mii = ccb->mii;
	writel(offset, mii + BCMA_CCB_MII_MNG_CTL);
	bcma_wait_reg(bus, mii + BCMA_CCB_MII_MNG_CTL, 0x0100, 0x0000, 100);
	writel(value, mii + BCMA_CCB_MII_MNG_CMD_DATA);
	bcma_wait_reg(bus, mii + BCMA_CCB_MII_MNG_CTL, 0x0100, 0x0000, 100);
}
EXPORT_SYMBOL_GPL(bcma_chipco_b_mii_write);
/*
 * Map the ChipCommon B MII management register window.
 *
 * Returns 0 on success (or if setup already completed earlier) and
 * -ENOMEM if the window could not be mapped.
 */
int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb)
{
	if (ccb->setup_done)
		return 0;

	ccb->mii = ioremap(ccb->core->addr_s[1], BCMA_CORE_SIZE);
	if (!ccb->mii)
		return -ENOMEM;

	/* Only mark setup as done once the mapping succeeded; previously
	 * the flag was set before ioremap(), so a failed attempt was
	 * latched and a subsequent call returned 0 with ->mii == NULL. */
	ccb->setup_done = 1;

	return 0;
}
/* Release the MII register mapping set up by bcma_core_chipcommon_b_init(). */
void bcma_core_chipcommon_b_free(struct bcma_drv_cc_b *ccb)
{
	if (ccb->mii)
		iounmap(ccb->mii);
}
| linux-master | drivers/bcma/driver_chipcommon_b.c |
/*
* Broadcom specific AMBA
* ChipCommon Power Management Unit driver
*
* Copyright 2009, Michael Buesch <[email protected]>
* Copyright 2007, 2011, Broadcom Corporation
* Copyright 2011, 2012, Hauke Mehrtens <[email protected]>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include "bcma_private.h"
#include <linux/export.h>
#include <linux/bcma/bcma.h>
/*
 * Read a PMU PLL control register via the indirect address/data pair.
 * The read-back of the address register flushes the posted write before
 * the data access.
 */
u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset)
{
	bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset);
	bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_ADDR);
	return bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_DATA);
}
EXPORT_SYMBOL_GPL(bcma_chipco_pll_read);
/* Write a PMU PLL control register via the indirect address/data pair. */
void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset, u32 value)
{
	bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset);
	bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_ADDR);
	bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, value);
}
EXPORT_SYMBOL_GPL(bcma_chipco_pll_write);
/* Read-modify-write a PMU PLL control register: new = (old & mask) | set. */
void bcma_chipco_pll_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask,
			     u32 set)
{
	bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset);
	bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_ADDR);
	bcma_pmu_maskset32(cc, BCMA_CC_PMU_PLLCTL_DATA, mask, set);
}
EXPORT_SYMBOL_GPL(bcma_chipco_pll_maskset);
/* Read-modify-write an indirect PMU chip-control register. */
void bcma_chipco_chipctl_maskset(struct bcma_drv_cc *cc,
				 u32 offset, u32 mask, u32 set)
{
	bcma_pmu_write32(cc, BCMA_CC_PMU_CHIPCTL_ADDR, offset);
	bcma_pmu_read32(cc, BCMA_CC_PMU_CHIPCTL_ADDR);
	bcma_pmu_maskset32(cc, BCMA_CC_PMU_CHIPCTL_DATA, mask, set);
}
EXPORT_SYMBOL_GPL(bcma_chipco_chipctl_maskset);
/* Read-modify-write an indirect PMU regulator-control register. */
void bcma_chipco_regctl_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask,
				u32 set)
{
	bcma_pmu_write32(cc, BCMA_CC_PMU_REGCTL_ADDR, offset);
	bcma_pmu_read32(cc, BCMA_CC_PMU_REGCTL_ADDR);
	bcma_pmu_maskset32(cc, BCMA_CC_PMU_REGCTL_DATA, mask, set);
}
EXPORT_SYMBOL_GPL(bcma_chipco_regctl_maskset);
/*
 * Measure the crystal frequency using the external LPO as a reference.
 * Returns the ALP clock in kHz rounded to the nearest 100 kHz, or 0 when
 * no external LPO is available.
 */
static u32 bcma_pmu_xtalfreq(struct bcma_drv_cc *cc)
{
	u32 ilp_ctl, alp_hz;
	if (!(bcma_pmu_read32(cc, BCMA_CC_PMU_STAT) &
	      BCMA_CC_PMU_STAT_EXT_LPO_AVAIL))
		return 0;
	/* start the measurement, let it run, then read the ILP counter */
	bcma_pmu_write32(cc, BCMA_CC_PMU_XTAL_FREQ,
			 BIT(BCMA_CC_PMU_XTAL_FREQ_MEASURE_SHIFT));
	usleep_range(1000, 2000);
	ilp_ctl = bcma_pmu_read32(cc, BCMA_CC_PMU_XTAL_FREQ);
	ilp_ctl &= BCMA_CC_PMU_XTAL_FREQ_ILPCTL_MASK;
	bcma_pmu_write32(cc, BCMA_CC_PMU_XTAL_FREQ, 0);
	/* counter counts ALP cycles per 4/32768 s */
	alp_hz = ilp_ctl * 32768 / 4;
	return (alp_hz + 50000) / 100000 * 100;
}
/*
 * Program the ADFLL frequency target for PMU rev 2 style PLLs (currently
 * only BCM43142), based on the crystal frequency in kHz. The PLL is
 * turned off around the update and flushed via the PLL_UPD bit.
 */
static void bcma_pmu2_pll_init0(struct bcma_drv_cc *cc, u32 xtalfreq)
{
	struct bcma_bus *bus = cc->core->bus;
	u32 freq_tgt_target = 0, freq_tgt_current;
	u32 pll0, mask;
	/* look up the frequency-target word for this chip/xtal pair */
	switch (bus->chipinfo.id) {
	case BCMA_CHIP_ID_BCM43142:
		/* pmu2_xtaltab0_adfll_485 */
		switch (xtalfreq) {
		case 12000:
			freq_tgt_target = 0x50D52;
			break;
		case 20000:
			freq_tgt_target = 0x307FE;
			break;
		case 26000:
			freq_tgt_target = 0x254EA;
			break;
		case 37400:
			freq_tgt_target = 0x19EF8;
			break;
		case 52000:
			freq_tgt_target = 0x12A75;
			break;
		}
		break;
	}
	if (!freq_tgt_target) {
		bcma_err(bus, "Unknown TGT frequency for xtalfreq %d\n",
			 xtalfreq);
		return;
	}
	pll0 = bcma_chipco_pll_read(cc, BCMA_CC_PMU15_PLL_PLLCTL0);
	freq_tgt_current = (pll0 & BCMA_CC_PMU15_PLL_PC0_FREQTGT_MASK) >>
		BCMA_CC_PMU15_PLL_PC0_FREQTGT_SHIFT;
	if (freq_tgt_current == freq_tgt_target) {
		bcma_debug(bus, "Target TGT frequency already set\n");
		return;
	}
	/* Turn off PLL */
	switch (bus->chipinfo.id) {
	case BCMA_CHIP_ID_BCM43142:
		/* drop HT/MACPHY clock resources and wait for HT to go away */
		mask = (u32)~(BCMA_RES_4314_HT_AVAIL |
			      BCMA_RES_4314_MACPHY_CLK_AVAIL);
		bcma_pmu_mask32(cc, BCMA_CC_PMU_MINRES_MSK, mask);
		bcma_pmu_mask32(cc, BCMA_CC_PMU_MAXRES_MSK, mask);
		bcma_wait_value(cc->core, BCMA_CLKCTLST,
				BCMA_CLKCTLST_HAVEHT, 0, 20000);
		break;
	}
	pll0 &= ~BCMA_CC_PMU15_PLL_PC0_FREQTGT_MASK;
	pll0 |= freq_tgt_target << BCMA_CC_PMU15_PLL_PC0_FREQTGT_SHIFT;
	bcma_chipco_pll_write(cc, BCMA_CC_PMU15_PLL_PLLCTL0, pll0);
	/* Flush */
	if (cc->pmu.rev >= 2)
		bcma_pmu_set32(cc, BCMA_CC_PMU_CTL, BCMA_CC_PMU_CTL_PLL_UPD);
	/* TODO: Do we need to update OTP? */
}
/*
 * Per-chip PLL initialization. Falls back to a 20 MHz crystal when the
 * frequency cannot be measured.
 */
static void bcma_pmu_pll_init(struct bcma_drv_cc *cc)
{
	struct bcma_bus *bus = cc->core->bus;
	u32 xtalfreq = bcma_pmu_xtalfreq(cc);
	switch (bus->chipinfo.id) {
	case BCMA_CHIP_ID_BCM43142:
		if (xtalfreq == 0)
			xtalfreq = 20000;
		bcma_pmu2_pll_init0(cc, xtalfreq);
		break;
	}
}
/*
 * Program per-chip PMU minimum/maximum resource masks. Chips without a
 * known configuration keep their defaults.
 */
static void bcma_pmu_resources_init(struct bcma_drv_cc *cc)
{
	struct bcma_bus *bus = cc->core->bus;
	u32 min_msk = 0, max_msk = 0;
	switch (bus->chipinfo.id) {
	case BCMA_CHIP_ID_BCM4313:
		min_msk = 0x200D;
		max_msk = 0xFFFF;
		break;
	case BCMA_CHIP_ID_BCM43142:
		min_msk = BCMA_RES_4314_LPLDO_PU |
			  BCMA_RES_4314_PMU_SLEEP_DIS |
			  BCMA_RES_4314_PMU_BG_PU |
			  BCMA_RES_4314_CBUCK_LPOM_PU |
			  BCMA_RES_4314_CBUCK_PFM_PU |
			  BCMA_RES_4314_CLDO_PU |
			  BCMA_RES_4314_LPLDO2_LVM |
			  BCMA_RES_4314_WL_PMU_PU |
			  BCMA_RES_4314_LDO3P3_PU |
			  BCMA_RES_4314_OTP_PU |
			  BCMA_RES_4314_WL_PWRSW_PU |
			  BCMA_RES_4314_LQ_AVAIL |
			  BCMA_RES_4314_LOGIC_RET |
			  BCMA_RES_4314_MEM_SLEEP |
			  BCMA_RES_4314_MACPHY_RET |
			  BCMA_RES_4314_WL_CORE_READY;
		max_msk = 0x3FFFFFFF;
		break;
	default:
		bcma_debug(bus, "PMU resource config unknown or not needed for device 0x%04X\n",
			   bus->chipinfo.id);
	}
	/* Set the resource masks. */
	if (min_msk)
		bcma_pmu_write32(cc, BCMA_CC_PMU_MINRES_MSK, min_msk);
	if (max_msk)
		bcma_pmu_write32(cc, BCMA_CC_PMU_MAXRES_MSK, max_msk);
	/*
	 * Add some delay; allow resources to come up and settle.
	 * Delay is required for SoC (early init).
	 */
	usleep_range(2000, 2500);
}
/* Disable to allow reading SPROM. Don't know the advantages of enabling it. */
void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable)
{
	struct bcma_bus *bus = cc->core->bus;
	u32 val;
	val = bcma_cc_read32(cc, BCMA_CC_CHIPCTL);
	if (enable) {
		val |= BCMA_CHIPCTL_4331_EXTPA_EN;
		/* package 9/11 routes the PA over GPIO2-5; later chip
		 * revisions need the second enable bit instead */
		if (bus->chipinfo.pkg == 9 || bus->chipinfo.pkg == 11)
			val |= BCMA_CHIPCTL_4331_EXTPA_ON_GPIO2_5;
		else if (bus->chipinfo.rev > 0)
			val |= BCMA_CHIPCTL_4331_EXTPA_EN2;
	} else {
		val &= ~BCMA_CHIPCTL_4331_EXTPA_EN;
		val &= ~BCMA_CHIPCTL_4331_EXTPA_EN2;
		val &= ~BCMA_CHIPCTL_4331_EXTPA_ON_GPIO2_5;
	}
	bcma_cc_write32(cc, BCMA_CC_CHIPCTL, val);
}
/* Apply per-chip PMU errata / board workarounds. */
static void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
{
	struct bcma_bus *bus = cc->core->bus;
	switch (bus->chipinfo.id) {
	case BCMA_CHIP_ID_BCM4313:
		/*
		 * enable 12 mA drive strength for 4313 and set chipControl
		 * register bit 1
		 */
		bcma_chipco_chipctl_maskset(cc, 0,
					    ~BCMA_CCTRL_4313_12MA_LED_DRIVE,
					    BCMA_CCTRL_4313_12MA_LED_DRIVE);
		break;
	case BCMA_CHIP_ID_BCM4331:
	case BCMA_CHIP_ID_BCM43431:
		/* Ext PA lines must be enabled for tx on BCM4331 */
		bcma_chipco_bcm4331_ext_pa_lines_ctl(cc, true);
		break;
	case BCMA_CHIP_ID_BCM43224:
	case BCMA_CHIP_ID_BCM43421:
		/*
		 * enable 12 mA drive strength for 43224 and set chipControl
		 * register bit 15
		 */
		if (bus->chipinfo.rev == 0) {
			/* rev A0 additionally needs the GPIO toggle bit */
			bcma_cc_maskset32(cc, BCMA_CC_CHIPCTL,
					  ~BCMA_CCTRL_43224_GPIO_TOGGLE,
					  BCMA_CCTRL_43224_GPIO_TOGGLE);
			bcma_chipco_chipctl_maskset(cc, 0,
						    ~BCMA_CCTRL_43224A0_12MA_LED_DRIVE,
						    BCMA_CCTRL_43224A0_12MA_LED_DRIVE);
		} else {
			bcma_chipco_chipctl_maskset(cc, 0,
						    ~BCMA_CCTRL_43224B0_12MA_LED_DRIVE,
						    BCMA_CCTRL_43224B0_12MA_LED_DRIVE);
		}
		break;
	default:
		bcma_debug(bus, "Workarounds unknown or not needed for device 0x%04X\n",
			   bus->chipinfo.id);
	}
}
/*
 * Locate the PMU register block (a separate core on newer ChipCommon
 * revisions with the always-on-bus capability, otherwise the ChipCommon
 * core itself) and cache the PMU revision.
 */
void bcma_pmu_early_init(struct bcma_drv_cc *cc)
{
	struct bcma_bus *bus = cc->core->bus;
	u32 pmucap;
	if (cc->core->id.rev >= 35 &&
	    cc->capabilities_ext & BCMA_CC_CAP_EXT_AOB_PRESENT) {
		cc->pmu.core = bcma_find_core(bus, BCMA_CORE_PMU);
		if (!cc->pmu.core)
			bcma_warn(bus, "Couldn't find expected PMU core");
	}
	/* fall back to ChipCommon itself when no dedicated PMU core exists */
	if (!cc->pmu.core)
		cc->pmu.core = cc->core;
	pmucap = bcma_pmu_read32(cc, BCMA_CC_PMU_CAP);
	cc->pmu.rev = (pmucap & BCMA_CC_PMU_CAP_REVISION);
	bcma_debug(bus, "Found rev %u PMU (capabilities 0x%08X)\n", cc->pmu.rev,
		   pmucap);
}
/*
 * Full PMU initialization: ILP-on-wait control (inverted meaning on PMU
 * rev 1), then PLL, resource masks and workarounds.
 */
void bcma_pmu_init(struct bcma_drv_cc *cc)
{
	if (cc->pmu.rev == 1)
		bcma_pmu_mask32(cc, BCMA_CC_PMU_CTL,
				~BCMA_CC_PMU_CTL_NOILPONW);
	else
		bcma_pmu_set32(cc, BCMA_CC_PMU_CTL,
			       BCMA_CC_PMU_CTL_NOILPONW);
	bcma_pmu_pll_init(cc);
	bcma_pmu_resources_init(cc);
	bcma_pmu_workarounds(cc);
}
/* Return the ALP (active low power) clock frequency in Hz for this chip. */
u32 bcma_pmu_get_alp_clock(struct bcma_drv_cc *cc)
{
	struct bcma_bus *bus = cc->core->bus;
	switch (bus->chipinfo.id) {
	case BCMA_CHIP_ID_BCM4313:
	case BCMA_CHIP_ID_BCM43224:
	case BCMA_CHIP_ID_BCM43225:
	case BCMA_CHIP_ID_BCM43227:
	case BCMA_CHIP_ID_BCM43228:
	case BCMA_CHIP_ID_BCM4331:
	case BCMA_CHIP_ID_BCM43421:
	case BCMA_CHIP_ID_BCM43428:
	case BCMA_CHIP_ID_BCM43431:
	case BCMA_CHIP_ID_BCM4716:
	case BCMA_CHIP_ID_BCM47162:
	case BCMA_CHIP_ID_BCM4748:
	case BCMA_CHIP_ID_BCM4749:
	case BCMA_CHIP_ID_BCM5357:
	case BCMA_CHIP_ID_BCM53572:
	case BCMA_CHIP_ID_BCM6362:
		/* always 20Mhz */
		return 20000 * 1000;
	case BCMA_CHIP_ID_BCM4706:
	case BCMA_CHIP_ID_BCM5356:
		/* always 25Mhz */
		return 25000 * 1000;
	case BCMA_CHIP_ID_BCM43460:
	case BCMA_CHIP_ID_BCM4352:
	case BCMA_CHIP_ID_BCM4360:
		/* these chips report their crystal speed in chip status */
		if (cc->status & BCMA_CC_CHIPST_4360_XTAL_40MZ)
			return 40000 * 1000;
		else
			return 20000 * 1000;
	default:
		bcma_warn(bus, "No ALP clock specified for %04X device, pmu rev. %d, using default %d Hz\n",
			  bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_ALP_CLOCK);
	}
	return BCMA_CC_PMU_ALP_CLOCK;
}
/* Find the output of the "m" pll divider given pll controls that start with
 * pllreg "pll0" i.e. 12 for main 6 for phy, 0 for misc.
 *
 * Returns the divider output frequency in Hz, computed from the P1/P2
 * pre-dividers, the N feedback divider, the selected M divider and the
 * ALP reference clock.
 */
static u32 bcma_pmu_pll_clock(struct bcma_drv_cc *cc, u32 pll0, u32 m)
{
	u32 tmp, div, ndiv, p1, p2, fc;
	struct bcma_bus *bus = cc->core->bus;
	BUG_ON((pll0 & 3) || (pll0 > BCMA_CC_PMU4716_MAINPLL_PLL0));
	BUG_ON(!m || m > 4);
	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM5357 ||
	    bus->chipinfo.id == BCMA_CHIP_ID_BCM4749) {
		/* Detect failure in clock setting */
		tmp = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
		if (tmp & 0x40000)
			return 133 * 1000000;
	}
	tmp = bcma_chipco_pll_read(cc, pll0 + BCMA_CC_PPL_P1P2_OFF);
	p1 = (tmp & BCMA_CC_PPL_P1_MASK) >> BCMA_CC_PPL_P1_SHIFT;
	p2 = (tmp & BCMA_CC_PPL_P2_MASK) >> BCMA_CC_PPL_P2_SHIFT;
	/* the four M dividers are packed into one register */
	tmp = bcma_chipco_pll_read(cc, pll0 + BCMA_CC_PPL_M14_OFF);
	div = (tmp >> ((m - 1) * BCMA_CC_PPL_MDIV_WIDTH)) &
		BCMA_CC_PPL_MDIV_MASK;
	tmp = bcma_chipco_pll_read(cc, pll0 + BCMA_CC_PPL_NM5_OFF);
	ndiv = (tmp & BCMA_CC_PPL_NDIV_MASK) >> BCMA_CC_PPL_NDIV_SHIFT;
	/* Do calculation in Mhz */
	fc = bcma_pmu_get_alp_clock(cc) / 1000000;
	fc = (p1 * ndiv * fc) / p2;
	/* Return clock in Hertz */
	return (fc / div) * 1000000;
}
/*
 * BCM4706 variant of the PLL clock calculation. The reference clock is a
 * fixed 25 MHz; the effective M divider depends on the package bonding.
 * Returns the clock in Hz for the requested divider output.
 */
static u32 bcma_pmu_pll_clock_bcm4706(struct bcma_drv_cc *cc, u32 pll0, u32 m)
{
	u32 tmp, ndiv, p1div, p2div;
	u32 clock;
	BUG_ON(!m || m > 4);
	/* Get N, P1 and P2 dividers to determine CPU clock */
	tmp = bcma_chipco_pll_read(cc, pll0 + BCMA_CC_PMU6_4706_PROCPLL_OFF);
	ndiv = (tmp & BCMA_CC_PMU6_4706_PROC_NDIV_INT_MASK)
		>> BCMA_CC_PMU6_4706_PROC_NDIV_INT_SHIFT;
	p1div = (tmp & BCMA_CC_PMU6_4706_PROC_P1DIV_MASK)
		>> BCMA_CC_PMU6_4706_PROC_P1DIV_SHIFT;
	p2div = (tmp & BCMA_CC_PMU6_4706_PROC_P2DIV_MASK)
		>> BCMA_CC_PMU6_4706_PROC_P2DIV_SHIFT;
	tmp = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
	if (tmp & BCMA_CC_CHIPST_4706_PKG_OPTION)
		/* Low cost bonding: Fixed reference clock 25MHz and m = 4 */
		clock = (25000000 / 4) * ndiv * p2div / p1div;
	else
		/* Fixed reference clock 25MHz and m = 2 */
		clock = (25000000 / 2) * ndiv * p2div / p1div;
	/* the backplane (SSB) clock runs at a quarter of the PLL output */
	if (m == BCMA_CC_PMU5_MAINPLL_SSB)
		clock = clock / 4;
	return clock;
}
/* query bus clock frequency for PMU-enabled chipcommon
 *
 * Returns the backplane clock in Hz, falling back to the generic HT
 * clock constant for unknown chips.
 */
u32 bcma_pmu_get_bus_clock(struct bcma_drv_cc *cc)
{
	struct bcma_bus *bus = cc->core->bus;
	switch (bus->chipinfo.id) {
	case BCMA_CHIP_ID_BCM4716:
	case BCMA_CHIP_ID_BCM4748:
	case BCMA_CHIP_ID_BCM47162:
		return bcma_pmu_pll_clock(cc, BCMA_CC_PMU4716_MAINPLL_PLL0,
					  BCMA_CC_PMU5_MAINPLL_SSB);
	case BCMA_CHIP_ID_BCM5356:
		return bcma_pmu_pll_clock(cc, BCMA_CC_PMU5356_MAINPLL_PLL0,
					  BCMA_CC_PMU5_MAINPLL_SSB);
	case BCMA_CHIP_ID_BCM5357:
	case BCMA_CHIP_ID_BCM4749:
		return bcma_pmu_pll_clock(cc, BCMA_CC_PMU5357_MAINPLL_PLL0,
					  BCMA_CC_PMU5_MAINPLL_SSB);
	case BCMA_CHIP_ID_BCM4706:
		return bcma_pmu_pll_clock_bcm4706(cc,
						  BCMA_CC_PMU4706_MAINPLL_PLL0,
						  BCMA_CC_PMU5_MAINPLL_SSB);
	case BCMA_CHIP_ID_BCM53572:
		return 75000000;
	default:
		bcma_warn(bus, "No bus clock specified for %04X device, pmu rev. %d, using default %d Hz\n",
			  bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_HT_CLOCK);
	}
	return BCMA_CC_PMU_HT_CLOCK;
}
EXPORT_SYMBOL_GPL(bcma_pmu_get_bus_clock);
/* query cpu clock frequency for PMU-enabled chipcommon
 *
 * Returns the CPU clock in Hz. On PMU rev >= 5 the CPU may run at a
 * different rate than the backplane; older PMUs share one clock.
 */
u32 bcma_pmu_get_cpu_clock(struct bcma_drv_cc *cc)
{
	struct bcma_bus *bus = cc->core->bus;
	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM53572)
		return 300000000;
	/* New PMUs can have different clock for bus and CPU */
	if (cc->pmu.rev >= 5) {
		u32 pll;
		switch (bus->chipinfo.id) {
		case BCMA_CHIP_ID_BCM4706:
			return bcma_pmu_pll_clock_bcm4706(cc,
						BCMA_CC_PMU4706_MAINPLL_PLL0,
						BCMA_CC_PMU5_MAINPLL_CPU);
		case BCMA_CHIP_ID_BCM5356:
			pll = BCMA_CC_PMU5356_MAINPLL_PLL0;
			break;
		case BCMA_CHIP_ID_BCM5357:
		case BCMA_CHIP_ID_BCM4749:
			pll = BCMA_CC_PMU5357_MAINPLL_PLL0;
			break;
		default:
			pll = BCMA_CC_PMU4716_MAINPLL_PLL0;
			break;
		}
		return bcma_pmu_pll_clock(cc, pll, BCMA_CC_PMU5_MAINPLL_CPU);
	}
	/* On old PMUs CPU has the same clock as the bus */
	return bcma_pmu_get_bus_clock(cc);
}
/* Indirect PLL control write used by the spur-avoidance update (no
 * flushing read-back between address and data, unlike
 * bcma_chipco_pll_write()). */
static void bcma_pmu_spuravoid_pll_write(struct bcma_drv_cc *cc, u32 offset,
					 u32 value)
{
	bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset);
	bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, value);
}
/*
 * Reprogram the PLL control registers for the requested spur-avoidance
 * mode (0 = off, 1 = on, 2 = alternate, chip dependent), then latch the
 * new settings by setting PLL_UPD in the PMU control register.
 */
void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
{
	u32 tmp = 0;
	u8 phypll_offset = 0;
	/* static const: these are fixed lookup tables indexed by
	 * 'spuravoid'; there is no reason to build mutable copies on the
	 * stack at every call */
	static const u8 bcm5357_bcm43236_p1div[] = {0x1, 0x5, 0x5};
	static const u8 bcm5357_bcm43236_ndiv[] = {0x30, 0xf6, 0xfc};
	struct bcma_bus *bus = cc->core->bus;
	switch (bus->chipinfo.id) {
	case BCMA_CHIP_ID_BCM5357:
	case BCMA_CHIP_ID_BCM4749:
	case BCMA_CHIP_ID_BCM53572:
		/* 5357[ab]0, 43236[ab]0, and 6362b0 */
		/*
		 * BCM5357 needs to touch PLL1_PLLCTL[02],
		 * so offset PLL0_PLLCTL[02] by 6
		 */
		phypll_offset = (bus->chipinfo.id == BCMA_CHIP_ID_BCM5357 ||
				 bus->chipinfo.id == BCMA_CHIP_ID_BCM4749 ||
				 bus->chipinfo.id == BCMA_CHIP_ID_BCM53572) ? 6 : 0;
		/* RMW only the P1 divider */
		bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR,
				 BCMA_CC_PMU_PLL_CTL0 + phypll_offset);
		tmp = bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_DATA);
		tmp &= (~(BCMA_CC_PMU1_PLL0_PC0_P1DIV_MASK));
		tmp |= (bcm5357_bcm43236_p1div[spuravoid] << BCMA_CC_PMU1_PLL0_PC0_P1DIV_SHIFT);
		bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, tmp);
		/* RMW only the int feedback divider */
		bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR,
				 BCMA_CC_PMU_PLL_CTL2 + phypll_offset);
		tmp = bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_DATA);
		tmp &= ~(BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_MASK);
		tmp |= (bcm5357_bcm43236_ndiv[spuravoid]) << BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_SHIFT;
		bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, tmp);
		tmp = BCMA_CC_PMU_CTL_PLL_UPD;
		break;
	case BCMA_CHIP_ID_BCM4331:
	case BCMA_CHIP_ID_BCM43431:
		if (spuravoid == 2) {
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
						     0x11500014);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
						     0x0FC00a08);
		} else if (spuravoid == 1) {
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
						     0x11500014);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
						     0x0F600a08);
		} else {
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
						     0x11100014);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
						     0x03000a08);
		}
		tmp = BCMA_CC_PMU_CTL_PLL_UPD;
		break;
	case BCMA_CHIP_ID_BCM43224:
	case BCMA_CHIP_ID_BCM43225:
	case BCMA_CHIP_ID_BCM43421:
		if (spuravoid == 1) {
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
						     0x11500010);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
						     0x000C0C06);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
						     0x0F600a08);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
						     0x00000000);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
						     0x2001E920);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
						     0x88888815);
		} else {
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
						     0x11100010);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
						     0x000c0c06);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
						     0x03000a08);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
						     0x00000000);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
						     0x200005c0);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
						     0x88888815);
		}
		tmp = BCMA_CC_PMU_CTL_PLL_UPD;
		break;
	case BCMA_CHIP_ID_BCM4716:
	case BCMA_CHIP_ID_BCM4748:
	case BCMA_CHIP_ID_BCM47162:
		if (spuravoid == 1) {
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
						     0x11500060);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
						     0x080C0C06);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
						     0x0F600000);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
						     0x00000000);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
						     0x2001E924);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
						     0x88888815);
		} else {
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
						     0x11100060);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
						     0x080c0c06);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
						     0x03000000);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
						     0x00000000);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
						     0x200005c0);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
						     0x88888815);
		}
		tmp = BCMA_CC_PMU_CTL_PLL_UPD | BCMA_CC_PMU_CTL_NOILPONW;
		break;
	case BCMA_CHIP_ID_BCM43131:
	case BCMA_CHIP_ID_BCM43217:
	case BCMA_CHIP_ID_BCM43227:
	case BCMA_CHIP_ID_BCM43228:
	case BCMA_CHIP_ID_BCM43428:
		/* LCNXN */
		/*
		 * PLL Settings for spur avoidance on/off mode,
		 * no on2 support for 43228A0
		 */
		if (spuravoid == 1) {
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
						     0x01100014);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
						     0x040C0C06);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
						     0x03140A08);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
						     0x00333333);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
						     0x202C2820);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
						     0x88888815);
		} else {
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
						     0x11100014);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
						     0x040c0c06);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
						     0x03000a08);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
						     0x00000000);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
						     0x200005c0);
			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
						     0x88888815);
		}
		tmp = BCMA_CC_PMU_CTL_PLL_UPD;
		break;
	default:
		bcma_err(bus, "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n",
			 bus->chipinfo.id);
		break;
	}
	/* latch the new PLL settings (tmp == 0 for unknown chips, making
	 * this write-back a no-op beyond re-writing the current value) */
	tmp |= bcma_pmu_read32(cc, BCMA_CC_PMU_CTL);
	bcma_pmu_write32(cc, BCMA_CC_PMU_CTL, tmp);
}
EXPORT_SYMBOL_GPL(bcma_pmu_spuravoid_pllupdate);
| linux-master | drivers/bcma/driver_chipcommon_pmu.c |
/*
* Broadcom specific AMBA
* ChipCommon parallel flash
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include "bcma_private.h"
#include <linux/bcma/bcma.h>
#include <linux/mtd/physmap.h>
#include <linux/platform_device.h>
/* MTD partition parsers to try for the parallel flash. */
static const char * const part_probes[] = { "bcm47xxpart", NULL };
/* Bus width is filled in by bcma_pflash_init() below. */
static struct physmap_flash_data bcma_pflash_data = {
	.part_probe_types	= part_probes,
};
/* Address range is filled in by bcma_pflash_init() below. */
static struct resource bcma_pflash_resource = {
	.name	= "bcma_pflash",
	.flags	= IORESOURCE_MEM,
};
struct platform_device bcma_pflash_dev = {
	.name		= "physmap-flash",
	.dev		= {
		.platform_data  = &bcma_pflash_data,
	},
	.resource	= &bcma_pflash_resource,
	.num_resources	= 1,
};
/*
 * Detect the parallel flash bus width and fill in the physmap platform
 * device resources. Always returns 0.
 */
int bcma_pflash_init(struct bcma_drv_cc *cc)
{
	struct bcma_pflash *pflash = &cc->pflash;
	pflash->present = true;
	/* the DS bit in the flash config selects 8- vs 16-bit bus width */
	if (!(bcma_read32(cc->core, BCMA_CC_FLASH_CFG) & BCMA_CC_FLASH_CFG_DS))
		bcma_pflash_data.width = 1;
	else
		bcma_pflash_data.width = 2;
	bcma_pflash_resource.start = BCMA_SOC_FLASH2;
	/* NOTE(review): struct resource .end is inclusive, so this looks
	 * one byte past the window (start + size - 1 expected) — confirm
	 * before changing, existing users may rely on it. */
	bcma_pflash_resource.end = BCMA_SOC_FLASH2 + BCMA_SOC_FLASH2_SZ;
	return 0;
}
| linux-master | drivers/bcma/driver_chipcommon_pflash.c |
/*
* Broadcom specific AMBA
* Bus subsystem
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include "bcma_private.h"
#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/bcma/bcma.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
MODULE_LICENSE("GPL");
/* contains the number the next bus should get. */
static unsigned int bcma_bus_next_num;
/* bcma_buses_mutex locks the bcma_bus_next_num */
static DEFINE_MUTEX(bcma_buses_mutex);
static int bcma_bus_match(struct device *dev, struct device_driver *drv);
static int bcma_device_probe(struct device *dev);
static void bcma_device_remove(struct device *dev);
static int bcma_device_uevent(const struct device *dev, struct kobj_uevent_env *env);
/* sysfs attribute: core manufacturer id (e.g. 0x4BF for Broadcom). */
static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	return sprintf(buf, "0x%03X\n", core->id.manuf);
}
static DEVICE_ATTR_RO(manuf);
/* sysfs attribute: core id. */
static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	return sprintf(buf, "0x%03X\n", core->id.id);
}
static DEVICE_ATTR_RO(id);
/* sysfs attribute: core revision. */
static ssize_t rev_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	return sprintf(buf, "0x%02X\n", core->id.rev);
}
static DEVICE_ATTR_RO(rev);
/* sysfs attribute: core class. */
static ssize_t class_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	return sprintf(buf, "0x%X\n", core->id.class);
}
static DEVICE_ATTR_RO(class);
/* default sysfs attributes for every bcma core device */
static struct attribute *bcma_device_attrs[] = {
	&dev_attr_manuf.attr,
	&dev_attr_id.attr,
	&dev_attr_rev.attr,
	&dev_attr_class.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bcma_device);
static struct bus_type bcma_bus_type = {
	.name		= "bcma",
	.match		= bcma_bus_match,
	.probe		= bcma_device_probe,
	.remove		= bcma_device_remove,
	.uevent		= bcma_device_uevent,
	.dev_groups	= bcma_device_groups,
};
/* The BCM4706 uses a dedicated ChipCommon core id; everything else uses
 * the generic one. */
static u16 bcma_cc_core_id(struct bcma_bus *bus)
{
	return bus->chipinfo.id == BCMA_CHIP_ID_BCM4706 ?
	       BCMA_CORE_4706_CHIPCOMMON : BCMA_CORE_CHIPCOMMON;
}
/*
 * Find the @unit-th core with the given core id on the bus, or NULL if
 * there is no such core.
 */
struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
					u8 unit)
{
	struct bcma_device *core;
	list_for_each_entry(core, &bus->cores, list) {
		if (core->id.id == coreid && core->core_unit == unit)
			return core;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(bcma_find_core_unit);
/*
 * Poll a core register until (value & mask) == value or the timeout (in
 * jiffies) expires. Returns true on match, false on timeout. (Same
 * pattern as bcma_core_wait_value() in core.c, but using bcma_read32().)
 */
bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value,
		     int timeout)
{
	unsigned long deadline = jiffies + timeout;
	u32 val;
	do {
		val = bcma_read32(core, reg);
		if ((val & mask) == value)
			return true;
		cpu_relax();
		udelay(10);
	} while (!time_after_eq(jiffies, deadline));
	bcma_warn(core->bus, "Timeout waiting for register 0x%04X!\n", reg);
	return false;
}
/* Device release callback: unmap the core's register windows and free it. */
static void bcma_release_core_dev(struct device *dev)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	if (core->io_addr)
		iounmap(core->io_addr);
	if (core->io_wrap)
		iounmap(core->io_wrap);
	kfree(core);
}
/* Cores that must be registered before the rest of the bus scan (the
 * Northstar NAND and QSPI flash controllers). */
static bool bcma_is_core_needed_early(u16 core_id)
{
	return core_id == BCMA_CORE_NS_NAND || core_id == BCMA_CORE_NS_QSPI;
}
/*
 * Find the device-tree child node of @parent whose first "reg" address
 * matches the core's register base, or NULL if none matches.
 */
static struct device_node *bcma_of_find_child_device(struct device *parent,
						     struct bcma_device *core)
{
	struct device_node *node;
	int ret;
	if (!parent->of_node)
		return NULL;
	for_each_child_of_node(parent->of_node, node) {
		struct resource res;
		ret = of_address_to_resource(node, 0, &res);
		if (ret)
			continue;
		/* match the DT node to the core via its register base */
		if (res.start == core->addr)
			return node;
	}
	return NULL;
}
/*
 * Resolve interrupt @num for @core from the device tree. Prefer the core's
 * own DT node; fall back to interrupt-map lookup on the parent using the
 * core's base address as the unit address.
 */
static int bcma_of_irq_parse(struct device *parent,
			     struct bcma_device *core,
			     struct of_phandle_args *out_irq, int num)
{
	__be32 laddr[1];
	int rc;

	if (core->dev.of_node) {
		rc = of_irq_parse_one(core->dev.of_node, num, out_irq);
		if (!rc)
			return rc;
	}

	out_irq->np = parent->of_node;
	out_irq->args_count = 1;
	out_irq->args[0] = num;

	laddr[0] = cpu_to_be32(core->addr);
	return of_irq_parse_raw(laddr, out_irq);
}
/*
 * Map interrupt @num of @core to a Linux IRQ number via the device tree.
 * Returns 0 if OF IRQ support is unavailable or the lookup fails.
 */
static unsigned int bcma_of_get_irq(struct device *parent,
				    struct bcma_device *core, int num)
{
	struct of_phandle_args out_irq;
	int ret;

	if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent->of_node)
		return 0;

	ret = bcma_of_irq_parse(parent, core, &out_irq, num);
	if (ret) {
		bcma_debug(core->bus, "bcma_of_get_irq() failed with rc=%d\n",
			   ret);
		return 0;
	}

	return irq_create_of_mapping(&out_irq);
}
/*
 * Attach DT information to a core's device: its matching child node (if
 * any), its first IRQ, and DMA configuration derived from the node.
 */
static void bcma_of_fill_device(struct device *parent,
				struct bcma_device *core)
{
	struct device_node *node;

	node = bcma_of_find_child_device(parent, core);
	if (node)
		core->dev.of_node = node;

	core->irq = bcma_of_get_irq(parent, core, 0);

	of_dma_configure(&core->dev, node, false);
}
/*
 * Return the Linux IRQ number for interrupt @num of @core, depending on
 * the host type: the bridge IRQ on PCI, a MIPS- or DT-derived IRQ on SoC,
 * and 0 (no IRQ) on SDIO or on any lookup failure.
 */
unsigned int bcma_core_irq(struct bcma_device *core, int num)
{
	struct bcma_bus *bus = core->bus;
	unsigned int mips_irq;

	switch (bus->hosttype) {
	case BCMA_HOSTTYPE_PCI:
		return bus->host_pci->irq;
	case BCMA_HOSTTYPE_SOC:
		if (bus->drv_mips.core && num == 0) {
			mips_irq = bcma_core_mips_irq(core);
			/* MIPS hw IRQs 0..4 map to Linux IRQs 2..6 */
			return mips_irq <= 4 ? mips_irq + 2 : 0;
		}
		if (bus->dev)
			return bcma_of_get_irq(bus->dev, core, num);
		return 0;
	case BCMA_HOSTTYPE_SDIO:
		return 0;
	}

	return 0;
}
EXPORT_SYMBOL(bcma_core_irq);
/*
 * Initialize the driver-model bits of a scanned core (name, parent, bus
 * type, release callback) and set up its DMA device and IRQ according to
 * the host type. Does not register the device yet.
 */
void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
{
	device_initialize(&core->dev);
	core->dev.release = bcma_release_core_dev;
	core->dev.bus = &bcma_bus_type;
	dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);
	core->dev.parent = bus->dev;
	if (bus->dev)
		bcma_of_fill_device(bus->dev, core);

	switch (bus->hosttype) {
	case BCMA_HOSTTYPE_PCI:
		core->dma_dev = bus->dev;
		core->irq = bus->host_pci->irq;
		break;
	case BCMA_HOSTTYPE_SOC:
		if (IS_ENABLED(CONFIG_OF) && bus->dev) {
			core->dma_dev = bus->dev;
		} else {
			/* No platform device: the core itself does DMA */
			core->dev.dma_mask = &core->dev.coherent_dma_mask;
			core->dma_dev = &core->dev;
		}
		break;
	case BCMA_HOSTTYPE_SDIO:
		break;
	}
}
/*
 * Basic bus-struct initialization: assign a unique bus number (under the
 * global mutex), reset the core list and detect the chip identity.
 */
void bcma_init_bus(struct bcma_bus *bus)
{
	mutex_lock(&bcma_buses_mutex);
	bus->num = bcma_bus_next_num++;
	mutex_unlock(&bcma_buses_mutex);

	INIT_LIST_HEAD(&bus->cores);
	bus->nr_cores = 0;

	bcma_detect_chip(bus);
}
/*
 * Register a prepared core with the driver model. On failure the error is
 * only logged; dev_registered stays false so teardown can tell the
 * difference.
 */
static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core)
{
	int err;

	err = device_add(&core->dev);
	if (err) {
		bcma_err(bus, "Could not register dev for core 0x%03X\n",
			 core->id.id);
		return;
	}
	core->dev_registered = true;
}
/*
 * Register all scanned cores that are driven by external drivers, then the
 * flash platform devices, the GPIO driver and (on SoC) the watchdog.
 * Cores handled internally by bcma, early-registered cores and the unused
 * second BCM4706 GMAC are skipped.
 */
static int bcma_register_devices(struct bcma_bus *bus)
{
	struct bcma_device *core;
	int err;

	list_for_each_entry(core, &bus->cores, list) {
		/* We support that core ourselves */
		switch (core->id.id) {
		case BCMA_CORE_4706_CHIPCOMMON:
		case BCMA_CORE_CHIPCOMMON:
		case BCMA_CORE_NS_CHIPCOMMON_B:
		case BCMA_CORE_PCI:
		case BCMA_CORE_PCIE:
		case BCMA_CORE_PCIE2:
		case BCMA_CORE_MIPS_74K:
		case BCMA_CORE_4706_MAC_GBIT_COMMON:
			continue;
		}

		/* Early cores were already registered */
		if (bcma_is_core_needed_early(core->id.id))
			continue;

		/* Only first GMAC core on BCM4706 is connected and working */
		if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
		    core->core_unit > 0)
			continue;

		bcma_register_core(bus, core);
	}

#ifdef CONFIG_BCMA_PFLASH
	if (bus->drv_cc.pflash.present) {
		err = platform_device_register(&bcma_pflash_dev);
		if (err)
			bcma_err(bus, "Error registering parallel flash\n");
	}
#endif

#ifdef CONFIG_BCMA_SFLASH
	if (bus->drv_cc.sflash.present) {
		err = platform_device_register(&bcma_sflash_dev);
		if (err)
			bcma_err(bus, "Error registering serial flash\n");
	}
#endif

#ifdef CONFIG_BCMA_NFLASH
	if (bus->drv_cc.nflash.present) {
		err = platform_device_register(&bcma_nflash_dev);
		if (err)
			bcma_err(bus, "Error registering NAND flash\n");
	}
#endif

	err = bcma_gpio_init(&bus->drv_cc);
	if (err == -ENOTSUPP)
		bcma_debug(bus, "GPIO driver not activated\n");
	else if (err) {
		bcma_err(bus, "Error registering GPIO driver: %i\n", err);
		return err;
	}

	if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
		err = bcma_chipco_watchdog_register(&bus->drv_cc);
		if (err)
			bcma_err(bus, "Error registering watchdog driver\n");
	}

	return 0;
}
/*
 * Tear down all cores: unregister the ones that were added to the driver
 * model (and the SoC watchdog), then drop the final reference on the
 * remaining, internally-handled cores so their release callback frees them.
 */
void bcma_unregister_cores(struct bcma_bus *bus)
{
	struct bcma_device *core, *tmp;

	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
		if (!core->dev_registered)
			continue;
		list_del(&core->list);
		device_unregister(&core->dev);
	}
	if (bus->hosttype == BCMA_HOSTTYPE_SOC)
		platform_device_unregister(bus->drv_cc.watchdog);

	/* Now no one uses internally-handled cores, we can free them */
	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
		list_del(&core->list);
		put_device(&core->dev);
	}
}
/*
 * Full bus bring-up: scan for cores, early-init ChipCommon and PCIe,
 * register flash-access cores so the SPROM can be read, then initialize
 * the internally-handled cores and finally register the remaining cores
 * with the driver model.
 */
int bcma_bus_register(struct bcma_bus *bus)
{
	int err;
	struct bcma_device *core;

	/* Scan for devices (cores) */
	err = bcma_bus_scan(bus);
	if (err) {
		bcma_err(bus, "Failed to scan: %d\n", err);
		return err;
	}

	/* Early init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_early_init(&bus->drv_cc);
	}

	/* Early init PCIE core */
	core = bcma_find_core(bus, BCMA_CORE_PCIE);
	if (core) {
		bus->drv_pci[0].core = core;
		bcma_core_pci_early_init(&bus->drv_pci[0]);
	}

	if (bus->dev)
		of_platform_default_populate(bus->dev->of_node, NULL, bus->dev);

	/* Cores providing flash access go before SPROM init */
	list_for_each_entry(core, &bus->cores, list) {
		if (bcma_is_core_needed_early(core->id.id))
			bcma_register_core(bus, core);
	}

	/* Try to get SPROM */
	err = bcma_sprom_get(bus);
	if (err == -ENOENT) {
		bcma_err(bus, "No SPROM available\n");
	} else if (err)
		bcma_err(bus, "Failed to get SPROM: %d\n", err);

	/* Init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_init(&bus->drv_cc);
	}

	/* Init NS ChipCommon B core */
	core = bcma_find_core(bus, BCMA_CORE_NS_CHIPCOMMON_B);
	if (core) {
		bus->drv_cc_b.core = core;
		bcma_core_chipcommon_b_init(&bus->drv_cc_b);
	}

	/* Init MIPS core */
	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
	if (core) {
		bus->drv_mips.core = core;
		bcma_core_mips_init(&bus->drv_mips);
	}

	/* Init PCIE core */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 0);
	if (core) {
		bus->drv_pci[0].core = core;
		bcma_core_pci_init(&bus->drv_pci[0]);
	}

	/* Init PCIE core */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 1);
	if (core) {
		bus->drv_pci[1].core = core;
		bcma_core_pci_init(&bus->drv_pci[1]);
	}

	/* Init PCIe Gen 2 core */
	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE2, 0);
	if (core) {
		bus->drv_pcie2.core = core;
		bcma_core_pcie2_init(&bus->drv_pcie2);
	}

	/* Init GBIT MAC COMMON core */
	core = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);
	if (core) {
		bus->drv_gmac_cmn.core = core;
		bcma_core_gmac_cmn_init(&bus->drv_gmac_cmn);
	}

	/* Register found cores */
	bcma_register_devices(bus);

	bcma_info(bus, "Bus registered\n");

	return 0;
}
/*
 * Reverse of bcma_bus_register(): unregister GPIO, free ChipCommon B
 * resources and take down all cores.
 */
void bcma_bus_unregister(struct bcma_bus *bus)
{
	int err;

	err = bcma_gpio_unregister(&bus->drv_cc);
	if (err == -EBUSY)
		bcma_err(bus, "Some GPIOs are still in use.\n");
	else if (err)
		bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);

	bcma_core_chipcommon_b_free(&bus->drv_cc_b);

	bcma_unregister_cores(bus);
}
/*
 * This is a special version of bus registration function designed for SoCs.
 * It scans bus and performs basic initialization of main cores only.
 * Please note it requires memory allocation, however it won't try to sleep.
 */
int __init bcma_bus_early_register(struct bcma_bus *bus)
{
	int err;
	struct bcma_device *core;

	/* Scan for devices (cores) */
	err = bcma_bus_scan(bus);
	if (err) {
		bcma_err(bus, "Failed to scan bus: %d\n", err);
		/* NOTE(review): discards the scan error and returns -1 */
		return -1;
	}

	/* Early init CC core */
	core = bcma_find_core(bus, bcma_cc_core_id(bus));
	if (core) {
		bus->drv_cc.core = core;
		bcma_core_chipcommon_early_init(&bus->drv_cc);
	}

	/* Early init MIPS core */
	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
	if (core) {
		bus->drv_mips.core = core;
		bcma_core_mips_early_init(&bus->drv_mips);
	}

	bcma_info(bus, "Early bus registered\n");

	return 0;
}
#ifdef CONFIG_PM
/* Call each bound driver's suspend hook for every core on the bus. */
int bcma_bus_suspend(struct bcma_bus *bus)
{
	struct bcma_device *core;

	list_for_each_entry(core, &bus->cores, list) {
		struct device_driver *drv = core->dev.driver;

		if (drv) {
			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);

			if (adrv->suspend)
				adrv->suspend(core);
		}
	}
	return 0;
}
/*
 * Re-init ChipCommon (its setup is lost across suspend) and call each
 * bound driver's resume hook for every core on the bus.
 */
int bcma_bus_resume(struct bcma_bus *bus)
{
	struct bcma_device *core;

	/* Init CC core */
	if (bus->drv_cc.core) {
		bus->drv_cc.setup_done = false;
		bcma_core_chipcommon_init(&bus->drv_cc);
	}

	list_for_each_entry(core, &bus->cores, list) {
		struct device_driver *drv = core->dev.driver;

		if (drv) {
			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);

			if (adrv->resume)
				adrv->resume(core);
		}
	}

	return 0;
}
#endif
/*
 * Register a bcma driver with the driver core on behalf of @owner.
 * Usually invoked via the bcma_driver_register() wrapper macro.
 */
int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
{
	drv->drv.name = drv->name;
	drv->drv.bus = &bcma_bus_type;
	drv->drv.owner = owner;

	return driver_register(&drv->drv);
}
EXPORT_SYMBOL_GPL(__bcma_driver_register);

/* Unregister a previously registered bcma driver. */
void bcma_driver_unregister(struct bcma_driver *drv)
{
	driver_unregister(&drv->drv);
}
EXPORT_SYMBOL_GPL(bcma_driver_unregister);
/*
 * Bus match callback: walk the driver's ID table and accept the core if
 * every field matches or is the corresponding BCMA_ANY_* wildcard. The
 * table terminator is an entry with manuf, id and rev all zero (class is
 * not part of the terminator test).
 */
static int bcma_bus_match(struct device *dev, struct device_driver *drv)
{
	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
	struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
	const struct bcma_device_id *cid = &core->id;
	const struct bcma_device_id *did;

	for (did = adrv->id_table; did->manuf || did->id || did->rev; did++) {
	    if ((did->manuf == cid->manuf || did->manuf == BCMA_ANY_MANUF) &&
		(did->id == cid->id || did->id == BCMA_ANY_ID) &&
		(did->rev == cid->rev || did->rev == BCMA_ANY_REV) &&
		(did->class == cid->class || did->class == BCMA_ANY_CLASS))
			return 1;
	}
	return 0;
}
/*
 * Bus probe callback: pin the device with a reference for the lifetime of
 * the binding and invoke the driver's probe hook; drop the reference again
 * if probing fails.
 */
static int bcma_device_probe(struct device *dev)
{
	struct bcma_device *core = container_of(dev, struct bcma_device,
						dev);
	struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
						drv);
	int err = 0;

	get_device(dev);
	if (adrv->probe)
		err = adrv->probe(core);
	if (err)
		put_device(dev);

	return err;
}
/*
 * Bus remove callback: invoke the driver's remove hook and release the
 * reference taken in bcma_device_probe().
 */
static void bcma_device_remove(struct device *dev)
{
	struct bcma_device *core = container_of(dev, struct bcma_device,
						dev);
	struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
						drv);

	if (adrv->remove)
		adrv->remove(core);
	put_device(dev);
}
/* Emit the MODALIAS uevent variable used for module autoloading. */
static int bcma_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct bcma_device *core = container_of_const(dev, struct bcma_device, dev);

	return add_uevent_var(env,
			      "MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X",
			      core->id.manuf, core->id.id,
			      core->id.rev, core->id.class);
}
/* Tracks whether the bus type has been registered (idempotency flag). */
static unsigned int bcma_bus_registered;

/*
 * If built-in, bus has to be registered early, before any driver calls
 * bcma_driver_register.
 * Otherwise registering driver would trigger BUG in driver_register.
 */
static int __init bcma_init_bus_register(void)
{
	int err;

	if (bcma_bus_registered)
		return 0;

	err = bus_register(&bcma_bus_type);
	if (!err)
		bcma_bus_registered = 1;

	return err;
}
#ifndef MODULE
/* Built-in: register the bus type early, before device/driver initcalls */
fs_initcall(bcma_init_bus_register);
#endif
/* Main initialization has to be done with SPI/mtd/NAND/SPROM available */
static int __init bcma_modinit(void)
{
	int err;

	err = bcma_init_bus_register();
	if (err)
		return err;

	err = bcma_host_soc_register_driver();
	if (err) {
		/* Deliberately non-fatal: other host types may still work */
		pr_err("SoC host initialization failed\n");
		err = 0;
	}
#ifdef CONFIG_BCMA_HOST_PCI
	err = bcma_host_pci_init();
	if (err) {
		/* Same policy: log and continue */
		pr_err("PCI host initialization failed\n");
		err = 0;
	}
#endif

	return err;
}
module_init(bcma_modinit);
/* Module teardown: unregister host drivers, then the bus type itself. */
static void __exit bcma_modexit(void)
{
#ifdef CONFIG_BCMA_HOST_PCI
	bcma_host_pci_exit();
#endif
	bcma_host_soc_unregister_driver();
	bus_unregister(&bcma_bus_type);
}
module_exit(bcma_modexit)
| linux-master | drivers/bcma/main.c |
/*
* Broadcom specific AMBA
* PCI Host
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include "bcma_private.h"
#include <linux/slab.h>
#include <linux/bcma/bcma.h>
#include <linux/pci.h>
#include <linux/module.h>
/*
 * Point the PCI BAR0 windows at @core's register and wrapper spaces and
 * remember it as the currently mapped core. PCIe Gen2 bridges use a
 * different config offset for the second window.
 */
static void bcma_host_pci_switch_core(struct bcma_device *core)
{
	int win2 = core->bus->host_is_pcie2 ?
		BCMA_PCIE2_BAR0_WIN2 : BCMA_PCI_BAR0_WIN2;

	pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN,
			       core->addr);
	pci_write_config_dword(core->bus->host_pci, win2, core->wrap);
	core->bus->mapped_core = core;
	bcma_debug(core->bus, "Switched to core: 0x%X\n", core->id.id);
}
/* Provides access to the requested core. Returns base offset that has to be
 * used. It makes use of fixed windows when possible. */
static u16 bcma_host_pci_provide_access_to_core(struct bcma_device *core)
{
	switch (core->id.id) {
	case BCMA_CORE_CHIPCOMMON:
		/* Fixed window, no BAR0 reprogramming needed */
		return 3 * BCMA_CORE_SIZE;
	case BCMA_CORE_PCIE:
		return 2 * BCMA_CORE_SIZE;
	}

	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	return 0;
}
/* MMIO read accessors: adjust the offset for the core's window, then read. */
static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset)
{
	offset += bcma_host_pci_provide_access_to_core(core);
	return ioread8(core->bus->mmio + offset);
}
static u16 bcma_host_pci_read16(struct bcma_device *core, u16 offset)
{
	offset += bcma_host_pci_provide_access_to_core(core);
	return ioread16(core->bus->mmio + offset);
}
static u32 bcma_host_pci_read32(struct bcma_device *core, u16 offset)
{
	offset += bcma_host_pci_provide_access_to_core(core);
	return ioread32(core->bus->mmio + offset);
}
/* MMIO write accessors: adjust the offset for the core's window, then write. */
static void bcma_host_pci_write8(struct bcma_device *core, u16 offset,
				 u8 value)
{
	offset += bcma_host_pci_provide_access_to_core(core);
	iowrite8(value, core->bus->mmio + offset);
}
static void bcma_host_pci_write16(struct bcma_device *core, u16 offset,
				 u16 value)
{
	offset += bcma_host_pci_provide_access_to_core(core);
	iowrite16(value, core->bus->mmio + offset);
}
static void bcma_host_pci_write32(struct bcma_device *core, u16 offset,
				 u32 value)
{
	offset += bcma_host_pci_provide_access_to_core(core);
	iowrite32(value, core->bus->mmio + offset);
}
#ifdef CONFIG_BCMA_BLOCKIO
/*
 * Repeated read of @count bytes from a single register into @buffer.
 * @count must be a multiple of @reg_width (2 or 4); violations only WARN.
 */
static void bcma_host_pci_block_read(struct bcma_device *core, void *buffer,
				     size_t count, u16 offset, u8 reg_width)
{
	void __iomem *addr = core->bus->mmio + offset;
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	switch (reg_width) {
	case sizeof(u8):
		ioread8_rep(addr, buffer, count);
		break;
	case sizeof(u16):
		WARN_ON(count & 1);
		ioread16_rep(addr, buffer, count >> 1);
		break;
	case sizeof(u32):
		WARN_ON(count & 3);
		ioread32_rep(addr, buffer, count >> 2);
		break;
	default:
		WARN_ON(1);
	}
}
/*
 * Repeated write of @count bytes from @buffer to a single register.
 * Mirror of bcma_host_pci_block_read(); same alignment expectations.
 */
static void bcma_host_pci_block_write(struct bcma_device *core,
				      const void *buffer, size_t count,
				      u16 offset, u8 reg_width)
{
	void __iomem *addr = core->bus->mmio + offset;
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	switch (reg_width) {
	case sizeof(u8):
		iowrite8_rep(addr, buffer, count);
		break;
	case sizeof(u16):
		WARN_ON(count & 1);
		iowrite16_rep(addr, buffer, count >> 1);
		break;
	case sizeof(u32):
		WARN_ON(count & 3);
		iowrite32_rep(addr, buffer, count >> 2);
		break;
	default:
		WARN_ON(1);
	}
}
#endif
/* Agent/wrapper-space accessors: these go through the second BAR0 window. */
static u32 bcma_host_pci_aread32(struct bcma_device *core, u16 offset)
{
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	return ioread32(core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset);
}
static void bcma_host_pci_awrite32(struct bcma_device *core, u16 offset,
				   u32 value)
{
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	iowrite32(value, core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset);
}
/* Host ops vtable installed on PCI-hosted buses. */
static const struct bcma_host_ops bcma_host_pci_ops = {
	.read8		= bcma_host_pci_read8,
	.read16		= bcma_host_pci_read16,
	.read32		= bcma_host_pci_read32,
	.write8		= bcma_host_pci_write8,
	.write16	= bcma_host_pci_write16,
	.write32	= bcma_host_pci_write32,
#ifdef CONFIG_BCMA_BLOCKIO
	.block_read	= bcma_host_pci_block_read,
	.block_write	= bcma_host_pci_block_write,
#endif
	.aread32	= bcma_host_pci_aread32,
	.awrite32	= bcma_host_pci_awrite32,
};
/*
 * PCI probe: allocate and set up a bcma_bus for the bridge device, map
 * BAR0, scan once to detect a PCIe Gen2 host core, then register the bus.
 * Uses goto-based unwinding for cleanup on any failure.
 */
static int bcma_host_pci_probe(struct pci_dev *dev,
			       const struct pci_device_id *id)
{
	struct bcma_bus *bus;
	int err = -ENOMEM;
	u32 val;

	/* Alloc */
	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus)
		goto out;

	/* Basic PCI configuration */
	err = pci_enable_device(dev);
	if (err)
		goto err_kfree_bus;

	err = pci_request_regions(dev, "bcma-pci-bridge");
	if (err)
		goto err_pci_disable;
	pci_set_master(dev);

	/* Disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_read_config_dword(dev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(dev, 0x40, val & 0xffff00ff);

	/* SSB needed additional powering up, do we have any AMBA PCI cards? */
	if (!pci_is_pcie(dev)) {
		bcma_err(bus, "PCI card detected, they are not supported.\n");
		err = -ENXIO;
		goto err_pci_release_regions;
	}

	bus->dev = &dev->dev;

	/* Map MMIO */
	err = -ENOMEM;
	bus->mmio = pci_iomap(dev, 0, ~0UL);
	if (!bus->mmio)
		goto err_pci_release_regions;

	/* Host specific */
	bus->host_pci = dev;
	bus->hosttype = BCMA_HOSTTYPE_PCI;
	bus->ops = &bcma_host_pci_ops;

	bus->boardinfo.vendor = bus->host_pci->subsystem_vendor;
	bus->boardinfo.type = bus->host_pci->subsystem_device;

	/* Initialize struct, detect chip */
	bcma_init_bus(bus);

	/* Scan bus to find out generation of PCIe core */
	err = bcma_bus_scan(bus);
	if (err)
		goto err_pci_unmap_mmio;

	if (bcma_find_core(bus, BCMA_CORE_PCIE2))
		bus->host_is_pcie2 = true;

	/* Register */
	err = bcma_bus_register(bus);
	if (err)
		goto err_unregister_cores;

	pci_set_drvdata(dev, bus);

out:
	return err;

err_unregister_cores:
	bcma_unregister_cores(bus);
err_pci_unmap_mmio:
	pci_iounmap(dev, bus->mmio);
err_pci_release_regions:
	pci_release_regions(dev);
err_pci_disable:
	pci_disable_device(dev);
err_kfree_bus:
	kfree(bus);
	return err;
}
/* PCI remove: unwind everything done in bcma_host_pci_probe(). */
static void bcma_host_pci_remove(struct pci_dev *dev)
{
	struct bcma_bus *bus = pci_get_drvdata(dev);

	bcma_bus_unregister(bus);
	pci_iounmap(dev, bus->mmio);
	pci_release_regions(dev);
	pci_disable_device(dev);
	kfree(bus);
}
#ifdef CONFIG_PM_SLEEP
/*
 * Suspend: forget the currently mapped core (the BAR0 window contents are
 * not preserved across sleep) and suspend all cores on the bus.
 */
static int bcma_host_pci_suspend(struct device *dev)
{
	struct bcma_bus *bus = dev_get_drvdata(dev);

	bus->mapped_core = NULL;

	return bcma_bus_suspend(bus);
}

static int bcma_host_pci_resume(struct device *dev)
{
	struct bcma_bus *bus = dev_get_drvdata(dev);

	return bcma_bus_resume(bus);
}

static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
			 bcma_host_pci_resume);
#define BCMA_PM_OPS	(&bcma_pm_ops)

#else /* CONFIG_PM_SLEEP */

#define BCMA_PM_OPS	NULL

#endif /* CONFIG_PM_SLEEP */
/* PCI IDs of supported Broadcom bridge devices. */
static const struct pci_device_id bcma_pci_bridge_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4313) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) },	/* 0xa8d8 */
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4358) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4360) },
	/* 0x4365: only specific subsystem IDs are bcma-based */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_DELL, 0x0016) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_DELL, 0x0018) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_FOXCONN, 0xe092) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_HP, 0x804a) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43b1) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43227) },	/* 0xa8db, BCM43217 (sic!) */
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43228) },	/* 0xa8dc */
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, bcma_pci_bridge_tbl);
/* The PCI driver for Broadcom bcma bridges. */
static struct pci_driver bcma_pci_bridge_driver = {
	.name = "bcma-pci-bridge",
	.id_table = bcma_pci_bridge_tbl,
	.probe = bcma_host_pci_probe,
	.remove = bcma_host_pci_remove,
	.driver.pm = BCMA_PM_OPS,
};

/* Called from bcma_modinit() in main.c, not a module_init of its own. */
int __init bcma_host_pci_init(void)
{
	return pci_register_driver(&bcma_pci_bridge_driver);
}

void __exit bcma_host_pci_exit(void)
{
	pci_unregister_driver(&bcma_pci_bridge_driver);
}
/**************************************************
 * Runtime ops for drivers.
 **************************************************/

/* See also pcicore_up */
void bcma_host_pci_up(struct bcma_bus *bus)
{
	if (bus->hosttype != BCMA_HOSTTYPE_PCI)
		return;

	if (bus->host_is_pcie2)
		bcma_core_pcie2_up(&bus->drv_pcie2);
	else
		bcma_core_pci_up(&bus->drv_pci[0]);
}
EXPORT_SYMBOL_GPL(bcma_host_pci_up);

/* See also pcicore_down; note there is no PCIe Gen2 "down" counterpart */
void bcma_host_pci_down(struct bcma_bus *bus)
{
	if (bus->hosttype != BCMA_HOSTTYPE_PCI)
		return;

	if (!bus->host_is_pcie2)
		bcma_core_pci_down(&bus->drv_pci[0]);
}
EXPORT_SYMBOL_GPL(bcma_host_pci_down);
/* See also si_pci_setup */
/*
 * Enable or disable routing of @core's interrupt through the PCI core by
 * toggling the core's bit in the bridge's IRQ mask config register.
 * No-op (returns 0) on non-PCI hosts.
 */
int bcma_host_pci_irq_ctl(struct bcma_bus *bus, struct bcma_device *core,
			  bool enable)
{
	struct pci_dev *pdev;
	u32 coremask, tmp;
	int err = 0;

	if (bus->hosttype != BCMA_HOSTTYPE_PCI) {
		/* This bcma device is not on a PCI host-bus. So the IRQs are
		 * not routed through the PCI core.
		 * So we must not enable routing through the PCI core. */
		goto out;
	}

	pdev = bus->host_pci;

	err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
	if (err)
		goto out;

	/* Core bits start at bit 8 of the IRQ mask register */
	coremask = BIT(core->core_index) << 8;
	if (enable)
		tmp |= coremask;
	else
		tmp &= ~coremask;

	err = pci_write_config_dword(pdev, BCMA_PCI_IRQMASK, tmp);

out:
	return err;
}
EXPORT_SYMBOL_GPL(bcma_host_pci_irq_ctl);
| linux-master | drivers/bcma/host_pci.c |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.