// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2023 Richtek Technology Corp.
*
* Authors:
* Alice Chen <[email protected]>
* ChiYuan Huang <[email protected]>
*/
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/led-class-flash.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <media/v4l2-flash-led-class.h>
enum {
MT6370_LED_FLASH1 = 0,
MT6370_LED_FLASH2,
MT6370_MAX_LEDS
};
#define MT6370_REG_FLEDEN 0x17E
#define MT6370_REG_STRBTO 0x173
#define MT6370_REG_CHGSTAT2 0x1D1
#define MT6370_REG_FLEDSTAT1 0x1D9
#define MT6370_REG_FLEDISTRB(_id) (0x174 + 4 * (_id))
#define MT6370_REG_FLEDITOR(_id) (0x175 + 4 * (_id))
#define MT6370_ITORCH_MASK GENMASK(4, 0)
#define MT6370_ISTROBE_MASK GENMASK(6, 0)
#define MT6370_STRBTO_MASK GENMASK(6, 0)
#define MT6370_TORCHEN_MASK BIT(3)
#define MT6370_STROBEN_MASK BIT(2)
#define MT6370_FLCSEN_MASK(_id) BIT(MT6370_LED_FLASH2 - (_id))
#define MT6370_FLCSEN_MASK_ALL GENMASK(1, 0)
#define MT6370_FLEDCHGVINOVP_MASK BIT(3)
#define MT6370_FLED1STRBTO_MASK BIT(11)
#define MT6370_FLED2STRBTO_MASK BIT(10)
#define MT6370_FLED1STRB_MASK BIT(9)
#define MT6370_FLED2STRB_MASK BIT(8)
#define MT6370_FLED1SHORT_MASK BIT(7)
#define MT6370_FLED2SHORT_MASK BIT(6)
#define MT6370_FLEDLVF_MASK BIT(3)
#define MT6370_LED_JOINT 2
#define MT6370_RANGE_FLED_REG 4
#define MT6370_ITORCH_MIN_uA 25000
#define MT6370_ITORCH_STEP_uA 12500
#define MT6370_ITORCH_MAX_uA 400000
#define MT6370_ITORCH_DOUBLE_MAX_uA 800000
#define MT6370_ISTRB_MIN_uA 50000
#define MT6370_ISTRB_STEP_uA 12500
#define MT6370_ISTRB_MAX_uA 1500000
#define MT6370_ISTRB_DOUBLE_MAX_uA 3000000
#define MT6370_STRBTO_MIN_US 64000
#define MT6370_STRBTO_STEP_US 32000
#define MT6370_STRBTO_MAX_US 2432000
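/*
 * Worked example (added for illustration; derived from the constants above,
 * not from the datasheet): a requested torch current of 150000 uA maps to
 * the register level
 *
 * (150000 - MT6370_ITORCH_MIN_uA) / MT6370_ITORCH_STEP_uA
 * = (150000 - 25000) / 12500 = 10
 *
 * i.e. the eleventh step of the 25 mA..400 mA single-channel torch range.
 */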
#define to_mt6370_led(ptr, member) container_of(ptr, struct mt6370_led, member)
struct mt6370_led {
struct led_classdev_flash flash;
struct v4l2_flash *v4l2_flash;
struct mt6370_priv *priv;
u8 led_no;
};
struct mt6370_priv {
struct regmap *regmap;
struct mutex lock;
unsigned int fled_strobe_used;
unsigned int fled_torch_used;
unsigned int leds_active;
unsigned int leds_count;
struct mt6370_led leds[];
};
static int mt6370_torch_brightness_set(struct led_classdev *lcdev, enum led_brightness level)
{
struct mt6370_led *led = to_mt6370_led(lcdev, flash.led_cdev);
struct mt6370_priv *priv = led->priv;
u32 led_enable_mask = led->led_no == MT6370_LED_JOINT ? MT6370_FLCSEN_MASK_ALL :
MT6370_FLCSEN_MASK(led->led_no);
u32 enable_mask = MT6370_TORCHEN_MASK | led_enable_mask;
u32 val = level ? led_enable_mask : 0;
u32 curr;
int ret, i;
mutex_lock(&priv->lock);
/*
* There is only one set of flash control logic, and this flag is used to check if 'strobe'
* is currently being used.
*/
if (priv->fled_strobe_used) {
dev_warn(lcdev->dev, "Please disable strobe first [%d]\n", priv->fled_strobe_used);
ret = -EBUSY;
goto unlock;
}
if (level)
curr = priv->fled_torch_used | BIT(led->led_no);
else
curr = priv->fled_torch_used & ~BIT(led->led_no);
if (curr)
val |= MT6370_TORCHEN_MASK;
if (level) {
level -= 1;
if (led->led_no == MT6370_LED_JOINT) {
u32 flevel[MT6370_MAX_LEDS];
/*
* There are two flash channels in the MT6370. If joint flash output is used,
* the torch current is split evenly between the two channels.
*/
flevel[0] = level / 2;
flevel[1] = level - flevel[0];
for (i = 0; i < MT6370_MAX_LEDS; i++) {
ret = regmap_update_bits(priv->regmap, MT6370_REG_FLEDITOR(i),
MT6370_ITORCH_MASK, flevel[i]);
if (ret)
goto unlock;
}
} else {
ret = regmap_update_bits(priv->regmap, MT6370_REG_FLEDITOR(led->led_no),
MT6370_ITORCH_MASK, level);
if (ret)
goto unlock;
}
}
ret = regmap_update_bits(priv->regmap, MT6370_REG_FLEDEN, enable_mask, val);
if (ret)
goto unlock;
priv->fled_torch_used = curr;
unlock:
mutex_unlock(&priv->lock);
return ret;
}
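/*
 * Illustrative sketch (not part of the original driver): in joint mode the
 * zero-based torch level is split across the two channels so that they
 * differ by at most one step. For a level of 11:
 *
 * flevel[0] = 11 / 2;          -> 5, written to FLED1's torch current field
 * flevel[1] = 11 - flevel[0];  -> 6, written to FLED2's torch current field
 */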
static int mt6370_flash_brightness_set(struct led_classdev_flash *fl_cdev, u32 brightness)
{
/*
* Because of the current spikes when turning on the flash, the brightness should be kept
* by the LED framework. This empty function exists only to keep the ops check from
* failing when the led_classdev_flash is registered.
*/
return 0;
}
static int _mt6370_flash_brightness_set(struct led_classdev_flash *fl_cdev, u32 brightness)
{
struct mt6370_led *led = to_mt6370_led(fl_cdev, flash);
struct mt6370_priv *priv = led->priv;
struct led_flash_setting *setting = &fl_cdev->brightness;
u32 val = (brightness - setting->min) / setting->step;
int ret, i;
if (led->led_no == MT6370_LED_JOINT) {
u32 flevel[MT6370_MAX_LEDS];
/*
* There are two flash channels in the MT6370. If joint flash output is used, the
* strobe current is split evenly between the two channels.
*/
flevel[0] = val / 2;
flevel[1] = val - flevel[0];
for (i = 0; i < MT6370_MAX_LEDS; i++) {
ret = regmap_update_bits(priv->regmap, MT6370_REG_FLEDISTRB(i),
MT6370_ISTROBE_MASK, flevel[i]);
if (ret)
break;
}
} else {
ret = regmap_update_bits(priv->regmap, MT6370_REG_FLEDISTRB(led->led_no),
MT6370_ISTROBE_MASK, val);
}
return ret;
}
static int mt6370_strobe_set(struct led_classdev_flash *fl_cdev, bool state)
{
struct mt6370_led *led = to_mt6370_led(fl_cdev, flash);
struct mt6370_priv *priv = led->priv;
struct led_classdev *lcdev = &fl_cdev->led_cdev;
struct led_flash_setting *s = &fl_cdev->brightness;
u32 led_enable_mask = led->led_no == MT6370_LED_JOINT ? MT6370_FLCSEN_MASK_ALL :
MT6370_FLCSEN_MASK(led->led_no);
u32 enable_mask = MT6370_STROBEN_MASK | led_enable_mask;
u32 val = state ? led_enable_mask : 0;
u32 curr;
int ret;
mutex_lock(&priv->lock);
/*
* There is only one set of flash control logic, and this flag is used to check if 'torch'
* is currently being used.
*/
if (priv->fled_torch_used) {
dev_warn(lcdev->dev, "Please disable torch first [0x%x]\n", priv->fled_torch_used);
ret = -EBUSY;
goto unlock;
}
if (state)
curr = priv->fled_strobe_used | BIT(led->led_no);
else
curr = priv->fled_strobe_used & ~BIT(led->led_no);
if (curr)
val |= MT6370_STROBEN_MASK;
ret = regmap_update_bits(priv->regmap, MT6370_REG_FLEDEN, enable_mask, val);
if (ret) {
dev_err(lcdev->dev, "[%d] control current source %d fail\n", led->led_no, state);
goto unlock;
}
/*
* If the flash needs to turn on, configure the flash current to ramp up to the setting
* value. Otherwise, always revert to the minimum one.
*/
ret = _mt6370_flash_brightness_set(fl_cdev, state ? s->val : s->min);
if (ret) {
dev_err(lcdev->dev, "[%d] Failed to set brightness\n", led->led_no);
goto unlock;
}
/*
* When the flash turns on/off, we must wait for the HW ramping up/down time
* (5 ms/500 us respectively) to prevent unexpected behavior.
*/
if (!priv->fled_strobe_used && curr)
usleep_range(5000, 6000);
else if (priv->fled_strobe_used && !curr)
usleep_range(500, 600);
priv->fled_strobe_used = curr;
unlock:
mutex_unlock(&priv->lock);
return ret;
}
static int mt6370_strobe_get(struct led_classdev_flash *fl_cdev, bool *state)
{
struct mt6370_led *led = to_mt6370_led(fl_cdev, flash);
struct mt6370_priv *priv = led->priv;
mutex_lock(&priv->lock);
*state = !!(priv->fled_strobe_used & BIT(led->led_no));
mutex_unlock(&priv->lock);
return 0;
}
static int mt6370_timeout_set(struct led_classdev_flash *fl_cdev, u32 timeout)
{
struct mt6370_led *led = to_mt6370_led(fl_cdev, flash);
struct mt6370_priv *priv = led->priv;
struct led_flash_setting *s = &fl_cdev->timeout;
u32 val = (timeout - s->min) / s->step;
return regmap_update_bits(priv->regmap, MT6370_REG_STRBTO, MT6370_STRBTO_MASK, val);
}
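/*
 * Worked example (illustrative, using the constants above): the maximum
 * strobe timeout of 2432000 us encodes as
 *
 * (2432000 - 64000) / 32000 = 74
 *
 * which fits comfortably in the 7-bit MT6370_STRBTO_MASK field.
 */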
static int mt6370_fault_get(struct led_classdev_flash *fl_cdev, u32 *fault)
{
struct mt6370_led *led = to_mt6370_led(fl_cdev, flash);
struct mt6370_priv *priv = led->priv;
u16 fled_stat;
unsigned int chg_stat, strobe_timeout_mask, fled_short_mask;
u32 rfault = 0;
int ret;
ret = regmap_read(priv->regmap, MT6370_REG_CHGSTAT2, &chg_stat);
if (ret)
return ret;
ret = regmap_raw_read(priv->regmap, MT6370_REG_FLEDSTAT1, &fled_stat, sizeof(fled_stat));
if (ret)
return ret;
switch (led->led_no) {
case MT6370_LED_FLASH1:
strobe_timeout_mask = MT6370_FLED1STRBTO_MASK;
fled_short_mask = MT6370_FLED1SHORT_MASK;
break;
case MT6370_LED_FLASH2:
strobe_timeout_mask = MT6370_FLED2STRBTO_MASK;
fled_short_mask = MT6370_FLED2SHORT_MASK;
break;
case MT6370_LED_JOINT:
strobe_timeout_mask = MT6370_FLED1STRBTO_MASK | MT6370_FLED2STRBTO_MASK;
fled_short_mask = MT6370_FLED1SHORT_MASK | MT6370_FLED2SHORT_MASK;
break;
default:
return -EINVAL;
}
if (chg_stat & MT6370_FLEDCHGVINOVP_MASK)
rfault |= LED_FAULT_INPUT_VOLTAGE;
if (fled_stat & strobe_timeout_mask)
rfault |= LED_FAULT_TIMEOUT;
if (fled_stat & fled_short_mask)
rfault |= LED_FAULT_SHORT_CIRCUIT;
if (fled_stat & MT6370_FLEDLVF_MASK)
rfault |= LED_FAULT_UNDER_VOLTAGE;
*fault = rfault;
return ret;
}
static const struct led_flash_ops mt6370_flash_ops = {
.flash_brightness_set = mt6370_flash_brightness_set,
.strobe_set = mt6370_strobe_set,
.strobe_get = mt6370_strobe_get,
.timeout_set = mt6370_timeout_set,
.fault_get = mt6370_fault_get,
};
#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS)
static int mt6370_flash_external_strobe_set(struct v4l2_flash *v4l2_flash,
bool enable)
{
struct led_classdev_flash *flash = v4l2_flash->fled_cdev;
struct mt6370_led *led = to_mt6370_led(flash, flash);
struct mt6370_priv *priv = led->priv;
u32 mask = led->led_no == MT6370_LED_JOINT ? MT6370_FLCSEN_MASK_ALL :
MT6370_FLCSEN_MASK(led->led_no);
u32 val = enable ? mask : 0;
int ret;
mutex_lock(&priv->lock);
ret = regmap_update_bits(priv->regmap, MT6370_REG_FLEDEN, mask, val);
if (ret)
goto unlock;
if (enable)
priv->fled_strobe_used |= BIT(led->led_no);
else
priv->fled_strobe_used &= ~BIT(led->led_no);
unlock:
mutex_unlock(&priv->lock);
return ret;
}
static const struct v4l2_flash_ops v4l2_flash_ops = {
.external_strobe_set = mt6370_flash_external_strobe_set,
};
static void mt6370_init_v4l2_flash_config(struct mt6370_led *led, struct v4l2_flash_config *cfg)
{
struct led_classdev *lcdev;
struct led_flash_setting *s = &cfg->intensity;
lcdev = &led->flash.led_cdev;
s->min = MT6370_ITORCH_MIN_uA;
s->step = MT6370_ITORCH_STEP_uA;
s->val = s->max = s->min + (lcdev->max_brightness - 1) * s->step;
cfg->has_external_strobe = 1;
strscpy(cfg->dev_name, dev_name(lcdev->dev), sizeof(cfg->dev_name));
cfg->flash_faults = LED_FAULT_SHORT_CIRCUIT | LED_FAULT_TIMEOUT |
LED_FAULT_INPUT_VOLTAGE | LED_FAULT_UNDER_VOLTAGE;
}
#else
static const struct v4l2_flash_ops v4l2_flash_ops;
static void mt6370_init_v4l2_flash_config(struct mt6370_led *led, struct v4l2_flash_config *cfg)
{
}
#endif
static void mt6370_v4l2_flash_release(void *v4l2_flash)
{
v4l2_flash_release(v4l2_flash);
}
static int mt6370_led_register(struct device *parent, struct mt6370_led *led,
struct fwnode_handle *fwnode)
{
struct led_init_data init_data = { .fwnode = fwnode };
struct v4l2_flash_config v4l2_config = {};
int ret;
ret = devm_led_classdev_flash_register_ext(parent, &led->flash, &init_data);
if (ret)
return dev_err_probe(parent, ret, "Couldn't register flash %d\n", led->led_no);
mt6370_init_v4l2_flash_config(led, &v4l2_config);
led->v4l2_flash = v4l2_flash_init(parent, fwnode, &led->flash, &v4l2_flash_ops,
&v4l2_config);
if (IS_ERR(led->v4l2_flash))
return dev_err_probe(parent, PTR_ERR(led->v4l2_flash),
"Failed to register %d v4l2 sd\n", led->led_no);
return devm_add_action_or_reset(parent, mt6370_v4l2_flash_release, led->v4l2_flash);
}
static u32 mt6370_clamp(u32 val, u32 min, u32 max, u32 step)
{
u32 retval;
retval = clamp_val(val, min, max);
if (step > 1)
retval = rounddown(retval - min, step) + min;
return retval;
}
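/*
 * Example behaviour (added for clarity; the 130000 uA input is an assumed
 * DT value): a torch "led-max-microamp" of 130000 first clamps into
 * [25000, 400000] and is then rounded down onto the 12500 uA grid:
 *
 * rounddown(130000 - 25000, 12500) + 25000 = 125000
 */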
static int mt6370_init_flash_properties(struct device *dev, struct mt6370_led *led,
struct fwnode_handle *fwnode)
{
struct led_classdev_flash *flash = &led->flash;
struct led_classdev *lcdev = &flash->led_cdev;
struct mt6370_priv *priv = led->priv;
struct led_flash_setting *s;
u32 sources[MT6370_MAX_LEDS];
u32 max_ua, val;
int i, ret, num;
num = fwnode_property_count_u32(fwnode, "led-sources");
if (num < 1)
return dev_err_probe(dev, -EINVAL,
"Not specified or wrong number of led-sources\n");
ret = fwnode_property_read_u32_array(fwnode, "led-sources", sources, num);
if (ret)
return ret;
for (i = 0; i < num; i++) {
if (sources[i] >= MT6370_MAX_LEDS)
return -EINVAL;
if (priv->leds_active & BIT(sources[i]))
return -EINVAL;
priv->leds_active |= BIT(sources[i]);
}
/* If both channels are specified in 'led-sources', joint flash output mode is used */
led->led_no = num == 2 ? MT6370_LED_JOINT : sources[0];
max_ua = num == 2 ? MT6370_ITORCH_DOUBLE_MAX_uA : MT6370_ITORCH_MAX_uA;
val = MT6370_ITORCH_MIN_uA;
ret = fwnode_property_read_u32(fwnode, "led-max-microamp", &val);
if (!ret)
val = mt6370_clamp(val, MT6370_ITORCH_MIN_uA, max_ua, MT6370_ITORCH_STEP_uA);
lcdev->max_brightness = (val - MT6370_ITORCH_MIN_uA) / MT6370_ITORCH_STEP_uA + 1;
lcdev->brightness_set_blocking = mt6370_torch_brightness_set;
lcdev->flags |= LED_DEV_CAP_FLASH;
max_ua = num == 2 ? MT6370_ISTRB_DOUBLE_MAX_uA : MT6370_ISTRB_MAX_uA;
val = MT6370_ISTRB_MIN_uA;
ret = fwnode_property_read_u32(fwnode, "flash-max-microamp", &val);
if (!ret)
val = mt6370_clamp(val, MT6370_ISTRB_MIN_uA, max_ua, MT6370_ISTRB_STEP_uA);
s = &flash->brightness;
s->min = MT6370_ISTRB_MIN_uA;
s->step = MT6370_ISTRB_STEP_uA;
s->val = s->max = val;
/* Always configure to the minimum level when off to prevent flash current spikes. */
ret = _mt6370_flash_brightness_set(flash, s->min);
if (ret)
return ret;
val = MT6370_STRBTO_MIN_US;
ret = fwnode_property_read_u32(fwnode, "flash-max-timeout-us", &val);
if (!ret)
val = mt6370_clamp(val, MT6370_STRBTO_MIN_US, MT6370_STRBTO_MAX_US,
MT6370_STRBTO_STEP_US);
s = &flash->timeout;
s->min = MT6370_STRBTO_MIN_US;
s->step = MT6370_STRBTO_STEP_US;
s->val = s->max = val;
flash->ops = &mt6370_flash_ops;
return 0;
}
static int mt6370_led_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mt6370_priv *priv;
struct fwnode_handle *child;
size_t count;
int i = 0, ret;
count = device_get_child_node_count(dev);
if (!count || count > MT6370_MAX_LEDS)
return dev_err_probe(dev, -EINVAL,
"No child node or node count over max led number %zu\n", count);
priv = devm_kzalloc(dev, struct_size(priv, leds, count), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->leds_count = count;
mutex_init(&priv->lock);
priv->regmap = dev_get_regmap(dev->parent, NULL);
if (!priv->regmap)
return dev_err_probe(dev, -ENODEV, "Failed to get parent regmap\n");
device_for_each_child_node(dev, child) {
struct mt6370_led *led = priv->leds + i;
led->priv = priv;
ret = mt6370_init_flash_properties(dev, led, child);
if (ret) {
fwnode_handle_put(child);
return ret;
}
ret = mt6370_led_register(dev, led, child);
if (ret) {
fwnode_handle_put(child);
return ret;
}
i++;
}
return 0;
}
static const struct of_device_id mt6370_led_of_id[] = {
{ .compatible = "mediatek,mt6370-flashlight" },
{}
};
MODULE_DEVICE_TABLE(of, mt6370_led_of_id);
static struct platform_driver mt6370_led_driver = {
.driver = {
.name = "mt6370-flashlight",
.of_match_table = mt6370_led_of_id,
},
.probe = mt6370_led_probe,
};
module_platform_driver(mt6370_led_driver);
MODULE_AUTHOR("Alice Chen <[email protected]>");
MODULE_AUTHOR("ChiYuan Huang <[email protected]>");
MODULE_DESCRIPTION("MT6370 FLASH LED Driver");
MODULE_LICENSE("GPL");
/* Source: linux-master, drivers/leds/flash/leds-mt6370-flash.c */
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2020 Luca Weiss <[email protected]>
#include <linux/gpio/consumer.h>
#include <linux/led-class-flash.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/platform_device.h>
#include <media/v4l2-flash-led-class.h>
#define FLASH_TIMEOUT_DEFAULT 250000U /* 250ms */
#define FLASH_MAX_TIMEOUT_DEFAULT 300000U /* 300ms */
struct sgm3140 {
struct led_classdev_flash fled_cdev;
struct v4l2_flash *v4l2_flash;
struct timer_list powerdown_timer;
struct gpio_desc *flash_gpio;
struct gpio_desc *enable_gpio;
struct regulator *vin_regulator;
bool enabled;
/* current timeout in us */
u32 timeout;
/* maximum timeout in us */
u32 max_timeout;
};
static struct sgm3140 *flcdev_to_sgm3140(struct led_classdev_flash *flcdev)
{
return container_of(flcdev, struct sgm3140, fled_cdev);
}
static int sgm3140_strobe_set(struct led_classdev_flash *fled_cdev, bool state)
{
struct sgm3140 *priv = flcdev_to_sgm3140(fled_cdev);
int ret;
if (priv->enabled == state)
return 0;
if (state) {
ret = regulator_enable(priv->vin_regulator);
if (ret) {
dev_err(fled_cdev->led_cdev.dev,
"failed to enable regulator: %d\n", ret);
return ret;
}
gpiod_set_value_cansleep(priv->flash_gpio, 1);
gpiod_set_value_cansleep(priv->enable_gpio, 1);
mod_timer(&priv->powerdown_timer,
jiffies + usecs_to_jiffies(priv->timeout));
} else {
del_timer_sync(&priv->powerdown_timer);
gpiod_set_value_cansleep(priv->enable_gpio, 0);
gpiod_set_value_cansleep(priv->flash_gpio, 0);
ret = regulator_disable(priv->vin_regulator);
if (ret) {
dev_err(fled_cdev->led_cdev.dev,
"failed to disable regulator: %d\n", ret);
return ret;
}
}
priv->enabled = state;
return 0;
}
static int sgm3140_strobe_get(struct led_classdev_flash *fled_cdev, bool *state)
{
struct sgm3140 *priv = flcdev_to_sgm3140(fled_cdev);
*state = timer_pending(&priv->powerdown_timer);
return 0;
}
static int sgm3140_timeout_set(struct led_classdev_flash *fled_cdev,
u32 timeout)
{
struct sgm3140 *priv = flcdev_to_sgm3140(fled_cdev);
priv->timeout = timeout;
return 0;
}
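/*
 * Note added for illustration: the timeout is only latched here and takes
 * effect in sgm3140_strobe_set() via mod_timer(). With the 250 ms default
 * and an assumed HZ=100 config, usecs_to_jiffies(250000) arms the powerdown
 * timer 25 jiffies (250 ms) after the strobe is enabled.
 */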
static const struct led_flash_ops sgm3140_flash_ops = {
.strobe_set = sgm3140_strobe_set,
.strobe_get = sgm3140_strobe_get,
.timeout_set = sgm3140_timeout_set,
};
static int sgm3140_brightness_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
struct sgm3140 *priv = flcdev_to_sgm3140(fled_cdev);
bool enable = brightness == LED_ON;
int ret;
if (priv->enabled == enable)
return 0;
if (enable) {
ret = regulator_enable(priv->vin_regulator);
if (ret) {
dev_err(led_cdev->dev,
"failed to enable regulator: %d\n", ret);
return ret;
}
gpiod_set_value_cansleep(priv->enable_gpio, 1);
} else {
gpiod_set_value_cansleep(priv->enable_gpio, 0);
ret = regulator_disable(priv->vin_regulator);
if (ret) {
dev_err(led_cdev->dev,
"failed to disable regulator: %d\n", ret);
return ret;
}
}
priv->enabled = enable;
return 0;
}
static void sgm3140_powerdown_timer(struct timer_list *t)
{
struct sgm3140 *priv = from_timer(priv, t, powerdown_timer);
gpiod_set_value(priv->enable_gpio, 0);
gpiod_set_value(priv->flash_gpio, 0);
regulator_disable(priv->vin_regulator);
priv->enabled = false;
}
static void sgm3140_init_flash_timeout(struct sgm3140 *priv)
{
struct led_classdev_flash *fled_cdev = &priv->fled_cdev;
struct led_flash_setting *s;
/* Init flash timeout setting */
s = &fled_cdev->timeout;
s->min = 1;
s->max = priv->max_timeout;
s->step = 1;
s->val = FLASH_TIMEOUT_DEFAULT;
}
#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS)
static void sgm3140_init_v4l2_flash_config(struct sgm3140 *priv,
struct v4l2_flash_config *v4l2_sd_cfg)
{
struct led_classdev *led_cdev = &priv->fled_cdev.led_cdev;
struct led_flash_setting *s;
strscpy(v4l2_sd_cfg->dev_name, led_cdev->dev->kobj.name,
sizeof(v4l2_sd_cfg->dev_name));
/* Init flash intensity setting */
s = &v4l2_sd_cfg->intensity;
s->min = 0;
s->max = 1;
s->step = 1;
s->val = 1;
}
#else
static void sgm3140_init_v4l2_flash_config(struct sgm3140 *priv,
struct v4l2_flash_config *v4l2_sd_cfg)
{
}
#endif
static int sgm3140_probe(struct platform_device *pdev)
{
struct sgm3140 *priv;
struct led_classdev *led_cdev;
struct led_classdev_flash *fled_cdev;
struct led_init_data init_data = {};
struct fwnode_handle *child_node;
struct v4l2_flash_config v4l2_sd_cfg = {};
int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->flash_gpio = devm_gpiod_get(&pdev->dev, "flash", GPIOD_OUT_LOW);
ret = PTR_ERR_OR_ZERO(priv->flash_gpio);
if (ret)
return dev_err_probe(&pdev->dev, ret,
"Failed to request flash gpio\n");
priv->enable_gpio = devm_gpiod_get(&pdev->dev, "enable", GPIOD_OUT_LOW);
ret = PTR_ERR_OR_ZERO(priv->enable_gpio);
if (ret)
return dev_err_probe(&pdev->dev, ret,
"Failed to request enable gpio\n");
priv->vin_regulator = devm_regulator_get(&pdev->dev, "vin");
ret = PTR_ERR_OR_ZERO(priv->vin_regulator);
if (ret)
return dev_err_probe(&pdev->dev, ret,
"Failed to request regulator\n");
child_node = fwnode_get_next_available_child_node(pdev->dev.fwnode,
NULL);
if (!child_node) {
dev_err(&pdev->dev,
"No fwnode child node found for connected LED.\n");
return -EINVAL;
}
ret = fwnode_property_read_u32(child_node, "flash-max-timeout-us",
&priv->max_timeout);
if (ret) {
priv->max_timeout = FLASH_MAX_TIMEOUT_DEFAULT;
dev_warn(&pdev->dev,
"flash-max-timeout-us property missing\n");
}
/*
* Set default timeout to FLASH_TIMEOUT_DEFAULT except if max_timeout
* from DT is lower.
*/
priv->timeout = min(priv->max_timeout, FLASH_TIMEOUT_DEFAULT);
timer_setup(&priv->powerdown_timer, sgm3140_powerdown_timer, 0);
fled_cdev = &priv->fled_cdev;
led_cdev = &fled_cdev->led_cdev;
fled_cdev->ops = &sgm3140_flash_ops;
led_cdev->brightness_set_blocking = sgm3140_brightness_set;
led_cdev->max_brightness = LED_ON;
led_cdev->flags |= LED_DEV_CAP_FLASH;
sgm3140_init_flash_timeout(priv);
init_data.fwnode = child_node;
platform_set_drvdata(pdev, priv);
/* Register in the LED subsystem */
ret = devm_led_classdev_flash_register_ext(&pdev->dev,
fled_cdev, &init_data);
if (ret) {
dev_err(&pdev->dev, "Failed to register flash device: %d\n",
ret);
goto err;
}
sgm3140_init_v4l2_flash_config(priv, &v4l2_sd_cfg);
/* Create V4L2 Flash subdev */
priv->v4l2_flash = v4l2_flash_init(&pdev->dev,
child_node,
fled_cdev, NULL,
&v4l2_sd_cfg);
if (IS_ERR(priv->v4l2_flash)) {
ret = PTR_ERR(priv->v4l2_flash);
goto err;
}
return ret;
err:
fwnode_handle_put(child_node);
return ret;
}
static int sgm3140_remove(struct platform_device *pdev)
{
struct sgm3140 *priv = platform_get_drvdata(pdev);
del_timer_sync(&priv->powerdown_timer);
v4l2_flash_release(priv->v4l2_flash);
return 0;
}
static const struct of_device_id sgm3140_dt_match[] = {
{ .compatible = "ocs,ocp8110" },
{ .compatible = "richtek,rt5033-led" },
{ .compatible = "sgmicro,sgm3140" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sgm3140_dt_match);
static struct platform_driver sgm3140_driver = {
.probe = sgm3140_probe,
.remove = sgm3140_remove,
.driver = {
.name = "sgm3140",
.of_match_table = sgm3140_dt_match,
},
};
module_platform_driver(sgm3140_driver);
MODULE_AUTHOR("Luca Weiss <[email protected]>");
MODULE_DESCRIPTION("SG Micro SGM3140 charge pump LED driver");
MODULE_LICENSE("GPL v2");
/* Source: linux-master, drivers/leds/flash/leds-sgm3140.c */
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/led-class-flash.h>
#include <linux/led-class-multicolor.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <media/v4l2-flash-led-class.h>
enum {
MT6360_LED_ISNK1 = 0,
MT6360_LED_ISNK2,
MT6360_LED_ISNK3,
MT6360_LED_ISNKML,
MT6360_LED_FLASH1,
MT6360_LED_FLASH2,
MT6360_MAX_LEDS
};
#define MT6360_REG_RGBEN 0x380
#define MT6360_REG_ISNK(_led_no) (0x381 + (_led_no))
#define MT6360_ISNK_ENMASK(_led_no) BIT(7 - (_led_no))
#define MT6360_ISNK_MASK GENMASK(4, 0)
#define MT6360_CHRINDSEL_MASK BIT(3)
/* Virtual definition for multicolor */
#define MT6360_VIRTUAL_MULTICOLOR (MT6360_MAX_LEDS + 1)
#define MULTICOLOR_NUM_CHANNELS 3
#define MT6360_REG_FLEDEN 0x37E
#define MT6360_REG_STRBTO 0x373
#define MT6360_REG_FLEDBASE(_id) (0x372 + 4 * (_id - MT6360_LED_FLASH1))
#define MT6360_REG_FLEDISTRB(_id) (MT6360_REG_FLEDBASE(_id) + 2)
#define MT6360_REG_FLEDITOR(_id) (MT6360_REG_FLEDBASE(_id) + 3)
#define MT6360_REG_CHGSTAT2 0x3E1
#define MT6360_REG_FLEDSTAT1 0x3E9
#define MT6360_ITORCH_MASK GENMASK(4, 0)
#define MT6360_ISTROBE_MASK GENMASK(6, 0)
#define MT6360_STRBTO_MASK GENMASK(6, 0)
#define MT6360_TORCHEN_MASK BIT(3)
#define MT6360_STROBEN_MASK BIT(2)
#define MT6360_FLCSEN_MASK(_id) BIT(MT6360_LED_FLASH2 - _id)
#define MT6360_FLEDCHGVINOVP_MASK BIT(3)
#define MT6360_FLED1STRBTO_MASK BIT(11)
#define MT6360_FLED2STRBTO_MASK BIT(10)
#define MT6360_FLED1STRB_MASK BIT(9)
#define MT6360_FLED2STRB_MASK BIT(8)
#define MT6360_FLED1SHORT_MASK BIT(7)
#define MT6360_FLED2SHORT_MASK BIT(6)
#define MT6360_FLEDLVF_MASK BIT(3)
#define MT6360_ISNKRGB_STEPUA 2000
#define MT6360_ISNKRGB_MAXUA 24000
#define MT6360_ISNKML_STEPUA 5000
#define MT6360_ISNKML_MAXUA 150000
#define MT6360_ITORCH_MINUA 25000
#define MT6360_ITORCH_STEPUA 12500
#define MT6360_ITORCH_MAXUA 400000
#define MT6360_ISTRB_MINUA 50000
#define MT6360_ISTRB_STEPUA 12500
#define MT6360_ISTRB_MAXUA 1500000
#define MT6360_STRBTO_MINUS 64000
#define MT6360_STRBTO_STEPUS 32000
#define MT6360_STRBTO_MAXUS 2432000
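/*
 * Worked example (illustrative, from the constants above): the RGB current
 * sinks step in 2000 uA increments up to 24000 uA, so "led-max-microamp" =
 * 24000 yields max_brightness = 24000 / 2000 = 12, one brightness unit per
 * 2 mA. The moonlight (ML) sink instead steps in 5000 uA increments up to
 * 150000 uA.
 */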
struct mt6360_led {
union {
struct led_classdev isnk;
struct led_classdev_mc mc;
struct led_classdev_flash flash;
};
struct v4l2_flash *v4l2_flash;
struct mt6360_priv *priv;
u32 led_no;
enum led_default_state default_state;
};
struct mt6360_priv {
struct device *dev;
struct regmap *regmap;
struct mutex lock;
unsigned int fled_strobe_used;
unsigned int fled_torch_used;
unsigned int leds_active;
unsigned int leds_count;
struct mt6360_led leds[];
};
static int mt6360_mc_brightness_set(struct led_classdev *lcdev,
enum led_brightness level)
{
struct led_classdev_mc *mccdev = lcdev_to_mccdev(lcdev);
struct mt6360_led *led = container_of(mccdev, struct mt6360_led, mc);
struct mt6360_priv *priv = led->priv;
u32 real_bright, enable_mask = 0, enable = 0;
int i, ret;
mutex_lock(&priv->lock);
led_mc_calc_color_components(mccdev, level);
for (i = 0; i < mccdev->num_colors; i++) {
struct mc_subled *subled = mccdev->subled_info + i;
real_bright = min(lcdev->max_brightness, subled->brightness);
ret = regmap_update_bits(priv->regmap, MT6360_REG_ISNK(i),
MT6360_ISNK_MASK, real_bright);
if (ret)
goto out;
enable_mask |= MT6360_ISNK_ENMASK(subled->channel);
if (real_bright)
enable |= MT6360_ISNK_ENMASK(subled->channel);
}
ret = regmap_update_bits(priv->regmap, MT6360_REG_RGBEN, enable_mask,
enable);
out:
mutex_unlock(&priv->lock);
return ret;
}
static int mt6360_isnk_brightness_set(struct led_classdev *lcdev,
enum led_brightness level)
{
struct mt6360_led *led = container_of(lcdev, struct mt6360_led, isnk);
struct mt6360_priv *priv = led->priv;
u32 enable_mask = MT6360_ISNK_ENMASK(led->led_no);
u32 val = level ? MT6360_ISNK_ENMASK(led->led_no) : 0;
int ret;
mutex_lock(&priv->lock);
ret = regmap_update_bits(priv->regmap, MT6360_REG_ISNK(led->led_no),
MT6360_ISNK_MASK, level);
if (ret)
goto out;
ret = regmap_update_bits(priv->regmap, MT6360_REG_RGBEN, enable_mask,
val);
out:
mutex_unlock(&priv->lock);
return ret;
}
static int mt6360_torch_brightness_set(struct led_classdev *lcdev,
enum led_brightness level)
{
struct mt6360_led *led =
container_of(lcdev, struct mt6360_led, flash.led_cdev);
struct mt6360_priv *priv = led->priv;
u32 enable_mask = MT6360_TORCHEN_MASK | MT6360_FLCSEN_MASK(led->led_no);
u32 val = level ? MT6360_FLCSEN_MASK(led->led_no) : 0;
u32 prev = priv->fled_torch_used, curr;
int ret;
mutex_lock(&priv->lock);
/*
* There is only one set of flash control logic; use this flag to check
* whether 'strobe' is currently in use.
*/
if (priv->fled_strobe_used) {
dev_warn(lcdev->dev, "Please disable strobe first [%d]\n",
priv->fled_strobe_used);
ret = -EBUSY;
goto unlock;
}
if (level)
curr = prev | BIT(led->led_no);
else
curr = prev & ~BIT(led->led_no);
if (curr)
val |= MT6360_TORCHEN_MASK;
if (level) {
ret = regmap_update_bits(priv->regmap,
MT6360_REG_FLEDITOR(led->led_no),
MT6360_ITORCH_MASK, level - 1);
if (ret)
goto unlock;
}
ret = regmap_update_bits(priv->regmap, MT6360_REG_FLEDEN, enable_mask,
val);
if (ret)
goto unlock;
priv->fled_torch_used = curr;
unlock:
mutex_unlock(&priv->lock);
return ret;
}
static int mt6360_flash_brightness_set(struct led_classdev_flash *fl_cdev,
u32 brightness)
{
/*
* Due to the current spike when turning on the flash, the brightness is
* kept by the LED framework.
* This empty function exists only to keep the led_classdev_flash ops
* check from failing at registration.
*/
return 0;
}
static int _mt6360_flash_brightness_set(struct led_classdev_flash *fl_cdev,
u32 brightness)
{
struct mt6360_led *led =
container_of(fl_cdev, struct mt6360_led, flash);
struct mt6360_priv *priv = led->priv;
struct led_flash_setting *s = &fl_cdev->brightness;
u32 val = (brightness - s->min) / s->step;
return regmap_update_bits(priv->regmap,
MT6360_REG_FLEDISTRB(led->led_no),
MT6360_ISTROBE_MASK, val);
}
static int mt6360_strobe_set(struct led_classdev_flash *fl_cdev, bool state)
{
struct mt6360_led *led =
container_of(fl_cdev, struct mt6360_led, flash);
struct mt6360_priv *priv = led->priv;
struct led_classdev *lcdev = &fl_cdev->led_cdev;
struct led_flash_setting *s = &fl_cdev->brightness;
u32 enable_mask = MT6360_STROBEN_MASK | MT6360_FLCSEN_MASK(led->led_no);
u32 val = state ? MT6360_FLCSEN_MASK(led->led_no) : 0;
u32 prev = priv->fled_strobe_used, curr;
int ret;
mutex_lock(&priv->lock);
/*
* There is only one set of flash control logic; use this flag to check
* whether 'torch' is currently in use.
*/
if (priv->fled_torch_used) {
dev_warn(lcdev->dev, "Please disable torch first [0x%x]\n",
priv->fled_torch_used);
ret = -EBUSY;
goto unlock;
}
if (state)
curr = prev | BIT(led->led_no);
else
curr = prev & ~BIT(led->led_no);
if (curr)
val |= MT6360_STROBEN_MASK;
ret = regmap_update_bits(priv->regmap, MT6360_REG_FLEDEN, enable_mask,
val);
if (ret) {
dev_err(lcdev->dev, "[%d] control current source %d fail\n",
led->led_no, state);
goto unlock;
}
/*
* If the flash needs to turn on, configure the flash current to ramp up
* to the setting value.
* Otherwise, always revert to the minimum one.
*/
ret = _mt6360_flash_brightness_set(fl_cdev, state ? s->val : s->min);
if (ret)
goto unlock;
/*
* When the flash turns on/off, the HW ramping up/down time is 5 ms/500 us,
* respectively.
*/
if (!prev && curr)
usleep_range(5000, 6000);
else if (prev && !curr)
udelay(500);
priv->fled_strobe_used = curr;
unlock:
mutex_unlock(&priv->lock);
return ret;
}
static int mt6360_strobe_get(struct led_classdev_flash *fl_cdev, bool *state)
{
struct mt6360_led *led =
container_of(fl_cdev, struct mt6360_led, flash);
struct mt6360_priv *priv = led->priv;
mutex_lock(&priv->lock);
*state = !!(priv->fled_strobe_used & BIT(led->led_no));
mutex_unlock(&priv->lock);
return 0;
}
static int mt6360_timeout_set(struct led_classdev_flash *fl_cdev, u32 timeout)
{
struct mt6360_led *led =
container_of(fl_cdev, struct mt6360_led, flash);
struct mt6360_priv *priv = led->priv;
struct led_flash_setting *s = &fl_cdev->timeout;
u32 val = (timeout - s->min) / s->step;
int ret;
mutex_lock(&priv->lock);
ret = regmap_update_bits(priv->regmap, MT6360_REG_STRBTO,
MT6360_STRBTO_MASK, val);
mutex_unlock(&priv->lock);
return ret;
}
static int mt6360_fault_get(struct led_classdev_flash *fl_cdev, u32 *fault)
{
struct mt6360_led *led =
container_of(fl_cdev, struct mt6360_led, flash);
struct mt6360_priv *priv = led->priv;
u16 fled_stat;
unsigned int chg_stat, strobe_timeout_mask, fled_short_mask;
u32 rfault = 0;
int ret;
mutex_lock(&priv->lock);
ret = regmap_read(priv->regmap, MT6360_REG_CHGSTAT2, &chg_stat);
if (ret)
goto unlock;
ret = regmap_raw_read(priv->regmap, MT6360_REG_FLEDSTAT1, &fled_stat,
sizeof(fled_stat));
if (ret)
goto unlock;
if (led->led_no == MT6360_LED_FLASH1) {
strobe_timeout_mask = MT6360_FLED1STRBTO_MASK;
fled_short_mask = MT6360_FLED1SHORT_MASK;
} else {
strobe_timeout_mask = MT6360_FLED2STRBTO_MASK;
fled_short_mask = MT6360_FLED2SHORT_MASK;
}
if (chg_stat & MT6360_FLEDCHGVINOVP_MASK)
rfault |= LED_FAULT_INPUT_VOLTAGE;
if (fled_stat & strobe_timeout_mask)
rfault |= LED_FAULT_TIMEOUT;
if (fled_stat & fled_short_mask)
rfault |= LED_FAULT_SHORT_CIRCUIT;
if (fled_stat & MT6360_FLEDLVF_MASK)
rfault |= LED_FAULT_UNDER_VOLTAGE;
*fault = rfault;
unlock:
mutex_unlock(&priv->lock);
return ret;
}
static const struct led_flash_ops mt6360_flash_ops = {
.flash_brightness_set = mt6360_flash_brightness_set,
.strobe_set = mt6360_strobe_set,
.strobe_get = mt6360_strobe_get,
.timeout_set = mt6360_timeout_set,
.fault_get = mt6360_fault_get,
};
static int mt6360_isnk_init_default_state(struct mt6360_led *led)
{
struct mt6360_priv *priv = led->priv;
unsigned int regval;
u32 level;
int ret;
ret = regmap_read(priv->regmap, MT6360_REG_ISNK(led->led_no), &regval);
if (ret)
return ret;
level = regval & MT6360_ISNK_MASK;
ret = regmap_read(priv->regmap, MT6360_REG_RGBEN, &regval);
if (ret)
return ret;
if (!(regval & MT6360_ISNK_ENMASK(led->led_no)))
level = LED_OFF;
switch (led->default_state) {
case LEDS_DEFSTATE_ON:
led->isnk.brightness = led->isnk.max_brightness;
break;
case LEDS_DEFSTATE_KEEP:
led->isnk.brightness = min(level, led->isnk.max_brightness);
break;
default:
led->isnk.brightness = LED_OFF;
}
return mt6360_isnk_brightness_set(&led->isnk, led->isnk.brightness);
}
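/*
 * Example (added for clarity; the register values are assumed): with
 * default-state = "keep", if the ISNK register reads level 6 and the
 * channel's enable bit in MT6360_REG_RGBEN is set, the boot-time brightness
 * is preserved as min(6, max_brightness); if the enable bit is clear, the
 * LED starts at LED_OFF.
 */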
static int mt6360_flash_init_default_state(struct mt6360_led *led)
{
struct led_classdev_flash *flash = &led->flash;
struct mt6360_priv *priv = led->priv;
u32 enable_mask = MT6360_TORCHEN_MASK | MT6360_FLCSEN_MASK(led->led_no);
u32 level;
unsigned int regval;
int ret;
ret = regmap_read(priv->regmap, MT6360_REG_FLEDITOR(led->led_no),
&regval);
if (ret)
return ret;
level = regval & MT6360_ITORCH_MASK;
ret = regmap_read(priv->regmap, MT6360_REG_FLEDEN, &regval);
if (ret)
return ret;
if ((regval & enable_mask) == enable_mask)
level += 1;
else
level = LED_OFF;
switch (led->default_state) {
case LEDS_DEFSTATE_ON:
flash->led_cdev.brightness = flash->led_cdev.max_brightness;
break;
case LEDS_DEFSTATE_KEEP:
flash->led_cdev.brightness =
min(level, flash->led_cdev.max_brightness);
break;
default:
flash->led_cdev.brightness = LED_OFF;
}
return mt6360_torch_brightness_set(&flash->led_cdev,
flash->led_cdev.brightness);
}
#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS)
static int mt6360_flash_external_strobe_set(struct v4l2_flash *v4l2_flash,
bool enable)
{
struct led_classdev_flash *flash = v4l2_flash->fled_cdev;
struct mt6360_led *led = container_of(flash, struct mt6360_led, flash);
struct mt6360_priv *priv = led->priv;
u32 mask = MT6360_FLCSEN_MASK(led->led_no);
u32 val = enable ? mask : 0;
int ret;
mutex_lock(&priv->lock);
ret = regmap_update_bits(priv->regmap, MT6360_REG_FLEDEN, mask, val);
if (ret)
goto unlock;
if (enable)
priv->fled_strobe_used |= BIT(led->led_no);
else
priv->fled_strobe_used &= ~BIT(led->led_no);
unlock:
mutex_unlock(&priv->lock);
return ret;
}
static const struct v4l2_flash_ops v4l2_flash_ops = {
.external_strobe_set = mt6360_flash_external_strobe_set,
};
static void mt6360_init_v4l2_flash_config(struct mt6360_led *led,
struct v4l2_flash_config *config)
{
struct led_classdev *lcdev;
struct led_flash_setting *s = &config->intensity;
lcdev = &led->flash.led_cdev;
s->min = MT6360_ITORCH_MINUA;
s->step = MT6360_ITORCH_STEPUA;
s->val = s->max = s->min + (lcdev->max_brightness - 1) * s->step;
config->has_external_strobe = 1;
strscpy(config->dev_name, lcdev->dev->kobj.name,
sizeof(config->dev_name));
config->flash_faults = LED_FAULT_SHORT_CIRCUIT | LED_FAULT_TIMEOUT |
LED_FAULT_INPUT_VOLTAGE |
LED_FAULT_UNDER_VOLTAGE;
}
#else
static const struct v4l2_flash_ops v4l2_flash_ops;
static void mt6360_init_v4l2_flash_config(struct mt6360_led *led,
struct v4l2_flash_config *config)
{
}
#endif
static int mt6360_led_register(struct device *parent, struct mt6360_led *led,
struct led_init_data *init_data)
{
struct mt6360_priv *priv = led->priv;
struct v4l2_flash_config v4l2_config = {0};
int ret;
if ((led->led_no == MT6360_LED_ISNK1 ||
led->led_no == MT6360_VIRTUAL_MULTICOLOR) &&
(priv->leds_active & BIT(MT6360_LED_ISNK1))) {
/*
* Change isink1 to SW control mode, disconnecting it from the
* charger state.
*/
ret = regmap_update_bits(priv->regmap, MT6360_REG_RGBEN,
MT6360_CHRINDSEL_MASK,
MT6360_CHRINDSEL_MASK);
if (ret) {
dev_err(parent, "Failed to config ISNK1 to SW mode\n");
return ret;
}
}
switch (led->led_no) {
case MT6360_VIRTUAL_MULTICOLOR:
ret = mt6360_mc_brightness_set(&led->mc.led_cdev, LED_OFF);
if (ret) {
dev_err(parent,
"Failed to init multicolor brightness\n");
return ret;
}
ret = devm_led_classdev_multicolor_register_ext(parent,
&led->mc, init_data);
if (ret) {
dev_err(parent, "Couldn't register multicolor\n");
return ret;
}
break;
case MT6360_LED_ISNK1 ... MT6360_LED_ISNKML:
ret = mt6360_isnk_init_default_state(led);
if (ret) {
dev_err(parent, "Failed to init %d isnk state\n",
led->led_no);
return ret;
}
ret = devm_led_classdev_register_ext(parent, &led->isnk,
init_data);
if (ret) {
dev_err(parent, "Couldn't register isink %d\n",
led->led_no);
return ret;
}
break;
default:
ret = mt6360_flash_init_default_state(led);
if (ret) {
dev_err(parent, "Failed to init %d flash state\n",
led->led_no);
return ret;
}
ret = devm_led_classdev_flash_register_ext(parent, &led->flash,
init_data);
if (ret) {
dev_err(parent, "Couldn't register flash %d\n",
led->led_no);
return ret;
}
mt6360_init_v4l2_flash_config(led, &v4l2_config);
led->v4l2_flash = v4l2_flash_init(parent, init_data->fwnode,
&led->flash,
&v4l2_flash_ops,
&v4l2_config);
if (IS_ERR(led->v4l2_flash)) {
dev_err(parent, "Failed to register %d v4l2 sd\n",
led->led_no);
return PTR_ERR(led->v4l2_flash);
}
}
return 0;
}
static u32 clamp_align(u32 val, u32 min, u32 max, u32 step)
{
u32 retval;
retval = clamp_val(val, min, max);
if (step > 1)
retval = rounddown(retval - min, step) + min;
return retval;
}
static int mt6360_init_isnk_properties(struct mt6360_led *led,
struct led_init_data *init_data)
{
struct led_classdev *lcdev;
struct mt6360_priv *priv = led->priv;
struct fwnode_handle *child;
u32 step_uA = MT6360_ISNKRGB_STEPUA, max_uA = MT6360_ISNKRGB_MAXUA;
u32 val;
int num_color = 0, ret;
if (led->led_no == MT6360_VIRTUAL_MULTICOLOR) {
struct mc_subled *sub_led;
sub_led = devm_kzalloc(priv->dev,
sizeof(*sub_led) * MULTICOLOR_NUM_CHANNELS, GFP_KERNEL);
if (!sub_led)
return -ENOMEM;
fwnode_for_each_child_node(init_data->fwnode, child) {
u32 reg, color;
ret = fwnode_property_read_u32(child, "reg", ®);
if (ret || reg > MT6360_LED_ISNK3 ||
priv->leds_active & BIT(reg))
return -EINVAL;
ret = fwnode_property_read_u32(child, "color", &color);
if (ret) {
dev_err(priv->dev,
"led %d, no color specified\n",
led->led_no);
return ret;
}
priv->leds_active |= BIT(reg);
sub_led[num_color].color_index = color;
sub_led[num_color].channel = reg;
num_color++;
}
if (num_color < 2) {
dev_err(priv->dev,
"Multicolor must include 2 or more led channel\n");
return -EINVAL;
}
led->mc.num_colors = num_color;
led->mc.subled_info = sub_led;
lcdev = &led->mc.led_cdev;
lcdev->brightness_set_blocking = mt6360_mc_brightness_set;
} else {
if (led->led_no == MT6360_LED_ISNKML) {
step_uA = MT6360_ISNKML_STEPUA;
max_uA = MT6360_ISNKML_MAXUA;
}
lcdev = &led->isnk;
lcdev->brightness_set_blocking = mt6360_isnk_brightness_set;
}
ret = fwnode_property_read_u32(init_data->fwnode, "led-max-microamp",
&val);
if (ret) {
dev_warn(priv->dev,
"Not specified led-max-microamp, config to the minimum\n");
val = step_uA;
} else
val = clamp_align(val, 0, max_uA, step_uA);
lcdev->max_brightness = val / step_uA;
fwnode_property_read_string(init_data->fwnode, "linux,default-trigger",
&lcdev->default_trigger);
return 0;
}
static int mt6360_init_flash_properties(struct mt6360_led *led,
struct led_init_data *init_data)
{
struct led_classdev_flash *flash = &led->flash;
struct led_classdev *lcdev = &flash->led_cdev;
struct mt6360_priv *priv = led->priv;
struct led_flash_setting *s;
u32 val;
int ret;
ret = fwnode_property_read_u32(init_data->fwnode, "led-max-microamp",
&val);
if (ret) {
dev_warn(priv->dev,
"Not specified led-max-microamp, config to the minimum\n");
val = MT6360_ITORCH_MINUA;
} else
val = clamp_align(val, MT6360_ITORCH_MINUA, MT6360_ITORCH_MAXUA,
MT6360_ITORCH_STEPUA);
lcdev->max_brightness =
(val - MT6360_ITORCH_MINUA) / MT6360_ITORCH_STEPUA + 1;
lcdev->brightness_set_blocking = mt6360_torch_brightness_set;
lcdev->flags |= LED_DEV_CAP_FLASH;
ret = fwnode_property_read_u32(init_data->fwnode, "flash-max-microamp",
&val);
if (ret) {
dev_warn(priv->dev,
"Not specified flash-max-microamp, config to the minimum\n");
val = MT6360_ISTRB_MINUA;
} else
val = clamp_align(val, MT6360_ISTRB_MINUA, MT6360_ISTRB_MAXUA,
MT6360_ISTRB_STEPUA);
s = &flash->brightness;
s->min = MT6360_ISTRB_MINUA;
s->step = MT6360_ISTRB_STEPUA;
s->val = s->max = val;
/*
* Always configure as min level when off to prevent flash current
* spike.
*/
ret = _mt6360_flash_brightness_set(flash, s->min);
if (ret)
return ret;
ret = fwnode_property_read_u32(init_data->fwnode,
"flash-max-timeout-us", &val);
if (ret) {
dev_warn(priv->dev,
"Not specified flash-max-timeout-us, config to the minimum\n");
val = MT6360_STRBTO_MINUS;
} else
val = clamp_align(val, MT6360_STRBTO_MINUS, MT6360_STRBTO_MAXUS,
MT6360_STRBTO_STEPUS);
s = &flash->timeout;
s->min = MT6360_STRBTO_MINUS;
s->step = MT6360_STRBTO_STEPUS;
s->val = s->max = val;
flash->ops = &mt6360_flash_ops;
return 0;
}
static void mt6360_v4l2_flash_release(struct mt6360_priv *priv)
{
int i;
for (i = 0; i < priv->leds_count; i++) {
struct mt6360_led *led = priv->leds + i;
if (led->v4l2_flash)
v4l2_flash_release(led->v4l2_flash);
}
}
static int mt6360_led_probe(struct platform_device *pdev)
{
struct mt6360_priv *priv;
struct fwnode_handle *child;
size_t count;
int i = 0, ret;
count = device_get_child_node_count(&pdev->dev);
if (!count || count > MT6360_MAX_LEDS) {
dev_err(&pdev->dev,
"No child node or node count over max led number %zu\n",
count);
return -EINVAL;
}
priv = devm_kzalloc(&pdev->dev,
struct_size(priv, leds, count), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->leds_count = count;
priv->dev = &pdev->dev;
mutex_init(&priv->lock);
priv->regmap = dev_get_regmap(pdev->dev.parent, NULL);
if (!priv->regmap) {
dev_err(&pdev->dev, "Failed to get parent regmap\n");
return -ENODEV;
}
device_for_each_child_node(&pdev->dev, child) {
struct mt6360_led *led = priv->leds + i;
struct led_init_data init_data = { .fwnode = child, };
u32 reg, led_color;
ret = fwnode_property_read_u32(child, "color", &led_color);
if (ret)
goto out_flash_release;
if (led_color == LED_COLOR_ID_RGB ||
led_color == LED_COLOR_ID_MULTI)
reg = MT6360_VIRTUAL_MULTICOLOR;
else {
ret = fwnode_property_read_u32(child, "reg", ®);
if (ret)
goto out_flash_release;
if (reg >= MT6360_MAX_LEDS) {
ret = -EINVAL;
goto out_flash_release;
}
}
if (priv->leds_active & BIT(reg)) {
ret = -EINVAL;
goto out_flash_release;
}
priv->leds_active |= BIT(reg);
led->led_no = reg;
led->priv = priv;
led->default_state = led_init_default_state_get(child);
if (reg == MT6360_VIRTUAL_MULTICOLOR ||
reg <= MT6360_LED_ISNKML)
ret = mt6360_init_isnk_properties(led, &init_data);
else
ret = mt6360_init_flash_properties(led, &init_data);
if (ret)
goto out_flash_release;
ret = mt6360_led_register(&pdev->dev, led, &init_data);
if (ret)
goto out_flash_release;
i++;
}
platform_set_drvdata(pdev, priv);
return 0;
out_flash_release:
mt6360_v4l2_flash_release(priv);
return ret;
}
static int mt6360_led_remove(struct platform_device *pdev)
{
struct mt6360_priv *priv = platform_get_drvdata(pdev);
mt6360_v4l2_flash_release(priv);
return 0;
}
static const struct of_device_id __maybe_unused mt6360_led_of_id[] = {
{ .compatible = "mediatek,mt6360-led", },
{}
};
MODULE_DEVICE_TABLE(of, mt6360_led_of_id);
static struct platform_driver mt6360_led_driver = {
.driver = {
.name = "mt6360-led",
.of_match_table = mt6360_led_of_id,
},
.probe = mt6360_led_probe,
.remove = mt6360_led_remove,
};
module_platform_driver(mt6360_led_driver);
MODULE_AUTHOR("Gene Chen <[email protected]>");
MODULE_DESCRIPTION("MT6360 LED Driver");
MODULE_LICENSE("GPL v2");
/* Source: linux-master, drivers/leds/flash/leds-mt6360.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* LED driver for Richtek RT8515 flash/torch white LEDs
* found on some Samsung mobile phones.
*
* This is a 1.5A Boost dual channel driver produced around 2011.
*
* The component lacks a datasheet, but in the schematic picture
* from the LG P970 service manual you can see the connections
* from the RT8515 to the LED, with two resistors connected
* from the pins "RFS" and "RTS" to ground.
*
* On the LG P970:
* RFS (resistance flash setting?) is 20 kOhm
* RTS (resistance torch setting?) is 39 kOhm
*
* Some sleuthing finds us the RT9387A which we have a datasheet for:
* https://static5.arrow.com/pdfs/2014/7/27/8/21/12/794/rtt_/manual/94download_ds.jspprt9387a.jspprt9387a.pdf
* This apparently works the same way so in theory this driver
* should cover RT9387A as well. This has not been tested, please
* update the compatibles if you add RT9387A support.
*
* Linus Walleij <[email protected]>
*/
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/led-class-flash.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <media/v4l2-flash-led-class.h>
/* We can provide 15-700 mA out to the LED */
#define RT8515_MIN_IOUT_MA 15
#define RT8515_MAX_IOUT_MA 700
/* The maximum intensity is 1-16 for flash and 1-100 for torch */
#define RT8515_FLASH_MAX 16
#define RT8515_TORCH_MAX 100
#define RT8515_TIMEOUT_US 250000U
#define RT8515_MAX_TIMEOUT_US 300000U
struct rt8515 {
struct led_classdev_flash fled;
struct device *dev;
struct v4l2_flash *v4l2_flash;
struct mutex lock;
struct regulator *reg;
struct gpio_desc *enable_torch;
struct gpio_desc *enable_flash;
struct timer_list powerdown_timer;
u32 max_timeout; /* Flash max timeout */
int flash_max_intensity;
int torch_max_intensity;
};
static struct rt8515 *to_rt8515(struct led_classdev_flash *fled)
{
return container_of(fled, struct rt8515, fled);
}
static void rt8515_gpio_led_off(struct rt8515 *rt)
{
gpiod_set_value(rt->enable_flash, 0);
gpiod_set_value(rt->enable_torch, 0);
}
static void rt8515_gpio_brightness_commit(struct gpio_desc *gpiod,
int brightness)
{
int i;
/*
* Toggling a GPIO line with a small delay increases the
* brightness one step at a time.
*/
for (i = 0; i < brightness; i++) {
gpiod_set_value(gpiod, 0);
udelay(1);
gpiod_set_value(gpiod, 1);
udelay(1);
}
}
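/*
 * Illustrative sketch (not part of the original driver): requesting torch
 * brightness 5 emits five low->high pulses with ~1 us spacing on the ENT
 * line, stepping the single-wire current setting up five levels:
 *
 * rt8515_gpio_brightness_commit(rt->enable_torch, 5);
 */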
/* This is setting the torch light level */
static int rt8515_led_brightness_set(struct led_classdev *led,
enum led_brightness brightness)
{
struct led_classdev_flash *fled = lcdev_to_flcdev(led);
struct rt8515 *rt = to_rt8515(fled);
mutex_lock(&rt->lock);
if (brightness == LED_OFF) {
/* Off */
rt8515_gpio_led_off(rt);
} else if (brightness < RT8515_TORCH_MAX) {
/* Step it up to movie mode brightness using the torch pin */
rt8515_gpio_brightness_commit(rt->enable_torch, brightness);
} else {
/* Max torch brightness requested */
gpiod_set_value(rt->enable_torch, 1);
}
mutex_unlock(&rt->lock);
return 0;
}
static int rt8515_led_flash_strobe_set(struct led_classdev_flash *fled,
bool state)
{
struct rt8515 *rt = to_rt8515(fled);
struct led_flash_setting *timeout = &fled->timeout;
int brightness = rt->flash_max_intensity;
mutex_lock(&rt->lock);
if (state) {
/* Enable LED flash mode and set brightness */
rt8515_gpio_brightness_commit(rt->enable_flash, brightness);
/* Set timeout */
mod_timer(&rt->powerdown_timer,
jiffies + usecs_to_jiffies(timeout->val));
} else {
del_timer_sync(&rt->powerdown_timer);
/* Turn the LED off */
rt8515_gpio_led_off(rt);
}
fled->led_cdev.brightness = LED_OFF;
/* After this the torch LED will be disabled */
mutex_unlock(&rt->lock);
return 0;
}
static int rt8515_led_flash_strobe_get(struct led_classdev_flash *fled,
bool *state)
{
struct rt8515 *rt = to_rt8515(fled);
*state = timer_pending(&rt->powerdown_timer);
return 0;
}
static int rt8515_led_flash_timeout_set(struct led_classdev_flash *fled,
u32 timeout)
{
/* The timeout is stored in the led-class-flash core */
return 0;
}
static const struct led_flash_ops rt8515_flash_ops = {
.strobe_set = rt8515_led_flash_strobe_set,
.strobe_get = rt8515_led_flash_strobe_get,
.timeout_set = rt8515_led_flash_timeout_set,
};
static void rt8515_powerdown_timer(struct timer_list *t)
{
struct rt8515 *rt = from_timer(rt, t, powerdown_timer);
/* Turn the LED off */
rt8515_gpio_led_off(rt);
}
static void rt8515_init_flash_timeout(struct rt8515 *rt)
{
struct led_classdev_flash *fled = &rt->fled;
struct led_flash_setting *s;
/* Init flash timeout setting */
s = &fled->timeout;
s->min = 1;
s->max = rt->max_timeout;
s->step = 1;
/*
* Set default timeout to RT8515_TIMEOUT_US except if
* max_timeout from DT is lower.
*/
s->val = min(rt->max_timeout, RT8515_TIMEOUT_US);
}
#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS)
/* Configure the V4L2 flash subdevice */
static void rt8515_init_v4l2_flash_config(struct rt8515 *rt,
struct v4l2_flash_config *v4l2_sd_cfg)
{
struct led_classdev *led = &rt->fled.led_cdev;
struct led_flash_setting *s;
strscpy(v4l2_sd_cfg->dev_name, led->dev->kobj.name,
sizeof(v4l2_sd_cfg->dev_name));
/*
* Init flash intensity setting: this is a linear scale
* capped from the device tree max intensity setting
* 1..flash_max_intensity
*/
s = &v4l2_sd_cfg->intensity;
s->min = 1;
s->max = rt->flash_max_intensity;
s->step = 1;
s->val = s->max;
}
static void rt8515_v4l2_flash_release(struct rt8515 *rt)
{
v4l2_flash_release(rt->v4l2_flash);
}
#else
static void rt8515_init_v4l2_flash_config(struct rt8515 *rt,
struct v4l2_flash_config *v4l2_sd_cfg)
{
}
static void rt8515_v4l2_flash_release(struct rt8515 *rt)
{
}
#endif
static void rt8515_determine_max_intensity(struct rt8515 *rt,
struct fwnode_handle *led,
const char *resistance,
const char *max_ua_prop, int hw_max,
int *max_intensity_setting)
{
u32 res = 0; /* Can't be 0 so 0 is undefined */
u32 ua;
u32 max_ma;
int max_intensity;
int ret;
fwnode_property_read_u32(rt->dev->fwnode, resistance, &res);
ret = fwnode_property_read_u32(led, max_ua_prop, &ua);
/* Missing info in DT, OK go with hardware maxima */
if (ret || res == 0) {
dev_err(rt->dev,
"either %s or %s missing from DT, using HW max\n",
resistance, max_ua_prop);
max_ma = RT8515_MAX_IOUT_MA;
max_intensity = hw_max;
goto out_assign_max;
}
/*
* Formula from the datasheet, this is the maximum current
* defined by the hardware.
*/
max_ma = (5500 * 1000) / res;
/*
* Calculate max intensity (linear scaling)
* Formula is ((ua / 1000) / max_ma) * 100, then simplified
*/
max_intensity = (ua / 10) / max_ma;
dev_info(rt->dev,
"current restricted from %u to %u mA, max intensity %d/100\n",
max_ma, (ua / 1000), max_intensity);
out_assign_max:
dev_info(rt->dev, "max intensity %d/%d = %d mA\n",
max_intensity, hw_max, max_ma);
*max_intensity_setting = max_intensity;
}
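/*
 * Worked example (illustrative; the 39 kOhm matches the LG P970 RTS value
 * quoted in the header comment, while the 70000 uA "led-max-microamp" is an
 * assumed DT value):
 *
 * max_ma = (5500 * 1000) / 39000 = 141 mA
 * max_intensity = (70000 / 10) / 141 = 49 of the 1..100 torch scale
 *
 * so torch brightness would be capped at roughly half of the HW scale.
 */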
static int rt8515_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fwnode_handle *child;
struct rt8515 *rt;
struct led_classdev *led;
struct led_classdev_flash *fled;
struct led_init_data init_data = {};
struct v4l2_flash_config v4l2_sd_cfg = {};
int ret;
rt = devm_kzalloc(dev, sizeof(*rt), GFP_KERNEL);
if (!rt)
return -ENOMEM;
rt->dev = dev;
fled = &rt->fled;
led = &fled->led_cdev;
/* ENF - Enable Flash line */
rt->enable_flash = devm_gpiod_get(dev, "enf", GPIOD_OUT_LOW);
if (IS_ERR(rt->enable_flash))
return dev_err_probe(dev, PTR_ERR(rt->enable_flash),
"cannot get ENF (enable flash) GPIO\n");
/* ENT - Enable Torch line */
rt->enable_torch = devm_gpiod_get(dev, "ent", GPIOD_OUT_LOW);
if (IS_ERR(rt->enable_torch))
return dev_err_probe(dev, PTR_ERR(rt->enable_torch),
"cannot get ENT (enable torch) GPIO\n");
child = fwnode_get_next_available_child_node(dev->fwnode, NULL);
if (!child) {
dev_err(dev,
"No fwnode child node found for connected LED.\n");
return -EINVAL;
}
init_data.fwnode = child;
rt8515_determine_max_intensity(rt, child, "richtek,rfs-ohms",
"flash-max-microamp",
RT8515_FLASH_MAX,
&rt->flash_max_intensity);
rt8515_determine_max_intensity(rt, child, "richtek,rts-ohms",
"led-max-microamp",
RT8515_TORCH_MAX,
&rt->torch_max_intensity);
ret = fwnode_property_read_u32(child, "flash-max-timeout-us",
&rt->max_timeout);
if (ret) {
rt->max_timeout = RT8515_MAX_TIMEOUT_US;
dev_warn(dev,
"flash-max-timeout-us property missing\n");
}
timer_setup(&rt->powerdown_timer, rt8515_powerdown_timer, 0);
rt8515_init_flash_timeout(rt);
fled->ops = &rt8515_flash_ops;
led->max_brightness = rt->torch_max_intensity;
led->brightness_set_blocking = rt8515_led_brightness_set;
led->flags |= LED_CORE_SUSPENDRESUME | LED_DEV_CAP_FLASH;
mutex_init(&rt->lock);
platform_set_drvdata(pdev, rt);
ret = devm_led_classdev_flash_register_ext(dev, fled, &init_data);
if (ret) {
fwnode_handle_put(child);
mutex_destroy(&rt->lock);
dev_err(dev, "can't register LED %s\n", led->name);
return ret;
}
rt8515_init_v4l2_flash_config(rt, &v4l2_sd_cfg);
/* Create a V4L2 Flash device if V4L2 flash is enabled */
rt->v4l2_flash = v4l2_flash_init(dev, child, fled, NULL, &v4l2_sd_cfg);
if (IS_ERR(rt->v4l2_flash)) {
ret = PTR_ERR(rt->v4l2_flash);
dev_err(dev, "failed to register V4L2 flash device (%d)\n",
ret);
/*
* Continue without the V4L2 flash
* (we still have the classdev)
*/
}
fwnode_handle_put(child);
return 0;
}
static int rt8515_remove(struct platform_device *pdev)
{
struct rt8515 *rt = platform_get_drvdata(pdev);
rt8515_v4l2_flash_release(rt);
del_timer_sync(&rt->powerdown_timer);
mutex_destroy(&rt->lock);
return 0;
}
static const struct of_device_id rt8515_match[] = {
{ .compatible = "richtek,rt8515", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rt8515_match);
static struct platform_driver rt8515_driver = {
.driver = {
.name = "rt8515",
.of_match_table = rt8515_match,
},
.probe = rt8515_probe,
.remove = rt8515_remove,
};
module_platform_driver(rt8515_driver);
MODULE_AUTHOR("Linus Walleij <[email protected]>");
MODULE_DESCRIPTION("Richtek RT8515 LED driver");
MODULE_LICENSE("GPL");
/* Source: linux-master, drivers/leds/flash/leds-rt8515.c */
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/bitops.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/led-class-flash.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <media/v4l2-flash-led-class.h>
#define RT4505_REG_RESET 0x0
#define RT4505_REG_CONFIG 0x8
#define RT4505_REG_ILED 0x9
#define RT4505_REG_ENABLE 0xA
#define RT4505_REG_FLAGS 0xB
#define RT4505_RESET_MASK BIT(7)
#define RT4505_FLASHTO_MASK GENMASK(2, 0)
#define RT4505_ITORCH_MASK GENMASK(7, 5)
#define RT4505_ITORCH_SHIFT 5
#define RT4505_IFLASH_MASK GENMASK(4, 0)
#define RT4505_ENABLE_MASK GENMASK(5, 0)
#define RT4505_TORCH_SET (BIT(0) | BIT(4))
#define RT4505_FLASH_SET (BIT(0) | BIT(1) | BIT(2) | BIT(4))
#define RT4505_EXT_FLASH_SET (BIT(0) | BIT(1) | BIT(4) | BIT(5))
#define RT4505_FLASH_GET (BIT(0) | BIT(1) | BIT(4))
#define RT4505_OVP_MASK BIT(3)
#define RT4505_SHORT_MASK BIT(2)
#define RT4505_OTP_MASK BIT(1)
#define RT4505_TIMEOUT_MASK BIT(0)
#define RT4505_ITORCH_MINUA 46000
#define RT4505_ITORCH_MAXUA 375000
#define RT4505_ITORCH_STPUA 47000
#define RT4505_IFLASH_MINUA 93750
#define RT4505_IFLASH_MAXUA 1500000
#define RT4505_IFLASH_STPUA 93750
#define RT4505_FLASHTO_MINUS 100000
#define RT4505_FLASHTO_MAXUS 800000
#define RT4505_FLASHTO_STPUS 100000
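/*
 * Worked example (illustrative, from the masks and ranges above): the torch
 * current field is 3 bits wide (GENMASK(7, 5)), giving 8 steps starting at
 * 46000 uA in 47000 uA increments; brightness level N (1..8) is encoded as
 *
 * (N - 1) << RT4505_ITORCH_SHIFT
 *
 * e.g. level 8 corresponds to 46000 + 7 * 47000 = 375000 uA, which is
 * RT4505_ITORCH_MAXUA.
 */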
struct rt4505_priv {
struct device *dev;
struct regmap *regmap;
struct mutex lock;
struct led_classdev_flash flash;
struct v4l2_flash *v4l2_flash;
};
static int rt4505_torch_brightness_set(struct led_classdev *lcdev,
enum led_brightness level)
{
struct rt4505_priv *priv =
container_of(lcdev, struct rt4505_priv, flash.led_cdev);
u32 val = 0;
int ret;
mutex_lock(&priv->lock);
if (level != LED_OFF) {
ret = regmap_update_bits(priv->regmap,
RT4505_REG_ILED, RT4505_ITORCH_MASK,
(level - 1) << RT4505_ITORCH_SHIFT);
if (ret)
goto unlock;
val = RT4505_TORCH_SET;
}
ret = regmap_update_bits(priv->regmap, RT4505_REG_ENABLE,
RT4505_ENABLE_MASK, val);
unlock:
mutex_unlock(&priv->lock);
return ret;
}
static enum led_brightness rt4505_torch_brightness_get(
struct led_classdev *lcdev)
{
struct rt4505_priv *priv =
container_of(lcdev, struct rt4505_priv, flash.led_cdev);
u32 val;
int ret;
mutex_lock(&priv->lock);
ret = regmap_read(priv->regmap, RT4505_REG_ENABLE, &val);
if (ret) {
dev_err(lcdev->dev, "Failed to get LED enable\n");
ret = LED_OFF;
goto unlock;
}
if ((val & RT4505_ENABLE_MASK) != RT4505_TORCH_SET) {
ret = LED_OFF;
goto unlock;
}
ret = regmap_read(priv->regmap, RT4505_REG_ILED, &val);
if (ret) {
dev_err(lcdev->dev, "Failed to get LED brightness\n");
ret = LED_OFF;
goto unlock;
}
ret = ((val & RT4505_ITORCH_MASK) >> RT4505_ITORCH_SHIFT) + 1;
unlock:
mutex_unlock(&priv->lock);
return ret;
}
static int rt4505_flash_brightness_set(struct led_classdev_flash *fled_cdev,
u32 brightness)
{
struct rt4505_priv *priv =
container_of(fled_cdev, struct rt4505_priv, flash);
struct led_flash_setting *s = &fled_cdev->brightness;
u32 val = (brightness - s->min) / s->step;
int ret;
mutex_lock(&priv->lock);
ret = regmap_update_bits(priv->regmap, RT4505_REG_ILED,
RT4505_IFLASH_MASK, val);
mutex_unlock(&priv->lock);
return ret;
}
static int rt4505_flash_strobe_set(struct led_classdev_flash *fled_cdev,
bool state)
{
struct rt4505_priv *priv =
container_of(fled_cdev, struct rt4505_priv, flash);
u32 val = state ? RT4505_FLASH_SET : 0;
int ret;
mutex_lock(&priv->lock);
ret = regmap_update_bits(priv->regmap, RT4505_REG_ENABLE,
RT4505_ENABLE_MASK, val);
mutex_unlock(&priv->lock);
return ret;
}
static int rt4505_flash_strobe_get(struct led_classdev_flash *fled_cdev,
bool *state)
{
struct rt4505_priv *priv =
container_of(fled_cdev, struct rt4505_priv, flash);
u32 val;
int ret;
mutex_lock(&priv->lock);
ret = regmap_read(priv->regmap, RT4505_REG_ENABLE, &val);
if (ret)
goto unlock;
*state = (val & RT4505_FLASH_GET) == RT4505_FLASH_GET;
unlock:
mutex_unlock(&priv->lock);
return ret;
}
static int rt4505_flash_timeout_set(struct led_classdev_flash *fled_cdev,
u32 timeout)
{
struct rt4505_priv *priv =
container_of(fled_cdev, struct rt4505_priv, flash);
struct led_flash_setting *s = &fled_cdev->timeout;
u32 val = (timeout - s->min) / s->step;
int ret;
mutex_lock(&priv->lock);
ret = regmap_update_bits(priv->regmap, RT4505_REG_CONFIG,
RT4505_FLASHTO_MASK, val);
mutex_unlock(&priv->lock);
return ret;
}
static int rt4505_fault_get(struct led_classdev_flash *fled_cdev, u32 *fault)
{
struct rt4505_priv *priv =
container_of(fled_cdev, struct rt4505_priv, flash);
u32 val, led_faults = 0;
int ret;
ret = regmap_read(priv->regmap, RT4505_REG_FLAGS, &val);
if (ret)
return ret;
if (val & RT4505_OVP_MASK)
led_faults |= LED_FAULT_OVER_VOLTAGE;
if (val & RT4505_SHORT_MASK)
led_faults |= LED_FAULT_SHORT_CIRCUIT;
if (val & RT4505_OTP_MASK)
led_faults |= LED_FAULT_OVER_TEMPERATURE;
if (val & RT4505_TIMEOUT_MASK)
led_faults |= LED_FAULT_TIMEOUT;
*fault = led_faults;
return 0;
}
static const struct led_flash_ops rt4505_flash_ops = {
.flash_brightness_set = rt4505_flash_brightness_set,
.strobe_set = rt4505_flash_strobe_set,
.strobe_get = rt4505_flash_strobe_get,
.timeout_set = rt4505_flash_timeout_set,
.fault_get = rt4505_fault_get,
};
static bool rt4505_is_accessible_reg(struct device *dev, unsigned int reg)
{
if (reg == RT4505_REG_RESET ||
(reg >= RT4505_REG_CONFIG && reg <= RT4505_REG_FLAGS))
return true;
return false;
}
static const struct regmap_config rt4505_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = RT4505_REG_FLAGS,
.readable_reg = rt4505_is_accessible_reg,
.writeable_reg = rt4505_is_accessible_reg,
};
#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS)
static int rt4505_flash_external_strobe_set(struct v4l2_flash *v4l2_flash,
bool enable)
{
struct led_classdev_flash *flash = v4l2_flash->fled_cdev;
struct rt4505_priv *priv =
container_of(flash, struct rt4505_priv, flash);
u32 val = enable ? RT4505_EXT_FLASH_SET : 0;
int ret;
mutex_lock(&priv->lock);
ret = regmap_update_bits(priv->regmap, RT4505_REG_ENABLE,
RT4505_ENABLE_MASK, val);
mutex_unlock(&priv->lock);
return ret;
}
static const struct v4l2_flash_ops v4l2_flash_ops = {
.external_strobe_set = rt4505_flash_external_strobe_set,
};
static void rt4505_init_v4l2_config(struct rt4505_priv *priv,
struct v4l2_flash_config *config)
{
struct led_classdev_flash *flash = &priv->flash;
struct led_classdev *lcdev = &flash->led_cdev;
struct led_flash_setting *s;
strscpy(config->dev_name, lcdev->dev->kobj.name,
sizeof(config->dev_name));
s = &config->intensity;
s->min = RT4505_ITORCH_MINUA;
s->step = RT4505_ITORCH_STPUA;
s->max = s->val = s->min + (lcdev->max_brightness - 1) * s->step;
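	/*
	 * Example: with max_brightness = 8 (the full torch range), the
	 * intensity reported to V4L2 is 46000..375000 uA in 47000 uA
	 * steps, mirroring the torch levels.
	 */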
config->flash_faults = LED_FAULT_OVER_VOLTAGE |
LED_FAULT_SHORT_CIRCUIT |
LED_FAULT_LED_OVER_TEMPERATURE |
LED_FAULT_TIMEOUT;
	config->has_external_strobe = true;
}
#else
static const struct v4l2_flash_ops v4l2_flash_ops;
static void rt4505_init_v4l2_config(struct rt4505_priv *priv,
struct v4l2_flash_config *config)
{
}
#endif
static void rt4505_init_flash_properties(struct rt4505_priv *priv,
struct fwnode_handle *child)
{
struct led_classdev_flash *flash = &priv->flash;
struct led_classdev *lcdev = &flash->led_cdev;
struct led_flash_setting *s;
u32 val;
int ret;
ret = fwnode_property_read_u32(child, "led-max-microamp", &val);
if (ret) {
dev_warn(priv->dev, "led-max-microamp DT property missing\n");
val = RT4505_ITORCH_MINUA;
	} else {
		val = clamp_val(val, RT4505_ITORCH_MINUA, RT4505_ITORCH_MAXUA);
	}
lcdev->max_brightness =
(val - RT4505_ITORCH_MINUA) / RT4505_ITORCH_STPUA + 1;
lcdev->brightness_set_blocking = rt4505_torch_brightness_set;
lcdev->brightness_get = rt4505_torch_brightness_get;
lcdev->flags |= LED_DEV_CAP_FLASH;
ret = fwnode_property_read_u32(child, "flash-max-microamp", &val);
if (ret) {
dev_warn(priv->dev, "flash-max-microamp DT property missing\n");
val = RT4505_IFLASH_MINUA;
	} else {
		val = clamp_val(val, RT4505_IFLASH_MINUA, RT4505_IFLASH_MAXUA);
	}
s = &flash->brightness;
s->min = RT4505_IFLASH_MINUA;
s->step = RT4505_IFLASH_STPUA;
s->max = s->val = val;
ret = fwnode_property_read_u32(child, "flash-max-timeout-us", &val);
if (ret) {
dev_warn(priv->dev,
"flash-max-timeout-us DT property missing\n");
val = RT4505_FLASHTO_MINUS;
	} else {
		val = clamp_val(val, RT4505_FLASHTO_MINUS,
				RT4505_FLASHTO_MAXUS);
	}
s = &flash->timeout;
s->min = RT4505_FLASHTO_MINUS;
s->step = RT4505_FLASHTO_STPUS;
s->max = s->val = val;
flash->ops = &rt4505_flash_ops;
}
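/*
 * Illustrative child node for the properties parsed above (binding details
 * and values are hypothetical, not taken from this file):
 *
 *	flash-led {
 *		led-max-microamp = <375000>;
 *		flash-max-microamp = <1500000>;
 *		flash-max-timeout-us = <800000>;
 *	};
 */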
static int rt4505_probe(struct i2c_client *client)
{
struct rt4505_priv *priv;
struct fwnode_handle *child;
struct led_init_data init_data = {};
struct v4l2_flash_config v4l2_config = {};
int ret;
priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = &client->dev;
mutex_init(&priv->lock);
priv->regmap = devm_regmap_init_i2c(client, &rt4505_regmap_config);
if (IS_ERR(priv->regmap)) {
dev_err(priv->dev, "Failed to allocate register map\n");
return PTR_ERR(priv->regmap);
}
ret = regmap_write(priv->regmap, RT4505_REG_RESET, RT4505_RESET_MASK);
if (ret) {
dev_err(priv->dev, "Failed to reset registers\n");
return ret;
}
child = fwnode_get_next_available_child_node(client->dev.fwnode, NULL);
if (!child) {
dev_err(priv->dev, "Failed to get child node\n");
return -EINVAL;
}
init_data.fwnode = child;
rt4505_init_flash_properties(priv, child);
ret = devm_led_classdev_flash_register_ext(priv->dev, &priv->flash,
&init_data);
if (ret) {
dev_err(priv->dev, "Failed to register flash\n");
return ret;
}
rt4505_init_v4l2_config(priv, &v4l2_config);
priv->v4l2_flash = v4l2_flash_init(priv->dev, init_data.fwnode,
&priv->flash, &v4l2_flash_ops,
&v4l2_config);
if (IS_ERR(priv->v4l2_flash)) {
dev_err(priv->dev, "Failed to register v4l2 flash\n");
return PTR_ERR(priv->v4l2_flash);
}
i2c_set_clientdata(client, priv);
return 0;
}
static void rt4505_remove(struct i2c_client *client)
{
struct rt4505_priv *priv = i2c_get_clientdata(client);
v4l2_flash_release(priv->v4l2_flash);
}
static void rt4505_shutdown(struct i2c_client *client)
{
struct rt4505_priv *priv = i2c_get_clientdata(client);
/* Reset registers to make sure all off before shutdown */
regmap_write(priv->regmap, RT4505_REG_RESET, RT4505_RESET_MASK);
}
static const struct of_device_id __maybe_unused rt4505_leds_match[] = {
{ .compatible = "richtek,rt4505", },
{}
};
MODULE_DEVICE_TABLE(of, rt4505_leds_match);
static struct i2c_driver rt4505_driver = {
.driver = {
.name = "rt4505",
.of_match_table = of_match_ptr(rt4505_leds_match),
},
.probe = rt4505_probe,
.remove = rt4505_remove,
.shutdown = rt4505_shutdown,
};
module_i2c_driver(rt4505_driver);
MODULE_AUTHOR("ChiYuan Huang <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/leds/flash/leds-rt4505.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* LED Flash class driver for the flash cell of max77693 mfd.
*
* Copyright (C) 2015, Samsung Electronics Co., Ltd.
*
* Authors: Jacek Anaszewski <[email protected]>
* Andrzej Hajda <[email protected]>
*/
#include <linux/led-class-flash.h>
#include <linux/mfd/max77693.h>
#include <linux/mfd/max77693-common.h>
#include <linux/mfd/max77693-private.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <media/v4l2-flash-led-class.h>
#define MODE_OFF 0
#define MODE_FLASH(a) (1 << (a))
#define MODE_TORCH(a) (1 << (2 + (a)))
#define MODE_FLASH_EXTERNAL(a) (1 << (4 + (a)))
#define MODE_FLASH_MASK (MODE_FLASH(FLED1) | MODE_FLASH(FLED2) | \
MODE_FLASH_EXTERNAL(FLED1) | \
MODE_FLASH_EXTERNAL(FLED2))
#define MODE_TORCH_MASK (MODE_TORCH(FLED1) | MODE_TORCH(FLED2))
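/*
 * With FLED1 = 0 and FLED2 = 1 (see the enum below), the mode flags occupy
 * bits 0..5: SW-triggered flash on bits 0/1, torch on bits 2/3 and
 * externally triggered flash on bits 4/5.
 */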
#define FLED1_IOUT (1 << 0)
#define FLED2_IOUT (1 << 1)
enum max77693_fled {
FLED1,
FLED2,
};
enum max77693_led_mode {
FLASH,
TORCH,
};
struct max77693_led_config_data {
const char *label[2];
u32 iout_torch_max[2];
u32 iout_flash_max[2];
u32 flash_timeout_max[2];
u32 num_leds;
u32 boost_mode;
u32 boost_vout;
u32 low_vsys;
};
struct max77693_sub_led {
/* corresponding FLED output identifier */
int fled_id;
/* corresponding LED Flash class device */
struct led_classdev_flash fled_cdev;
/* V4L2 Flash device */
struct v4l2_flash *v4l2_flash;
/* brightness cache */
unsigned int torch_brightness;
/* flash timeout cache */
unsigned int flash_timeout;
/* flash faults that may have occurred */
u32 flash_faults;
};
struct max77693_led_device {
/* parent mfd regmap */
struct regmap *regmap;
/* platform device data */
struct platform_device *pdev;
/* secures access to the device */
struct mutex lock;
/* sub led data */
struct max77693_sub_led sub_leds[2];
/* maximum torch current values for FLED outputs */
u32 iout_torch_max[2];
/* maximum flash current values for FLED outputs */
u32 iout_flash_max[2];
/* current flash timeout cache */
unsigned int current_flash_timeout;
/* ITORCH register cache */
u8 torch_iout_reg;
/* mode of fled outputs */
unsigned int mode_flags;
/* recently strobed fled */
int strobing_sub_led_id;
/* bitmask of FLED outputs use state (bit 0. - FLED1, bit 1. - FLED2) */
u8 fled_mask;
/* FLED modes that can be set */
u8 allowed_modes;
/* arrangement of current outputs */
bool iout_joint;
};
static u8 max77693_led_iout_to_reg(u32 ua)
{
if (ua < FLASH_IOUT_MIN)
ua = FLASH_IOUT_MIN;
return (ua - FLASH_IOUT_MIN) / FLASH_IOUT_STEP;
}
static u8 max77693_flash_timeout_to_reg(u32 us)
{
return (us - FLASH_TIMEOUT_MIN) / FLASH_TIMEOUT_STEP;
}
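/*
 * Both helpers rely on the MIN/STEP constants from the mfd header and
 * truncate, so a value between two steps programs the lower step.
 * max77693_led_iout_to_reg() additionally clamps inputs below
 * FLASH_IOUT_MIN so the subtraction cannot underflow.
 */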
static inline struct max77693_sub_led *flcdev_to_sub_led(
struct led_classdev_flash *fled_cdev)
{
return container_of(fled_cdev, struct max77693_sub_led, fled_cdev);
}
static inline struct max77693_led_device *sub_led_to_led(
struct max77693_sub_led *sub_led)
{
return container_of(sub_led, struct max77693_led_device,
sub_leds[sub_led->fled_id]);
}
static inline u8 max77693_led_vsys_to_reg(u32 mv)
{
return ((mv - MAX_FLASH1_VSYS_MIN) / MAX_FLASH1_VSYS_STEP) << 2;
}
static inline u8 max77693_led_vout_to_reg(u32 mv)
{
return (mv - FLASH_VOUT_MIN) / FLASH_VOUT_STEP + FLASH_VOUT_RMIN;
}
static inline bool max77693_fled_used(struct max77693_led_device *led,
int fled_id)
{
u8 fled_bit = (fled_id == FLED1) ? FLED1_IOUT : FLED2_IOUT;
return led->fled_mask & fled_bit;
}
static int max77693_set_mode_reg(struct max77693_led_device *led, u8 mode)
{
struct regmap *rmap = led->regmap;
int ret, v = 0, i;
for (i = FLED1; i <= FLED2; ++i) {
if (mode & MODE_TORCH(i))
v |= FLASH_EN_ON << TORCH_EN_SHIFT(i);
if (mode & MODE_FLASH(i)) {
v |= FLASH_EN_ON << FLASH_EN_SHIFT(i);
} else if (mode & MODE_FLASH_EXTERNAL(i)) {
v |= FLASH_EN_FLASH << FLASH_EN_SHIFT(i);
/*
			 * Enable hw triggering also for torch mode, as some
			 * camera sensors use the torch LED to gauge ambient
			 * light conditions before strobing the flash.
*/
v |= FLASH_EN_TORCH << TORCH_EN_SHIFT(i);
}
}
/* Reset the register only prior setting flash modes */
if (mode & ~(MODE_TORCH(FLED1) | MODE_TORCH(FLED2))) {
ret = regmap_write(rmap, MAX77693_LED_REG_FLASH_EN, 0);
if (ret < 0)
return ret;
}
return regmap_write(rmap, MAX77693_LED_REG_FLASH_EN, v);
}
static int max77693_add_mode(struct max77693_led_device *led, u8 mode)
{
u8 new_mode_flags;
int i, ret;
if (led->iout_joint)
/* Span the mode on FLED2 for joint iouts case */
mode |= (mode << 1);
/*
* FLASH_EXTERNAL mode activates FLASHEN and TORCHEN pins in the device.
* Corresponding register bit fields interfere with SW triggered modes,
* thus clear them to ensure proper device configuration.
*/
for (i = FLED1; i <= FLED2; ++i)
if (mode & MODE_FLASH_EXTERNAL(i))
led->mode_flags &= (~MODE_TORCH(i) & ~MODE_FLASH(i));
new_mode_flags = mode | led->mode_flags;
new_mode_flags &= led->allowed_modes;
if (new_mode_flags ^ led->mode_flags)
led->mode_flags = new_mode_flags;
else
return 0;
ret = max77693_set_mode_reg(led, led->mode_flags);
if (ret < 0)
return ret;
/*
* Clear flash mode flag after setting the mode to avoid spurious flash
* strobing on each subsequent torch mode setting.
*/
if (mode & MODE_FLASH_MASK)
led->mode_flags &= ~mode;
return ret;
}
static int max77693_clear_mode(struct max77693_led_device *led,
u8 mode)
{
if (led->iout_joint)
/* Clear mode also on FLED2 for joint iouts case */
mode |= (mode << 1);
led->mode_flags &= ~mode;
return max77693_set_mode_reg(led, led->mode_flags);
}
static void max77693_add_allowed_modes(struct max77693_led_device *led,
int fled_id, enum max77693_led_mode mode)
{
if (mode == FLASH)
led->allowed_modes |= (MODE_FLASH(fled_id) |
MODE_FLASH_EXTERNAL(fled_id));
else
led->allowed_modes |= MODE_TORCH(fled_id);
}
static void max77693_distribute_currents(struct max77693_led_device *led,
int fled_id, enum max77693_led_mode mode,
u32 micro_amp, u32 iout_max[2], u32 iout[2])
{
if (!led->iout_joint) {
iout[fled_id] = micro_amp;
max77693_add_allowed_modes(led, fled_id, mode);
return;
}
iout[FLED1] = min(micro_amp, iout_max[FLED1]);
iout[FLED2] = micro_amp - iout[FLED1];
if (mode == FLASH)
led->allowed_modes &= ~MODE_FLASH_MASK;
else
led->allowed_modes &= ~MODE_TORCH_MASK;
max77693_add_allowed_modes(led, FLED1, mode);
if (iout[FLED2])
max77693_add_allowed_modes(led, FLED2, mode);
}
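/*
 * Joint-output example (hypothetical values): a 500000 uA request with
 * iout_max[FLED1] = 300000 uA is split into 300000 uA on FLED1 and the
 * remaining 200000 uA on FLED2.
 */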
static int max77693_set_torch_current(struct max77693_led_device *led,
int fled_id, u32 micro_amp)
{
struct regmap *rmap = led->regmap;
u8 iout1_reg = 0, iout2_reg = 0;
u32 iout[2];
max77693_distribute_currents(led, fled_id, TORCH, micro_amp,
led->iout_torch_max, iout);
if (fled_id == FLED1 || led->iout_joint) {
iout1_reg = max77693_led_iout_to_reg(iout[FLED1]);
led->torch_iout_reg &= TORCH_IOUT_MASK(TORCH_IOUT2_SHIFT);
}
if (fled_id == FLED2 || led->iout_joint) {
iout2_reg = max77693_led_iout_to_reg(iout[FLED2]);
led->torch_iout_reg &= TORCH_IOUT_MASK(TORCH_IOUT1_SHIFT);
}
led->torch_iout_reg |= ((iout1_reg << TORCH_IOUT1_SHIFT) |
(iout2_reg << TORCH_IOUT2_SHIFT));
return regmap_write(rmap, MAX77693_LED_REG_ITORCH,
led->torch_iout_reg);
}
static int max77693_set_flash_current(struct max77693_led_device *led,
int fled_id,
u32 micro_amp)
{
struct regmap *rmap = led->regmap;
u8 iout1_reg, iout2_reg;
u32 iout[2];
int ret = -EINVAL;
max77693_distribute_currents(led, fled_id, FLASH, micro_amp,
led->iout_flash_max, iout);
if (fled_id == FLED1 || led->iout_joint) {
iout1_reg = max77693_led_iout_to_reg(iout[FLED1]);
ret = regmap_write(rmap, MAX77693_LED_REG_IFLASH1,
iout1_reg);
if (ret < 0)
return ret;
}
if (fled_id == FLED2 || led->iout_joint) {
iout2_reg = max77693_led_iout_to_reg(iout[FLED2]);
ret = regmap_write(rmap, MAX77693_LED_REG_IFLASH2,
iout2_reg);
}
return ret;
}
static int max77693_set_timeout(struct max77693_led_device *led, u32 microsec)
{
struct regmap *rmap = led->regmap;
u8 v;
int ret;
v = max77693_flash_timeout_to_reg(microsec) | FLASH_TMR_LEVEL;
ret = regmap_write(rmap, MAX77693_LED_REG_FLASH_TIMER, v);
if (ret < 0)
return ret;
led->current_flash_timeout = microsec;
return 0;
}
static int max77693_get_strobe_status(struct max77693_led_device *led,
bool *state)
{
struct regmap *rmap = led->regmap;
unsigned int v;
int ret;
ret = regmap_read(rmap, MAX77693_LED_REG_FLASH_STATUS, &v);
if (ret < 0)
return ret;
*state = v & FLASH_STATUS_FLASH_ON;
return ret;
}
static int max77693_get_flash_faults(struct max77693_sub_led *sub_led)
{
struct max77693_led_device *led = sub_led_to_led(sub_led);
struct regmap *rmap = led->regmap;
unsigned int v;
u8 fault_open_mask, fault_short_mask;
int ret;
sub_led->flash_faults = 0;
if (led->iout_joint) {
fault_open_mask = FLASH_INT_FLED1_OPEN | FLASH_INT_FLED2_OPEN;
fault_short_mask = FLASH_INT_FLED1_SHORT |
FLASH_INT_FLED2_SHORT;
} else {
fault_open_mask = (sub_led->fled_id == FLED1) ?
FLASH_INT_FLED1_OPEN :
FLASH_INT_FLED2_OPEN;
fault_short_mask = (sub_led->fled_id == FLED1) ?
FLASH_INT_FLED1_SHORT :
FLASH_INT_FLED2_SHORT;
}
ret = regmap_read(rmap, MAX77693_LED_REG_FLASH_INT, &v);
if (ret < 0)
return ret;
if (v & fault_open_mask)
sub_led->flash_faults |= LED_FAULT_OVER_VOLTAGE;
if (v & fault_short_mask)
sub_led->flash_faults |= LED_FAULT_SHORT_CIRCUIT;
if (v & FLASH_INT_OVER_CURRENT)
sub_led->flash_faults |= LED_FAULT_OVER_CURRENT;
return 0;
}
static int max77693_setup(struct max77693_led_device *led,
struct max77693_led_config_data *led_cfg)
{
struct regmap *rmap = led->regmap;
int i, first_led, last_led, ret;
u32 max_flash_curr[2];
u8 v;
/*
	 * Initialize only the flash current. The torch current doesn't
	 * require initialization, as the ITORCH register is written with a
	 * new value each time the brightness_set op is called.
*/
if (led->iout_joint) {
first_led = FLED1;
last_led = FLED1;
max_flash_curr[FLED1] = led_cfg->iout_flash_max[FLED1] +
led_cfg->iout_flash_max[FLED2];
} else {
first_led = max77693_fled_used(led, FLED1) ? FLED1 : FLED2;
last_led = max77693_fled_used(led, FLED2) ? FLED2 : FLED1;
max_flash_curr[FLED1] = led_cfg->iout_flash_max[FLED1];
max_flash_curr[FLED2] = led_cfg->iout_flash_max[FLED2];
}
for (i = first_led; i <= last_led; ++i) {
ret = max77693_set_flash_current(led, i,
max_flash_curr[i]);
if (ret < 0)
return ret;
}
v = TORCH_TMR_NO_TIMER | MAX77693_LED_TRIG_TYPE_LEVEL;
ret = regmap_write(rmap, MAX77693_LED_REG_ITORCHTIMER, v);
if (ret < 0)
return ret;
if (led_cfg->low_vsys > 0)
v = max77693_led_vsys_to_reg(led_cfg->low_vsys) |
MAX_FLASH1_MAX_FL_EN;
else
v = 0;
ret = regmap_write(rmap, MAX77693_LED_REG_MAX_FLASH1, v);
if (ret < 0)
return ret;
ret = regmap_write(rmap, MAX77693_LED_REG_MAX_FLASH2, 0);
if (ret < 0)
return ret;
if (led_cfg->boost_mode == MAX77693_LED_BOOST_FIXED)
v = FLASH_BOOST_FIXED;
else
v = led_cfg->boost_mode | led_cfg->boost_mode << 1;
if (max77693_fled_used(led, FLED1) && max77693_fled_used(led, FLED2))
v |= FLASH_BOOST_LEDNUM_2;
ret = regmap_write(rmap, MAX77693_LED_REG_VOUT_CNTL, v);
if (ret < 0)
return ret;
v = max77693_led_vout_to_reg(led_cfg->boost_vout);
ret = regmap_write(rmap, MAX77693_LED_REG_VOUT_FLASH1, v);
if (ret < 0)
return ret;
return max77693_set_mode_reg(led, MODE_OFF);
}
/* LED subsystem callbacks */
static int max77693_led_brightness_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
struct max77693_sub_led *sub_led = flcdev_to_sub_led(fled_cdev);
struct max77693_led_device *led = sub_led_to_led(sub_led);
int fled_id = sub_led->fled_id, ret;
mutex_lock(&led->lock);
if (value == 0) {
ret = max77693_clear_mode(led, MODE_TORCH(fled_id));
if (ret < 0)
dev_dbg(&led->pdev->dev,
"Failed to clear torch mode (%d)\n",
ret);
goto unlock;
}
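	/* Torch brightness levels are in units of TORCH_IOUT_STEP microamps. */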
ret = max77693_set_torch_current(led, fled_id, value * TORCH_IOUT_STEP);
if (ret < 0) {
dev_dbg(&led->pdev->dev,
"Failed to set torch current (%d)\n",
ret);
goto unlock;
}
ret = max77693_add_mode(led, MODE_TORCH(fled_id));
if (ret < 0)
dev_dbg(&led->pdev->dev,
"Failed to set torch mode (%d)\n",
ret);
unlock:
mutex_unlock(&led->lock);
return ret;
}
static int max77693_led_flash_brightness_set(
struct led_classdev_flash *fled_cdev,
u32 brightness)
{
struct max77693_sub_led *sub_led = flcdev_to_sub_led(fled_cdev);
struct max77693_led_device *led = sub_led_to_led(sub_led);
int ret;
mutex_lock(&led->lock);
ret = max77693_set_flash_current(led, sub_led->fled_id, brightness);
mutex_unlock(&led->lock);
return ret;
}
static int max77693_led_flash_strobe_set(
struct led_classdev_flash *fled_cdev,
bool state)
{
struct max77693_sub_led *sub_led = flcdev_to_sub_led(fled_cdev);
struct max77693_led_device *led = sub_led_to_led(sub_led);
int fled_id = sub_led->fled_id;
int ret;
mutex_lock(&led->lock);
if (!state) {
ret = max77693_clear_mode(led, MODE_FLASH(fled_id));
goto unlock;
}
if (sub_led->flash_timeout != led->current_flash_timeout) {
ret = max77693_set_timeout(led, sub_led->flash_timeout);
if (ret < 0)
goto unlock;
}
led->strobing_sub_led_id = fled_id;
ret = max77693_add_mode(led, MODE_FLASH(fled_id));
if (ret < 0)
goto unlock;
ret = max77693_get_flash_faults(sub_led);
unlock:
mutex_unlock(&led->lock);
return ret;
}
static int max77693_led_flash_fault_get(
struct led_classdev_flash *fled_cdev,
u32 *fault)
{
struct max77693_sub_led *sub_led = flcdev_to_sub_led(fled_cdev);
*fault = sub_led->flash_faults;
return 0;
}
static int max77693_led_flash_strobe_get(
struct led_classdev_flash *fled_cdev,
bool *state)
{
struct max77693_sub_led *sub_led = flcdev_to_sub_led(fled_cdev);
struct max77693_led_device *led = sub_led_to_led(sub_led);
int ret;
if (!state)
return -EINVAL;
mutex_lock(&led->lock);
ret = max77693_get_strobe_status(led, state);
*state = !!(*state && (led->strobing_sub_led_id == sub_led->fled_id));
mutex_unlock(&led->lock);
return ret;
}
static int max77693_led_flash_timeout_set(
struct led_classdev_flash *fled_cdev,
u32 timeout)
{
struct max77693_sub_led *sub_led = flcdev_to_sub_led(fled_cdev);
struct max77693_led_device *led = sub_led_to_led(sub_led);
mutex_lock(&led->lock);
sub_led->flash_timeout = timeout;
mutex_unlock(&led->lock);
return 0;
}
static int max77693_led_parse_dt(struct max77693_led_device *led,
struct max77693_led_config_data *cfg,
struct device_node **sub_nodes)
{
struct device *dev = &led->pdev->dev;
struct max77693_sub_led *sub_leds = led->sub_leds;
struct device_node *node = dev_of_node(dev), *child_node;
struct property *prop;
u32 led_sources[2];
int i, ret, fled_id;
of_property_read_u32(node, "maxim,boost-mode", &cfg->boost_mode);
of_property_read_u32(node, "maxim,boost-mvout", &cfg->boost_vout);
of_property_read_u32(node, "maxim,mvsys-min", &cfg->low_vsys);
for_each_available_child_of_node(node, child_node) {
prop = of_find_property(child_node, "led-sources", NULL);
if (prop) {
const __be32 *srcs = NULL;
for (i = 0; i < ARRAY_SIZE(led_sources); ++i) {
srcs = of_prop_next_u32(prop, srcs,
&led_sources[i]);
if (!srcs)
break;
}
} else {
dev_err(dev,
"led-sources DT property missing\n");
of_node_put(child_node);
return -EINVAL;
}
if (i == 2) {
fled_id = FLED1;
led->fled_mask = FLED1_IOUT | FLED2_IOUT;
} else if (led_sources[0] == FLED1) {
fled_id = FLED1;
led->fled_mask |= FLED1_IOUT;
} else if (led_sources[0] == FLED2) {
fled_id = FLED2;
led->fled_mask |= FLED2_IOUT;
} else {
dev_err(dev,
"Wrong led-sources DT property value.\n");
of_node_put(child_node);
return -EINVAL;
}
if (sub_nodes[fled_id]) {
dev_err(dev,
"Conflicting \"led-sources\" DT properties\n");
of_node_put(child_node);
return -EINVAL;
}
sub_nodes[fled_id] = child_node;
sub_leds[fled_id].fled_id = fled_id;
cfg->label[fled_id] =
of_get_property(child_node, "label", NULL) ? :
child_node->name;
ret = of_property_read_u32(child_node, "led-max-microamp",
&cfg->iout_torch_max[fled_id]);
if (ret < 0) {
cfg->iout_torch_max[fled_id] = TORCH_IOUT_MIN;
dev_warn(dev, "led-max-microamp DT property missing\n");
}
ret = of_property_read_u32(child_node, "flash-max-microamp",
&cfg->iout_flash_max[fled_id]);
if (ret < 0) {
cfg->iout_flash_max[fled_id] = FLASH_IOUT_MIN;
dev_warn(dev,
"flash-max-microamp DT property missing\n");
}
ret = of_property_read_u32(child_node, "flash-max-timeout-us",
&cfg->flash_timeout_max[fled_id]);
if (ret < 0) {
cfg->flash_timeout_max[fled_id] = FLASH_TIMEOUT_MIN;
dev_warn(dev,
"flash-max-timeout-us DT property missing\n");
}
if (++cfg->num_leds == 2 ||
(max77693_fled_used(led, FLED1) &&
max77693_fled_used(led, FLED2))) {
of_node_put(child_node);
break;
}
}
if (cfg->num_leds == 0) {
dev_err(dev, "No DT child node found for connected LED(s).\n");
return -EINVAL;
}
return 0;
}
static void clamp_align(u32 *v, u32 min, u32 max, u32 step)
{
*v = clamp_val(*v, min, max);
if (step > 1)
*v = (*v - min) / step * step + min;
}
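/*
 * E.g. clamp_align(&v, 100, 800, 100) with v = 250 leaves the clamp a no-op
 * and aligns down to 200; out-of-range inputs are first clamped to
 * [min, max].
 */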
static void max77693_align_iout_current(struct max77693_led_device *led,
u32 *iout, u32 min, u32 max, u32 step)
{
int i;
if (led->iout_joint) {
if (iout[FLED1] > min) {
iout[FLED1] /= 2;
iout[FLED2] = iout[FLED1];
} else {
iout[FLED1] = min;
iout[FLED2] = 0;
return;
}
}
for (i = FLED1; i <= FLED2; ++i)
if (max77693_fled_used(led, i))
clamp_align(&iout[i], min, max, step);
else
iout[i] = 0;
}
static void max77693_led_validate_configuration(struct max77693_led_device *led,
struct max77693_led_config_data *cfg)
{
u32 flash_iout_max = cfg->boost_mode ? FLASH_IOUT_MAX_2LEDS :
FLASH_IOUT_MAX_1LED;
int i;
if (cfg->num_leds == 1 &&
max77693_fled_used(led, FLED1) && max77693_fled_used(led, FLED2))
led->iout_joint = true;
cfg->boost_mode = clamp_val(cfg->boost_mode, MAX77693_LED_BOOST_NONE,
MAX77693_LED_BOOST_FIXED);
/* Boost must be enabled if both current outputs are used */
if ((cfg->boost_mode == MAX77693_LED_BOOST_NONE) && led->iout_joint)
cfg->boost_mode = MAX77693_LED_BOOST_FIXED;
max77693_align_iout_current(led, cfg->iout_torch_max,
TORCH_IOUT_MIN, TORCH_IOUT_MAX, TORCH_IOUT_STEP);
max77693_align_iout_current(led, cfg->iout_flash_max,
FLASH_IOUT_MIN, flash_iout_max, FLASH_IOUT_STEP);
for (i = 0; i < ARRAY_SIZE(cfg->flash_timeout_max); ++i)
clamp_align(&cfg->flash_timeout_max[i], FLASH_TIMEOUT_MIN,
FLASH_TIMEOUT_MAX, FLASH_TIMEOUT_STEP);
clamp_align(&cfg->boost_vout, FLASH_VOUT_MIN, FLASH_VOUT_MAX,
FLASH_VOUT_STEP);
if (cfg->low_vsys)
clamp_align(&cfg->low_vsys, MAX_FLASH1_VSYS_MIN,
MAX_FLASH1_VSYS_MAX, MAX_FLASH1_VSYS_STEP);
}
static int max77693_led_get_configuration(struct max77693_led_device *led,
struct max77693_led_config_data *cfg,
struct device_node **sub_nodes)
{
int ret;
ret = max77693_led_parse_dt(led, cfg, sub_nodes);
if (ret < 0)
return ret;
max77693_led_validate_configuration(led, cfg);
memcpy(led->iout_torch_max, cfg->iout_torch_max,
sizeof(led->iout_torch_max));
memcpy(led->iout_flash_max, cfg->iout_flash_max,
sizeof(led->iout_flash_max));
return 0;
}
static const struct led_flash_ops flash_ops = {
.flash_brightness_set = max77693_led_flash_brightness_set,
.strobe_set = max77693_led_flash_strobe_set,
.strobe_get = max77693_led_flash_strobe_get,
.timeout_set = max77693_led_flash_timeout_set,
.fault_get = max77693_led_flash_fault_get,
};
static void max77693_init_flash_settings(struct max77693_sub_led *sub_led,
struct max77693_led_config_data *led_cfg)
{
struct led_classdev_flash *fled_cdev = &sub_led->fled_cdev;
struct max77693_led_device *led = sub_led_to_led(sub_led);
int fled_id = sub_led->fled_id;
struct led_flash_setting *setting;
/* Init flash intensity setting */
setting = &fled_cdev->brightness;
setting->min = FLASH_IOUT_MIN;
setting->max = led->iout_joint ?
led_cfg->iout_flash_max[FLED1] +
led_cfg->iout_flash_max[FLED2] :
led_cfg->iout_flash_max[fled_id];
setting->step = FLASH_IOUT_STEP;
setting->val = setting->max;
/* Init flash timeout setting */
setting = &fled_cdev->timeout;
setting->min = FLASH_TIMEOUT_MIN;
setting->max = led_cfg->flash_timeout_max[fled_id];
setting->step = FLASH_TIMEOUT_STEP;
setting->val = setting->max;
}
#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS)
static int max77693_led_external_strobe_set(
struct v4l2_flash *v4l2_flash,
bool enable)
{
struct max77693_sub_led *sub_led =
flcdev_to_sub_led(v4l2_flash->fled_cdev);
struct max77693_led_device *led = sub_led_to_led(sub_led);
int fled_id = sub_led->fled_id;
int ret;
mutex_lock(&led->lock);
if (enable)
ret = max77693_add_mode(led, MODE_FLASH_EXTERNAL(fled_id));
else
ret = max77693_clear_mode(led, MODE_FLASH_EXTERNAL(fled_id));
mutex_unlock(&led->lock);
return ret;
}
static void max77693_init_v4l2_flash_config(struct max77693_sub_led *sub_led,
struct max77693_led_config_data *led_cfg,
struct v4l2_flash_config *v4l2_sd_cfg)
{
struct max77693_led_device *led = sub_led_to_led(sub_led);
struct device *dev = &led->pdev->dev;
struct max77693_dev *iodev = dev_get_drvdata(dev->parent);
struct i2c_client *i2c = iodev->i2c;
struct led_flash_setting *s;
snprintf(v4l2_sd_cfg->dev_name, sizeof(v4l2_sd_cfg->dev_name),
"%s %d-%04x", sub_led->fled_cdev.led_cdev.name,
i2c_adapter_id(i2c->adapter), i2c->addr);
s = &v4l2_sd_cfg->intensity;
s->min = TORCH_IOUT_MIN;
s->max = sub_led->fled_cdev.led_cdev.max_brightness * TORCH_IOUT_STEP;
s->step = TORCH_IOUT_STEP;
s->val = s->max;
/* Init flash faults config */
v4l2_sd_cfg->flash_faults = LED_FAULT_OVER_VOLTAGE |
LED_FAULT_SHORT_CIRCUIT |
LED_FAULT_OVER_CURRENT;
v4l2_sd_cfg->has_external_strobe = true;
}
static const struct v4l2_flash_ops v4l2_flash_ops = {
.external_strobe_set = max77693_led_external_strobe_set,
};
#else
static inline void max77693_init_v4l2_flash_config(
struct max77693_sub_led *sub_led,
struct max77693_led_config_data *led_cfg,
struct v4l2_flash_config *v4l2_sd_cfg)
{
}
static const struct v4l2_flash_ops v4l2_flash_ops;
#endif
static void max77693_init_fled_cdev(struct max77693_sub_led *sub_led,
struct max77693_led_config_data *led_cfg)
{
struct max77693_led_device *led = sub_led_to_led(sub_led);
int fled_id = sub_led->fled_id;
struct led_classdev_flash *fled_cdev;
struct led_classdev *led_cdev;
/* Initialize LED Flash class device */
fled_cdev = &sub_led->fled_cdev;
fled_cdev->ops = &flash_ops;
led_cdev = &fled_cdev->led_cdev;
led_cdev->name = led_cfg->label[fled_id];
led_cdev->brightness_set_blocking = max77693_led_brightness_set;
led_cdev->max_brightness = (led->iout_joint ?
led_cfg->iout_torch_max[FLED1] +
led_cfg->iout_torch_max[FLED2] :
led_cfg->iout_torch_max[fled_id]) /
TORCH_IOUT_STEP;
led_cdev->flags |= LED_DEV_CAP_FLASH;
max77693_init_flash_settings(sub_led, led_cfg);
/* Init flash timeout cache */
sub_led->flash_timeout = fled_cdev->timeout.val;
}
static int max77693_register_led(struct max77693_sub_led *sub_led,
struct max77693_led_config_data *led_cfg,
struct device_node *sub_node)
{
struct max77693_led_device *led = sub_led_to_led(sub_led);
struct led_classdev_flash *fled_cdev = &sub_led->fled_cdev;
struct device *dev = &led->pdev->dev;
struct v4l2_flash_config v4l2_sd_cfg = {};
int ret;
/* Register in the LED subsystem */
ret = led_classdev_flash_register(dev, fled_cdev);
if (ret < 0)
return ret;
max77693_init_v4l2_flash_config(sub_led, led_cfg, &v4l2_sd_cfg);
/* Register in the V4L2 subsystem. */
sub_led->v4l2_flash = v4l2_flash_init(dev, of_fwnode_handle(sub_node),
fled_cdev, &v4l2_flash_ops,
&v4l2_sd_cfg);
if (IS_ERR(sub_led->v4l2_flash)) {
ret = PTR_ERR(sub_led->v4l2_flash);
goto err_v4l2_flash_init;
}
return 0;
err_v4l2_flash_init:
led_classdev_flash_unregister(fled_cdev);
return ret;
}
static int max77693_led_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct max77693_dev *iodev = dev_get_drvdata(dev->parent);
struct max77693_led_device *led;
struct max77693_sub_led *sub_leds;
struct device_node *sub_nodes[2] = {};
struct max77693_led_config_data led_cfg = {};
int init_fled_cdev[2], i, ret;
led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
if (!led)
return -ENOMEM;
led->pdev = pdev;
led->regmap = iodev->regmap;
led->allowed_modes = MODE_FLASH_MASK;
sub_leds = led->sub_leds;
platform_set_drvdata(pdev, led);
ret = max77693_led_get_configuration(led, &led_cfg, sub_nodes);
if (ret < 0)
return ret;
ret = max77693_setup(led, &led_cfg);
if (ret < 0)
return ret;
mutex_init(&led->lock);
init_fled_cdev[FLED1] =
led->iout_joint || max77693_fled_used(led, FLED1);
init_fled_cdev[FLED2] =
!led->iout_joint && max77693_fled_used(led, FLED2);
for (i = FLED1; i <= FLED2; ++i) {
if (!init_fled_cdev[i])
continue;
/* Initialize LED Flash class device */
max77693_init_fled_cdev(&sub_leds[i], &led_cfg);
/*
* Register LED Flash class device and corresponding
* V4L2 Flash device.
*/
ret = max77693_register_led(&sub_leds[i], &led_cfg,
sub_nodes[i]);
if (ret < 0) {
/*
* At this moment FLED1 might have been already
* registered and it needs to be released.
*/
if (i == FLED2)
goto err_register_led2;
else
goto err_register_led1;
}
}
return 0;
err_register_led2:
	/* It is possible that only FLED2 was to be registered */
if (!init_fled_cdev[FLED1])
goto err_register_led1;
v4l2_flash_release(sub_leds[FLED1].v4l2_flash);
led_classdev_flash_unregister(&sub_leds[FLED1].fled_cdev);
err_register_led1:
mutex_destroy(&led->lock);
return ret;
}
static int max77693_led_remove(struct platform_device *pdev)
{
struct max77693_led_device *led = platform_get_drvdata(pdev);
struct max77693_sub_led *sub_leds = led->sub_leds;
if (led->iout_joint || max77693_fled_used(led, FLED1)) {
v4l2_flash_release(sub_leds[FLED1].v4l2_flash);
led_classdev_flash_unregister(&sub_leds[FLED1].fled_cdev);
}
if (!led->iout_joint && max77693_fled_used(led, FLED2)) {
v4l2_flash_release(sub_leds[FLED2].v4l2_flash);
led_classdev_flash_unregister(&sub_leds[FLED2].fled_cdev);
}
mutex_destroy(&led->lock);
return 0;
}
static const struct of_device_id max77693_led_dt_match[] = {
{ .compatible = "maxim,max77693-led" },
{},
};
MODULE_DEVICE_TABLE(of, max77693_led_dt_match);
static struct platform_driver max77693_led_driver = {
.probe = max77693_led_probe,
.remove = max77693_led_remove,
.driver = {
.name = "max77693-led",
.of_match_table = max77693_led_dt_match,
},
};
module_platform_driver(max77693_led_driver);
MODULE_AUTHOR("Jacek Anaszewski <[email protected]>");
MODULE_AUTHOR("Andrzej Hajda <[email protected]>");
MODULE_DESCRIPTION("Maxim MAX77693 led flash driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/leds/flash/leds-max77693.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*
* The "Virtual Machine Generation ID" is exposed via ACPI and changes when a
* virtual machine forks or is cloned. This driver exists for shepherding that
* information to random.c.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/random.h>
ACPI_MODULE_NAME("vmgenid");
enum { VMGENID_SIZE = 16 };
struct vmgenid_state {
u8 *next_id;
u8 this_id[VMGENID_SIZE];
};
static int vmgenid_add(struct acpi_device *device)
{
struct acpi_buffer parsed = { ACPI_ALLOCATE_BUFFER };
struct vmgenid_state *state;
union acpi_object *obj;
phys_addr_t phys_addr;
acpi_status status;
int ret = 0;
state = devm_kmalloc(&device->dev, sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
status = acpi_evaluate_object(device->handle, "ADDR", NULL, &parsed);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status, "Evaluating ADDR"));
return -ENODEV;
}
obj = parsed.pointer;
if (!obj || obj->type != ACPI_TYPE_PACKAGE || obj->package.count != 2 ||
obj->package.elements[0].type != ACPI_TYPE_INTEGER ||
obj->package.elements[1].type != ACPI_TYPE_INTEGER) {
ret = -EINVAL;
goto out;
}
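	/*
	 * ADDR evaluates to a package of two integers holding the low and
	 * high 32 bits of the generation ID buffer's physical address,
	 * e.g. (hypothetical values) { 0xfe000000, 0x1 } yields 0x1fe000000.
	 */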
phys_addr = (obj->package.elements[0].integer.value << 0) |
(obj->package.elements[1].integer.value << 32);
state->next_id = devm_memremap(&device->dev, phys_addr, VMGENID_SIZE, MEMREMAP_WB);
if (IS_ERR(state->next_id)) {
ret = PTR_ERR(state->next_id);
goto out;
}
memcpy(state->this_id, state->next_id, sizeof(state->this_id));
add_device_randomness(state->this_id, sizeof(state->this_id));
device->driver_data = state;
out:
ACPI_FREE(parsed.pointer);
return ret;
}
static void vmgenid_notify(struct acpi_device *device, u32 event)
{
struct vmgenid_state *state = acpi_driver_data(device);
u8 old_id[VMGENID_SIZE];
memcpy(old_id, state->this_id, sizeof(old_id));
memcpy(state->this_id, state->next_id, sizeof(state->this_id));
if (!memcmp(old_id, state->this_id, sizeof(old_id)))
return;
add_vmfork_randomness(state->this_id, sizeof(state->this_id));
}
static const struct acpi_device_id vmgenid_ids[] = {
{ "VMGENCTR", 0 },
{ "VM_GEN_COUNTER", 0 },
{ }
};
static struct acpi_driver vmgenid_driver = {
.name = "vmgenid",
.ids = vmgenid_ids,
.owner = THIS_MODULE,
.ops = {
.add = vmgenid_add,
.notify = vmgenid_notify
}
};
module_acpi_driver(vmgenid_driver);
MODULE_DEVICE_TABLE(acpi, vmgenid_ids);
MODULE_DESCRIPTION("Virtual Machine Generation ID");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jason A. Donenfeld <[email protected]>");
| linux-master | drivers/virt/vmgenid.c |
/*
* Freescale Hypervisor Management Driver
* Copyright (C) 2008-2011 Freescale Semiconductor, Inc.
* Author: Timur Tabi <[email protected]>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*
* The Freescale hypervisor management driver provides several services to
* drivers and applications related to the Freescale hypervisor:
*
* 1. An ioctl interface for querying and managing partitions.
*
* 2. A file interface to reading incoming doorbells.
*
* 3. An interrupt handler for shutting down the partition upon receiving the
* shutdown doorbell from a manager partition.
*
* 4. A kernel interface for receiving callbacks when a managed partition
* shuts down.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/reboot.h>
#include <linux/uaccess.h>
#include <linux/notifier.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <asm/fsl_hcalls.h>
#include <linux/fsl_hypervisor.h>
static BLOCKING_NOTIFIER_HEAD(failover_subscribers);
/*
* Ioctl interface for FSL_HV_IOCTL_PARTITION_RESTART
*
* Restart a running partition
*/
static long ioctl_restart(struct fsl_hv_ioctl_restart __user *p)
{
struct fsl_hv_ioctl_restart param;
/* Get the parameters from the user */
if (copy_from_user(¶m, p, sizeof(struct fsl_hv_ioctl_restart)))
return -EFAULT;
param.ret = fh_partition_restart(param.partition);
if (copy_to_user(&p->ret, ¶m.ret, sizeof(__u32)))
return -EFAULT;
return 0;
}
/*
* Ioctl interface for FSL_HV_IOCTL_PARTITION_STATUS
*
* Query the status of a partition
*/
static long ioctl_status(struct fsl_hv_ioctl_status __user *p)
{
struct fsl_hv_ioctl_status param;
u32 status;
/* Get the parameters from the user */
if (copy_from_user(¶m, p, sizeof(struct fsl_hv_ioctl_status)))
return -EFAULT;
param.ret = fh_partition_get_status(param.partition, &status);
if (!param.ret)
param.status = status;
if (copy_to_user(p, ¶m, sizeof(struct fsl_hv_ioctl_status)))
return -EFAULT;
return 0;
}
/*
* Ioctl interface for FSL_HV_IOCTL_PARTITION_START
*
* Start a stopped partition.
*/
static long ioctl_start(struct fsl_hv_ioctl_start __user *p)
{
struct fsl_hv_ioctl_start param;
/* Get the parameters from the user */
if (copy_from_user(¶m, p, sizeof(struct fsl_hv_ioctl_start)))
return -EFAULT;
param.ret = fh_partition_start(param.partition, param.entry_point,
param.load);
if (copy_to_user(&p->ret, ¶m.ret, sizeof(__u32)))
return -EFAULT;
return 0;
}
/*
* Ioctl interface for FSL_HV_IOCTL_PARTITION_STOP
*
* Stop a running partition
*/
static long ioctl_stop(struct fsl_hv_ioctl_stop __user *p)
{
struct fsl_hv_ioctl_stop param;
/* Get the parameters from the user */
if (copy_from_user(¶m, p, sizeof(struct fsl_hv_ioctl_stop)))
return -EFAULT;
param.ret = fh_partition_stop(param.partition);
if (copy_to_user(&p->ret, ¶m.ret, sizeof(__u32)))
return -EFAULT;
return 0;
}
/*
* Ioctl interface for FSL_HV_IOCTL_MEMCPY
*
* The FH_MEMCPY hypercall takes an array of address/address/size structures
* to represent the data being copied. As a convenience to the user, this
 * ioctl takes a user-created buffer and a pointer to a guest physically
* contiguous buffer in the remote partition, and creates the
* address/address/size array for the hypercall.
*/
static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
{
struct fsl_hv_ioctl_memcpy param;
struct page **pages = NULL;
void *sg_list_unaligned = NULL;
struct fh_sg_list *sg_list = NULL;
unsigned int num_pages;
unsigned long lb_offset; /* Offset within a page of the local buffer */
unsigned int i;
long ret = 0;
int num_pinned = 0; /* return value from get_user_pages_fast() */
phys_addr_t remote_paddr; /* The next address in the remote buffer */
uint32_t count; /* The number of bytes left to copy */
/* Get the parameters from the user */
if (copy_from_user(¶m, p, sizeof(struct fsl_hv_ioctl_memcpy)))
return -EFAULT;
/*
* One partition must be local, the other must be remote. In other
* words, if source and target are both -1, or are both not -1, then
* return an error.
*/
if ((param.source == -1) == (param.target == -1))
return -EINVAL;
/*
* The array of pages returned by get_user_pages_fast() covers only
* page-aligned memory. Since the user buffer is probably not
* page-aligned, we need to handle the discrepancy.
*
	 * We calculate the offset of the local buffer within a page and make
	 * adjustments accordingly. This will result in a page list that looks
	 * like this:
*
* ---- <-- first page starts before the buffer
* | |
* |////|-> ----
* |////| | |
* ---- | |
* | |
* ---- | |
* |////| | |
* |////| | |
* |////| | |
* ---- | |
* | |
* ---- | |
* |////| | |
* |////| | |
* |////| | |
* ---- | |
* | |
* ---- | |
* |////| | |
* |////|-> ----
* | | <-- last page ends after the buffer
* ----
*
* The distance between the start of the first page and the start of the
* buffer is lb_offset. The hashed (///) areas are the parts of the
* page list that contain the actual buffer.
*
* The advantage of this approach is that the number of pages is
* equal to the number of entries in the S/G list that we give to the
* hypervisor.
*/
lb_offset = param.local_vaddr & (PAGE_SIZE - 1);
if (param.count == 0 ||
param.count > U64_MAX - lb_offset - PAGE_SIZE + 1)
return -EINVAL;
num_pages = (param.count + lb_offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
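	/*
	 * Worked example (hypothetical values): local_vaddr = 0x10000100
	 * gives lb_offset = 0x100; with count = 0x2000 and 4 KiB pages,
	 * num_pages = (0x2000 + 0x100 + 0xfff) >> 12 = 3. The first S/G
	 * entry then covers 0xf00 bytes, the second a full page and the
	 * last 0x100 bytes.
	 */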
/* Allocate the buffers we need */
/*
* 'pages' is an array of struct page pointers that's initialized by
* get_user_pages_fast().
*/
pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
if (!pages) {
pr_debug("fsl-hv: could not allocate page list\n");
return -ENOMEM;
}
/*
* sg_list is the list of fh_sg_list objects that we pass to the
* hypervisor.
*/
sg_list_unaligned = kmalloc(num_pages * sizeof(struct fh_sg_list) +
sizeof(struct fh_sg_list) - 1, GFP_KERNEL);
if (!sg_list_unaligned) {
pr_debug("fsl-hv: could not allocate S/G list\n");
ret = -ENOMEM;
goto free_pages;
}
sg_list = PTR_ALIGN(sg_list_unaligned, sizeof(struct fh_sg_list));
/* Get the physical addresses of the source buffer */
num_pinned = get_user_pages_fast(param.local_vaddr - lb_offset,
num_pages, param.source != -1 ? FOLL_WRITE : 0, pages);
if (num_pinned != num_pages) {
pr_debug("fsl-hv: could not lock source buffer\n");
ret = (num_pinned < 0) ? num_pinned : -EFAULT;
goto exit;
}
/*
* Build the fh_sg_list[] array. The first page is special
* because it's misaligned.
*/
if (param.source == -1) {
sg_list[0].source = page_to_phys(pages[0]) + lb_offset;
sg_list[0].target = param.remote_paddr;
} else {
sg_list[0].source = param.remote_paddr;
sg_list[0].target = page_to_phys(pages[0]) + lb_offset;
}
sg_list[0].size = min_t(uint64_t, param.count, PAGE_SIZE - lb_offset);
remote_paddr = param.remote_paddr + sg_list[0].size;
count = param.count - sg_list[0].size;
for (i = 1; i < num_pages; i++) {
if (param.source == -1) {
/* local to remote */
sg_list[i].source = page_to_phys(pages[i]);
sg_list[i].target = remote_paddr;
} else {
/* remote to local */
sg_list[i].source = remote_paddr;
sg_list[i].target = page_to_phys(pages[i]);
}
sg_list[i].size = min_t(uint64_t, count, PAGE_SIZE);
remote_paddr += sg_list[i].size;
count -= sg_list[i].size;
}
param.ret = fh_partition_memcpy(param.source, param.target,
virt_to_phys(sg_list), num_pages);
exit:
if (pages && (num_pinned > 0)) {
for (i = 0; i < num_pinned; i++)
put_page(pages[i]);
}
kfree(sg_list_unaligned);
free_pages:
kfree(pages);
if (!ret)
if (copy_to_user(&p->ret, ¶m.ret, sizeof(__u32)))
return -EFAULT;
return ret;
}
/*
* Ioctl interface for FSL_HV_IOCTL_DOORBELL
*
* Ring a doorbell
*/
static long ioctl_doorbell(struct fsl_hv_ioctl_doorbell __user *p)
{
struct fsl_hv_ioctl_doorbell param;
/* Get the parameters from the user. */
if (copy_from_user(¶m, p, sizeof(struct fsl_hv_ioctl_doorbell)))
return -EFAULT;
param.ret = ev_doorbell_send(param.doorbell);
if (copy_to_user(&p->ret, ¶m.ret, sizeof(__u32)))
return -EFAULT;
return 0;
}
static long ioctl_dtprop(struct fsl_hv_ioctl_prop __user *p, int set)
{
struct fsl_hv_ioctl_prop param;
char __user *upath, *upropname;
void __user *upropval;
char *path, *propname;
void *propval;
int ret = 0;
/* Get the parameters from the user. */
if (copy_from_user(¶m, p, sizeof(struct fsl_hv_ioctl_prop)))
return -EFAULT;
upath = (char __user *)(uintptr_t)param.path;
upropname = (char __user *)(uintptr_t)param.propname;
upropval = (void __user *)(uintptr_t)param.propval;
path = strndup_user(upath, FH_DTPROP_MAX_PATHLEN);
if (IS_ERR(path))
return PTR_ERR(path);
propname = strndup_user(upropname, FH_DTPROP_MAX_PATHLEN);
if (IS_ERR(propname)) {
ret = PTR_ERR(propname);
goto err_free_path;
}
if (param.proplen > FH_DTPROP_MAX_PROPLEN) {
ret = -EINVAL;
goto err_free_propname;
}
propval = kmalloc(param.proplen, GFP_KERNEL);
if (!propval) {
ret = -ENOMEM;
goto err_free_propname;
}
if (set) {
if (copy_from_user(propval, upropval, param.proplen)) {
ret = -EFAULT;
goto err_free_propval;
}
param.ret = fh_partition_set_dtprop(param.handle,
virt_to_phys(path),
virt_to_phys(propname),
virt_to_phys(propval),
param.proplen);
} else {
param.ret = fh_partition_get_dtprop(param.handle,
virt_to_phys(path),
virt_to_phys(propname),
virt_to_phys(propval),
¶m.proplen);
if (param.ret == 0) {
if (copy_to_user(upropval, propval, param.proplen) ||
put_user(param.proplen, &p->proplen)) {
ret = -EFAULT;
goto err_free_propval;
}
}
}
if (put_user(param.ret, &p->ret))
ret = -EFAULT;
err_free_propval:
kfree(propval);
err_free_propname:
kfree(propname);
err_free_path:
kfree(path);
return ret;
}
/*
* Ioctl main entry point
*/
static long fsl_hv_ioctl(struct file *file, unsigned int cmd,
unsigned long argaddr)
{
void __user *arg = (void __user *)argaddr;
long ret;
switch (cmd) {
case FSL_HV_IOCTL_PARTITION_RESTART:
ret = ioctl_restart(arg);
break;
case FSL_HV_IOCTL_PARTITION_GET_STATUS:
ret = ioctl_status(arg);
break;
case FSL_HV_IOCTL_PARTITION_START:
ret = ioctl_start(arg);
break;
case FSL_HV_IOCTL_PARTITION_STOP:
ret = ioctl_stop(arg);
break;
case FSL_HV_IOCTL_MEMCPY:
ret = ioctl_memcpy(arg);
break;
case FSL_HV_IOCTL_DOORBELL:
ret = ioctl_doorbell(arg);
break;
case FSL_HV_IOCTL_GETPROP:
ret = ioctl_dtprop(arg, 0);
break;
case FSL_HV_IOCTL_SETPROP:
ret = ioctl_dtprop(arg, 1);
break;
default:
pr_debug("fsl-hv: bad ioctl dir=%u type=%u cmd=%u size=%u\n",
_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd),
_IOC_SIZE(cmd));
return -ENOTTY;
}
return ret;
}
/* Linked list of processes that have us open */
static struct list_head db_list;
/* spinlock for db_list */
static DEFINE_SPINLOCK(db_list_lock);
/* The size of the doorbell event queue. This must be a power of two. */
#define QSIZE 16
/* Returns the next head/tail pointer, wrapping around the queue if necessary */
#define nextp(x) (((x) + 1) & (QSIZE - 1))
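/*
 * E.g. with QSIZE = 16, nextp(15) = (16 & 15) = 0, so head/tail wrap
 * without a modulo division; this is why QSIZE must be a power of two.
 */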
/* Per-open data structure */
struct doorbell_queue {
struct list_head list;
spinlock_t lock;
wait_queue_head_t wait;
unsigned int head;
unsigned int tail;
uint32_t q[QSIZE];
};
/* Linked list of ISRs that we registered */
static struct list_head isr_list;
/* Per-ISR data structure */
struct doorbell_isr {
struct list_head list;
unsigned int irq;
uint32_t doorbell; /* The doorbell handle */
uint32_t partition; /* The partition handle, if used */
};
/*
* Add a doorbell to all of the doorbell queues
*/
static void fsl_hv_queue_doorbell(uint32_t doorbell)
{
struct doorbell_queue *dbq;
unsigned long flags;
/* Prevent another core from modifying db_list */
spin_lock_irqsave(&db_list_lock, flags);
list_for_each_entry(dbq, &db_list, list) {
if (dbq->head != nextp(dbq->tail)) {
dbq->q[dbq->tail] = doorbell;
/*
* This memory barrier eliminates the need to grab
* the spinlock for dbq.
*/
smp_wmb();
dbq->tail = nextp(dbq->tail);
wake_up_interruptible(&dbq->wait);
}
}
spin_unlock_irqrestore(&db_list_lock, flags);
}
/*
* Interrupt handler for all doorbells
*
* We use the same interrupt handler for all doorbells. Whenever a doorbell
 * is rung and we receive an interrupt, we put the handle for that
 * doorbell (passed to us as *data) into all of the queues.
*/
static irqreturn_t fsl_hv_isr(int irq, void *data)
{
fsl_hv_queue_doorbell((uintptr_t) data);
return IRQ_HANDLED;
}
/*
* State change thread function
*
* The state change notification arrives in an interrupt, but we can't call
* blocking_notifier_call_chain() in an interrupt handler. We could call
* atomic_notifier_call_chain(), but that would require the clients' call-back
* function to run in interrupt context. Since we don't want to impose that
* restriction on the clients, we use a threaded IRQ to process the
* notification in kernel context.
*/
static irqreturn_t fsl_hv_state_change_thread(int irq, void *data)
{
struct doorbell_isr *dbisr = data;
blocking_notifier_call_chain(&failover_subscribers, dbisr->partition,
NULL);
return IRQ_HANDLED;
}
/*
* Interrupt handler for state-change doorbells
*/
static irqreturn_t fsl_hv_state_change_isr(int irq, void *data)
{
unsigned int status;
struct doorbell_isr *dbisr = data;
int ret;
/* It's still a doorbell, so add it to all the queues. */
fsl_hv_queue_doorbell(dbisr->doorbell);
/* Determine the new state, and if it's stopped, notify the clients. */
ret = fh_partition_get_status(dbisr->partition, &status);
if (!ret && (status == FH_PARTITION_STOPPED))
return IRQ_WAKE_THREAD;
return IRQ_HANDLED;
}
/*
* Returns a bitmask indicating whether a read will block
*/
static __poll_t fsl_hv_poll(struct file *filp, struct poll_table_struct *p)
{
struct doorbell_queue *dbq = filp->private_data;
unsigned long flags;
__poll_t mask;
spin_lock_irqsave(&dbq->lock, flags);
poll_wait(filp, &dbq->wait, p);
mask = (dbq->head == dbq->tail) ? 0 : (EPOLLIN | EPOLLRDNORM);
spin_unlock_irqrestore(&dbq->lock, flags);
return mask;
}
/*
* Return the handles for any incoming doorbells
*
* If there are doorbell handles in the queue for this open instance, then
* return them to the caller as an array of 32-bit integers. Otherwise,
* block until there is at least one handle to return.
*/
static ssize_t fsl_hv_read(struct file *filp, char __user *buf, size_t len,
loff_t *off)
{
struct doorbell_queue *dbq = filp->private_data;
uint32_t __user *p = (uint32_t __user *) buf; /* for put_user() */
unsigned long flags;
ssize_t count = 0;
/* Make sure we stop when the user buffer is full. */
while (len >= sizeof(uint32_t)) {
uint32_t dbell; /* Local copy of doorbell queue data */
spin_lock_irqsave(&dbq->lock, flags);
/*
* If the queue is empty, then either we're done or we need
* to block. If the application specified O_NONBLOCK, then
* we return the appropriate error code.
*/
if (dbq->head == dbq->tail) {
spin_unlock_irqrestore(&dbq->lock, flags);
if (count)
break;
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
if (wait_event_interruptible(dbq->wait,
dbq->head != dbq->tail))
return -ERESTARTSYS;
continue;
}
/*
* Even though we have an smp_wmb() in the ISR, the core
* might speculatively execute the "dbell = ..." below while
* it's evaluating the if-statement above. In that case, the
* value put into dbell could be stale if the core accepts the
* speculation. To prevent that, we need a read memory barrier
* here as well.
*/
smp_rmb();
		/*
		 * Copy the data to a temporary local buffer, because we
		 * can't call copy_to_user() from inside a spinlock.
		 */
dbell = dbq->q[dbq->head];
dbq->head = nextp(dbq->head);
spin_unlock_irqrestore(&dbq->lock, flags);
if (put_user(dbell, p))
return -EFAULT;
p++;
count += sizeof(uint32_t);
len -= sizeof(uint32_t);
}
return count;
}
/*
* Open the driver and prepare for reading doorbells.
*
* Every time an application opens the driver, we create a doorbell queue
* for that file handle. This queue is used for any incoming doorbells.
*/
static int fsl_hv_open(struct inode *inode, struct file *filp)
{
struct doorbell_queue *dbq;
unsigned long flags;
dbq = kzalloc(sizeof(struct doorbell_queue), GFP_KERNEL);
if (!dbq) {
pr_err("fsl-hv: out of memory\n");
return -ENOMEM;
}
spin_lock_init(&dbq->lock);
init_waitqueue_head(&dbq->wait);
spin_lock_irqsave(&db_list_lock, flags);
list_add(&dbq->list, &db_list);
spin_unlock_irqrestore(&db_list_lock, flags);
filp->private_data = dbq;
return 0;
}
/*
* Close the driver
*/
static int fsl_hv_close(struct inode *inode, struct file *filp)
{
struct doorbell_queue *dbq = filp->private_data;
unsigned long flags;
spin_lock_irqsave(&db_list_lock, flags);
list_del(&dbq->list);
spin_unlock_irqrestore(&db_list_lock, flags);
kfree(dbq);
return 0;
}
static const struct file_operations fsl_hv_fops = {
.owner = THIS_MODULE,
.open = fsl_hv_open,
.release = fsl_hv_close,
.poll = fsl_hv_poll,
.read = fsl_hv_read,
.unlocked_ioctl = fsl_hv_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
static struct miscdevice fsl_hv_misc_dev = {
MISC_DYNAMIC_MINOR,
"fsl-hv",
&fsl_hv_fops
};
static irqreturn_t fsl_hv_shutdown_isr(int irq, void *data)
{
orderly_poweroff(false);
return IRQ_HANDLED;
}
/*
* Returns the handle of the parent of the given node
*
* The handle is the value of the 'hv-handle' property
*/
static int get_parent_handle(struct device_node *np)
{
struct device_node *parent;
const uint32_t *prop;
uint32_t handle;
int len;
parent = of_get_parent(np);
if (!parent)
/* It's not really possible for this to fail */
return -ENODEV;
/*
* The proper name for the handle property is "hv-handle", but some
* older versions of the hypervisor used "reg".
*/
prop = of_get_property(parent, "hv-handle", &len);
if (!prop)
prop = of_get_property(parent, "reg", &len);
if (!prop || (len != sizeof(uint32_t))) {
/* This can happen only if the node is malformed */
of_node_put(parent);
return -ENODEV;
}
handle = be32_to_cpup(prop);
of_node_put(parent);
return handle;
}
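/*
 * Illustrative (hypothetical) device tree fragment: the doorbell node is a
 * child of the partition node that carries the handle:
 *
 *	partition@2 {
 *		hv-handle = <2>;
 *		doorbell {
 *			compatible = "epapr,hv-receive-doorbell",
 *				     "fsl,hv-state-change-doorbell";
 *			interrupts = <36 0>;
 *		};
 *	};
 */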
/*
* Register a callback for failover events
*
* This function is called by device drivers to register their callback
* functions for fail-over events.
*/
int fsl_hv_failover_register(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&failover_subscribers, nb);
}
EXPORT_SYMBOL(fsl_hv_failover_register);
/*
* Unregister a callback for failover events
*/
int fsl_hv_failover_unregister(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&failover_subscribers, nb);
}
EXPORT_SYMBOL(fsl_hv_failover_unregister);
/*
* Return TRUE if we're running under FSL hypervisor
*
* This function checks to see if we're running under the Freescale
* hypervisor, and returns zero if we're not, or non-zero if we are.
*
* First, it checks if MSR[GS]==1, which means we're running under some
* hypervisor. Then it checks if there is a hypervisor node in the device
* tree. Currently, that means there needs to be a node in the root called
* "hypervisor" and which has a property named "fsl,hv-version".
*/
static int has_fsl_hypervisor(void)
{
struct device_node *node;
int ret;
node = of_find_node_by_path("/hypervisor");
if (!node)
return 0;
ret = of_property_present(node, "fsl,hv-version");
of_node_put(node);
return ret;
}
/*
* Freescale hypervisor management driver init
*
* This function is called when this module is loaded.
*
* Register ourselves as a miscellaneous driver. This will register the
* fops structure and create the right sysfs entries for udev.
*/
static int __init fsl_hypervisor_init(void)
{
struct device_node *np;
struct doorbell_isr *dbisr, *n;
int ret;
pr_info("Freescale hypervisor management driver\n");
if (!has_fsl_hypervisor()) {
pr_info("fsl-hv: no hypervisor found\n");
return -ENODEV;
}
ret = misc_register(&fsl_hv_misc_dev);
if (ret) {
pr_err("fsl-hv: cannot register device\n");
return ret;
}
INIT_LIST_HEAD(&db_list);
INIT_LIST_HEAD(&isr_list);
for_each_compatible_node(np, NULL, "epapr,hv-receive-doorbell") {
unsigned int irq;
const uint32_t *handle;
handle = of_get_property(np, "interrupts", NULL);
irq = irq_of_parse_and_map(np, 0);
if (!handle || !irq) {
pr_err("fsl-hv: no 'interrupts' property in %pOF node\n",
np);
continue;
}
dbisr = kzalloc(sizeof(*dbisr), GFP_KERNEL);
if (!dbisr)
goto out_of_memory;
dbisr->irq = irq;
dbisr->doorbell = be32_to_cpup(handle);
if (of_device_is_compatible(np, "fsl,hv-shutdown-doorbell")) {
/* The shutdown doorbell gets its own ISR */
ret = request_irq(irq, fsl_hv_shutdown_isr, 0,
np->name, NULL);
} else if (of_device_is_compatible(np,
"fsl,hv-state-change-doorbell")) {
/*
* The state change doorbell triggers a notification if
* the state of the managed partition changes to
* "stopped". We need a separate interrupt handler for
* that, and we also need to know the handle of the
* target partition, not just the handle of the
* doorbell.
*/
dbisr->partition = ret = get_parent_handle(np);
if (ret < 0) {
pr_err("fsl-hv: node %pOF has missing or "
"malformed parent\n", np);
kfree(dbisr);
continue;
}
ret = request_threaded_irq(irq, fsl_hv_state_change_isr,
fsl_hv_state_change_thread,
0, np->name, dbisr);
} else
ret = request_irq(irq, fsl_hv_isr, 0, np->name, dbisr);
if (ret < 0) {
pr_err("fsl-hv: could not request irq %u for node %pOF\n",
irq, np);
kfree(dbisr);
continue;
}
list_add(&dbisr->list, &isr_list);
pr_info("fsl-hv: registered handler for doorbell %u\n",
dbisr->doorbell);
}
return 0;
out_of_memory:
list_for_each_entry_safe(dbisr, n, &isr_list, list) {
free_irq(dbisr->irq, dbisr);
list_del(&dbisr->list);
kfree(dbisr);
}
misc_deregister(&fsl_hv_misc_dev);
return -ENOMEM;
}
/*
* Freescale hypervisor management driver termination
*
* This function is called when this driver is unloaded.
*/
static void __exit fsl_hypervisor_exit(void)
{
struct doorbell_isr *dbisr, *n;
list_for_each_entry_safe(dbisr, n, &isr_list, list) {
free_irq(dbisr->irq, dbisr);
list_del(&dbisr->list);
kfree(dbisr);
}
misc_deregister(&fsl_hv_misc_dev);
}
module_init(fsl_hypervisor_init);
module_exit(fsl_hypervisor_exit);
MODULE_AUTHOR("Timur Tabi <[email protected]>");
MODULE_DESCRIPTION("Freescale hypervisor management driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/virt/fsl_hypervisor.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2020-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*/
/**
* DOC: Enclave lifetime management driver for Nitro Enclaves (NE).
* Nitro is a hypervisor that has been developed by Amazon.
*/
#include <linux/anon_inodes.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/hugetlb.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nitro_enclaves.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/range.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <uapi/linux/vm_sockets.h>
#include "ne_misc_dev.h"
#include "ne_pci_dev.h"
/**
* NE_CPUS_SIZE - Size for max 128 CPUs, for now, in a cpu-list string, comma
* separated. The NE CPU pool includes CPUs from a single NUMA
* node.
*/
#define NE_CPUS_SIZE (512)
/**
* NE_EIF_LOAD_OFFSET - The offset where to copy the Enclave Image Format (EIF)
* image in enclave memory.
*/
#define NE_EIF_LOAD_OFFSET (8 * 1024UL * 1024UL)
/**
* NE_MIN_ENCLAVE_MEM_SIZE - The minimum memory size an enclave can be launched
* with.
*/
#define NE_MIN_ENCLAVE_MEM_SIZE (64 * 1024UL * 1024UL)
/**
* NE_MIN_MEM_REGION_SIZE - The minimum size of an enclave memory region.
*/
#define NE_MIN_MEM_REGION_SIZE (2 * 1024UL * 1024UL)
/**
* NE_PARENT_VM_CID - The CID for the vsock device of the primary / parent VM.
*/
#define NE_PARENT_VM_CID (3)
static long ne_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static const struct file_operations ne_fops = {
.owner = THIS_MODULE,
.llseek = noop_llseek,
.unlocked_ioctl = ne_ioctl,
};
static struct miscdevice ne_misc_dev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "nitro_enclaves",
.fops = &ne_fops,
.mode = 0660,
};
struct ne_devs ne_devs = {
.ne_misc_dev = &ne_misc_dev,
};
/*
* TODO: Update logic to create new sysfs entries instead of using
* a kernel parameter e.g. if multiple sysfs files needed.
*/
static int ne_set_kernel_param(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops ne_cpu_pool_ops = {
.get = param_get_string,
.set = ne_set_kernel_param,
};
static char ne_cpus[NE_CPUS_SIZE];
static struct kparam_string ne_cpus_arg = {
.maxlen = sizeof(ne_cpus),
.string = ne_cpus,
};
module_param_cb(ne_cpus, &ne_cpu_pool_ops, &ne_cpus_arg, 0644);
/* https://www.kernel.org/doc/html/latest/admin-guide/kernel-parameters.html#cpu-lists */
MODULE_PARM_DESC(ne_cpus, "<cpu-list> - CPU pool used for Nitro Enclaves");
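/*
* Example usage (assuming the module is loaded as "nitro_enclaves" and
* CPUs 2-5 form full cores on the same NUMA node):
*
*	echo "2-5" > /sys/module/nitro_enclaves/parameters/ne_cpus
*
* On setup failure an empty string is written back to the parameter,
* see ne_set_kernel_param().
*/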
/**
* struct ne_cpu_pool - CPU pool used for Nitro Enclaves.
* @avail_threads_per_core: Available full CPU cores to be dedicated to
* enclave(s). The cpumasks from the array, indexed
* by core id, contain all the threads from the
* available cores that are not assigned to created
* enclave(s). The full CPU cores are part of the
* NE CPU pool.
* @mutex: Mutex for the access to the NE CPU pool.
* @nr_parent_vm_cores : The size of the available threads per core array.
* The total number of CPU cores available on the
* primary / parent VM.
* @nr_threads_per_core: The number of threads that a full CPU core has.
* @numa_node: NUMA node of the CPUs in the pool.
*/
struct ne_cpu_pool {
cpumask_var_t *avail_threads_per_core;
struct mutex mutex;
unsigned int nr_parent_vm_cores;
unsigned int nr_threads_per_core;
int numa_node;
};
static struct ne_cpu_pool ne_cpu_pool;
/**
* struct ne_phys_contig_mem_regions - Contiguous physical memory regions.
* @num: The number of regions currently in the array.
* @regions: The array of physical memory regions.
*/
struct ne_phys_contig_mem_regions {
unsigned long num;
struct range *regions;
};
/**
* ne_check_enclaves_created() - Verify if at least one enclave has been created.
* @void: No parameters provided.
*
* Context: Process context.
* Return:
* * True if at least one enclave is created.
* * False otherwise.
*/
static bool ne_check_enclaves_created(void)
{
struct ne_pci_dev *ne_pci_dev = ne_devs.ne_pci_dev;
bool ret = false;
if (!ne_pci_dev)
return ret;
mutex_lock(&ne_pci_dev->enclaves_list_mutex);
if (!list_empty(&ne_pci_dev->enclaves_list))
ret = true;
mutex_unlock(&ne_pci_dev->enclaves_list_mutex);
return ret;
}
/**
* ne_setup_cpu_pool() - Set the NE CPU pool after handling sanity checks such
* as not sharing CPU cores with the primary / parent VM
* or not using CPU 0, which should remain available for
* the primary / parent VM. Offline the CPUs from the
* pool after the checks passed.
* @ne_cpu_list: The CPU list used for setting NE CPU pool.
*
* Context: Process context.
* Return:
* * 0 on success.
* * Negative return value on failure.
*/
static int ne_setup_cpu_pool(const char *ne_cpu_list)
{
int core_id = -1;
unsigned int cpu = 0;
cpumask_var_t cpu_pool;
unsigned int cpu_sibling = 0;
unsigned int i = 0;
int numa_node = -1;
int rc = -EINVAL;
if (!zalloc_cpumask_var(&cpu_pool, GFP_KERNEL))
return -ENOMEM;
mutex_lock(&ne_cpu_pool.mutex);
rc = cpulist_parse(ne_cpu_list, cpu_pool);
if (rc < 0) {
pr_err("%s: Error in cpulist parse [rc=%d]\n", ne_misc_dev.name, rc);
goto free_pool_cpumask;
}
cpu = cpumask_any(cpu_pool);
if (cpu >= nr_cpu_ids) {
pr_err("%s: No CPUs available in CPU pool\n", ne_misc_dev.name);
rc = -EINVAL;
goto free_pool_cpumask;
}
/*
* Check if the CPUs are online, to further get info about them
* e.g. numa node, core id, siblings.
*/
for_each_cpu(cpu, cpu_pool)
if (cpu_is_offline(cpu)) {
pr_err("%s: CPU %d is offline, has to be online to get its metadata\n",
ne_misc_dev.name, cpu);
rc = -EINVAL;
goto free_pool_cpumask;
}
/*
* Check if the CPUs from the NE CPU pool are from the same NUMA node.
*/
for_each_cpu(cpu, cpu_pool)
if (numa_node < 0) {
numa_node = cpu_to_node(cpu);
if (numa_node < 0) {
pr_err("%s: Invalid NUMA node %d\n",
ne_misc_dev.name, numa_node);
rc = -EINVAL;
goto free_pool_cpumask;
}
} else {
if (numa_node != cpu_to_node(cpu)) {
pr_err("%s: CPUs with different NUMA nodes\n",
ne_misc_dev.name);
rc = -EINVAL;
goto free_pool_cpumask;
}
}
/*
* Check if CPU 0 and its siblings are included in the provided CPU pool.
* They should remain available for the primary / parent VM.
*/
if (cpumask_test_cpu(0, cpu_pool)) {
pr_err("%s: CPU 0 has to remain available\n", ne_misc_dev.name);
rc = -EINVAL;
goto free_pool_cpumask;
}
for_each_cpu(cpu_sibling, topology_sibling_cpumask(0)) {
if (cpumask_test_cpu(cpu_sibling, cpu_pool)) {
pr_err("%s: CPU sibling %d for CPU 0 is in CPU pool\n",
ne_misc_dev.name, cpu_sibling);
rc = -EINVAL;
goto free_pool_cpumask;
}
}
/*
* Check if CPU siblings are included in the provided CPU pool. The
* expectation is that full CPU cores are made available in the CPU pool
* for enclaves.
*/
for_each_cpu(cpu, cpu_pool) {
for_each_cpu(cpu_sibling, topology_sibling_cpumask(cpu)) {
if (!cpumask_test_cpu(cpu_sibling, cpu_pool)) {
pr_err("%s: CPU %d is not in CPU pool\n",
ne_misc_dev.name, cpu_sibling);
rc = -EINVAL;
goto free_pool_cpumask;
}
}
}
/* Calculate the number of threads from a full CPU core. */
cpu = cpumask_any(cpu_pool);
for_each_cpu(cpu_sibling, topology_sibling_cpumask(cpu))
ne_cpu_pool.nr_threads_per_core++;
ne_cpu_pool.nr_parent_vm_cores = nr_cpu_ids / ne_cpu_pool.nr_threads_per_core;
ne_cpu_pool.avail_threads_per_core = kcalloc(ne_cpu_pool.nr_parent_vm_cores,
sizeof(*ne_cpu_pool.avail_threads_per_core),
GFP_KERNEL);
if (!ne_cpu_pool.avail_threads_per_core) {
rc = -ENOMEM;
goto free_pool_cpumask;
}
for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
if (!zalloc_cpumask_var(&ne_cpu_pool.avail_threads_per_core[i], GFP_KERNEL)) {
rc = -ENOMEM;
goto free_cores_cpumask;
}
/*
* Split the NE CPU pool in threads per core to keep the CPU topology
* after offlining the CPUs.
*/
for_each_cpu(cpu, cpu_pool) {
core_id = topology_core_id(cpu);
if (core_id < 0 || core_id >= ne_cpu_pool.nr_parent_vm_cores) {
pr_err("%s: Invalid core id %d for CPU %d\n",
ne_misc_dev.name, core_id, cpu);
rc = -EINVAL;
goto clear_cpumask;
}
cpumask_set_cpu(cpu, ne_cpu_pool.avail_threads_per_core[core_id]);
}
/*
* CPUs that are given to enclave(s) should not be considered online
* by Linux anymore, as the hypervisor will degrade them to floating.
* The physical CPUs (full cores) are carved out of the primary / parent
* VM and given to the enclave VM. The same number of vCPUs would run
* on less pCPUs for the primary / parent VM.
*
* We offline them here so as not to degrade performance, and so that
* Linux and user space see the correct topology.
*/
for_each_cpu(cpu, cpu_pool) {
rc = remove_cpu(cpu);
if (rc != 0) {
pr_err("%s: CPU %d is not offlined [rc=%d]\n",
ne_misc_dev.name, cpu, rc);
goto online_cpus;
}
}
free_cpumask_var(cpu_pool);
ne_cpu_pool.numa_node = numa_node;
mutex_unlock(&ne_cpu_pool.mutex);
return 0;
online_cpus:
for_each_cpu(cpu, cpu_pool)
add_cpu(cpu);
clear_cpumask:
for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
cpumask_clear(ne_cpu_pool.avail_threads_per_core[i]);
free_cores_cpumask:
for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
free_cpumask_var(ne_cpu_pool.avail_threads_per_core[i]);
kfree(ne_cpu_pool.avail_threads_per_core);
free_pool_cpumask:
free_cpumask_var(cpu_pool);
ne_cpu_pool.nr_parent_vm_cores = 0;
ne_cpu_pool.nr_threads_per_core = 0;
ne_cpu_pool.numa_node = -1;
mutex_unlock(&ne_cpu_pool.mutex);
return rc;
}
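/*
* Worked example (hypothetical topology): on a parent VM with 8 CPUs
* and 2 threads per core, where CPUs 2 and 6 are the siblings of core 1
* and CPUs 3 and 7 are the siblings of core 2, the pool string
* "2-3,6-7" passes the checks above. The result is:
*
*	nr_threads_per_core = 2
*	nr_parent_vm_cores  = 4  (8 / 2)
*	avail_threads_per_core[1] = { 2, 6 }
*	avail_threads_per_core[2] = { 3, 7 }
*
* and CPUs 2, 3, 6 and 7 are offlined from the parent VM.
*/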
/**
* ne_teardown_cpu_pool() - Online the CPUs from the NE CPU pool and cleanup the
* CPU pool.
* @void: No parameters provided.
*
* Context: Process context.
*/
static void ne_teardown_cpu_pool(void)
{
unsigned int cpu = 0;
unsigned int i = 0;
int rc = -EINVAL;
mutex_lock(&ne_cpu_pool.mutex);
if (!ne_cpu_pool.nr_parent_vm_cores) {
mutex_unlock(&ne_cpu_pool.mutex);
return;
}
for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++) {
for_each_cpu(cpu, ne_cpu_pool.avail_threads_per_core[i]) {
rc = add_cpu(cpu);
if (rc != 0)
pr_err("%s: CPU %d is not onlined [rc=%d]\n",
ne_misc_dev.name, cpu, rc);
}
cpumask_clear(ne_cpu_pool.avail_threads_per_core[i]);
free_cpumask_var(ne_cpu_pool.avail_threads_per_core[i]);
}
kfree(ne_cpu_pool.avail_threads_per_core);
ne_cpu_pool.nr_parent_vm_cores = 0;
ne_cpu_pool.nr_threads_per_core = 0;
ne_cpu_pool.numa_node = -1;
mutex_unlock(&ne_cpu_pool.mutex);
}
/**
* ne_set_kernel_param() - Set the NE CPU pool value via the NE kernel parameter.
* @val: NE CPU pool string value.
* @kp : NE kernel parameter associated with the NE CPU pool.
*
* Context: Process context.
* Return:
* * 0 on success.
* * Negative return value on failure.
*/
static int ne_set_kernel_param(const char *val, const struct kernel_param *kp)
{
char error_val[] = "";
int rc = -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (ne_check_enclaves_created()) {
pr_err("%s: The CPU pool is used by enclave(s)\n", ne_misc_dev.name);
return -EPERM;
}
ne_teardown_cpu_pool();
rc = ne_setup_cpu_pool(val);
if (rc < 0) {
pr_err("%s: Error in setup CPU pool [rc=%d]\n", ne_misc_dev.name, rc);
param_set_copystring(error_val, kp);
return rc;
}
rc = param_set_copystring(val, kp);
if (rc < 0) {
pr_err("%s: Error in param set copystring [rc=%d]\n", ne_misc_dev.name, rc);
ne_teardown_cpu_pool();
param_set_copystring(error_val, kp);
return rc;
}
return 0;
}
/**
* ne_donated_cpu() - Check if the provided CPU is already used by the enclave.
* @ne_enclave : Private data associated with the current enclave.
* @cpu: CPU to check if already used.
*
* Context: Process context. This function is called with the ne_enclave mutex held.
* Return:
* * True if the provided CPU is already used by the enclave.
* * False otherwise.
*/
static bool ne_donated_cpu(struct ne_enclave *ne_enclave, unsigned int cpu)
{
if (cpumask_test_cpu(cpu, ne_enclave->vcpu_ids))
return true;
return false;
}
/**
* ne_get_unused_core_from_cpu_pool() - Get the id of a full core from the
* NE CPU pool.
* @void: No parameters provided.
*
* Context: Process context. This function is called with the ne_enclave and
* ne_cpu_pool mutexes held.
* Return:
* * Core id.
* * -1 if no CPU core available in the pool.
*/
static int ne_get_unused_core_from_cpu_pool(void)
{
int core_id = -1;
unsigned int i = 0;
for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
if (!cpumask_empty(ne_cpu_pool.avail_threads_per_core[i])) {
core_id = i;
break;
}
return core_id;
}
/**
* ne_set_enclave_threads_per_core() - Set the threads of the provided core in
* the enclave data structure.
* @ne_enclave : Private data associated with the current enclave.
* @core_id: Core id to get its threads from the NE CPU pool.
* @vcpu_id: vCPU id part of the provided core.
*
* Context: Process context. This function is called with the ne_enclave and
* ne_cpu_pool mutexes held.
* Return:
* * 0 on success.
* * Negative return value on failure.
*/
static int ne_set_enclave_threads_per_core(struct ne_enclave *ne_enclave,
int core_id, u32 vcpu_id)
{
unsigned int cpu = 0;
if (core_id < 0 && vcpu_id == 0) {
dev_err_ratelimited(ne_misc_dev.this_device,
"No CPUs available in NE CPU pool\n");
return -NE_ERR_NO_CPUS_AVAIL_IN_POOL;
}
if (core_id < 0) {
dev_err_ratelimited(ne_misc_dev.this_device,
"CPU %d is not in NE CPU pool\n", vcpu_id);
return -NE_ERR_VCPU_NOT_IN_CPU_POOL;
}
if (core_id >= ne_enclave->nr_parent_vm_cores) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Invalid core id %d - ne_enclave\n", core_id);
return -NE_ERR_VCPU_INVALID_CPU_CORE;
}
for_each_cpu(cpu, ne_cpu_pool.avail_threads_per_core[core_id])
cpumask_set_cpu(cpu, ne_enclave->threads_per_core[core_id]);
cpumask_clear(ne_cpu_pool.avail_threads_per_core[core_id]);
return 0;
}
/**
* ne_get_cpu_from_cpu_pool() - Get a CPU from the NE CPU pool, either from the
* remaining sibling(s) of a CPU core or the first
* sibling of a new CPU core.
* @ne_enclave : Private data associated with the current enclave.
* @vcpu_id: vCPU to get from the NE CPU pool.
*
* Context: Process context. This function is called with the ne_enclave mutex held.
* Return:
* * 0 on success.
* * Negative return value on failure.
*/
static int ne_get_cpu_from_cpu_pool(struct ne_enclave *ne_enclave, u32 *vcpu_id)
{
int core_id = -1;
unsigned int cpu = 0;
unsigned int i = 0;
int rc = -EINVAL;
/*
* If a thread of a core was previously allocated to this enclave, first
* check the remaining sibling(s) for new CPU allocations, so that full
* CPU cores are used for the enclave.
*/
for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++)
for_each_cpu(cpu, ne_enclave->threads_per_core[i])
if (!ne_donated_cpu(ne_enclave, cpu)) {
*vcpu_id = cpu;
return 0;
}
mutex_lock(&ne_cpu_pool.mutex);
/*
* If no remaining siblings, get a core from the NE CPU pool and keep
* track of all the threads in the enclave threads per core data structure.
*/
core_id = ne_get_unused_core_from_cpu_pool();
rc = ne_set_enclave_threads_per_core(ne_enclave, core_id, *vcpu_id);
if (rc < 0)
goto unlock_mutex;
*vcpu_id = cpumask_any(ne_enclave->threads_per_core[core_id]);
rc = 0;
unlock_mutex:
mutex_unlock(&ne_cpu_pool.mutex);
return rc;
}
/**
* ne_get_vcpu_core_from_cpu_pool() - Get from the NE CPU pool the id of the
* core associated with the provided vCPU.
* @vcpu_id: Provided vCPU id to get its associated core id.
*
* Context: Process context. This function is called with the ne_enclave and
* ne_cpu_pool mutexes held.
* Return:
* * Core id.
* * -1 if the provided vCPU is not in the pool.
*/
static int ne_get_vcpu_core_from_cpu_pool(u32 vcpu_id)
{
int core_id = -1;
unsigned int i = 0;
for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
if (cpumask_test_cpu(vcpu_id, ne_cpu_pool.avail_threads_per_core[i])) {
core_id = i;
break;
}
return core_id;
}
/**
* ne_check_cpu_in_cpu_pool() - Check if the given vCPU is in the available CPUs
* from the pool.
* @ne_enclave : Private data associated with the current enclave.
* @vcpu_id: ID of the vCPU to check if available in the NE CPU pool.
*
* Context: Process context. This function is called with the ne_enclave mutex held.
* Return:
* * 0 on success.
* * Negative return value on failure.
*/
static int ne_check_cpu_in_cpu_pool(struct ne_enclave *ne_enclave, u32 vcpu_id)
{
int core_id = -1;
unsigned int i = 0;
int rc = -EINVAL;
if (ne_donated_cpu(ne_enclave, vcpu_id)) {
dev_err_ratelimited(ne_misc_dev.this_device,
"CPU %d already used\n", vcpu_id);
return -NE_ERR_VCPU_ALREADY_USED;
}
/*
* If a thread of a core was previously allocated to this enclave, but
* not the full core, first check the remaining sibling(s).
*/
for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++)
if (cpumask_test_cpu(vcpu_id, ne_enclave->threads_per_core[i]))
return 0;
mutex_lock(&ne_cpu_pool.mutex);
/*
* If no remaining siblings, get from the NE CPU pool the core
* associated with the vCPU and keep track of all the threads in the
* enclave threads per core data structure.
*/
core_id = ne_get_vcpu_core_from_cpu_pool(vcpu_id);
rc = ne_set_enclave_threads_per_core(ne_enclave, core_id, vcpu_id);
if (rc < 0)
goto unlock_mutex;
rc = 0;
unlock_mutex:
mutex_unlock(&ne_cpu_pool.mutex);
return rc;
}
/**
* ne_add_vcpu_ioctl() - Add a vCPU to the slot associated with the current
* enclave.
* @ne_enclave : Private data associated with the current enclave.
* @vcpu_id: ID of the CPU to be associated with the given slot,
* apic id on x86.
*
* Context: Process context. This function is called with the ne_enclave mutex held.
* Return:
* * 0 on success.
* * Negative return value on failure.
*/
static int ne_add_vcpu_ioctl(struct ne_enclave *ne_enclave, u32 vcpu_id)
{
struct ne_pci_dev_cmd_reply cmd_reply = {};
struct pci_dev *pdev = ne_devs.ne_pci_dev->pdev;
int rc = -EINVAL;
struct slot_add_vcpu_req slot_add_vcpu_req = {};
if (ne_enclave->mm != current->mm)
return -EIO;
slot_add_vcpu_req.slot_uid = ne_enclave->slot_uid;
slot_add_vcpu_req.vcpu_id = vcpu_id;
rc = ne_do_request(pdev, SLOT_ADD_VCPU,
&slot_add_vcpu_req, sizeof(slot_add_vcpu_req),
&cmd_reply, sizeof(cmd_reply));
if (rc < 0) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Error in slot add vCPU [rc=%d]\n", rc);
return rc;
}
cpumask_set_cpu(vcpu_id, ne_enclave->vcpu_ids);
ne_enclave->nr_vcpus++;
return 0;
}
/**
* ne_sanity_check_user_mem_region() - Sanity check the user space memory
* region received during the set user
* memory region ioctl call.
* @ne_enclave : Private data associated with the current enclave.
* @mem_region : User space memory region to be sanity checked.
*
* Context: Process context. This function is called with the ne_enclave mutex held.
* Return:
* * 0 on success.
* * Negative return value on failure.
*/
static int ne_sanity_check_user_mem_region(struct ne_enclave *ne_enclave,
struct ne_user_memory_region mem_region)
{
struct ne_mem_region *ne_mem_region = NULL;
if (ne_enclave->mm != current->mm)
return -EIO;
if (mem_region.memory_size & (NE_MIN_MEM_REGION_SIZE - 1)) {
dev_err_ratelimited(ne_misc_dev.this_device,
"User space memory size is not multiple of 2 MiB\n");
return -NE_ERR_INVALID_MEM_REGION_SIZE;
}
if (!IS_ALIGNED(mem_region.userspace_addr, NE_MIN_MEM_REGION_SIZE)) {
dev_err_ratelimited(ne_misc_dev.this_device,
"User space address is not 2 MiB aligned\n");
return -NE_ERR_UNALIGNED_MEM_REGION_ADDR;
}
if ((mem_region.userspace_addr & (NE_MIN_MEM_REGION_SIZE - 1)) ||
!access_ok((void __user *)(unsigned long)mem_region.userspace_addr,
mem_region.memory_size)) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Invalid user space address range\n");
return -NE_ERR_INVALID_MEM_REGION_ADDR;
}
list_for_each_entry(ne_mem_region, &ne_enclave->mem_regions_list,
mem_region_list_entry) {
u64 memory_size = ne_mem_region->memory_size;
u64 userspace_addr = ne_mem_region->userspace_addr;
if ((userspace_addr <= mem_region.userspace_addr &&
mem_region.userspace_addr < (userspace_addr + memory_size)) ||
(mem_region.userspace_addr <= userspace_addr &&
(mem_region.userspace_addr + mem_region.memory_size) > userspace_addr)) {
dev_err_ratelimited(ne_misc_dev.this_device,
"User space memory region already used\n");
return -NE_ERR_MEM_REGION_ALREADY_USED;
}
}
return 0;
}
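/*
* Overlap check example: with an existing region [0x0, 0x400000) in the
* list, a new region starting at 0x200000 fails with
* NE_ERR_MEM_REGION_ALREADY_USED, while a new region starting at
* 0x400000 passes this check. The addresses are user space virtual
* addresses and purely illustrative.
*/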
/**
* ne_sanity_check_user_mem_region_page() - Sanity check a page from the user space
* memory region received during the set
* user memory region ioctl call.
* @ne_enclave : Private data associated with the current enclave.
* @mem_region_page: Page from the user space memory region to be sanity checked.
*
* Context: Process context. This function is called with the ne_enclave mutex held.
* Return:
* * 0 on success.
* * Negative return value on failure.
*/
static int ne_sanity_check_user_mem_region_page(struct ne_enclave *ne_enclave,
struct page *mem_region_page)
{
if (!PageHuge(mem_region_page)) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Not a hugetlbfs page\n");
return -NE_ERR_MEM_NOT_HUGE_PAGE;
}
if (page_size(mem_region_page) & (NE_MIN_MEM_REGION_SIZE - 1)) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Page size not multiple of 2 MiB\n");
return -NE_ERR_INVALID_PAGE_SIZE;
}
if (ne_enclave->numa_node != page_to_nid(mem_region_page)) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Page is not from NUMA node %d\n",
ne_enclave->numa_node);
return -NE_ERR_MEM_DIFFERENT_NUMA_NODE;
}
return 0;
}
/**
* ne_sanity_check_phys_mem_region() - Sanity check the start address and the size
* of a physical memory region.
* @phys_mem_region_paddr : Physical start address of the region to be sanity checked.
* @phys_mem_region_size : Length of the region to be sanity checked.
*
* Context: Process context. This function is called with the ne_enclave mutex held.
* Return:
* * 0 on success.
* * Negative return value on failure.
*/
static int ne_sanity_check_phys_mem_region(u64 phys_mem_region_paddr,
u64 phys_mem_region_size)
{
if (phys_mem_region_size & (NE_MIN_MEM_REGION_SIZE - 1)) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Physical mem region size is not multiple of 2 MiB\n");
return -EINVAL;
}
if (!IS_ALIGNED(phys_mem_region_paddr, NE_MIN_MEM_REGION_SIZE)) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Physical mem region address is not 2 MiB aligned\n");
return -EINVAL;
}
return 0;
}
/**
* ne_merge_phys_contig_memory_regions() - Add a memory region and merge the adjacent
* regions if they are physically contiguous.
* @phys_contig_regions : Private data associated with the contiguous physical memory regions.
* @page_paddr : Physical start address of the region to be added.
* @page_size : Length of the region to be added.
*
* Context: Process context. This function is called with the ne_enclave mutex held.
* Return:
* * 0 on success.
* * Negative return value on failure.
*/
static int
ne_merge_phys_contig_memory_regions(struct ne_phys_contig_mem_regions *phys_contig_regions,
u64 page_paddr, u64 page_size)
{
unsigned long num = phys_contig_regions->num;
int rc = 0;
rc = ne_sanity_check_phys_mem_region(page_paddr, page_size);
if (rc < 0)
return rc;
/* Physically contiguous, just merge */
if (num && (phys_contig_regions->regions[num - 1].end + 1) == page_paddr) {
phys_contig_regions->regions[num - 1].end += page_size;
} else {
phys_contig_regions->regions[num].start = page_paddr;
phys_contig_regions->regions[num].end = page_paddr + page_size - 1;
phys_contig_regions->num++;
}
return 0;
}
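/*
* Merge example: for 2 MiB pages at physical addresses 0x200000 and
* 0x400000, added in this order, the first call creates the region
* [0x200000, 0x3fffff] and the second extends it to
* [0x200000, 0x5fffff]. A page at 0x800000 would instead start a new
* region, as it is not adjacent to the previous one. The addresses are
* illustrative.
*/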
/**
* ne_set_user_memory_region_ioctl() - Add user space memory region to the slot
* associated with the current enclave.
* @ne_enclave : Private data associated with the current enclave.
* @mem_region : User space memory region to be associated with the given slot.
*
* Context: Process context. This function is called with the ne_enclave mutex held.
* Return:
* * 0 on success.
* * Negative return value on failure.
*/
static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
struct ne_user_memory_region mem_region)
{
long gup_rc = 0;
unsigned long i = 0;
unsigned long max_nr_pages = 0;
unsigned long memory_size = 0;
struct ne_mem_region *ne_mem_region = NULL;
struct pci_dev *pdev = ne_devs.ne_pci_dev->pdev;
struct ne_phys_contig_mem_regions phys_contig_mem_regions = {};
int rc = -EINVAL;
rc = ne_sanity_check_user_mem_region(ne_enclave, mem_region);
if (rc < 0)
return rc;
ne_mem_region = kzalloc(sizeof(*ne_mem_region), GFP_KERNEL);
if (!ne_mem_region)
return -ENOMEM;
max_nr_pages = mem_region.memory_size / NE_MIN_MEM_REGION_SIZE;
ne_mem_region->pages = kcalloc(max_nr_pages, sizeof(*ne_mem_region->pages),
GFP_KERNEL);
if (!ne_mem_region->pages) {
rc = -ENOMEM;
goto free_mem_region;
}
phys_contig_mem_regions.regions = kcalloc(max_nr_pages,
sizeof(*phys_contig_mem_regions.regions),
GFP_KERNEL);
if (!phys_contig_mem_regions.regions) {
rc = -ENOMEM;
goto free_mem_region;
}
do {
i = ne_mem_region->nr_pages;
if (i == max_nr_pages) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Reached max nr of pages in the pages data struct\n");
rc = -ENOMEM;
goto put_pages;
}
gup_rc = get_user_pages_unlocked(mem_region.userspace_addr + memory_size, 1,
ne_mem_region->pages + i, FOLL_GET);
if (gup_rc < 0) {
rc = gup_rc;
dev_err_ratelimited(ne_misc_dev.this_device,
"Error in get user pages [rc=%d]\n", rc);
goto put_pages;
}
rc = ne_sanity_check_user_mem_region_page(ne_enclave, ne_mem_region->pages[i]);
if (rc < 0)
goto put_pages;
rc = ne_merge_phys_contig_memory_regions(&phys_contig_mem_regions,
page_to_phys(ne_mem_region->pages[i]),
page_size(ne_mem_region->pages[i]));
if (rc < 0)
goto put_pages;
memory_size += page_size(ne_mem_region->pages[i]);
ne_mem_region->nr_pages++;
} while (memory_size < mem_region.memory_size);
if ((ne_enclave->nr_mem_regions + phys_contig_mem_regions.num) >
ne_enclave->max_mem_regions) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Reached max memory regions %lld\n",
ne_enclave->max_mem_regions);
rc = -NE_ERR_MEM_MAX_REGIONS;
goto put_pages;
}
for (i = 0; i < phys_contig_mem_regions.num; i++) {
u64 phys_region_addr = phys_contig_mem_regions.regions[i].start;
u64 phys_region_size = range_len(&phys_contig_mem_regions.regions[i]);
rc = ne_sanity_check_phys_mem_region(phys_region_addr, phys_region_size);
if (rc < 0)
goto put_pages;
}
ne_mem_region->memory_size = mem_region.memory_size;
ne_mem_region->userspace_addr = mem_region.userspace_addr;
list_add(&ne_mem_region->mem_region_list_entry, &ne_enclave->mem_regions_list);
for (i = 0; i < phys_contig_mem_regions.num; i++) {
struct ne_pci_dev_cmd_reply cmd_reply = {};
struct slot_add_mem_req slot_add_mem_req = {};
slot_add_mem_req.slot_uid = ne_enclave->slot_uid;
slot_add_mem_req.paddr = phys_contig_mem_regions.regions[i].start;
slot_add_mem_req.size = range_len(&phys_contig_mem_regions.regions[i]);
rc = ne_do_request(pdev, SLOT_ADD_MEM,
&slot_add_mem_req, sizeof(slot_add_mem_req),
&cmd_reply, sizeof(cmd_reply));
if (rc < 0) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Error in slot add mem [rc=%d]\n", rc);
kfree(phys_contig_mem_regions.regions);
/*
* Exit here without putting the pages, as some memory
* regions may already have been added.
*/
return rc;
}
ne_enclave->mem_size += slot_add_mem_req.size;
ne_enclave->nr_mem_regions++;
}
kfree(phys_contig_mem_regions.regions);
return 0;
put_pages:
for (i = 0; i < ne_mem_region->nr_pages; i++)
put_page(ne_mem_region->pages[i]);
free_mem_region:
kfree(phys_contig_mem_regions.regions);
kfree(ne_mem_region->pages);
kfree(ne_mem_region);
return rc;
}
/**
* ne_start_enclave_ioctl() - Trigger enclave start after the enclave resources,
* such as memory and CPU, have been set.
* @ne_enclave : Private data associated with the current enclave.
* @enclave_start_info : Enclave info that includes enclave cid and flags.
*
* Context: Process context. This function is called with the ne_enclave mutex held.
* Return:
* * 0 on success.
* * Negative return value on failure.
*/
static int ne_start_enclave_ioctl(struct ne_enclave *ne_enclave,
struct ne_enclave_start_info *enclave_start_info)
{
struct ne_pci_dev_cmd_reply cmd_reply = {};
unsigned int cpu = 0;
struct enclave_start_req enclave_start_req = {};
unsigned int i = 0;
struct pci_dev *pdev = ne_devs.ne_pci_dev->pdev;
int rc = -EINVAL;
if (!ne_enclave->nr_mem_regions) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Enclave has no mem regions\n");
return -NE_ERR_NO_MEM_REGIONS_ADDED;
}
if (ne_enclave->mem_size < NE_MIN_ENCLAVE_MEM_SIZE) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Enclave memory is less than %ld\n",
NE_MIN_ENCLAVE_MEM_SIZE);
return -NE_ERR_ENCLAVE_MEM_MIN_SIZE;
}
if (!ne_enclave->nr_vcpus) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Enclave has no vCPUs\n");
return -NE_ERR_NO_VCPUS_ADDED;
}
for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++)
for_each_cpu(cpu, ne_enclave->threads_per_core[i])
if (!cpumask_test_cpu(cpu, ne_enclave->vcpu_ids)) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Full CPU cores not used\n");
return -NE_ERR_FULL_CORES_NOT_USED;
}
enclave_start_req.enclave_cid = enclave_start_info->enclave_cid;
enclave_start_req.flags = enclave_start_info->flags;
enclave_start_req.slot_uid = ne_enclave->slot_uid;
rc = ne_do_request(pdev, ENCLAVE_START,
&enclave_start_req, sizeof(enclave_start_req),
&cmd_reply, sizeof(cmd_reply));
if (rc < 0) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Error in enclave start [rc=%d]\n", rc);
return rc;
}
ne_enclave->state = NE_STATE_RUNNING;
enclave_start_info->enclave_cid = cmd_reply.enclave_cid;
return 0;
}
/**
* ne_enclave_ioctl() - Ioctl function provided by the enclave file.
* @file: File associated with this ioctl function.
* @cmd: The command that is set for the ioctl call.
* @arg: The argument that is provided for the ioctl call.
*
* Context: Process context.
* Return:
* * 0 on success.
* * Negative return value on failure.
*/
static long ne_enclave_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct ne_enclave *ne_enclave = file->private_data;
switch (cmd) {
case NE_ADD_VCPU: {
int rc = -EINVAL;
u32 vcpu_id = 0;
if (copy_from_user(&vcpu_id, (void __user *)arg, sizeof(vcpu_id)))
return -EFAULT;
mutex_lock(&ne_enclave->enclave_info_mutex);
if (ne_enclave->state != NE_STATE_INIT) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Enclave is not in init state\n");
mutex_unlock(&ne_enclave->enclave_info_mutex);
return -NE_ERR_NOT_IN_INIT_STATE;
}
if (vcpu_id >= (ne_enclave->nr_parent_vm_cores *
ne_enclave->nr_threads_per_core)) {
dev_err_ratelimited(ne_misc_dev.this_device,
"vCPU id higher than max CPU id\n");
mutex_unlock(&ne_enclave->enclave_info_mutex);
return -NE_ERR_INVALID_VCPU;
}
if (!vcpu_id) {
/* Use the CPU pool for choosing a CPU for the enclave. */
rc = ne_get_cpu_from_cpu_pool(ne_enclave, &vcpu_id);
if (rc < 0) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Error in get CPU from pool [rc=%d]\n",
rc);
mutex_unlock(&ne_enclave->enclave_info_mutex);
return rc;
}
} else {
/* Check if the provided vCPU is available in the NE CPU pool. */
rc = ne_check_cpu_in_cpu_pool(ne_enclave, vcpu_id);
if (rc < 0) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Error in check CPU %d in pool [rc=%d]\n",
vcpu_id, rc);
mutex_unlock(&ne_enclave->enclave_info_mutex);
return rc;
}
}
rc = ne_add_vcpu_ioctl(ne_enclave, vcpu_id);
if (rc < 0) {
mutex_unlock(&ne_enclave->enclave_info_mutex);
return rc;
}
mutex_unlock(&ne_enclave->enclave_info_mutex);
if (copy_to_user((void __user *)arg, &vcpu_id, sizeof(vcpu_id)))
return -EFAULT;
return 0;
}
case NE_GET_IMAGE_LOAD_INFO: {
struct ne_image_load_info image_load_info = {};
if (copy_from_user(&image_load_info, (void __user *)arg, sizeof(image_load_info)))
return -EFAULT;
mutex_lock(&ne_enclave->enclave_info_mutex);
if (ne_enclave->state != NE_STATE_INIT) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Enclave is not in init state\n");
mutex_unlock(&ne_enclave->enclave_info_mutex);
return -NE_ERR_NOT_IN_INIT_STATE;
}
mutex_unlock(&ne_enclave->enclave_info_mutex);
if (!image_load_info.flags ||
image_load_info.flags >= NE_IMAGE_LOAD_MAX_FLAG_VAL) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Incorrect flag in enclave image load info\n");
return -NE_ERR_INVALID_FLAG_VALUE;
}
if (image_load_info.flags == NE_EIF_IMAGE)
image_load_info.memory_offset = NE_EIF_LOAD_OFFSET;
if (copy_to_user((void __user *)arg, &image_load_info, sizeof(image_load_info)))
return -EFAULT;
return 0;
}
case NE_SET_USER_MEMORY_REGION: {
struct ne_user_memory_region mem_region = {};
int rc = -EINVAL;
if (copy_from_user(&mem_region, (void __user *)arg, sizeof(mem_region)))
return -EFAULT;
if (mem_region.flags >= NE_MEMORY_REGION_MAX_FLAG_VAL) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Incorrect flag for user memory region\n");
return -NE_ERR_INVALID_FLAG_VALUE;
}
mutex_lock(&ne_enclave->enclave_info_mutex);
if (ne_enclave->state != NE_STATE_INIT) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Enclave is not in init state\n");
mutex_unlock(&ne_enclave->enclave_info_mutex);
return -NE_ERR_NOT_IN_INIT_STATE;
}
rc = ne_set_user_memory_region_ioctl(ne_enclave, mem_region);
if (rc < 0) {
mutex_unlock(&ne_enclave->enclave_info_mutex);
return rc;
}
mutex_unlock(&ne_enclave->enclave_info_mutex);
return 0;
}
case NE_START_ENCLAVE: {
struct ne_enclave_start_info enclave_start_info = {};
int rc = -EINVAL;
if (copy_from_user(&enclave_start_info, (void __user *)arg,
sizeof(enclave_start_info)))
return -EFAULT;
if (enclave_start_info.flags >= NE_ENCLAVE_START_MAX_FLAG_VAL) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Incorrect flag in enclave start info\n");
return -NE_ERR_INVALID_FLAG_VALUE;
}
/*
* Do not use well-known CIDs - 0, 1, 2 - for enclaves.
* VMADDR_CID_ANY = -1U
* VMADDR_CID_HYPERVISOR = 0
* VMADDR_CID_LOCAL = 1
* VMADDR_CID_HOST = 2
* Note: 0 is used as a placeholder to auto-generate an enclave CID.
* http://man7.org/linux/man-pages/man7/vsock.7.html
*/
if (enclave_start_info.enclave_cid > 0 &&
enclave_start_info.enclave_cid <= VMADDR_CID_HOST) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Well-known CID value, not to be used for enclaves\n");
return -NE_ERR_INVALID_ENCLAVE_CID;
}
if (enclave_start_info.enclave_cid == U32_MAX) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Well-known CID value, not to be used for enclaves\n");
return -NE_ERR_INVALID_ENCLAVE_CID;
}
/*
* Do not use the CID of the primary / parent VM for enclaves.
*/
if (enclave_start_info.enclave_cid == NE_PARENT_VM_CID) {
dev_err_ratelimited(ne_misc_dev.this_device,
"CID of the parent VM, not to be used for enclaves\n");
return -NE_ERR_INVALID_ENCLAVE_CID;
}
/* 64-bit CIDs are not yet supported for the vsock device. */
if (enclave_start_info.enclave_cid > U32_MAX) {
dev_err_ratelimited(ne_misc_dev.this_device,
"64-bit CIDs not yet supported for the vsock device\n");
return -NE_ERR_INVALID_ENCLAVE_CID;
}
mutex_lock(&ne_enclave->enclave_info_mutex);
if (ne_enclave->state != NE_STATE_INIT) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Enclave is not in init state\n");
mutex_unlock(&ne_enclave->enclave_info_mutex);
return -NE_ERR_NOT_IN_INIT_STATE;
}
rc = ne_start_enclave_ioctl(ne_enclave, &enclave_start_info);
if (rc < 0) {
mutex_unlock(&ne_enclave->enclave_info_mutex);
return rc;
}
mutex_unlock(&ne_enclave->enclave_info_mutex);
if (copy_to_user((void __user *)arg, &enclave_start_info,
sizeof(enclave_start_info)))
return -EFAULT;
return 0;
}
default:
return -ENOTTY;
}
return 0;
}
/**
* ne_enclave_remove_all_mem_region_entries() - Remove all memory region entries
* from the enclave data structure.
* @ne_enclave : Private data associated with the current enclave.
*
* Context: Process context. This function is called with the ne_enclave mutex held.
*/
static void ne_enclave_remove_all_mem_region_entries(struct ne_enclave *ne_enclave)
{
unsigned long i = 0;
struct ne_mem_region *ne_mem_region = NULL;
struct ne_mem_region *ne_mem_region_tmp = NULL;
list_for_each_entry_safe(ne_mem_region, ne_mem_region_tmp,
&ne_enclave->mem_regions_list,
mem_region_list_entry) {
list_del(&ne_mem_region->mem_region_list_entry);
for (i = 0; i < ne_mem_region->nr_pages; i++)
put_page(ne_mem_region->pages[i]);
kfree(ne_mem_region->pages);
kfree(ne_mem_region);
}
}
/**
* ne_enclave_remove_all_vcpu_id_entries() - Remove all vCPU id entries from
* the enclave data structure.
* @ne_enclave : Private data associated with the current enclave.
*
* Context: Process context. This function is called with the ne_enclave mutex held.
*/
static void ne_enclave_remove_all_vcpu_id_entries(struct ne_enclave *ne_enclave)
{
unsigned int cpu = 0;
unsigned int i = 0;
mutex_lock(&ne_cpu_pool.mutex);
for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++) {
for_each_cpu(cpu, ne_enclave->threads_per_core[i])
/* Update the available NE CPU pool. */
cpumask_set_cpu(cpu, ne_cpu_pool.avail_threads_per_core[i]);
free_cpumask_var(ne_enclave->threads_per_core[i]);
}
mutex_unlock(&ne_cpu_pool.mutex);
kfree(ne_enclave->threads_per_core);
free_cpumask_var(ne_enclave->vcpu_ids);
}
/**
* ne_pci_dev_remove_enclave_entry() - Remove the enclave entry from the data
* structure that is part of the NE PCI
* device private data.
* @ne_enclave : Private data associated with the current enclave.
* @ne_pci_dev : Private data associated with the PCI device.
*
* Context: Process context. This function is called with the ne_pci_dev enclave
* mutex held.
*/
static void ne_pci_dev_remove_enclave_entry(struct ne_enclave *ne_enclave,
struct ne_pci_dev *ne_pci_dev)
{
struct ne_enclave *ne_enclave_entry = NULL;
struct ne_enclave *ne_enclave_entry_tmp = NULL;
list_for_each_entry_safe(ne_enclave_entry, ne_enclave_entry_tmp,
&ne_pci_dev->enclaves_list, enclave_list_entry) {
if (ne_enclave_entry->slot_uid == ne_enclave->slot_uid) {
list_del(&ne_enclave_entry->enclave_list_entry);
break;
}
}
}
/**
* ne_enclave_release() - Release function provided by the enclave file.
* @inode: Inode associated with this file release function.
* @file: File associated with this release function.
*
* Context: Process context.
* Return:
* * 0 on success.
* * Negative return value on failure.
*/
static int ne_enclave_release(struct inode *inode, struct file *file)
{
struct ne_pci_dev_cmd_reply cmd_reply = {};
struct enclave_stop_req enclave_stop_request = {};
struct ne_enclave *ne_enclave = file->private_data;
struct ne_pci_dev *ne_pci_dev = ne_devs.ne_pci_dev;
struct pci_dev *pdev = ne_pci_dev->pdev;
int rc = -EINVAL;
struct slot_free_req slot_free_req = {};
if (!ne_enclave)
return 0;
/*
* Early exit in case there is an error in the enclave creation logic
* and fput() is called on the cleanup path.
*/
if (!ne_enclave->slot_uid)
return 0;
/*
* Acquire the enclave list mutex before the enclave mutex
* in order to avoid deadlocks with @ref ne_event_work_handler.
*/
mutex_lock(&ne_pci_dev->enclaves_list_mutex);
mutex_lock(&ne_enclave->enclave_info_mutex);
if (ne_enclave->state != NE_STATE_INIT && ne_enclave->state != NE_STATE_STOPPED) {
enclave_stop_request.slot_uid = ne_enclave->slot_uid;
rc = ne_do_request(pdev, ENCLAVE_STOP,
&enclave_stop_request, sizeof(enclave_stop_request),
&cmd_reply, sizeof(cmd_reply));
if (rc < 0) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Error in enclave stop [rc=%d]\n", rc);
goto unlock_mutex;
}
memset(&cmd_reply, 0, sizeof(cmd_reply));
}
slot_free_req.slot_uid = ne_enclave->slot_uid;
rc = ne_do_request(pdev, SLOT_FREE,
&slot_free_req, sizeof(slot_free_req),
&cmd_reply, sizeof(cmd_reply));
if (rc < 0) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Error in slot free [rc=%d]\n", rc);
goto unlock_mutex;
}
ne_pci_dev_remove_enclave_entry(ne_enclave, ne_pci_dev);
ne_enclave_remove_all_mem_region_entries(ne_enclave);
ne_enclave_remove_all_vcpu_id_entries(ne_enclave);
mutex_unlock(&ne_enclave->enclave_info_mutex);
mutex_unlock(&ne_pci_dev->enclaves_list_mutex);
kfree(ne_enclave);
return 0;
unlock_mutex:
mutex_unlock(&ne_enclave->enclave_info_mutex);
mutex_unlock(&ne_pci_dev->enclaves_list_mutex);
return rc;
}
/**
* ne_enclave_poll() - Poll functionality used for enclave out-of-band events.
* @file: File associated with this poll function.
* @wait: Poll table data structure.
*
* Context: Process context.
* Return:
* * Poll mask.
*/
static __poll_t ne_enclave_poll(struct file *file, poll_table *wait)
{
__poll_t mask = 0;
struct ne_enclave *ne_enclave = file->private_data;
poll_wait(file, &ne_enclave->eventq, wait);
if (ne_enclave->has_event)
mask |= EPOLLHUP;
return mask;
}
static const struct file_operations ne_enclave_fops = {
.owner = THIS_MODULE,
.llseek = noop_llseek,
.poll = ne_enclave_poll,
.unlocked_ioctl = ne_enclave_ioctl,
.release = ne_enclave_release,
};
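/*
* Example (hypothetical user-space sketch): waiting on the enclave fd
* for an out-of-band enclave state change notification. The handler
* name is illustrative only.
*
*	struct pollfd pfd = { .fd = enclave_fd, .events = 0 };
*
*	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLHUP))
*		handle_enclave_state_change();	// hypothetical handler
*/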
/**
* ne_create_vm_ioctl() - Alloc slot to be associated with an enclave. Create
* enclave file descriptor to be further used for enclave
* resources handling e.g. memory regions and CPUs.
* @ne_pci_dev : Private data associated with the PCI device.
* @slot_uid: User pointer to store the generated unique slot id
* associated with an enclave to.
*
* Context: Process context. This function is called with the ne_pci_dev enclave
* mutex held.
* Return:
* * Enclave fd on success.
* * Negative return value on failure.
*/
static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 __user *slot_uid)
{
struct ne_pci_dev_cmd_reply cmd_reply = {};
int enclave_fd = -1;
struct file *enclave_file = NULL;
unsigned int i = 0;
struct ne_enclave *ne_enclave = NULL;
struct pci_dev *pdev = ne_pci_dev->pdev;
int rc = -EINVAL;
struct slot_alloc_req slot_alloc_req = {};
mutex_lock(&ne_cpu_pool.mutex);
for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
if (!cpumask_empty(ne_cpu_pool.avail_threads_per_core[i]))
break;
if (i == ne_cpu_pool.nr_parent_vm_cores) {
dev_err_ratelimited(ne_misc_dev.this_device,
"No CPUs available in CPU pool\n");
mutex_unlock(&ne_cpu_pool.mutex);
return -NE_ERR_NO_CPUS_AVAIL_IN_POOL;
}
mutex_unlock(&ne_cpu_pool.mutex);
ne_enclave = kzalloc(sizeof(*ne_enclave), GFP_KERNEL);
if (!ne_enclave)
return -ENOMEM;
mutex_lock(&ne_cpu_pool.mutex);
ne_enclave->nr_parent_vm_cores = ne_cpu_pool.nr_parent_vm_cores;
ne_enclave->nr_threads_per_core = ne_cpu_pool.nr_threads_per_core;
ne_enclave->numa_node = ne_cpu_pool.numa_node;
mutex_unlock(&ne_cpu_pool.mutex);
ne_enclave->threads_per_core = kcalloc(ne_enclave->nr_parent_vm_cores,
sizeof(*ne_enclave->threads_per_core),
GFP_KERNEL);
if (!ne_enclave->threads_per_core) {
rc = -ENOMEM;
goto free_ne_enclave;
}
for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++)
if (!zalloc_cpumask_var(&ne_enclave->threads_per_core[i], GFP_KERNEL)) {
rc = -ENOMEM;
goto free_cpumask;
}
if (!zalloc_cpumask_var(&ne_enclave->vcpu_ids, GFP_KERNEL)) {
rc = -ENOMEM;
goto free_cpumask;
}
enclave_fd = get_unused_fd_flags(O_CLOEXEC);
if (enclave_fd < 0) {
rc = enclave_fd;
dev_err_ratelimited(ne_misc_dev.this_device,
"Error in getting unused fd [rc=%d]\n", rc);
goto free_cpumask;
}
enclave_file = anon_inode_getfile("ne-vm", &ne_enclave_fops, ne_enclave, O_RDWR);
if (IS_ERR(enclave_file)) {
rc = PTR_ERR(enclave_file);
dev_err_ratelimited(ne_misc_dev.this_device,
"Error in anon inode get file [rc=%d]\n", rc);
goto put_fd;
}
rc = ne_do_request(pdev, SLOT_ALLOC,
&slot_alloc_req, sizeof(slot_alloc_req),
&cmd_reply, sizeof(cmd_reply));
if (rc < 0) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Error in slot alloc [rc=%d]\n", rc);
goto put_file;
}
init_waitqueue_head(&ne_enclave->eventq);
ne_enclave->has_event = false;
mutex_init(&ne_enclave->enclave_info_mutex);
ne_enclave->max_mem_regions = cmd_reply.mem_regions;
INIT_LIST_HEAD(&ne_enclave->mem_regions_list);
ne_enclave->mm = current->mm;
ne_enclave->slot_uid = cmd_reply.slot_uid;
ne_enclave->state = NE_STATE_INIT;
list_add(&ne_enclave->enclave_list_entry, &ne_pci_dev->enclaves_list);
if (copy_to_user(slot_uid, &ne_enclave->slot_uid, sizeof(ne_enclave->slot_uid))) {
/*
* As we're holding the only reference to 'enclave_file', fput()
* will call ne_enclave_release() which will do a proper cleanup
* of all so far allocated resources, leaving only the unused fd
* for us to free.
*/
fput(enclave_file);
put_unused_fd(enclave_fd);
return -EFAULT;
}
fd_install(enclave_fd, enclave_file);
return enclave_fd;
put_file:
fput(enclave_file);
put_fd:
put_unused_fd(enclave_fd);
free_cpumask:
free_cpumask_var(ne_enclave->vcpu_ids);
for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++)
free_cpumask_var(ne_enclave->threads_per_core[i]);
kfree(ne_enclave->threads_per_core);
free_ne_enclave:
kfree(ne_enclave);
return rc;
}
/**
* ne_ioctl() - Ioctl function provided by the NE misc device.
* @file: File associated with this ioctl function.
* @cmd: The command that is set for the ioctl call.
* @arg: The argument that is provided for the ioctl call.
*
* Context: Process context.
* Return:
* * Ioctl result (e.g. enclave file descriptor) on success.
* * Negative return value on failure.
*/
static long ne_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case NE_CREATE_VM: {
int enclave_fd = -1;
struct ne_pci_dev *ne_pci_dev = ne_devs.ne_pci_dev;
u64 __user *slot_uid = (void __user *)arg;
mutex_lock(&ne_pci_dev->enclaves_list_mutex);
enclave_fd = ne_create_vm_ioctl(ne_pci_dev, slot_uid);
mutex_unlock(&ne_pci_dev->enclaves_list_mutex);
return enclave_fd;
}
default:
return -ENOTTY;
}
return 0;
}
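/*
* Example (hypothetical user-space sketch) of the setup flow driven
* through this interface; error handling is omitted and mem_region is
* a struct ne_user_memory_region filled in by the caller:
*
*	int ne_fd = open("/dev/nitro_enclaves", O_RDWR | O_CLOEXEC);
*	__u64 slot_uid = 0;
*	int enclave_fd = ioctl(ne_fd, NE_CREATE_VM, &slot_uid);
*
*	ioctl(enclave_fd, NE_SET_USER_MEMORY_REGION, &mem_region);
*	__u32 vcpu_id = 0;	// 0 lets the driver pick from the pool
*	ioctl(enclave_fd, NE_ADD_VCPU, &vcpu_id);
*	struct ne_enclave_start_info start_info = { .enclave_cid = 0 };
*	ioctl(enclave_fd, NE_START_ENCLAVE, &start_info);
*/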
#if defined(CONFIG_NITRO_ENCLAVES_MISC_DEV_TEST)
#include "ne_misc_dev_test.c"
#endif
static int __init ne_init(void)
{
mutex_init(&ne_cpu_pool.mutex);
return pci_register_driver(&ne_pci_driver);
}
static void __exit ne_exit(void)
{
pci_unregister_driver(&ne_pci_driver);
ne_teardown_cpu_pool();
}
module_init(ne_init);
module_exit(ne_exit);
MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION("Nitro Enclaves Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/virt/nitro_enclaves/ne_misc_dev.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2020-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*/
/**
* DOC: Nitro Enclaves (NE) PCI device driver.
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nitro_enclaves.h>
#include <linux/pci.h>
#include <linux/types.h>
#include <linux/wait.h>
#include "ne_misc_dev.h"
#include "ne_pci_dev.h"
/**
* NE_DEFAULT_TIMEOUT_MSECS - Default timeout to wait for a reply from
* the NE PCI device.
*/
#define NE_DEFAULT_TIMEOUT_MSECS (120000) /* 120 sec */
static const struct pci_device_id ne_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_NE) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, ne_pci_ids);
/**
* ne_submit_request() - Submit command request to the PCI device based on the
* command type.
* @pdev: PCI device to send the command to.
* @cmd_type: Command type of the request sent to the PCI device.
* @cmd_request: Command request payload.
* @cmd_request_size: Size of the command request payload.
*
* Context: Process context. This function is called with the ne_pci_dev mutex held.
*/
static void ne_submit_request(struct pci_dev *pdev, enum ne_pci_dev_cmd_type cmd_type,
void *cmd_request, size_t cmd_request_size)
{
struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
memcpy_toio(ne_pci_dev->iomem_base + NE_SEND_DATA, cmd_request, cmd_request_size);
iowrite32(cmd_type, ne_pci_dev->iomem_base + NE_COMMAND);
}
/**
* ne_retrieve_reply() - Retrieve reply from the PCI device.
* @pdev: PCI device to receive the reply from.
* @cmd_reply: Command reply payload.
* @cmd_reply_size: Size of the command reply payload.
*
* Context: Process context. This function is called with the ne_pci_dev mutex held.
*/
static void ne_retrieve_reply(struct pci_dev *pdev, struct ne_pci_dev_cmd_reply *cmd_reply,
size_t cmd_reply_size)
{
struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
memcpy_fromio(cmd_reply, ne_pci_dev->iomem_base + NE_RECV_DATA, cmd_reply_size);
}
/**
* ne_wait_for_reply() - Wait for a reply of a PCI device command.
* @pdev: PCI device for which a reply is waited.
*
* Context: Process context. This function is called with the ne_pci_dev mutex held.
* Return:
* * 0 on success.
* * Negative return value on failure.
*/
static int ne_wait_for_reply(struct pci_dev *pdev)
{
struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
int rc = -EINVAL;
/*
* TODO: Update to _interruptible and handle interrupted wait event
* e.g. -ERESTARTSYS, incoming signals + update timeout, if needed.
*/
rc = wait_event_timeout(ne_pci_dev->cmd_reply_wait_q,
atomic_read(&ne_pci_dev->cmd_reply_avail) != 0,
msecs_to_jiffies(NE_DEFAULT_TIMEOUT_MSECS));
if (!rc)
return -ETIMEDOUT;
return 0;
}
int ne_do_request(struct pci_dev *pdev, enum ne_pci_dev_cmd_type cmd_type,
void *cmd_request, size_t cmd_request_size,
struct ne_pci_dev_cmd_reply *cmd_reply, size_t cmd_reply_size)
{
struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
int rc = -EINVAL;
if (cmd_type <= INVALID_CMD || cmd_type >= MAX_CMD) {
dev_err_ratelimited(&pdev->dev, "Invalid cmd type=%u\n", cmd_type);
return -EINVAL;
}
if (!cmd_request) {
dev_err_ratelimited(&pdev->dev, "Null cmd request for cmd type=%u\n",
cmd_type);
return -EINVAL;
}
if (cmd_request_size > NE_SEND_DATA_SIZE) {
dev_err_ratelimited(&pdev->dev, "Invalid req size=%zu for cmd type=%u\n",
cmd_request_size, cmd_type);
return -EINVAL;
}
if (!cmd_reply) {
dev_err_ratelimited(&pdev->dev, "Null cmd reply for cmd type=%u\n",
cmd_type);
return -EINVAL;
}
if (cmd_reply_size > NE_RECV_DATA_SIZE) {
dev_err_ratelimited(&pdev->dev, "Invalid reply size=%zu for cmd type=%u\n",
cmd_reply_size, cmd_type);
return -EINVAL;
}
/*
* Use this mutex so that the PCI device handles one command request at
* a time.
*/
mutex_lock(&ne_pci_dev->pci_dev_mutex);
atomic_set(&ne_pci_dev->cmd_reply_avail, 0);
ne_submit_request(pdev, cmd_type, cmd_request, cmd_request_size);
rc = ne_wait_for_reply(pdev);
if (rc < 0) {
dev_err_ratelimited(&pdev->dev, "Error in wait for reply for cmd type=%u [rc=%d]\n",
cmd_type, rc);
goto unlock_mutex;
}
ne_retrieve_reply(pdev, cmd_reply, cmd_reply_size);
atomic_set(&ne_pci_dev->cmd_reply_avail, 0);
if (cmd_reply->rc < 0) {
rc = cmd_reply->rc;
dev_err_ratelimited(&pdev->dev, "Error in cmd process logic, cmd type=%u [rc=%d]\n",
cmd_type, rc);
goto unlock_mutex;
}
rc = 0;
unlock_mutex:
mutex_unlock(&ne_pci_dev->pci_dev_mutex);
return rc;
}
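/*
* Example: freeing a slot through this request path, mirroring the
* calls made elsewhere in this driver:
*
*	struct slot_free_req slot_free_req = { .slot_uid = slot_uid };
*	struct ne_pci_dev_cmd_reply cmd_reply = {};
*
*	rc = ne_do_request(pdev, SLOT_FREE, &slot_free_req,
*			   sizeof(slot_free_req), &cmd_reply,
*			   sizeof(cmd_reply));
*/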
/**
* ne_reply_handler() - Interrupt handler for retrieving a reply matching a
* request sent to the PCI device for enclave lifetime
* management.
* @irq: Received interrupt for a reply sent by the PCI device.
* @args: PCI device private data structure.
*
* Context: Interrupt context.
* Return:
* * IRQ_HANDLED on handled interrupt.
*/
static irqreturn_t ne_reply_handler(int irq, void *args)
{
struct ne_pci_dev *ne_pci_dev = (struct ne_pci_dev *)args;
atomic_set(&ne_pci_dev->cmd_reply_avail, 1);
/* TODO: Update to _interruptible. */
wake_up(&ne_pci_dev->cmd_reply_wait_q);
return IRQ_HANDLED;
}
/**
* ne_event_work_handler() - Work queue handler for notifying enclaves on a
* state change received by the event interrupt
* handler.
* @work: Item containing the NE PCI device for which an out-of-band event
* was issued.
*
* An out-of-band event is being issued by the Nitro Hypervisor when at least
* one enclave is changing state without client interaction.
*
* Context: Work queue context.
*/
static void ne_event_work_handler(struct work_struct *work)
{
struct ne_pci_dev_cmd_reply cmd_reply = {};
struct ne_enclave *ne_enclave = NULL;
struct ne_pci_dev *ne_pci_dev =
container_of(work, struct ne_pci_dev, notify_work);
struct pci_dev *pdev = ne_pci_dev->pdev;
int rc = -EINVAL;
struct slot_info_req slot_info_req = {};
mutex_lock(&ne_pci_dev->enclaves_list_mutex);
/*
* Iterate over all enclaves registered for the Nitro Enclaves
* PCI device and determine which enclave(s) the out-of-band event
* corresponds to.
*/
list_for_each_entry(ne_enclave, &ne_pci_dev->enclaves_list, enclave_list_entry) {
mutex_lock(&ne_enclave->enclave_info_mutex);
/*
* Enclaves that were never started cannot receive out-of-band
* events.
*/
if (ne_enclave->state != NE_STATE_RUNNING)
goto unlock;
slot_info_req.slot_uid = ne_enclave->slot_uid;
rc = ne_do_request(pdev, SLOT_INFO,
&slot_info_req, sizeof(slot_info_req),
&cmd_reply, sizeof(cmd_reply));
if (rc < 0)
dev_err(&pdev->dev, "Error in slot info [rc=%d]\n", rc);
/* Notify enclave process that the enclave state changed. */
if (ne_enclave->state != cmd_reply.state) {
ne_enclave->state = cmd_reply.state;
ne_enclave->has_event = true;
wake_up_interruptible(&ne_enclave->eventq);
}
unlock:
mutex_unlock(&ne_enclave->enclave_info_mutex);
}
mutex_unlock(&ne_pci_dev->enclaves_list_mutex);
}
/**
* ne_event_handler() - Interrupt handler for PCI device out-of-band events.
* This interrupt does not supply any data in the MMIO
* region. It notifies a change in the state of any of
* the launched enclaves.
* @irq: Received interrupt for an out-of-band event.
* @args: PCI device private data structure.
*
* Context: Interrupt context.
* Return:
* * IRQ_HANDLED on handled interrupt.
*/
static irqreturn_t ne_event_handler(int irq, void *args)
{
struct ne_pci_dev *ne_pci_dev = (struct ne_pci_dev *)args;
queue_work(ne_pci_dev->event_wq, &ne_pci_dev->notify_work);
return IRQ_HANDLED;
}
/**
* ne_setup_msix() - Setup MSI-X vectors for the PCI device.
* @pdev: PCI device to setup the MSI-X for.
*
* Context: Process context.
* Return:
* * 0 on success.
* * Negative return value on failure.
*/
static int ne_setup_msix(struct pci_dev *pdev)
{
struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
int nr_vecs = 0;
int rc = -EINVAL;
nr_vecs = pci_msix_vec_count(pdev);
if (nr_vecs < 0) {
rc = nr_vecs;
dev_err(&pdev->dev, "Error in getting vec count [rc=%d]\n", rc);
return rc;
}
rc = pci_alloc_irq_vectors(pdev, nr_vecs, nr_vecs, PCI_IRQ_MSIX);
if (rc < 0) {
dev_err(&pdev->dev, "Error in alloc MSI-X vecs [rc=%d]\n", rc);
return rc;
}
/*
* This IRQ gets triggered every time the PCI device responds to a
* command request. The reply is then retrieved, reading from the MMIO
* space of the PCI device.
*/
rc = request_irq(pci_irq_vector(pdev, NE_VEC_REPLY), ne_reply_handler,
0, "enclave_cmd", ne_pci_dev);
if (rc < 0) {
dev_err(&pdev->dev, "Error in request irq reply [rc=%d]\n", rc);
goto free_irq_vectors;
}
ne_pci_dev->event_wq = create_singlethread_workqueue("ne_pci_dev_wq");
if (!ne_pci_dev->event_wq) {
rc = -ENOMEM;
dev_err(&pdev->dev, "Cannot get wq for dev events [rc=%d]\n", rc);
goto free_reply_irq_vec;
}
INIT_WORK(&ne_pci_dev->notify_work, ne_event_work_handler);
/*
* This IRQ gets triggered every time any enclave's state changes. Its
* handler then scans for the changes and propagates them to the user
* space.
*/
rc = request_irq(pci_irq_vector(pdev, NE_VEC_EVENT), ne_event_handler,
0, "enclave_evt", ne_pci_dev);
if (rc < 0) {
dev_err(&pdev->dev, "Error in request irq event [rc=%d]\n", rc);
goto destroy_wq;
}
return 0;
destroy_wq:
destroy_workqueue(ne_pci_dev->event_wq);
free_reply_irq_vec:
free_irq(pci_irq_vector(pdev, NE_VEC_REPLY), ne_pci_dev);
free_irq_vectors:
pci_free_irq_vectors(pdev);
return rc;
}
/**
* ne_teardown_msix() - Teardown MSI-X vectors for the PCI device.
* @pdev: PCI device to teardown the MSI-X for.
*
* Context: Process context.
*/
static void ne_teardown_msix(struct pci_dev *pdev)
{
struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
free_irq(pci_irq_vector(pdev, NE_VEC_EVENT), ne_pci_dev);
flush_work(&ne_pci_dev->notify_work);
destroy_workqueue(ne_pci_dev->event_wq);
free_irq(pci_irq_vector(pdev, NE_VEC_REPLY), ne_pci_dev);
pci_free_irq_vectors(pdev);
}
/**
* ne_pci_dev_enable() - Select the PCI device version and enable it.
* @pdev: PCI device to select version for and then enable.
*
* Context: Process context.
* Return:
* * 0 on success.
* * Negative return value on failure.
*/
static int ne_pci_dev_enable(struct pci_dev *pdev)
{
u8 dev_enable_reply = 0;
u16 dev_version_reply = 0;
struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
iowrite16(NE_VERSION_MAX, ne_pci_dev->iomem_base + NE_VERSION);
dev_version_reply = ioread16(ne_pci_dev->iomem_base + NE_VERSION);
if (dev_version_reply != NE_VERSION_MAX) {
dev_err(&pdev->dev, "Error in pci dev version cmd\n");
return -EIO;
}
iowrite8(NE_ENABLE_ON, ne_pci_dev->iomem_base + NE_ENABLE);
dev_enable_reply = ioread8(ne_pci_dev->iomem_base + NE_ENABLE);
if (dev_enable_reply != NE_ENABLE_ON) {
dev_err(&pdev->dev, "Error in pci dev enable cmd\n");
return -EIO;
}
return 0;
}
/**
* ne_pci_dev_disable() - Disable the PCI device.
* @pdev: PCI device to disable.
*
* Context: Process context.
*/
static void ne_pci_dev_disable(struct pci_dev *pdev)
{
u8 dev_disable_reply = 0;
struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
const unsigned int sleep_time = 10; /* 10 ms */
unsigned int sleep_time_count = 0;
iowrite8(NE_ENABLE_OFF, ne_pci_dev->iomem_base + NE_ENABLE);
/*
* Check for NE_ENABLE_OFF in a loop, to handle cases where the device
* state is not immediately set to disabled, but goes through a
* transitory disabling state first.
*/
while (sleep_time_count < NE_DEFAULT_TIMEOUT_MSECS) {
dev_disable_reply = ioread8(ne_pci_dev->iomem_base + NE_ENABLE);
if (dev_disable_reply == NE_ENABLE_OFF)
return;
msleep_interruptible(sleep_time);
sleep_time_count += sleep_time;
}
dev_disable_reply = ioread8(ne_pci_dev->iomem_base + NE_ENABLE);
if (dev_disable_reply != NE_ENABLE_OFF)
dev_err(&pdev->dev, "Error in pci dev disable cmd\n");
}
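/*
 * Illustrative only, not used by the driver: the polling loop above could
 * also be expressed with the generic read_poll_timeout() helper from
 * <linux/iopoll.h>. A minimal sketch, assuming the same register layout;
 * the helper itself is standard kernel API.
 */
#if 0
#include <linux/iopoll.h>
static int ne_pci_dev_wait_disabled(struct ne_pci_dev *ne_pci_dev)
{
	u8 reply;
	/* Poll NE_ENABLE every 10 ms, give up after the default timeout. */
	return read_poll_timeout(ioread8, reply, reply == NE_ENABLE_OFF,
				 10 * 1000, NE_DEFAULT_TIMEOUT_MSECS * 1000,
				 false, ne_pci_dev->iomem_base + NE_ENABLE);
}
#endif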
/**
* ne_pci_probe() - Probe function for the NE PCI device.
* @pdev: PCI device to match with the NE PCI driver.
* @id : PCI device id table associated with the NE PCI driver.
*
* Context: Process context.
* Return:
* * 0 on success.
* * Negative return value on failure.
*/
static int ne_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct ne_pci_dev *ne_pci_dev = NULL;
int rc = -EINVAL;
ne_pci_dev = kzalloc(sizeof(*ne_pci_dev), GFP_KERNEL);
if (!ne_pci_dev)
return -ENOMEM;
rc = pci_enable_device(pdev);
if (rc < 0) {
dev_err(&pdev->dev, "Error in pci dev enable [rc=%d]\n", rc);
goto free_ne_pci_dev;
}
pci_set_master(pdev);
rc = pci_request_regions_exclusive(pdev, "nitro_enclaves");
if (rc < 0) {
dev_err(&pdev->dev, "Error in pci request regions [rc=%d]\n", rc);
goto disable_pci_dev;
}
ne_pci_dev->iomem_base = pci_iomap(pdev, PCI_BAR_NE, 0);
if (!ne_pci_dev->iomem_base) {
rc = -ENOMEM;
dev_err(&pdev->dev, "Error in pci iomap [rc=%d]\n", rc);
goto release_pci_regions;
}
pci_set_drvdata(pdev, ne_pci_dev);
rc = ne_setup_msix(pdev);
if (rc < 0) {
dev_err(&pdev->dev, "Error in pci dev msix setup [rc=%d]\n", rc);
goto iounmap_pci_bar;
}
ne_pci_dev_disable(pdev);
rc = ne_pci_dev_enable(pdev);
if (rc < 0) {
dev_err(&pdev->dev, "Error in ne_pci_dev enable [rc=%d]\n", rc);
goto teardown_msix;
}
atomic_set(&ne_pci_dev->cmd_reply_avail, 0);
init_waitqueue_head(&ne_pci_dev->cmd_reply_wait_q);
INIT_LIST_HEAD(&ne_pci_dev->enclaves_list);
mutex_init(&ne_pci_dev->enclaves_list_mutex);
mutex_init(&ne_pci_dev->pci_dev_mutex);
ne_pci_dev->pdev = pdev;
ne_devs.ne_pci_dev = ne_pci_dev;
rc = misc_register(ne_devs.ne_misc_dev);
if (rc < 0) {
dev_err(&pdev->dev, "Error in misc dev register [rc=%d]\n", rc);
goto disable_ne_pci_dev;
}
return 0;
disable_ne_pci_dev:
ne_devs.ne_pci_dev = NULL;
ne_pci_dev_disable(pdev);
teardown_msix:
ne_teardown_msix(pdev);
iounmap_pci_bar:
pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, ne_pci_dev->iomem_base);
release_pci_regions:
pci_release_regions(pdev);
disable_pci_dev:
pci_disable_device(pdev);
free_ne_pci_dev:
kfree(ne_pci_dev);
return rc;
}
/**
* ne_pci_remove() - Remove function for the NE PCI device.
* @pdev: PCI device associated with the NE PCI driver.
*
* Context: Process context.
*/
static void ne_pci_remove(struct pci_dev *pdev)
{
struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
misc_deregister(ne_devs.ne_misc_dev);
ne_devs.ne_pci_dev = NULL;
ne_pci_dev_disable(pdev);
ne_teardown_msix(pdev);
pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, ne_pci_dev->iomem_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
kfree(ne_pci_dev);
}
/**
* ne_pci_shutdown() - Shutdown function for the NE PCI device.
* @pdev: PCI device associated with the NE PCI driver.
*
* Context: Process context.
*/
static void ne_pci_shutdown(struct pci_dev *pdev)
{
struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev);
if (!ne_pci_dev)
return;
misc_deregister(ne_devs.ne_misc_dev);
ne_devs.ne_pci_dev = NULL;
ne_pci_dev_disable(pdev);
ne_teardown_msix(pdev);
pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, ne_pci_dev->iomem_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
kfree(ne_pci_dev);
}
/*
* TODO: Add suspend / resume functions for power management w/ CONFIG_PM, if
* needed.
*/
/* NE PCI device driver. */
struct pci_driver ne_pci_driver = {
.name = "nitro_enclaves",
.id_table = ne_pci_ids,
.probe = ne_pci_probe,
.remove = ne_pci_remove,
.shutdown = ne_pci_shutdown,
};
| linux-master | drivers/virt/nitro_enclaves/ne_pci_dev.c |
// SPDX-License-Identifier: GPL-2.0
#include <kunit/test.h>
#define MAX_PHYS_REGIONS 16
#define INVALID_VALUE (~0ull)
struct ne_phys_regions_test {
u64 paddr;
u64 size;
int expect_rc;
unsigned long expect_num;
u64 expect_last_paddr;
u64 expect_last_size;
} phys_regions_test_cases[] = {
/*
* Add the region from 0x1000 to (0x1000 + 0x200000 - 1):
* Expected result:
* Failed, start address is not 2M-aligned
*
* Now the instance of struct ne_phys_contig_mem_regions is:
* num = 0
* regions = {}
*/
{0x1000, 0x200000, -EINVAL, 0, INVALID_VALUE, INVALID_VALUE},
/*
* Add the region from 0x200000 to (0x200000 + 0x1000 - 1):
* Expected result:
* Failed, size is not 2M-aligned
*
* Now the instance of struct ne_phys_contig_mem_regions is:
* num = 0
* regions = {}
*/
{0x200000, 0x1000, -EINVAL, 0, INVALID_VALUE, INVALID_VALUE},
/*
* Add the region from 0x200000 to (0x200000 + 0x200000 - 1):
* Expected result:
* Successful
*
* Now the instance of struct ne_phys_contig_mem_regions is:
* num = 1
* regions = {
* {start=0x200000, end=0x3fffff}, // len=0x200000
* }
*/
{0x200000, 0x200000, 0, 1, 0x200000, 0x200000},
/*
* Add the region from 0x0 to (0x0 + 0x200000 - 1):
* Expected result:
* Successful
*
* Now the instance of struct ne_phys_contig_mem_regions is:
* num = 2
* regions = {
* {start=0x200000, end=0x3fffff}, // len=0x200000
* {start=0x0, end=0x1fffff}, // len=0x200000
* }
*/
{0x0, 0x200000, 0, 2, 0x0, 0x200000},
/*
* Add the region from 0x600000 to (0x600000 + 0x400000 - 1):
* Expected result:
* Successful
*
* Now the instance of struct ne_phys_contig_mem_regions is:
* num = 3
* regions = {
* {start=0x200000, end=0x3fffff}, // len=0x200000
* {start=0x0, end=0x1fffff}, // len=0x200000
* {start=0x600000, end=0x9fffff}, // len=0x400000
* }
*/
{0x600000, 0x400000, 0, 3, 0x600000, 0x400000},
/*
* Add the region from 0xa00000 to (0xa00000 + 0x400000 - 1):
* Expected result:
* Successful, merging case!
*
* Now the instance of struct ne_phys_contig_mem_regions is:
* num = 3
* regions = {
* {start=0x200000, end=0x3fffff}, // len=0x200000
* {start=0x0, end=0x1fffff}, // len=0x200000
* {start=0x600000, end=0xdfffff}, // len=0x800000
* }
*/
{0xa00000, 0x400000, 0, 3, 0x600000, 0x800000},
/*
* Add the region from 0x1000 to (0x1000 + 0x200000 - 1):
* Expected result:
* Failed, start address is not 2M-aligned
*
* Now the instance of struct ne_phys_contig_mem_regions is:
* num = 3
* regions = {
* {start=0x200000, end=0x3fffff}, // len=0x200000
* {start=0x0, end=0x1fffff}, // len=0x200000
* {start=0x600000, end=0xdfffff}, // len=0x800000
* }
*/
{0x1000, 0x200000, -EINVAL, 3, 0x600000, 0x800000},
};
static void ne_misc_dev_test_merge_phys_contig_memory_regions(struct kunit *test)
{
struct ne_phys_contig_mem_regions phys_contig_mem_regions = {};
int rc = 0;
int i = 0;
phys_contig_mem_regions.regions = kunit_kcalloc(test, MAX_PHYS_REGIONS,
sizeof(*phys_contig_mem_regions.regions),
GFP_KERNEL);
KUNIT_ASSERT_TRUE(test, phys_contig_mem_regions.regions);
for (i = 0; i < ARRAY_SIZE(phys_regions_test_cases); i++) {
struct ne_phys_regions_test *test_case = &phys_regions_test_cases[i];
unsigned long num = 0;
rc = ne_merge_phys_contig_memory_regions(&phys_contig_mem_regions,
test_case->paddr, test_case->size);
KUNIT_EXPECT_EQ(test, rc, test_case->expect_rc);
KUNIT_EXPECT_EQ(test, phys_contig_mem_regions.num, test_case->expect_num);
if (test_case->expect_last_paddr == INVALID_VALUE)
continue;
num = phys_contig_mem_regions.num;
KUNIT_EXPECT_EQ(test, phys_contig_mem_regions.regions[num - 1].start,
test_case->expect_last_paddr);
KUNIT_EXPECT_EQ(test, range_len(&phys_contig_mem_regions.regions[num - 1]),
test_case->expect_last_size);
}
kunit_kfree(test, phys_contig_mem_regions.regions);
}
static struct kunit_case ne_misc_dev_test_cases[] = {
KUNIT_CASE(ne_misc_dev_test_merge_phys_contig_memory_regions),
{}
};
static struct kunit_suite ne_misc_dev_test_suite = {
.name = "ne_misc_dev_test",
.test_cases = ne_misc_dev_test_cases,
};
kunit_test_suite(ne_misc_dev_test_suite);
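/*
 * Illustrative invocation only (assuming the required KUnit configs are
 * enabled): the suite can be run with the KUnit wrapper, e.g.
 * ./tools/testing/kunit/kunit.py run ne_misc_dev_test
 */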
| linux-master | drivers/virt/nitro_enclaves/ne_misc_dev_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* AMD Secure Encrypted Virtualization (SEV) guest driver interface
*
* Copyright (C) 2021 Advanced Micro Devices, Inc.
*
* Author: Brijesh Singh <[email protected]>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/set_memory.h>
#include <linux/fs.h>
#include <crypto/aead.h>
#include <linux/scatterlist.h>
#include <linux/psp-sev.h>
#include <uapi/linux/sev-guest.h>
#include <uapi/linux/psp-sev.h>
#include <asm/svm.h>
#include <asm/sev.h>
#include "sev-guest.h"
#define DEVICE_NAME "sev-guest"
#define AAD_LEN 48
#define MSG_HDR_VER 1
#define SNP_REQ_MAX_RETRY_DURATION (60*HZ)
#define SNP_REQ_RETRY_DELAY (2*HZ)
struct snp_guest_crypto {
struct crypto_aead *tfm;
u8 *iv, *authtag;
int iv_len, a_len;
};
struct snp_guest_dev {
struct device *dev;
struct miscdevice misc;
void *certs_data;
struct snp_guest_crypto *crypto;
/* request and response are in unencrypted memory */
struct snp_guest_msg *request, *response;
/*
* Avoid information leakage by double-buffering shared messages
* in fields that are in regular encrypted memory.
*/
struct snp_guest_msg secret_request, secret_response;
struct snp_secrets_page_layout *layout;
struct snp_req_data input;
u32 *os_area_msg_seqno;
u8 *vmpck;
};
static u32 vmpck_id;
module_param(vmpck_id, uint, 0444);
MODULE_PARM_DESC(vmpck_id, "The VMPCK ID to use when communicating with the PSP.");
/* Mutex to serialize the shared buffer access and command handling. */
static DEFINE_MUTEX(snp_cmd_mutex);
static bool is_vmpck_empty(struct snp_guest_dev *snp_dev)
{
char zero_key[VMPCK_KEY_LEN] = {0};
if (snp_dev->vmpck)
return !memcmp(snp_dev->vmpck, zero_key, VMPCK_KEY_LEN);
return true;
}
/*
* If an error is received from the host or AMD Secure Processor (ASP) there
* are two options. Either retry the exact same encrypted request or discontinue
* using the VMPCK.
*
* This is because in the current encryption scheme GHCB v2 uses AES-GCM to
* encrypt the requests. The IV for this scheme is the sequence number. GCM
* cannot tolerate IV reuse.
*
* The ASP FW v1.51 only increments the sequence numbers on a successful
* guest<->ASP back and forth and only accepts messages at its exact sequence
* number.
*
* So if the sequence number were to be reused the encryption scheme is
* vulnerable. If the sequence number were incremented for a fresh IV the ASP
* will reject the request.
*/
static void snp_disable_vmpck(struct snp_guest_dev *snp_dev)
{
dev_alert(snp_dev->dev, "Disabling vmpck_id %d to prevent IV reuse.\n",
vmpck_id);
memzero_explicit(snp_dev->vmpck, VMPCK_KEY_LEN);
snp_dev->vmpck = NULL;
}
static inline u64 __snp_get_msg_seqno(struct snp_guest_dev *snp_dev)
{
u64 count;
lockdep_assert_held(&snp_cmd_mutex);
/* Read the current message sequence counter from secrets pages */
count = *snp_dev->os_area_msg_seqno;
return count + 1;
}
/* Return a non-zero sequence number on success, zero on counter overflow */
static u64 snp_get_msg_seqno(struct snp_guest_dev *snp_dev)
{
u64 count = __snp_get_msg_seqno(snp_dev);
/*
* The message sequence counter for the SNP guest request is a 64-bit
* value, but version 2 of the GHCB specification defines a 32-bit storage
* for it. If the counter exceeds the 32-bit value then return zero.
* The caller should check the return value, but if the caller happens to
* not check the value and use it, then the firmware treats zero as an
* invalid number and will fail the message request.
*/
if (count >= UINT_MAX) {
dev_err(snp_dev->dev, "request message sequence counter overflow\n");
return 0;
}
return count;
}
static void snp_inc_msg_seqno(struct snp_guest_dev *snp_dev)
{
/*
* The counter is also incremented by the PSP, so increment it by 2
* and save in secrets page.
*/
*snp_dev->os_area_msg_seqno += 2;
}
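/*
 * Worked example of the scheme above (illustrative only): if the secrets
 * page holds os_area_msg_seqno == 4, __snp_get_msg_seqno() returns 5 and
 * the request goes out with sequence number 5. The PSP replies with 6,
 * and snp_inc_msg_seqno() stores 6, so the next request will use 7.
 */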
static inline struct snp_guest_dev *to_snp_dev(struct file *file)
{
struct miscdevice *dev = file->private_data;
return container_of(dev, struct snp_guest_dev, misc);
}
static struct snp_guest_crypto *init_crypto(struct snp_guest_dev *snp_dev, u8 *key, size_t keylen)
{
struct snp_guest_crypto *crypto;
crypto = kzalloc(sizeof(*crypto), GFP_KERNEL_ACCOUNT);
if (!crypto)
return NULL;
crypto->tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
if (IS_ERR(crypto->tfm))
goto e_free;
if (crypto_aead_setkey(crypto->tfm, key, keylen))
goto e_free_crypto;
crypto->iv_len = crypto_aead_ivsize(crypto->tfm);
crypto->iv = kmalloc(crypto->iv_len, GFP_KERNEL_ACCOUNT);
if (!crypto->iv)
goto e_free_crypto;
if (crypto_aead_authsize(crypto->tfm) > MAX_AUTHTAG_LEN) {
if (crypto_aead_setauthsize(crypto->tfm, MAX_AUTHTAG_LEN)) {
dev_err(snp_dev->dev, "failed to set authsize to %d\n", MAX_AUTHTAG_LEN);
goto e_free_iv;
}
}
crypto->a_len = crypto_aead_authsize(crypto->tfm);
crypto->authtag = kmalloc(crypto->a_len, GFP_KERNEL_ACCOUNT);
if (!crypto->authtag)
goto e_free_iv;
return crypto;
e_free_iv:
kfree(crypto->iv);
e_free_crypto:
crypto_free_aead(crypto->tfm);
e_free:
kfree(crypto);
return NULL;
}
static void deinit_crypto(struct snp_guest_crypto *crypto)
{
crypto_free_aead(crypto->tfm);
kfree(crypto->iv);
kfree(crypto->authtag);
kfree(crypto);
}
static int enc_dec_message(struct snp_guest_crypto *crypto, struct snp_guest_msg *msg,
u8 *src_buf, u8 *dst_buf, size_t len, bool enc)
{
struct snp_guest_msg_hdr *hdr = &msg->hdr;
struct scatterlist src[3], dst[3];
DECLARE_CRYPTO_WAIT(wait);
struct aead_request *req;
int ret;
req = aead_request_alloc(crypto->tfm, GFP_KERNEL);
if (!req)
return -ENOMEM;
/*
* AEAD memory operations:
* +------ AAD -------+------- DATA -----+---- AUTHTAG----+
* | msg header | plaintext | hdr->authtag |
* | bytes 30h - 5Fh | or | |
* | | cipher | |
* +------------------+------------------+----------------+
*/
sg_init_table(src, 3);
sg_set_buf(&src[0], &hdr->algo, AAD_LEN);
sg_set_buf(&src[1], src_buf, hdr->msg_sz);
sg_set_buf(&src[2], hdr->authtag, crypto->a_len);
sg_init_table(dst, 3);
sg_set_buf(&dst[0], &hdr->algo, AAD_LEN);
sg_set_buf(&dst[1], dst_buf, hdr->msg_sz);
sg_set_buf(&dst[2], hdr->authtag, crypto->a_len);
aead_request_set_ad(req, AAD_LEN);
aead_request_set_tfm(req, crypto->tfm);
aead_request_set_callback(req, 0, crypto_req_done, &wait);
aead_request_set_crypt(req, src, dst, len, crypto->iv);
ret = crypto_wait_req(enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req), &wait);
aead_request_free(req);
return ret;
}
static int __enc_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg,
void *plaintext, size_t len)
{
struct snp_guest_crypto *crypto = snp_dev->crypto;
struct snp_guest_msg_hdr *hdr = &msg->hdr;
memset(crypto->iv, 0, crypto->iv_len);
memcpy(crypto->iv, &hdr->msg_seqno, sizeof(hdr->msg_seqno));
return enc_dec_message(crypto, msg, plaintext, msg->payload, len, true);
}
static int dec_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg,
void *plaintext, size_t len)
{
struct snp_guest_crypto *crypto = snp_dev->crypto;
struct snp_guest_msg_hdr *hdr = &msg->hdr;
/* Build IV with response buffer sequence number */
memset(crypto->iv, 0, crypto->iv_len);
memcpy(crypto->iv, &hdr->msg_seqno, sizeof(hdr->msg_seqno));
return enc_dec_message(crypto, msg, msg->payload, plaintext, len, false);
}
static int verify_and_dec_payload(struct snp_guest_dev *snp_dev, void *payload, u32 sz)
{
struct snp_guest_crypto *crypto = snp_dev->crypto;
struct snp_guest_msg *resp = &snp_dev->secret_response;
struct snp_guest_msg *req = &snp_dev->secret_request;
struct snp_guest_msg_hdr *req_hdr = &req->hdr;
struct snp_guest_msg_hdr *resp_hdr = &resp->hdr;
dev_dbg(snp_dev->dev, "response [seqno %lld type %d version %d sz %d]\n",
resp_hdr->msg_seqno, resp_hdr->msg_type, resp_hdr->msg_version, resp_hdr->msg_sz);
/* Copy response from shared memory to encrypted memory. */
memcpy(resp, snp_dev->response, sizeof(*resp));
/* Verify that the sequence counter is incremented by 1 */
if (unlikely(resp_hdr->msg_seqno != (req_hdr->msg_seqno + 1)))
return -EBADMSG;
/* Verify response message type and version number. */
if (resp_hdr->msg_type != (req_hdr->msg_type + 1) ||
resp_hdr->msg_version != req_hdr->msg_version)
return -EBADMSG;
/*
* If the message size is greater than our buffer length then return
* an error.
*/
if (unlikely((resp_hdr->msg_sz + crypto->a_len) > sz))
return -EBADMSG;
/* Decrypt the payload */
return dec_payload(snp_dev, resp, payload, resp_hdr->msg_sz + crypto->a_len);
}
static int enc_payload(struct snp_guest_dev *snp_dev, u64 seqno, int version, u8 type,
void *payload, size_t sz)
{
struct snp_guest_msg *req = &snp_dev->secret_request;
struct snp_guest_msg_hdr *hdr = &req->hdr;
memset(req, 0, sizeof(*req));
hdr->algo = SNP_AEAD_AES_256_GCM;
hdr->hdr_version = MSG_HDR_VER;
hdr->hdr_sz = sizeof(*hdr);
hdr->msg_type = type;
hdr->msg_version = version;
hdr->msg_seqno = seqno;
hdr->msg_vmpck = vmpck_id;
hdr->msg_sz = sz;
/* Verify the sequence number is non-zero */
if (!hdr->msg_seqno)
return -ENOSR;
dev_dbg(snp_dev->dev, "request [seqno %lld type %d version %d sz %d]\n",
hdr->msg_seqno, hdr->msg_type, hdr->msg_version, hdr->msg_sz);
return __enc_payload(snp_dev, req, payload, sz);
}
static int __handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code,
struct snp_guest_request_ioctl *rio)
{
unsigned long req_start = jiffies;
unsigned int override_npages = 0;
u64 override_err = 0;
int rc;
retry_request:
/*
* Call firmware to process the request. In this function the encrypted
* message enters shared memory with the host. So after this call the
* sequence number must be incremented or the VMPCK must be deleted to
* prevent reuse of the IV.
*/
rc = snp_issue_guest_request(exit_code, &snp_dev->input, rio);
switch (rc) {
case -ENOSPC:
/*
* If the extended guest request fails due to having too
* small of a certificate data buffer, retry the same
* guest request without the extended data request in
* order to increment the sequence number and thus avoid
* IV reuse.
*/
override_npages = snp_dev->input.data_npages;
exit_code = SVM_VMGEXIT_GUEST_REQUEST;
/*
* Override the error to inform callers the given extended
* request buffer size was too small and give the caller the
* required buffer size.
*/
override_err = SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN);
/*
* If this call to the firmware succeeds, the sequence number can
* be incremented allowing for continued use of the VMPCK. If
* there is an error reflected in the return value, this value
* is checked further down and the result will be the deletion
* of the VMPCK and the error code being propagated back to the
* user as an ioctl() return code.
*/
goto retry_request;
/*
* The host may return SNP_GUEST_VMM_ERR_BUSY if the request has been
* throttled. Retry in the driver to avoid returning and reusing the
* message sequence number on a different message.
*/
case -EAGAIN:
if (jiffies - req_start > SNP_REQ_MAX_RETRY_DURATION) {
rc = -ETIMEDOUT;
break;
}
schedule_timeout_killable(SNP_REQ_RETRY_DELAY);
goto retry_request;
}
/*
* Increment the message sequence number. There is no harm in doing
* this now because decryption uses the value stored in the response
* structure and any failure will wipe the VMPCK, preventing further
* use anyway.
*/
snp_inc_msg_seqno(snp_dev);
if (override_err) {
rio->exitinfo2 = override_err;
/*
* If an extended guest request was issued and the supplied certificate
* buffer was not large enough, a standard guest request was issued to
* prevent IV reuse. If the standard request was successful, return -EIO
* back to the caller as would have originally been returned.
*/
if (!rc && override_err == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
rc = -EIO;
}
if (override_npages)
snp_dev->input.data_npages = override_npages;
return rc;
}
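/*
 * Worked example of the throttling path above (illustrative only): with
 * SNP_REQ_MAX_RETRY_DURATION = 60 * HZ and SNP_REQ_RETRY_DELAY = 2 * HZ,
 * a continuously throttled request is retried roughly every 2 seconds and
 * gives up with -ETIMEDOUT after about 60 seconds, i.e. on the order of
 * 30 attempts.
 */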
static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code,
struct snp_guest_request_ioctl *rio, u8 type,
void *req_buf, size_t req_sz, void *resp_buf,
u32 resp_sz)
{
u64 seqno;
int rc;
/* Get the message sequence number and verify that it is non-zero */
seqno = snp_get_msg_seqno(snp_dev);
if (!seqno)
return -EIO;
/* Clear shared memory's response for the host to populate. */
memset(snp_dev->response, 0, sizeof(struct snp_guest_msg));
/* Encrypt the userspace provided payload in snp_dev->secret_request. */
rc = enc_payload(snp_dev, seqno, rio->msg_version, type, req_buf, req_sz);
if (rc)
return rc;
/*
* Write the fully encrypted request to the shared unencrypted
* request page.
*/
memcpy(snp_dev->request, &snp_dev->secret_request,
sizeof(snp_dev->secret_request));
rc = __handle_guest_request(snp_dev, exit_code, rio);
if (rc) {
if (rc == -EIO &&
rio->exitinfo2 == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
return rc;
dev_alert(snp_dev->dev,
"Detected error from ASP request. rc: %d, exitinfo2: 0x%llx\n",
rc, rio->exitinfo2);
snp_disable_vmpck(snp_dev);
return rc;
}
rc = verify_and_dec_payload(snp_dev, resp_buf, resp_sz);
if (rc) {
dev_alert(snp_dev->dev, "Detected unexpected decode failure from ASP. rc: %d\n", rc);
snp_disable_vmpck(snp_dev);
return rc;
}
return 0;
}
static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
struct snp_guest_crypto *crypto = snp_dev->crypto;
struct snp_report_resp *resp;
struct snp_report_req req;
int rc, resp_len;
lockdep_assert_held(&snp_cmd_mutex);
if (!arg->req_data || !arg->resp_data)
return -EINVAL;
if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
return -EFAULT;
/*
* The intermediate response buffer is used while decrypting the
* response payload. Make sure that it has enough space to cover the
* authtag.
*/
resp_len = sizeof(resp->data) + crypto->a_len;
resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
if (!resp)
return -ENOMEM;
rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg,
SNP_MSG_REPORT_REQ, &req, sizeof(req), resp->data,
resp_len);
if (rc)
goto e_free;
if (copy_to_user((void __user *)arg->resp_data, resp, sizeof(*resp)))
rc = -EFAULT;
e_free:
kfree(resp);
return rc;
}
static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
struct snp_guest_crypto *crypto = snp_dev->crypto;
struct snp_derived_key_resp resp = {0};
struct snp_derived_key_req req;
int rc, resp_len;
/* Response data is 64 bytes and max authsize for GCM is 16 bytes. */
u8 buf[64 + 16];
lockdep_assert_held(&snp_cmd_mutex);
if (!arg->req_data || !arg->resp_data)
return -EINVAL;
/*
* The intermediate response buffer is used while decrypting the
* response payload. Make sure that it has enough space to cover the
* authtag.
*/
resp_len = sizeof(resp.data) + crypto->a_len;
if (sizeof(buf) < resp_len)
return -ENOMEM;
if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
return -EFAULT;
rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg,
SNP_MSG_KEY_REQ, &req, sizeof(req), buf, resp_len);
if (rc)
return rc;
memcpy(resp.data, buf, sizeof(resp.data));
if (copy_to_user((void __user *)arg->resp_data, &resp, sizeof(resp)))
rc = -EFAULT;
/* The response buffer contains the sensitive data, explicitly clear it. */
memzero_explicit(buf, sizeof(buf));
memzero_explicit(&resp, sizeof(resp));
return rc;
}
static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
struct snp_guest_crypto *crypto = snp_dev->crypto;
struct snp_ext_report_req req;
struct snp_report_resp *resp;
int ret, npages = 0, resp_len;
lockdep_assert_held(&snp_cmd_mutex);
if (!arg->req_data || !arg->resp_data)
return -EINVAL;
if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
return -EFAULT;
/* userspace does not want certificate data */
if (!req.certs_len || !req.certs_address)
goto cmd;
if (req.certs_len > SEV_FW_BLOB_MAX_SIZE ||
!IS_ALIGNED(req.certs_len, PAGE_SIZE))
return -EINVAL;
if (!access_ok((const void __user *)req.certs_address, req.certs_len))
return -EFAULT;
/*
* Initialize the intermediate buffer with all zeros. This buffer
* is used in the guest request message to get the certs blob from
* the host. If the host does not supply any certs in it, the zeros
* indicate that certificate data was not provided.
*/
memset(snp_dev->certs_data, 0, req.certs_len);
npages = req.certs_len >> PAGE_SHIFT;
cmd:
/*
* The intermediate response buffer is used while decrypting the
* response payload. Make sure that it has enough space to cover the
* authtag.
*/
resp_len = sizeof(resp->data) + crypto->a_len;
resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
if (!resp)
return -ENOMEM;
snp_dev->input.data_npages = npages;
ret = handle_guest_request(snp_dev, SVM_VMGEXIT_EXT_GUEST_REQUEST, arg,
SNP_MSG_REPORT_REQ, &req.data,
sizeof(req.data), resp->data, resp_len);
/* If certs length is invalid then copy the returned length */
if (arg->vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN) {
req.certs_len = snp_dev->input.data_npages << PAGE_SHIFT;
if (copy_to_user((void __user *)arg->req_data, &req, sizeof(req)))
ret = -EFAULT;
}
if (ret)
goto e_free;
if (npages &&
copy_to_user((void __user *)req.certs_address, snp_dev->certs_data,
req.certs_len)) {
ret = -EFAULT;
goto e_free;
}
if (copy_to_user((void __user *)arg->resp_data, resp, sizeof(*resp)))
ret = -EFAULT;
e_free:
kfree(resp);
return ret;
}
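/*
 * Worked example of the length negotiation above (illustrative only): if
 * userspace passes certs_len = 0x1000 but the host needs four pages of
 * certificate data, the extended request fails with
 * SNP_GUEST_VMM_ERR_INVALID_LEN, req.certs_len is rewritten to 0x4000
 * (data_npages << PAGE_SHIFT) and copied back, and the ioctl returns -EIO
 * so that userspace can retry with a large enough buffer.
 */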
static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
{
struct snp_guest_dev *snp_dev = to_snp_dev(file);
void __user *argp = (void __user *)arg;
struct snp_guest_request_ioctl input;
int ret = -ENOTTY;
if (copy_from_user(&input, argp, sizeof(input)))
return -EFAULT;
input.exitinfo2 = 0xff;
/* Message version must be non-zero */
if (!input.msg_version)
return -EINVAL;
mutex_lock(&snp_cmd_mutex);
/* Check if the VMPCK is not empty */
if (is_vmpck_empty(snp_dev)) {
dev_err_ratelimited(snp_dev->dev, "VMPCK is disabled\n");
mutex_unlock(&snp_cmd_mutex);
return -ENOTTY;
}
switch (ioctl) {
case SNP_GET_REPORT:
ret = get_report(snp_dev, &input);
break;
case SNP_GET_DERIVED_KEY:
ret = get_derived_key(snp_dev, &input);
break;
case SNP_GET_EXT_REPORT:
ret = get_ext_report(snp_dev, &input);
break;
default:
break;
}
mutex_unlock(&snp_cmd_mutex);
if (input.exitinfo2 && copy_to_user(argp, &input, sizeof(input)))
return -EFAULT;
return ret;
}
static void free_shared_pages(void *buf, size_t sz)
{
unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
int ret;
if (!buf)
return;
ret = set_memory_encrypted((unsigned long)buf, npages);
if (ret) {
WARN_ONCE(ret, "failed to restore encryption mask (leak it)\n");
return;
}
__free_pages(virt_to_page(buf), get_order(sz));
}
static void *alloc_shared_pages(struct device *dev, size_t sz)
{
unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
struct page *page;
int ret;
page = alloc_pages(GFP_KERNEL_ACCOUNT, get_order(sz));
if (!page)
return NULL;
ret = set_memory_decrypted((unsigned long)page_address(page), npages);
if (ret) {
dev_err(dev, "failed to mark page shared, ret=%d\n", ret);
__free_pages(page, get_order(sz));
return NULL;
}
return page_address(page);
}
static const struct file_operations snp_guest_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = snp_guest_ioctl,
};
static u8 *get_vmpck(int id, struct snp_secrets_page_layout *layout, u32 **seqno)
{
u8 *key = NULL;
switch (id) {
case 0:
*seqno = &layout->os_area.msg_seqno_0;
key = layout->vmpck0;
break;
case 1:
*seqno = &layout->os_area.msg_seqno_1;
key = layout->vmpck1;
break;
case 2:
*seqno = &layout->os_area.msg_seqno_2;
key = layout->vmpck2;
break;
case 3:
*seqno = &layout->os_area.msg_seqno_3;
key = layout->vmpck3;
break;
default:
break;
}
return key;
}
static int __init sev_guest_probe(struct platform_device *pdev)
{
struct snp_secrets_page_layout *layout;
struct sev_guest_platform_data *data;
struct device *dev = &pdev->dev;
struct snp_guest_dev *snp_dev;
struct miscdevice *misc;
void __iomem *mapping;
int ret;
if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
return -ENODEV;
if (!dev->platform_data)
return -ENODEV;
data = (struct sev_guest_platform_data *)dev->platform_data;
mapping = ioremap_encrypted(data->secrets_gpa, PAGE_SIZE);
if (!mapping)
return -ENODEV;
layout = (__force void *)mapping;
ret = -ENOMEM;
snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL);
if (!snp_dev)
goto e_unmap;
ret = -EINVAL;
snp_dev->vmpck = get_vmpck(vmpck_id, layout, &snp_dev->os_area_msg_seqno);
if (!snp_dev->vmpck) {
dev_err(dev, "invalid vmpck id %d\n", vmpck_id);
goto e_unmap;
}
/* Verify that VMPCK is not zero. */
if (is_vmpck_empty(snp_dev)) {
dev_err(dev, "vmpck id %d is null\n", vmpck_id);
goto e_unmap;
}
platform_set_drvdata(pdev, snp_dev);
snp_dev->dev = dev;
snp_dev->layout = layout;
/* Allocate the shared page used for the request and response message. */
snp_dev->request = alloc_shared_pages(dev, sizeof(struct snp_guest_msg));
if (!snp_dev->request)
goto e_unmap;
snp_dev->response = alloc_shared_pages(dev, sizeof(struct snp_guest_msg));
if (!snp_dev->response)
goto e_free_request;
snp_dev->certs_data = alloc_shared_pages(dev, SEV_FW_BLOB_MAX_SIZE);
if (!snp_dev->certs_data)
goto e_free_response;
ret = -EIO;
snp_dev->crypto = init_crypto(snp_dev, snp_dev->vmpck, VMPCK_KEY_LEN);
if (!snp_dev->crypto)
goto e_free_cert_data;
misc = &snp_dev->misc;
misc->minor = MISC_DYNAMIC_MINOR;
misc->name = DEVICE_NAME;
misc->fops = &snp_guest_fops;
/* Initialize the input addresses for the guest request */
snp_dev->input.req_gpa = __pa(snp_dev->request);
snp_dev->input.resp_gpa = __pa(snp_dev->response);
snp_dev->input.data_gpa = __pa(snp_dev->certs_data);
ret = misc_register(misc);
if (ret)
goto e_free_cert_data;
dev_info(dev, "Initialized SEV guest driver (using vmpck_id %d)\n", vmpck_id);
return 0;
e_free_cert_data:
free_shared_pages(snp_dev->certs_data, SEV_FW_BLOB_MAX_SIZE);
e_free_response:
free_shared_pages(snp_dev->response, sizeof(struct snp_guest_msg));
e_free_request:
free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg));
e_unmap:
iounmap(mapping);
return ret;
}
static int __exit sev_guest_remove(struct platform_device *pdev)
{
struct snp_guest_dev *snp_dev = platform_get_drvdata(pdev);
free_shared_pages(snp_dev->certs_data, SEV_FW_BLOB_MAX_SIZE);
free_shared_pages(snp_dev->response, sizeof(struct snp_guest_msg));
free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg));
deinit_crypto(snp_dev->crypto);
misc_deregister(&snp_dev->misc);
return 0;
}
/*
* This driver is meant to be a common SEV guest interface driver and to
* support any SEV guest API. As such, even though it has been introduced
* with the SEV-SNP support, it is named "sev-guest".
*/
static struct platform_driver sev_guest_driver = {
.remove = __exit_p(sev_guest_remove),
.driver = {
.name = "sev-guest",
},
};
module_platform_driver_probe(sev_guest_driver, sev_guest_probe);
MODULE_AUTHOR("Brijesh Singh <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD SEV Guest Driver");
MODULE_ALIAS("platform:sev-guest");
| linux-master | drivers/virt/coco/sev-guest/sev-guest.c |
// SPDX-License-Identifier: GPL-2.0
/*
* efi_secret module
*
* Copyright (C) 2022 IBM Corporation
* Author: Dov Murik <[email protected]>
*/
/**
* DOC: efi_secret: Allow reading EFI confidential computing (coco) secret area
* via securityfs interface.
*
* When the module is loaded (and securityfs is mounted, typically under
* /sys/kernel/security), a "secrets/coco" directory is created in securityfs.
* In it, a file is created for each secret entry. The name of each such file
* is the GUID of the secret entry, and its content is the secret data.
*/
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/security.h>
#include <linux/efi.h>
#include <linux/cacheflush.h>
#define EFI_SECRET_NUM_FILES 64
struct efi_secret {
struct dentry *secrets_dir;
struct dentry *fs_dir;
struct dentry *fs_files[EFI_SECRET_NUM_FILES];
void __iomem *secret_data;
u64 secret_data_len;
};
/*
* Structure of the EFI secret area
*
* Offset Length
* (bytes) (bytes) Usage
* ------- ------- -----
* 0 16 Secret table header GUID (must be 1e74f542-71dd-4d66-963e-ef4287ff173b)
* 16 4 Length in bytes of the entire secret area
*
* 20 16 First secret entry's GUID
* 36 4 First secret entry's length in bytes (= 16 + 4 + x)
* 40 x First secret entry's data
*
* 40+x 16 Second secret entry's GUID
* 56+x 4 Second secret entry's length in bytes (= 16 + 4 + y)
* 60+x y Second secret entry's data
*
* (... and so on for additional entries)
*
* The GUID of each secret entry designates the usage of the secret data.
*/
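/*
 * Worked example of the layout above (illustrative only): with a single
 * 32-byte secret, the header occupies bytes 0-19, the entry GUID bytes
 * 20-35, the entry length (16 + 4 + 32 = 52) bytes 36-39 and the secret
 * data bytes 40-71, so a second entry would start at byte 72 (= 40 + x
 * with x = 32).
 */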
/**
* struct secret_header - Header of entire secret area; this should be followed
* by instances of struct secret_entry.
* @guid: Must be EFI_SECRET_TABLE_HEADER_GUID
* @len: Length in bytes of entire secret area, including header
*/
struct secret_header {
efi_guid_t guid;
u32 len;
} __packed;
/**
* struct secret_entry - Holds one secret entry
* @guid: Secret-specific GUID (or NULL_GUID if this secret entry was deleted)
* @len: Length of secret entry, including its guid and len fields
* @data: The secret data (full of zeros if this secret entry was deleted)
*/
struct secret_entry {
efi_guid_t guid;
u32 len;
u8 data[];
} __packed;
static size_t secret_entry_data_len(struct secret_entry *e)
{
return e->len - sizeof(*e);
}
static struct efi_secret the_efi_secret;
static inline struct efi_secret *efi_secret_get(void)
{
return &the_efi_secret;
}
static int efi_secret_bin_file_show(struct seq_file *file, void *data)
{
struct secret_entry *e = file->private;
if (e)
seq_write(file, e->data, secret_entry_data_len(e));
return 0;
}
DEFINE_SHOW_ATTRIBUTE(efi_secret_bin_file);
/*
* Overwrite memory content with zeroes, and ensure that dirty cache lines are
* actually written back to memory, to clear out the secret.
*/
static void wipe_memory(void *addr, size_t size)
{
memzero_explicit(addr, size);
#ifdef CONFIG_X86
clflush_cache_range(addr, size);
#endif
}
static int efi_secret_unlink(struct inode *dir, struct dentry *dentry)
{
struct efi_secret *s = efi_secret_get();
struct inode *inode = d_inode(dentry);
struct secret_entry *e = (struct secret_entry *)inode->i_private;
int i;
if (e) {
/* Zero out the secret data */
wipe_memory(e->data, secret_entry_data_len(e));
e->guid = NULL_GUID;
}
inode->i_private = NULL;
for (i = 0; i < EFI_SECRET_NUM_FILES; i++)
if (s->fs_files[i] == dentry)
s->fs_files[i] = NULL;
/*
* securityfs_remove tries to lock the directory's inode, but we reach
* the unlink callback when it's already locked
*/
inode_unlock(dir);
securityfs_remove(dentry);
inode_lock(dir);
return 0;
}
static const struct inode_operations efi_secret_dir_inode_operations = {
.lookup = simple_lookup,
.unlink = efi_secret_unlink,
};
static int efi_secret_map_area(struct platform_device *dev)
{
int ret;
struct efi_secret *s = efi_secret_get();
struct linux_efi_coco_secret_area *secret_area;
if (efi.coco_secret == EFI_INVALID_TABLE_ADDR) {
dev_err(&dev->dev, "Secret area address is not available\n");
return -EINVAL;
}
secret_area = memremap(efi.coco_secret, sizeof(*secret_area), MEMREMAP_WB);
if (secret_area == NULL) {
dev_err(&dev->dev, "Could not map secret area EFI config entry\n");
return -ENOMEM;
}
if (!secret_area->base_pa || secret_area->size < sizeof(struct secret_header)) {
dev_err(&dev->dev,
"Invalid secret area memory location (base_pa=0x%llx size=0x%llx)\n",
secret_area->base_pa, secret_area->size);
ret = -EINVAL;
goto unmap;
}
s->secret_data = ioremap_encrypted(secret_area->base_pa, secret_area->size);
if (s->secret_data == NULL) {
dev_err(&dev->dev, "Could not map secret area\n");
ret = -ENOMEM;
goto unmap;
}
s->secret_data_len = secret_area->size;
ret = 0;
unmap:
memunmap(secret_area);
return ret;
}
static void efi_secret_securityfs_teardown(struct platform_device *dev)
{
struct efi_secret *s = efi_secret_get();
int i;
for (i = (EFI_SECRET_NUM_FILES - 1); i >= 0; i--) {
securityfs_remove(s->fs_files[i]);
s->fs_files[i] = NULL;
}
securityfs_remove(s->fs_dir);
s->fs_dir = NULL;
securityfs_remove(s->secrets_dir);
s->secrets_dir = NULL;
dev_dbg(&dev->dev, "Removed securityfs entries\n");
}
static int efi_secret_securityfs_setup(struct platform_device *dev)
{
struct efi_secret *s = efi_secret_get();
int ret = 0, i = 0, bytes_left;
unsigned char *ptr;
struct secret_header *h;
struct secret_entry *e;
struct dentry *dent;
char guid_str[EFI_VARIABLE_GUID_LEN + 1];
ptr = (void __force *)s->secret_data;
h = (struct secret_header *)ptr;
if (efi_guidcmp(h->guid, EFI_SECRET_TABLE_HEADER_GUID)) {
/*
* This is not an error: it just means that EFI defines secret
* area but it was not populated by the Guest Owner.
*/
dev_dbg(&dev->dev, "EFI secret area does not start with correct GUID\n");
return -ENODEV;
}
if (h->len < sizeof(*h)) {
dev_err(&dev->dev, "EFI secret area reported length is too small\n");
return -EINVAL;
}
if (h->len > s->secret_data_len) {
dev_err(&dev->dev, "EFI secret area reported length is too big\n");
return -EINVAL;
}
s->secrets_dir = NULL;
s->fs_dir = NULL;
memset(s->fs_files, 0, sizeof(s->fs_files));
dent = securityfs_create_dir("secrets", NULL);
if (IS_ERR(dent)) {
dev_err(&dev->dev, "Error creating secrets securityfs directory entry err=%ld\n",
PTR_ERR(dent));
return PTR_ERR(dent);
}
s->secrets_dir = dent;
dent = securityfs_create_dir("coco", s->secrets_dir);
if (IS_ERR(dent)) {
dev_err(&dev->dev, "Error creating coco securityfs directory entry err=%ld\n",
PTR_ERR(dent));
return PTR_ERR(dent);
}
d_inode(dent)->i_op = &efi_secret_dir_inode_operations;
s->fs_dir = dent;
bytes_left = h->len - sizeof(*h);
ptr += sizeof(*h);
while (bytes_left >= (int)sizeof(*e) && i < EFI_SECRET_NUM_FILES) {
e = (struct secret_entry *)ptr;
if (e->len < sizeof(*e) || e->len > (unsigned int)bytes_left) {
dev_err(&dev->dev, "EFI secret area is corrupted\n");
ret = -EINVAL;
goto err_cleanup;
}
/* Skip deleted entries (which will have NULL_GUID) */
if (efi_guidcmp(e->guid, NULL_GUID)) {
efi_guid_to_str(&e->guid, guid_str);
dent = securityfs_create_file(guid_str, 0440, s->fs_dir, (void *)e,
&efi_secret_bin_file_fops);
if (IS_ERR(dent)) {
dev_err(&dev->dev, "Error creating efi_secret securityfs entry\n");
ret = PTR_ERR(dent);
goto err_cleanup;
}
s->fs_files[i++] = dent;
}
ptr += e->len;
bytes_left -= e->len;
}
dev_info(&dev->dev, "Created %d entries in securityfs secrets/coco\n", i);
return 0;
err_cleanup:
efi_secret_securityfs_teardown(dev);
return ret;
}
static void efi_secret_unmap_area(void)
{
struct efi_secret *s = efi_secret_get();
if (s->secret_data) {
iounmap(s->secret_data);
s->secret_data = NULL;
s->secret_data_len = 0;
}
}
static int efi_secret_probe(struct platform_device *dev)
{
int ret;
ret = efi_secret_map_area(dev);
if (ret)
return ret;
ret = efi_secret_securityfs_setup(dev);
if (ret)
goto err_unmap;
return ret;
err_unmap:
efi_secret_unmap_area();
return ret;
}
static int efi_secret_remove(struct platform_device *dev)
{
efi_secret_securityfs_teardown(dev);
efi_secret_unmap_area();
return 0;
}
static struct platform_driver efi_secret_driver = {
.probe = efi_secret_probe,
.remove = efi_secret_remove,
.driver = {
.name = "efi_secret",
},
};
module_platform_driver(efi_secret_driver);
MODULE_DESCRIPTION("Confidential computing EFI secret area access");
MODULE_AUTHOR("IBM");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:efi_secret");
| linux-master | drivers/virt/coco/efi_secret/efi_secret.c |
// SPDX-License-Identifier: GPL-2.0
/*
* TDX guest user interface driver
*
* Copyright (C) 2022 Intel Corporation
*/
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <uapi/linux/tdx-guest.h>
#include <asm/cpu_device_id.h>
#include <asm/tdx.h>
static long tdx_get_report0(struct tdx_report_req __user *req)
{
u8 *reportdata, *tdreport;
long ret;
reportdata = kmalloc(TDX_REPORTDATA_LEN, GFP_KERNEL);
if (!reportdata)
return -ENOMEM;
tdreport = kzalloc(TDX_REPORT_LEN, GFP_KERNEL);
if (!tdreport) {
ret = -ENOMEM;
goto out;
}
if (copy_from_user(reportdata, req->reportdata, TDX_REPORTDATA_LEN)) {
ret = -EFAULT;
goto out;
}
/* Generate TDREPORT0 using "TDG.MR.REPORT" TDCALL */
ret = tdx_mcall_get_report0(reportdata, tdreport);
if (ret)
goto out;
if (copy_to_user(req->tdreport, tdreport, TDX_REPORT_LEN))
ret = -EFAULT;
out:
kfree(reportdata);
kfree(tdreport);
return ret;
}
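/*
 * Illustrative userspace counterpart (a minimal sketch, not part of the
 * driver): how a process could drive TDX_CMD_GET_REPORT0. The device node
 * name is derived from KBUILD_MODNAME and is assumed to be /dev/tdx_guest
 * here.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/tdx-guest.h>
static int get_report0(const __u8 reportdata[TDX_REPORTDATA_LEN],
		       __u8 tdreport[TDX_REPORT_LEN])
{
	struct tdx_report_req req;
	int fd, ret;
	memcpy(req.reportdata, reportdata, TDX_REPORTDATA_LEN);
	fd = open("/dev/tdx_guest", O_RDWR);
	if (fd < 0)
		return -1;
	ret = ioctl(fd, TDX_CMD_GET_REPORT0, &req);
	if (!ret)
		memcpy(tdreport, req.tdreport, TDX_REPORT_LEN);
	close(fd);
	return ret;
}
#endif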
static long tdx_guest_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
switch (cmd) {
case TDX_CMD_GET_REPORT0:
return tdx_get_report0((struct tdx_report_req __user *)arg);
default:
return -ENOTTY;
}
}
static const struct file_operations tdx_guest_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = tdx_guest_ioctl,
.llseek = no_llseek,
};
static struct miscdevice tdx_misc_dev = {
.name = KBUILD_MODNAME,
.minor = MISC_DYNAMIC_MINOR,
.fops = &tdx_guest_fops,
};
static const struct x86_cpu_id tdx_guest_ids[] = {
X86_MATCH_FEATURE(X86_FEATURE_TDX_GUEST, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, tdx_guest_ids);
static int __init tdx_guest_init(void)
{
if (!x86_match_cpu(tdx_guest_ids))
return -ENODEV;
return misc_register(&tdx_misc_dev);
}
module_init(tdx_guest_init);
static void __exit tdx_guest_exit(void)
{
misc_deregister(&tdx_misc_dev);
}
module_exit(tdx_guest_exit);
MODULE_AUTHOR("Kuppuswamy Sathyanarayanan <[email protected]>");
MODULE_DESCRIPTION("TDX Guest Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/virt/coco/tdx-guest/tdx-guest.c |
/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
/*
* vboxguest vmm-req and hgcm-call code, VBoxGuestR0LibHGCMInternal.cpp,
* VBoxGuestR0LibGenericRequest.cpp and RTErrConvertToErrno.cpp in vbox svn.
*
* Copyright (C) 2006-2016 Oracle Corporation
*/
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/vbox_err.h>
#include <linux/vbox_utils.h>
#include "vboxguest_core.h"
/* Get the pointer to the first parameter of a HGCM call request. */
#define VMMDEV_HGCM_CALL_PARMS(a) \
((struct vmmdev_hgcm_function_parameter *)( \
(u8 *)(a) + sizeof(struct vmmdev_hgcm_call)))
/* The max parameter buffer size for a user request. */
#define VBG_MAX_HGCM_USER_PARM (24 * SZ_1M)
/* The max parameter buffer size for a kernel request. */
#define VBG_MAX_HGCM_KERNEL_PARM (16 * SZ_1M)
#define VBG_DEBUG_PORT 0x504
/* This protects vbg_log_buf and serializes VBG_DEBUG_PORT accesses */
static DEFINE_SPINLOCK(vbg_log_lock);
static char vbg_log_buf[128];
#define VBG_LOG(name, pr_func) \
void name(const char *fmt, ...) \
{ \
unsigned long flags; \
va_list args; \
int i, count; \
\
va_start(args, fmt); \
spin_lock_irqsave(&vbg_log_lock, flags); \
\
count = vscnprintf(vbg_log_buf, sizeof(vbg_log_buf), fmt, args);\
for (i = 0; i < count; i++) \
outb(vbg_log_buf[i], VBG_DEBUG_PORT); \
\
pr_func("%s", vbg_log_buf); \
\
spin_unlock_irqrestore(&vbg_log_lock, flags); \
va_end(args); \
} \
EXPORT_SYMBOL(name)
VBG_LOG(vbg_info, pr_info);
VBG_LOG(vbg_warn, pr_warn);
VBG_LOG(vbg_err, pr_err);
VBG_LOG(vbg_err_ratelimited, pr_err_ratelimited);
#if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
VBG_LOG(vbg_debug, pr_debug);
#endif
void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
u32 requestor)
{
struct vmmdev_request_header *req;
int order = get_order(PAGE_ALIGN(len));
req = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
if (!req)
return NULL;
memset(req, 0xaa, len);
req->size = len;
req->version = VMMDEV_REQUEST_HEADER_VERSION;
req->request_type = req_type;
req->rc = VERR_GENERAL_FAILURE;
req->reserved1 = 0;
req->requestor = requestor;
return req;
}
void vbg_req_free(void *req, size_t len)
{
if (!req)
return;
free_pages((unsigned long)req, get_order(PAGE_ALIGN(len)));
}
/* Note this function returns a VBox status code, not a negative errno!! */
int vbg_req_perform(struct vbg_dev *gdev, void *req)
{
unsigned long phys_req = virt_to_phys(req);
outl(phys_req, gdev->io_port + VMMDEV_PORT_OFF_REQUEST);
/*
* The host changes the request as a result of the outl, make sure
* the outl and any reads of the req happen in the correct order.
*/
mb();
return ((struct vmmdev_request_header *)req)->rc;
}
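/*
 * Illustrative usage of the request helpers above (a minimal sketch, not
 * part of the driver): allocate a request, have the host process it and
 * free it again. The request type and struct are stand-ins taken from the
 * VMM device headers; treat them as assumptions here.
 */
#if 0
static int vbg_example_get_host_version(struct vbg_dev *gdev, u32 requestor)
{
	struct vmmdev_host_version *req;
	int rc;
	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION,
			    requestor);
	if (!req)
		return -ENOMEM;
	rc = vbg_req_perform(gdev, req);	/* VBox status code! */
	vbg_req_free(req, sizeof(*req));
	return rc >= 0 ? 0 : -EIO;
}
#endif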
static bool hgcm_req_done(struct vbg_dev *gdev,
struct vmmdev_hgcmreq_header *header)
{
unsigned long flags;
bool done;
spin_lock_irqsave(&gdev->event_spinlock, flags);
done = header->flags & VMMDEV_HGCM_REQ_DONE;
spin_unlock_irqrestore(&gdev->event_spinlock, flags);
return done;
}
int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
struct vmmdev_hgcm_service_location *loc,
u32 *client_id, int *vbox_status)
{
struct vmmdev_hgcm_connect *hgcm_connect = NULL;
int rc;
hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect),
VMMDEVREQ_HGCM_CONNECT, requestor);
if (!hgcm_connect)
return -ENOMEM;
hgcm_connect->header.flags = 0;
memcpy(&hgcm_connect->loc, loc, sizeof(*loc));
hgcm_connect->client_id = 0;
rc = vbg_req_perform(gdev, hgcm_connect);
if (rc == VINF_HGCM_ASYNC_EXECUTE)
wait_event(gdev->hgcm_wq,
hgcm_req_done(gdev, &hgcm_connect->header));
if (rc >= 0) {
*client_id = hgcm_connect->client_id;
rc = hgcm_connect->header.result;
}
vbg_req_free(hgcm_connect, sizeof(*hgcm_connect));
*vbox_status = rc;
return 0;
}
EXPORT_SYMBOL(vbg_hgcm_connect);
int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
u32 client_id, int *vbox_status)
{
struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL;
int rc;
hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect),
VMMDEVREQ_HGCM_DISCONNECT,
requestor);
if (!hgcm_disconnect)
return -ENOMEM;
hgcm_disconnect->header.flags = 0;
hgcm_disconnect->client_id = client_id;
rc = vbg_req_perform(gdev, hgcm_disconnect);
if (rc == VINF_HGCM_ASYNC_EXECUTE)
wait_event(gdev->hgcm_wq,
hgcm_req_done(gdev, &hgcm_disconnect->header));
if (rc >= 0)
rc = hgcm_disconnect->header.result;
vbg_req_free(hgcm_disconnect, sizeof(*hgcm_disconnect));
*vbox_status = rc;
return 0;
}
EXPORT_SYMBOL(vbg_hgcm_disconnect);
static u32 hgcm_call_buf_size_in_pages(void *buf, u32 len)
{
u32 size = PAGE_ALIGN(len + ((unsigned long)buf & ~PAGE_MASK));
return size >> PAGE_SHIFT;
}
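/*
 * Worked example (illustrative only): for buf = ...1f00 and len = 0x300
 * with 4 KiB pages, the in-page offset is 0xf00, PAGE_ALIGN(0x300 + 0xf00)
 * is 0x2000 and the function therefore returns 2, matching the two
 * physical pages that back bytes ...1f00 through ...21ff.
 */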
static void hgcm_call_add_pagelist_size(void *buf, u32 len, size_t *extra)
{
u32 page_count;
page_count = hgcm_call_buf_size_in_pages(buf, len);
*extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);
}
static int hgcm_call_preprocess_linaddr(
const struct vmmdev_hgcm_function_parameter *src_parm,
void **bounce_buf_ret, size_t *extra)
{
void *buf, *bounce_buf;
bool copy_in;
u32 len;
int ret;
buf = (void *)src_parm->u.pointer.u.linear_addr;
len = src_parm->u.pointer.size;
copy_in = src_parm->type != VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT;
if (len > VBG_MAX_HGCM_USER_PARM)
return -E2BIG;
bounce_buf = kvmalloc(len, GFP_KERNEL);
if (!bounce_buf)
return -ENOMEM;
*bounce_buf_ret = bounce_buf;
if (copy_in) {
ret = copy_from_user(bounce_buf, (void __user *)buf, len);
if (ret)
return -EFAULT;
} else {
memset(bounce_buf, 0, len);
}
hgcm_call_add_pagelist_size(bounce_buf, len, extra);
return 0;
}
/**
* Preprocesses the HGCM call: validates parameters, allocates bounce buffers
* and figures out how much extra storage is needed for page lists.
* Return: 0 or negative errno value.
* @src_parm: Pointer to source function call parameters
* @parm_count: Number of function call parameters.
* @bounce_bufs_ret: Where to return the allocated bouncebuffer array
* @extra: Where to return the extra request space needed for
* physical page lists.
*/
static int hgcm_call_preprocess(
const struct vmmdev_hgcm_function_parameter *src_parm,
u32 parm_count, void ***bounce_bufs_ret, size_t *extra)
{
void *buf, **bounce_bufs = NULL;
u32 i, len;
int ret;
for (i = 0; i < parm_count; i++, src_parm++) {
switch (src_parm->type) {
case VMMDEV_HGCM_PARM_TYPE_32BIT:
case VMMDEV_HGCM_PARM_TYPE_64BIT:
break;
case VMMDEV_HGCM_PARM_TYPE_LINADDR:
case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
if (!bounce_bufs) {
bounce_bufs = kcalloc(parm_count,
sizeof(void *),
GFP_KERNEL);
if (!bounce_bufs)
return -ENOMEM;
*bounce_bufs_ret = bounce_bufs;
}
ret = hgcm_call_preprocess_linaddr(src_parm,
&bounce_bufs[i],
extra);
if (ret)
return ret;
break;
case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
buf = (void *)src_parm->u.pointer.u.linear_addr;
len = src_parm->u.pointer.size;
if (WARN_ON(len > VBG_MAX_HGCM_KERNEL_PARM))
return -E2BIG;
hgcm_call_add_pagelist_size(buf, len, extra);
break;
default:
return -EINVAL;
}
}
return 0;
}
/**
* Translates linear address types to page list direction flags.
*
* Return: page list flags.
* @type: The type.
*/
static u32 hgcm_call_linear_addr_type_to_pagelist_flags(
enum vmmdev_hgcm_function_parameter_type type)
{
switch (type) {
default:
WARN_ON(1);
fallthrough;
case VMMDEV_HGCM_PARM_TYPE_LINADDR:
case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
return VMMDEV_HGCM_F_PARM_DIRECTION_BOTH;
case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
return VMMDEV_HGCM_F_PARM_DIRECTION_TO_HOST;
case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
return VMMDEV_HGCM_F_PARM_DIRECTION_FROM_HOST;
}
}
static void hgcm_call_init_linaddr(struct vmmdev_hgcm_call *call,
struct vmmdev_hgcm_function_parameter *dst_parm, void *buf, u32 len,
enum vmmdev_hgcm_function_parameter_type type, u32 *off_extra)
{
struct vmmdev_hgcm_pagelist *dst_pg_lst;
struct page *page;
bool is_vmalloc;
u32 i, page_count;
dst_parm->type = type;
if (len == 0) {
dst_parm->u.pointer.size = 0;
dst_parm->u.pointer.u.linear_addr = 0;
return;
}
dst_pg_lst = (void *)call + *off_extra;
page_count = hgcm_call_buf_size_in_pages(buf, len);
is_vmalloc = is_vmalloc_addr(buf);
dst_parm->type = VMMDEV_HGCM_PARM_TYPE_PAGELIST;
dst_parm->u.page_list.size = len;
dst_parm->u.page_list.offset = *off_extra;
dst_pg_lst->flags = hgcm_call_linear_addr_type_to_pagelist_flags(type);
dst_pg_lst->offset_first_page = (unsigned long)buf & ~PAGE_MASK;
dst_pg_lst->page_count = page_count;
for (i = 0; i < page_count; i++) {
if (is_vmalloc)
page = vmalloc_to_page(buf);
else
page = virt_to_page(buf);
dst_pg_lst->pages[i] = page_to_phys(page);
buf += PAGE_SIZE;
}
*off_extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);
}
/**
* Initializes the call request that we're sending to the host.
* @call: The call to initialize.
* @client_id: The client ID of the caller.
* @function: The function number of the function to call.
* @src_parm: Pointer to source function call parameters.
* @parm_count: Number of function call parameters.
* @bounce_bufs: The bouncebuffer array.
*/
static void hgcm_call_init_call(
struct vmmdev_hgcm_call *call, u32 client_id, u32 function,
const struct vmmdev_hgcm_function_parameter *src_parm,
u32 parm_count, void **bounce_bufs)
{
struct vmmdev_hgcm_function_parameter *dst_parm =
VMMDEV_HGCM_CALL_PARMS(call);
u32 i, off_extra = (uintptr_t)(dst_parm + parm_count) - (uintptr_t)call;
void *buf;
call->header.flags = 0;
call->header.result = VINF_SUCCESS;
call->client_id = client_id;
call->function = function;
call->parm_count = parm_count;
for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
switch (src_parm->type) {
case VMMDEV_HGCM_PARM_TYPE_32BIT:
case VMMDEV_HGCM_PARM_TYPE_64BIT:
*dst_parm = *src_parm;
break;
case VMMDEV_HGCM_PARM_TYPE_LINADDR:
case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
hgcm_call_init_linaddr(call, dst_parm, bounce_bufs[i],
src_parm->u.pointer.size,
src_parm->type, &off_extra);
break;
case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
buf = (void *)src_parm->u.pointer.u.linear_addr;
hgcm_call_init_linaddr(call, dst_parm, buf,
src_parm->u.pointer.size,
src_parm->type, &off_extra);
break;
default:
WARN_ON(1);
dst_parm->type = VMMDEV_HGCM_PARM_TYPE_INVALID;
}
}
}
/**
* Tries to cancel a pending HGCM call.
*
* Return: VBox status code
*/
static int hgcm_cancel_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call)
{
int rc;
/*
* We use a pre-allocated request for cancellations, which is
* protected by cancel_req_mutex. This means that all cancellations
* get serialized; this should be fine since they should be rare.
*/
mutex_lock(&gdev->cancel_req_mutex);
gdev->cancel_req->phys_req_to_cancel = virt_to_phys(call);
rc = vbg_req_perform(gdev, gdev->cancel_req);
mutex_unlock(&gdev->cancel_req_mutex);
if (rc == VERR_NOT_IMPLEMENTED) {
call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;
call->header.header.request_type = VMMDEVREQ_HGCM_CANCEL;
rc = vbg_req_perform(gdev, call);
if (rc == VERR_INVALID_PARAMETER)
rc = VERR_NOT_FOUND;
}
if (rc >= 0)
call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;
return rc;
}
/**
* Performs the call and completion wait.
* Return: 0 or negative errno value.
* @gdev: The VBoxGuest device extension.
* @call: The call to execute.
* @timeout_ms: Timeout in ms.
* @interruptible: Whether the wait can be interrupted by a signal.
* @leak_it:     Where to return the leak-it indicator; set when a timed-out
*               call could not be cancelled and its request must be leaked.
*/
static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
u32 timeout_ms, bool interruptible, bool *leak_it)
{
int rc, cancel_rc, ret;
long timeout;
*leak_it = false;
rc = vbg_req_perform(gdev, call);
/*
* If the call failed, then pretend success. Upper layers will
* interpret the result code in the packet.
*/
if (rc < 0) {
call->header.result = rc;
return 0;
}
if (rc != VINF_HGCM_ASYNC_EXECUTE)
return 0;
/* Host decided to process the request asynchronously, wait for it */
if (timeout_ms == U32_MAX)
timeout = MAX_SCHEDULE_TIMEOUT;
else
timeout = msecs_to_jiffies(timeout_ms);
if (interruptible) {
timeout = wait_event_interruptible_timeout(gdev->hgcm_wq,
hgcm_req_done(gdev, &call->header),
timeout);
} else {
timeout = wait_event_timeout(gdev->hgcm_wq,
hgcm_req_done(gdev, &call->header),
timeout);
}
/* timeout > 0 means hgcm_req_done has returned true, so success */
if (timeout > 0)
return 0;
if (timeout == 0)
ret = -ETIMEDOUT;
else
ret = -EINTR;
/* Cancel the request */
cancel_rc = hgcm_cancel_call(gdev, call);
if (cancel_rc >= 0)
return ret;
/*
* Failed to cancel, this should mean that the cancel has lost the
* race with normal completion, wait while the host completes it.
*/
if (cancel_rc == VERR_NOT_FOUND || cancel_rc == VERR_SEM_DESTROYED)
timeout = msecs_to_jiffies(500);
else
timeout = msecs_to_jiffies(2000);
timeout = wait_event_timeout(gdev->hgcm_wq,
hgcm_req_done(gdev, &call->header),
timeout);
if (WARN_ON(timeout == 0)) {
/* We really should never get here */
vbg_err("%s: Call timedout and cancellation failed, leaking the request\n",
__func__);
*leak_it = true;
return ret;
}
/* The call has completed normally after all */
return 0;
}
/**
* Copies the result of the call back to the caller info structure and user
* buffers.
* Return: 0 or negative errno value.
* @call: HGCM call request.
* @dst_parm: Pointer to function call parameters destination.
* @parm_count: Number of function call parameters.
* @bounce_bufs: The bouncebuffer array.
*/
static int hgcm_call_copy_back_result(
const struct vmmdev_hgcm_call *call,
struct vmmdev_hgcm_function_parameter *dst_parm,
u32 parm_count, void **bounce_bufs)
{
const struct vmmdev_hgcm_function_parameter *src_parm =
VMMDEV_HGCM_CALL_PARMS(call);
void __user *p;
int ret;
u32 i;
/* Copy back parameters. */
for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
switch (dst_parm->type) {
case VMMDEV_HGCM_PARM_TYPE_32BIT:
case VMMDEV_HGCM_PARM_TYPE_64BIT:
*dst_parm = *src_parm;
break;
case VMMDEV_HGCM_PARM_TYPE_PAGELIST:
dst_parm->u.page_list.size = src_parm->u.page_list.size;
break;
case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
dst_parm->u.pointer.size = src_parm->u.pointer.size;
break;
case VMMDEV_HGCM_PARM_TYPE_LINADDR:
case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
dst_parm->u.pointer.size = src_parm->u.pointer.size;
p = (void __user *)dst_parm->u.pointer.u.linear_addr;
ret = copy_to_user(p, bounce_bufs[i],
min(src_parm->u.pointer.size,
dst_parm->u.pointer.size));
if (ret)
return -EFAULT;
break;
default:
WARN_ON(1);
return -EINVAL;
}
}
return 0;
}
int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
u32 function, u32 timeout_ms,
struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
int *vbox_status)
{
struct vmmdev_hgcm_call *call;
void **bounce_bufs = NULL;
bool leak_it;
size_t size;
int i, ret;
size = sizeof(struct vmmdev_hgcm_call) +
parm_count * sizeof(struct vmmdev_hgcm_function_parameter);
/*
* Validate and buffer the parameters for the call. This also increases
	 * size with the amount of extra space needed for page lists.
*/
ret = hgcm_call_preprocess(parms, parm_count, &bounce_bufs, &size);
if (ret) {
/* Even on error bounce bufs may still have been allocated */
goto free_bounce_bufs;
}
call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL, requestor);
if (!call) {
ret = -ENOMEM;
goto free_bounce_bufs;
}
hgcm_call_init_call(call, client_id, function, parms, parm_count,
bounce_bufs);
ret = vbg_hgcm_do_call(gdev, call, timeout_ms,
requestor & VMMDEV_REQUESTOR_USERMODE, &leak_it);
if (ret == 0) {
*vbox_status = call->header.result;
ret = hgcm_call_copy_back_result(call, parms, parm_count,
bounce_bufs);
}
if (!leak_it)
vbg_req_free(call, size);
free_bounce_bufs:
if (bounce_bufs) {
for (i = 0; i < parm_count; i++)
kvfree(bounce_bufs[i]);
kfree(bounce_bufs);
}
return ret;
}
EXPORT_SYMBOL(vbg_hgcm_call);
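/*
 * Minimal usage sketch for vbg_hgcm_call() (hypothetical caller; gdev, the
 * requestor flags, client_id and the function number are assumed to come
 * from driver context and an earlier vbg_hgcm_connect()):
 *
 *	struct vmmdev_hgcm_function_parameter parm = {
 *		.type = VMMDEV_HGCM_PARM_TYPE_32BIT,
 *		.u.value32 = 42,
 *	};
 *	int vbox_status;
 *	int ret = vbg_hgcm_call(gdev, requestor, client_id, function,
 *				5000, &parm, 1, &vbox_status);
 *
 * ret reports Linux-side failures (-ENOMEM, -ETIMEDOUT, ...), while
 * vbox_status carries the VBox status of the call itself; check both.
 */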
#ifdef CONFIG_COMPAT
int vbg_hgcm_call32(
struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
u32 parm_count, int *vbox_status)
{
struct vmmdev_hgcm_function_parameter *parm64 = NULL;
u32 i, size;
int ret = 0;
	/* KISS: allocate a temporary request and convert the parameters. */
size = parm_count * sizeof(struct vmmdev_hgcm_function_parameter);
parm64 = kzalloc(size, GFP_KERNEL);
if (!parm64)
return -ENOMEM;
for (i = 0; i < parm_count; i++) {
switch (parm32[i].type) {
case VMMDEV_HGCM_PARM_TYPE_32BIT:
parm64[i].type = VMMDEV_HGCM_PARM_TYPE_32BIT;
parm64[i].u.value32 = parm32[i].u.value32;
break;
case VMMDEV_HGCM_PARM_TYPE_64BIT:
parm64[i].type = VMMDEV_HGCM_PARM_TYPE_64BIT;
parm64[i].u.value64 = parm32[i].u.value64;
break;
case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
case VMMDEV_HGCM_PARM_TYPE_LINADDR:
case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
parm64[i].type = parm32[i].type;
parm64[i].u.pointer.size = parm32[i].u.pointer.size;
parm64[i].u.pointer.u.linear_addr =
parm32[i].u.pointer.u.linear_addr;
break;
default:
ret = -EINVAL;
}
if (ret < 0)
goto out_free;
}
ret = vbg_hgcm_call(gdev, requestor, client_id, function, timeout_ms,
parm64, parm_count, vbox_status);
if (ret < 0)
goto out_free;
/* Copy back. */
	/* Indexed access below, so do not also advance the pointers here. */
	for (i = 0; i < parm_count; i++) {
switch (parm64[i].type) {
case VMMDEV_HGCM_PARM_TYPE_32BIT:
parm32[i].u.value32 = parm64[i].u.value32;
break;
case VMMDEV_HGCM_PARM_TYPE_64BIT:
parm32[i].u.value64 = parm64[i].u.value64;
break;
case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
case VMMDEV_HGCM_PARM_TYPE_LINADDR:
case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
parm32[i].u.pointer.size = parm64[i].u.pointer.size;
break;
default:
WARN_ON(1);
ret = -EINVAL;
}
}
out_free:
kfree(parm64);
return ret;
}
#endif
static const int vbg_status_code_to_errno_table[] = {
[-VERR_ACCESS_DENIED] = -EPERM,
[-VERR_FILE_NOT_FOUND] = -ENOENT,
[-VERR_PROCESS_NOT_FOUND] = -ESRCH,
[-VERR_INTERRUPTED] = -EINTR,
[-VERR_DEV_IO_ERROR] = -EIO,
[-VERR_TOO_MUCH_DATA] = -E2BIG,
[-VERR_BAD_EXE_FORMAT] = -ENOEXEC,
[-VERR_INVALID_HANDLE] = -EBADF,
[-VERR_TRY_AGAIN] = -EAGAIN,
[-VERR_NO_MEMORY] = -ENOMEM,
[-VERR_INVALID_POINTER] = -EFAULT,
[-VERR_RESOURCE_BUSY] = -EBUSY,
[-VERR_ALREADY_EXISTS] = -EEXIST,
[-VERR_NOT_SAME_DEVICE] = -EXDEV,
[-VERR_NOT_A_DIRECTORY] = -ENOTDIR,
[-VERR_PATH_NOT_FOUND] = -ENOTDIR,
[-VERR_INVALID_NAME] = -ENOENT,
[-VERR_IS_A_DIRECTORY] = -EISDIR,
[-VERR_INVALID_PARAMETER] = -EINVAL,
[-VERR_TOO_MANY_OPEN_FILES] = -ENFILE,
[-VERR_INVALID_FUNCTION] = -ENOTTY,
[-VERR_SHARING_VIOLATION] = -ETXTBSY,
[-VERR_FILE_TOO_BIG] = -EFBIG,
[-VERR_DISK_FULL] = -ENOSPC,
[-VERR_SEEK_ON_DEVICE] = -ESPIPE,
[-VERR_WRITE_PROTECT] = -EROFS,
[-VERR_BROKEN_PIPE] = -EPIPE,
[-VERR_DEADLOCK] = -EDEADLK,
[-VERR_FILENAME_TOO_LONG] = -ENAMETOOLONG,
[-VERR_FILE_LOCK_FAILED] = -ENOLCK,
[-VERR_NOT_IMPLEMENTED] = -ENOSYS,
[-VERR_NOT_SUPPORTED] = -ENOSYS,
[-VERR_DIR_NOT_EMPTY] = -ENOTEMPTY,
[-VERR_TOO_MANY_SYMLINKS] = -ELOOP,
[-VERR_NO_MORE_FILES] = -ENODATA,
[-VERR_NO_DATA] = -ENODATA,
[-VERR_NET_NO_NETWORK] = -ENONET,
[-VERR_NET_NOT_UNIQUE_NAME] = -ENOTUNIQ,
[-VERR_NO_TRANSLATION] = -EILSEQ,
[-VERR_NET_NOT_SOCKET] = -ENOTSOCK,
[-VERR_NET_DEST_ADDRESS_REQUIRED] = -EDESTADDRREQ,
[-VERR_NET_MSG_SIZE] = -EMSGSIZE,
[-VERR_NET_PROTOCOL_TYPE] = -EPROTOTYPE,
[-VERR_NET_PROTOCOL_NOT_AVAILABLE] = -ENOPROTOOPT,
[-VERR_NET_PROTOCOL_NOT_SUPPORTED] = -EPROTONOSUPPORT,
[-VERR_NET_SOCKET_TYPE_NOT_SUPPORTED] = -ESOCKTNOSUPPORT,
[-VERR_NET_OPERATION_NOT_SUPPORTED] = -EOPNOTSUPP,
[-VERR_NET_PROTOCOL_FAMILY_NOT_SUPPORTED] = -EPFNOSUPPORT,
[-VERR_NET_ADDRESS_FAMILY_NOT_SUPPORTED] = -EAFNOSUPPORT,
[-VERR_NET_ADDRESS_IN_USE] = -EADDRINUSE,
[-VERR_NET_ADDRESS_NOT_AVAILABLE] = -EADDRNOTAVAIL,
[-VERR_NET_DOWN] = -ENETDOWN,
[-VERR_NET_UNREACHABLE] = -ENETUNREACH,
[-VERR_NET_CONNECTION_RESET] = -ENETRESET,
[-VERR_NET_CONNECTION_ABORTED] = -ECONNABORTED,
[-VERR_NET_CONNECTION_RESET_BY_PEER] = -ECONNRESET,
[-VERR_NET_NO_BUFFER_SPACE] = -ENOBUFS,
[-VERR_NET_ALREADY_CONNECTED] = -EISCONN,
[-VERR_NET_NOT_CONNECTED] = -ENOTCONN,
[-VERR_NET_SHUTDOWN] = -ESHUTDOWN,
[-VERR_NET_TOO_MANY_REFERENCES] = -ETOOMANYREFS,
[-VERR_TIMEOUT] = -ETIMEDOUT,
[-VERR_NET_CONNECTION_REFUSED] = -ECONNREFUSED,
[-VERR_NET_HOST_DOWN] = -EHOSTDOWN,
[-VERR_NET_HOST_UNREACHABLE] = -EHOSTUNREACH,
[-VERR_NET_ALREADY_IN_PROGRESS] = -EALREADY,
[-VERR_NET_IN_PROGRESS] = -EINPROGRESS,
[-VERR_MEDIA_NOT_PRESENT] = -ENOMEDIUM,
[-VERR_MEDIA_NOT_RECOGNIZED] = -EMEDIUMTYPE,
};
int vbg_status_code_to_errno(int rc)
{
if (rc >= 0)
return 0;
rc = -rc;
if (rc >= ARRAY_SIZE(vbg_status_code_to_errno_table) ||
vbg_status_code_to_errno_table[rc] == 0) {
vbg_warn("%s: Unhandled err %d\n", __func__, -rc);
return -EPROTO;
}
return vbg_status_code_to_errno_table[rc];
}
EXPORT_SYMBOL(vbg_status_code_to_errno);
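/*
 * Example (illustrative): vbg_status_code_to_errno(VERR_NO_MEMORY) returns
 * -ENOMEM, any rc >= 0 maps to 0, and status codes missing from the table
 * above fall back to -EPROTO after logging a warning.
 */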
| linux-master | drivers/virt/vboxguest/vboxguest_utils.c |
/* SPDX-License-Identifier: GPL-2.0 */
/*
* vboxguest linux pci driver, char-dev and input-device code,
*
* Copyright (C) 2006-2016 Oracle Corporation
*/
#include <linux/cred.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/vbox_utils.h>
#include "vboxguest_core.h"
/** The device name. */
#define DEVICE_NAME "vboxguest"
/** The device name for the device node open to everyone. */
#define DEVICE_NAME_USER "vboxuser"
/** VirtualBox PCI vendor ID. */
#define VBOX_VENDORID 0x80ee
/** VMMDev PCI card product ID. */
#define VMMDEV_DEVICEID 0xcafe
/** Mutex protecting the global vbg_gdev pointer used by vbg_get/put_gdev. */
static DEFINE_MUTEX(vbg_gdev_mutex);
/** Global vbg_gdev pointer used by vbg_get/put_gdev. */
static struct vbg_dev *vbg_gdev;
static u32 vbg_misc_device_requestor(struct inode *inode)
{
u32 requestor = VMMDEV_REQUESTOR_USERMODE |
VMMDEV_REQUESTOR_CON_DONT_KNOW |
VMMDEV_REQUESTOR_TRUST_NOT_GIVEN;
if (from_kuid(current_user_ns(), current_uid()) == 0)
requestor |= VMMDEV_REQUESTOR_USR_ROOT;
else
requestor |= VMMDEV_REQUESTOR_USR_USER;
if (in_egroup_p(inode->i_gid))
requestor |= VMMDEV_REQUESTOR_GRP_VBOX;
return requestor;
}
static int vbg_misc_device_open(struct inode *inode, struct file *filp)
{
struct vbg_session *session;
struct vbg_dev *gdev;
/* misc_open sets filp->private_data to our misc device */
gdev = container_of(filp->private_data, struct vbg_dev, misc_device);
session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode));
if (IS_ERR(session))
return PTR_ERR(session);
filp->private_data = session;
return 0;
}
static int vbg_misc_device_user_open(struct inode *inode, struct file *filp)
{
struct vbg_session *session;
struct vbg_dev *gdev;
/* misc_open sets filp->private_data to our misc device */
gdev = container_of(filp->private_data, struct vbg_dev,
misc_device_user);
session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode) |
VMMDEV_REQUESTOR_USER_DEVICE);
if (IS_ERR(session))
return PTR_ERR(session);
filp->private_data = session;
return 0;
}
/**
* Close device.
* Return: 0 on success, negated errno on failure.
* @inode: Pointer to inode info structure.
* @filp: Associated file pointer.
*/
static int vbg_misc_device_close(struct inode *inode, struct file *filp)
{
vbg_core_close_session(filp->private_data);
filp->private_data = NULL;
return 0;
}
/**
* Device I/O Control entry point.
* Return: 0 on success, negated errno on failure.
* @filp: Associated file pointer.
* @req: The request specified to ioctl().
* @arg: The argument specified to ioctl().
*/
static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
unsigned long arg)
{
struct vbg_session *session = filp->private_data;
size_t returned_size, size;
struct vbg_ioctl_hdr hdr;
bool is_vmmdev_req;
int ret = 0;
void *buf;
if (copy_from_user(&hdr, (void *)arg, sizeof(hdr)))
return -EFAULT;
if (hdr.version != VBG_IOCTL_HDR_VERSION)
return -EINVAL;
if (hdr.size_in < sizeof(hdr) ||
(hdr.size_out && hdr.size_out < sizeof(hdr)))
return -EINVAL;
size = max(hdr.size_in, hdr.size_out);
if (_IOC_SIZE(req) && _IOC_SIZE(req) != size)
return -EINVAL;
if (size > SZ_16M)
return -E2BIG;
/*
* IOCTL_VMMDEV_REQUEST needs the buffer to be below 4G to avoid
* the need for a bounce-buffer and another copy later on.
*/
is_vmmdev_req = (req & ~IOCSIZE_MASK) == VBG_IOCTL_VMMDEV_REQUEST(0) ||
req == VBG_IOCTL_VMMDEV_REQUEST_BIG ||
req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT;
if (is_vmmdev_req)
buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT,
session->requestor);
else
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
*((struct vbg_ioctl_hdr *)buf) = hdr;
if (copy_from_user(buf + sizeof(hdr), (void *)arg + sizeof(hdr),
hdr.size_in - sizeof(hdr))) {
ret = -EFAULT;
goto out;
}
if (hdr.size_in < size)
memset(buf + hdr.size_in, 0, size - hdr.size_in);
ret = vbg_core_ioctl(session, req, buf);
if (ret)
goto out;
returned_size = ((struct vbg_ioctl_hdr *)buf)->size_out;
if (returned_size > size) {
vbg_debug("%s: too much output data %zu > %zu\n",
__func__, returned_size, size);
returned_size = size;
}
if (copy_to_user((void *)arg, buf, returned_size) != 0)
ret = -EFAULT;
out:
if (is_vmmdev_req)
vbg_req_free(buf, size);
else
kfree(buf);
return ret;
}
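/*
 * Illustrative (hypothetical sizes) view of the header contract enforced
 * above; userspace fills a struct vbg_ioctl_hdr so that:
 *
 *	hdr.version  = VBG_IOCTL_HDR_VERSION;
 *	hdr.size_in  = sizeof(hdr) + bytes_of_input_payload;
 *	hdr.size_out = sizeof(hdr) + bytes_of_output_payload;
 *
 * The driver then allocates max(size_in, size_out) bytes, capped at 16 MiB,
 * and zero-fills any tail beyond size_in before dispatching.
 */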
/** The file_operations structures. */
static const struct file_operations vbg_misc_device_fops = {
.owner = THIS_MODULE,
.open = vbg_misc_device_open,
.release = vbg_misc_device_close,
.unlocked_ioctl = vbg_misc_device_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = vbg_misc_device_ioctl,
#endif
};
static const struct file_operations vbg_misc_device_user_fops = {
.owner = THIS_MODULE,
.open = vbg_misc_device_user_open,
.release = vbg_misc_device_close,
.unlocked_ioctl = vbg_misc_device_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = vbg_misc_device_ioctl,
#endif
};
/**
* Called when the input device is first opened.
*
* Sets up absolute mouse reporting.
*/
static int vbg_input_open(struct input_dev *input)
{
struct vbg_dev *gdev = input_get_drvdata(input);
u32 feat = VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE | VMMDEV_MOUSE_NEW_PROTOCOL;
return vbg_core_set_mouse_status(gdev, feat);
}
/**
* Called if all open handles to the input device are closed.
*
* Disables absolute reporting.
*/
static void vbg_input_close(struct input_dev *input)
{
struct vbg_dev *gdev = input_get_drvdata(input);
vbg_core_set_mouse_status(gdev, 0);
}
/**
* Creates the kernel input device.
*
* Return: 0 on success, negated errno on failure.
*/
static int vbg_create_input_device(struct vbg_dev *gdev)
{
struct input_dev *input;
input = devm_input_allocate_device(gdev->dev);
if (!input)
return -ENOMEM;
input->id.bustype = BUS_PCI;
input->id.vendor = VBOX_VENDORID;
input->id.product = VMMDEV_DEVICEID;
input->open = vbg_input_open;
input->close = vbg_input_close;
input->dev.parent = gdev->dev;
input->name = "VirtualBox mouse integration";
input_set_abs_params(input, ABS_X, VMMDEV_MOUSE_RANGE_MIN,
VMMDEV_MOUSE_RANGE_MAX, 0, 0);
input_set_abs_params(input, ABS_Y, VMMDEV_MOUSE_RANGE_MIN,
VMMDEV_MOUSE_RANGE_MAX, 0, 0);
input_set_capability(input, EV_KEY, BTN_MOUSE);
input_set_drvdata(input, gdev);
gdev->input = input;
return input_register_device(gdev->input);
}
static ssize_t host_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct vbg_dev *gdev = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", gdev->host_version);
}
static ssize_t host_features_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct vbg_dev *gdev = dev_get_drvdata(dev);
return sprintf(buf, "%#x\n", gdev->host_features);
}
static DEVICE_ATTR_RO(host_version);
static DEVICE_ATTR_RO(host_features);
static struct attribute *vbg_pci_attrs[] = {
&dev_attr_host_version.attr,
&dev_attr_host_features.attr,
NULL,
};
ATTRIBUTE_GROUPS(vbg_pci);
/**
* Does the PCI detection and init of the device.
*
* Return: 0 on success, negated errno on failure.
*/
static int vbg_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
struct device *dev = &pci->dev;
resource_size_t io, io_len, mmio, mmio_len;
struct vmmdev_memory *vmmdev;
struct vbg_dev *gdev;
int ret;
gdev = devm_kzalloc(dev, sizeof(*gdev), GFP_KERNEL);
if (!gdev)
return -ENOMEM;
ret = pci_enable_device(pci);
if (ret != 0) {
vbg_err("vboxguest: Error enabling device: %d\n", ret);
return ret;
}
ret = -ENODEV;
io = pci_resource_start(pci, 0);
io_len = pci_resource_len(pci, 0);
if (!io || !io_len) {
vbg_err("vboxguest: Error IO-port resource (0) is missing\n");
goto err_disable_pcidev;
}
if (devm_request_region(dev, io, io_len, DEVICE_NAME) == NULL) {
vbg_err("vboxguest: Error could not claim IO resource\n");
ret = -EBUSY;
goto err_disable_pcidev;
}
mmio = pci_resource_start(pci, 1);
mmio_len = pci_resource_len(pci, 1);
if (!mmio || !mmio_len) {
vbg_err("vboxguest: Error MMIO resource (1) is missing\n");
goto err_disable_pcidev;
}
if (devm_request_mem_region(dev, mmio, mmio_len, DEVICE_NAME) == NULL) {
vbg_err("vboxguest: Error could not claim MMIO resource\n");
ret = -EBUSY;
goto err_disable_pcidev;
}
vmmdev = devm_ioremap(dev, mmio, mmio_len);
if (!vmmdev) {
vbg_err("vboxguest: Error ioremap failed; MMIO addr=%pap size=%pap\n",
&mmio, &mmio_len);
goto err_disable_pcidev;
}
/* Validate MMIO region version and size. */
if (vmmdev->version != VMMDEV_MEMORY_VERSION ||
vmmdev->size < 32 || vmmdev->size > mmio_len) {
vbg_err("vboxguest: Bogus VMMDev memory; version=%08x (expected %08x) size=%d (expected <= %d)\n",
vmmdev->version, VMMDEV_MEMORY_VERSION,
vmmdev->size, (int)mmio_len);
goto err_disable_pcidev;
}
gdev->io_port = io;
gdev->mmio = vmmdev;
gdev->dev = dev;
gdev->misc_device.minor = MISC_DYNAMIC_MINOR;
gdev->misc_device.name = DEVICE_NAME;
gdev->misc_device.fops = &vbg_misc_device_fops;
gdev->misc_device_user.minor = MISC_DYNAMIC_MINOR;
gdev->misc_device_user.name = DEVICE_NAME_USER;
gdev->misc_device_user.fops = &vbg_misc_device_user_fops;
ret = vbg_core_init(gdev, VMMDEV_EVENT_MOUSE_POSITION_CHANGED);
if (ret)
goto err_disable_pcidev;
ret = vbg_create_input_device(gdev);
if (ret) {
vbg_err("vboxguest: Error creating input device: %d\n", ret);
goto err_vbg_core_exit;
}
ret = request_irq(pci->irq, vbg_core_isr, IRQF_SHARED, DEVICE_NAME,
gdev);
if (ret) {
vbg_err("vboxguest: Error requesting irq: %d\n", ret);
goto err_vbg_core_exit;
}
ret = misc_register(&gdev->misc_device);
if (ret) {
vbg_err("vboxguest: Error misc_register %s failed: %d\n",
DEVICE_NAME, ret);
goto err_free_irq;
}
ret = misc_register(&gdev->misc_device_user);
if (ret) {
vbg_err("vboxguest: Error misc_register %s failed: %d\n",
DEVICE_NAME_USER, ret);
goto err_unregister_misc_device;
}
mutex_lock(&vbg_gdev_mutex);
if (!vbg_gdev)
vbg_gdev = gdev;
else
ret = -EBUSY;
mutex_unlock(&vbg_gdev_mutex);
if (ret) {
		vbg_err("vboxguest: Error more than 1 vbox guest pci device\n");
goto err_unregister_misc_device_user;
}
pci_set_drvdata(pci, gdev);
return 0;
err_unregister_misc_device_user:
misc_deregister(&gdev->misc_device_user);
err_unregister_misc_device:
misc_deregister(&gdev->misc_device);
err_free_irq:
free_irq(pci->irq, gdev);
err_vbg_core_exit:
vbg_core_exit(gdev);
err_disable_pcidev:
pci_disable_device(pci);
return ret;
}
static void vbg_pci_remove(struct pci_dev *pci)
{
struct vbg_dev *gdev = pci_get_drvdata(pci);
mutex_lock(&vbg_gdev_mutex);
vbg_gdev = NULL;
mutex_unlock(&vbg_gdev_mutex);
free_irq(pci->irq, gdev);
misc_deregister(&gdev->misc_device_user);
misc_deregister(&gdev->misc_device);
vbg_core_exit(gdev);
pci_disable_device(pci);
}
struct vbg_dev *vbg_get_gdev(void)
{
mutex_lock(&vbg_gdev_mutex);
/*
	 * Note on success we keep the mutex locked until vbg_put_gdev();
	 * this stops vbg_pci_remove from removing the device from underneath
* vboxsf. vboxsf will only hold a reference for a short while.
*/
if (vbg_gdev)
return vbg_gdev;
mutex_unlock(&vbg_gdev_mutex);
return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL(vbg_get_gdev);
void vbg_put_gdev(struct vbg_dev *gdev)
{
WARN_ON(gdev != vbg_gdev);
mutex_unlock(&vbg_gdev_mutex);
}
EXPORT_SYMBOL(vbg_put_gdev);
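/*
 * Hypothetical caller pattern for the get/put pair above (e.g. from
 * vboxsf); the mutex stays locked for the whole get..put window, so the
 * reference must be short-lived:
 *
 *	struct vbg_dev *gdev = vbg_get_gdev();
 *
 *	if (IS_ERR(gdev))
 *		return PTR_ERR(gdev);
 *	// ... brief use of gdev ...
 *	vbg_put_gdev(gdev);
 */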
/**
* Callback for mouse events.
*
* This is called at the end of the ISR, after leaving the event spinlock, if
* VMMDEV_EVENT_MOUSE_POSITION_CHANGED was raised by the host.
*
* @gdev: The device extension.
*/
void vbg_linux_mouse_event(struct vbg_dev *gdev)
{
int rc;
/* Report events to the kernel input device */
gdev->mouse_status_req->mouse_features = 0;
gdev->mouse_status_req->pointer_pos_x = 0;
gdev->mouse_status_req->pointer_pos_y = 0;
rc = vbg_req_perform(gdev, gdev->mouse_status_req);
if (rc >= 0) {
input_report_abs(gdev->input, ABS_X,
gdev->mouse_status_req->pointer_pos_x);
input_report_abs(gdev->input, ABS_Y,
gdev->mouse_status_req->pointer_pos_y);
input_sync(gdev->input);
}
}
static const struct pci_device_id vbg_pci_ids[] = {
{ .vendor = VBOX_VENDORID, .device = VMMDEV_DEVICEID },
{}
};
MODULE_DEVICE_TABLE(pci, vbg_pci_ids);
static struct pci_driver vbg_pci_driver = {
.name = DEVICE_NAME,
.dev_groups = vbg_pci_groups,
.id_table = vbg_pci_ids,
.probe = vbg_pci_probe,
.remove = vbg_pci_remove,
};
module_pci_driver(vbg_pci_driver);
MODULE_AUTHOR("Oracle Corporation");
MODULE_DESCRIPTION("Oracle VM VirtualBox Guest Additions for Linux Module");
MODULE_LICENSE("GPL");
| linux-master | drivers/virt/vboxguest/vboxguest_linux.c |
/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
/*
* vboxguest core guest-device handling code, VBoxGuest.cpp in upstream svn.
*
* Copyright (C) 2007-2016 Oracle Corporation
*/
#include <linux/device.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vbox_err.h>
#include <linux/vbox_utils.h>
#include <linux/vmalloc.h>
#include "vboxguest_core.h"
#include "vboxguest_version.h"
/* Get the pointer to the first HGCM parameter. */
#define VBG_IOCTL_HGCM_CALL_PARMS(a) \
((struct vmmdev_hgcm_function_parameter *)( \
(u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
/* Get the pointer to the first HGCM parameter in a 32-bit request. */
#define VBG_IOCTL_HGCM_CALL_PARMS32(a) \
((struct vmmdev_hgcm_function_parameter32 *)( \
(u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
#define GUEST_MAPPINGS_TRIES 5
#define VBG_KERNEL_REQUEST \
(VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV | \
VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN)
/**
* Reserves memory in which the VMM can relocate any guest mappings
* that are floating around.
*
* This operation is a little bit tricky since the VMM might not accept
* just any address because of address clashes between the three contexts
* it operates in, so we try several times.
*
* Failure to reserve the guest mappings is ignored.
*
* @gdev: The Guest extension device.
*/
static void vbg_guest_mappings_init(struct vbg_dev *gdev)
{
struct vmmdev_hypervisorinfo *req;
void *guest_mappings[GUEST_MAPPINGS_TRIES];
struct page **pages = NULL;
u32 size, hypervisor_size;
int i, rc;
/* Query the required space. */
req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO,
VBG_KERNEL_REQUEST);
if (!req)
return;
req->hypervisor_start = 0;
req->hypervisor_size = 0;
rc = vbg_req_perform(gdev, req);
if (rc < 0)
goto out;
/*
* The VMM will report back if there is nothing it wants to map, like
* for instance in VT-x and AMD-V mode.
*/
if (req->hypervisor_size == 0)
goto out;
hypervisor_size = req->hypervisor_size;
/* Add 4M so that we can align the vmap to 4MiB as the host requires. */
size = PAGE_ALIGN(req->hypervisor_size) + SZ_4M;
pages = kmalloc_array(size >> PAGE_SHIFT, sizeof(*pages), GFP_KERNEL);
if (!pages)
goto out;
gdev->guest_mappings_dummy_page = alloc_page(GFP_HIGHUSER);
if (!gdev->guest_mappings_dummy_page)
goto out;
for (i = 0; i < (size >> PAGE_SHIFT); i++)
pages[i] = gdev->guest_mappings_dummy_page;
/*
* Try several times, the VMM might not accept some addresses because
* of address clashes between the three contexts.
*/
for (i = 0; i < GUEST_MAPPINGS_TRIES; i++) {
guest_mappings[i] = vmap(pages, (size >> PAGE_SHIFT),
VM_MAP, PAGE_KERNEL_RO);
if (!guest_mappings[i])
break;
req->header.request_type = VMMDEVREQ_SET_HYPERVISOR_INFO;
req->header.rc = VERR_INTERNAL_ERROR;
req->hypervisor_size = hypervisor_size;
req->hypervisor_start =
(unsigned long)PTR_ALIGN(guest_mappings[i], SZ_4M);
rc = vbg_req_perform(gdev, req);
if (rc >= 0) {
gdev->guest_mappings = guest_mappings[i];
break;
}
}
/* Free vmap's from failed attempts. */
while (--i >= 0)
vunmap(guest_mappings[i]);
/* On failure free the dummy-page backing the vmap */
if (!gdev->guest_mappings) {
__free_page(gdev->guest_mappings_dummy_page);
gdev->guest_mappings_dummy_page = NULL;
}
out:
vbg_req_free(req, sizeof(*req));
kfree(pages);
}
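/*
 * Worked example for the sizing above (illustrative numbers): with a
 * hypervisor_size of 6 MiB, the vmap covers PAGE_ALIGN(6M) + 4M = 10 MiB.
 * Rounding the mapping start up to the next 4 MiB boundary with
 * PTR_ALIGN(..., SZ_4M) consumes at most 4 MiB, so the aligned 6 MiB
 * window is still guaranteed to fit inside the mapping.
 */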
/**
* Undo what vbg_guest_mappings_init did.
*
* @gdev: The Guest extension device.
*/
static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
{
struct vmmdev_hypervisorinfo *req;
int rc;
if (!gdev->guest_mappings)
return;
/*
* Tell the host that we're going to free the memory we reserved for
	 * it, then free it up. (Leak the memory if anything goes wrong here.)
*/
req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO,
VBG_KERNEL_REQUEST);
if (!req)
return;
req->hypervisor_start = 0;
req->hypervisor_size = 0;
rc = vbg_req_perform(gdev, req);
vbg_req_free(req, sizeof(*req));
if (rc < 0) {
vbg_err("%s error: %d\n", __func__, rc);
return;
}
vunmap(gdev->guest_mappings);
gdev->guest_mappings = NULL;
__free_page(gdev->guest_mappings_dummy_page);
gdev->guest_mappings_dummy_page = NULL;
}
/**
* Report the guest information to the host.
* Return: 0 or negative errno value.
* @gdev: The Guest extension device.
*/
static int vbg_report_guest_info(struct vbg_dev *gdev)
{
/*
* Allocate and fill in the two guest info reports.
*/
struct vmmdev_guest_info *req1 = NULL;
struct vmmdev_guest_info2 *req2 = NULL;
int rc, ret = -ENOMEM;
req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO,
VBG_KERNEL_REQUEST);
req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2,
VBG_KERNEL_REQUEST);
if (!req1 || !req2)
goto out_free;
req1->interface_version = VMMDEV_VERSION;
req1->os_type = VMMDEV_OSTYPE_LINUX26;
#if __BITS_PER_LONG == 64
req1->os_type |= VMMDEV_OSTYPE_X64;
#endif
req2->additions_major = VBG_VERSION_MAJOR;
req2->additions_minor = VBG_VERSION_MINOR;
req2->additions_build = VBG_VERSION_BUILD;
req2->additions_revision = VBG_SVN_REV;
req2->additions_features =
VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO;
strscpy(req2->name, VBG_VERSION_STRING,
sizeof(req2->name));
/*
* There are two protocols here:
* 1. INFO2 + INFO1. Supported by >=3.2.51.
* 2. INFO1 and optionally INFO2. The old protocol.
*
* We try protocol 2 first. It will fail with VERR_NOT_SUPPORTED
* if not supported by the VMMDev (message ordering requirement).
*/
rc = vbg_req_perform(gdev, req2);
if (rc >= 0) {
rc = vbg_req_perform(gdev, req1);
} else if (rc == VERR_NOT_SUPPORTED || rc == VERR_NOT_IMPLEMENTED) {
rc = vbg_req_perform(gdev, req1);
if (rc >= 0) {
rc = vbg_req_perform(gdev, req2);
if (rc == VERR_NOT_IMPLEMENTED)
rc = VINF_SUCCESS;
}
}
ret = vbg_status_code_to_errno(rc);
out_free:
vbg_req_free(req2, sizeof(*req2));
vbg_req_free(req1, sizeof(*req1));
return ret;
}
/**
* Report the guest driver status to the host.
* Return: 0 or negative errno value.
* @gdev: The Guest extension device.
* @active: Flag whether the driver is now active or not.
*/
static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
{
struct vmmdev_guest_status *req;
int rc;
req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS,
VBG_KERNEL_REQUEST);
if (!req)
return -ENOMEM;
req->facility = VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER;
if (active)
req->status = VBOXGUEST_FACILITY_STATUS_ACTIVE;
else
req->status = VBOXGUEST_FACILITY_STATUS_INACTIVE;
req->flags = 0;
rc = vbg_req_perform(gdev, req);
if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */
rc = VINF_SUCCESS;
vbg_req_free(req, sizeof(*req));
return vbg_status_code_to_errno(rc);
}
/**
* Inflate the balloon by one chunk. The caller owns the balloon mutex.
* Return: 0 or negative errno value.
* @gdev: The Guest extension device.
* @chunk_idx: Index of the chunk.
*/
static int vbg_balloon_inflate(struct vbg_dev *gdev, u32 chunk_idx)
{
struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
struct page **pages;
int i, rc, ret;
pages = kmalloc_array(VMMDEV_MEMORY_BALLOON_CHUNK_PAGES,
sizeof(*pages),
GFP_KERNEL | __GFP_NOWARN);
if (!pages)
return -ENOMEM;
req->header.size = sizeof(*req);
req->inflate = true;
req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++) {
pages[i] = alloc_page(GFP_KERNEL | __GFP_NOWARN);
if (!pages[i]) {
ret = -ENOMEM;
goto out_error;
}
req->phys_page[i] = page_to_phys(pages[i]);
}
rc = vbg_req_perform(gdev, req);
if (rc < 0) {
vbg_err("%s error, rc: %d\n", __func__, rc);
ret = vbg_status_code_to_errno(rc);
goto out_error;
}
gdev->mem_balloon.pages[chunk_idx] = pages;
return 0;
out_error:
while (--i >= 0)
__free_page(pages[i]);
kfree(pages);
return ret;
}
/**
* Deflate the balloon by one chunk. The caller owns the balloon mutex.
* Return: 0 or negative errno value.
* @gdev: The Guest extension device.
* @chunk_idx: Index of the chunk.
*/
static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx)
{
struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
struct page **pages = gdev->mem_balloon.pages[chunk_idx];
int i, rc;
req->header.size = sizeof(*req);
req->inflate = false;
req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
req->phys_page[i] = page_to_phys(pages[i]);
rc = vbg_req_perform(gdev, req);
if (rc < 0) {
vbg_err("%s error, rc: %d\n", __func__, rc);
return vbg_status_code_to_errno(rc);
}
for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
__free_page(pages[i]);
kfree(pages);
gdev->mem_balloon.pages[chunk_idx] = NULL;
return 0;
}
/**
* Respond to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events, query the size
* the host wants the balloon to be and adjust accordingly.
*/
static void vbg_balloon_work(struct work_struct *work)
{
struct vbg_dev *gdev =
container_of(work, struct vbg_dev, mem_balloon.work);
struct vmmdev_memballoon_info *req = gdev->mem_balloon.get_req;
u32 i, chunks;
int rc, ret;
/*
* Setting this bit means that we request the value from the host and
* change the guest memory balloon according to the returned value.
*/
req->event_ack = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
rc = vbg_req_perform(gdev, req);
if (rc < 0) {
		vbg_err("%s error, rc: %d\n", __func__, rc);
return;
}
/*
	 * The host always returns the same maximum number of chunks, so
* we do this once.
*/
if (!gdev->mem_balloon.max_chunks) {
gdev->mem_balloon.pages =
devm_kcalloc(gdev->dev, req->phys_mem_chunks,
sizeof(struct page **), GFP_KERNEL);
if (!gdev->mem_balloon.pages)
return;
gdev->mem_balloon.max_chunks = req->phys_mem_chunks;
}
chunks = req->balloon_chunks;
if (chunks > gdev->mem_balloon.max_chunks) {
vbg_err("%s: illegal balloon size %u (max=%u)\n",
__func__, chunks, gdev->mem_balloon.max_chunks);
return;
}
if (chunks > gdev->mem_balloon.chunks) {
/* inflate */
for (i = gdev->mem_balloon.chunks; i < chunks; i++) {
ret = vbg_balloon_inflate(gdev, i);
if (ret < 0)
return;
gdev->mem_balloon.chunks++;
}
} else {
/* deflate */
for (i = gdev->mem_balloon.chunks; i-- > chunks;) {
ret = vbg_balloon_deflate(gdev, i);
if (ret < 0)
return;
gdev->mem_balloon.chunks--;
}
}
}
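/*
 * Example (illustrative): with 1 chunk currently ballooned and the host
 * requesting 3, the code above inflates chunks 1 and 2; if the host later
 * requests 0, chunks 2, 1 and 0 are deflated in reverse order and their
 * pages are returned to the guest.
 */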
/**
* Callback for heartbeat timer.
*/
static void vbg_heartbeat_timer(struct timer_list *t)
{
struct vbg_dev *gdev = from_timer(gdev, t, heartbeat_timer);
vbg_req_perform(gdev, gdev->guest_heartbeat_req);
mod_timer(&gdev->heartbeat_timer,
msecs_to_jiffies(gdev->heartbeat_interval_ms));
}
/**
 * Configure the host to check the guest's heartbeat
 * and get the heartbeat interval from the host.
* Return: 0 or negative errno value.
* @gdev: The Guest extension device.
* @enabled: Set true to enable guest heartbeat checks on host.
*/
static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
{
struct vmmdev_heartbeat *req;
int rc;
req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE,
VBG_KERNEL_REQUEST);
if (!req)
return -ENOMEM;
req->enabled = enabled;
req->interval_ns = 0;
rc = vbg_req_perform(gdev, req);
do_div(req->interval_ns, 1000000); /* ns -> ms */
gdev->heartbeat_interval_ms = req->interval_ns;
vbg_req_free(req, sizeof(*req));
return vbg_status_code_to_errno(rc);
}
/**
* Initializes the heartbeat timer. This feature may be disabled by the host.
* Return: 0 or negative errno value.
* @gdev: The Guest extension device.
*/
static int vbg_heartbeat_init(struct vbg_dev *gdev)
{
int ret;
/* Make sure that heartbeat checking is disabled if we fail. */
ret = vbg_heartbeat_host_config(gdev, false);
if (ret < 0)
return ret;
ret = vbg_heartbeat_host_config(gdev, true);
if (ret < 0)
return ret;
gdev->guest_heartbeat_req = vbg_req_alloc(
sizeof(*gdev->guest_heartbeat_req),
VMMDEVREQ_GUEST_HEARTBEAT,
VBG_KERNEL_REQUEST);
if (!gdev->guest_heartbeat_req)
return -ENOMEM;
vbg_info("%s: Setting up heartbeat to trigger every %d milliseconds\n",
__func__, gdev->heartbeat_interval_ms);
mod_timer(&gdev->heartbeat_timer, 0);
return 0;
}
/**
 * Cleanup heartbeat code, stop HB timer and disable host heartbeat checking.
* @gdev: The Guest extension device.
*/
static void vbg_heartbeat_exit(struct vbg_dev *gdev)
{
del_timer_sync(&gdev->heartbeat_timer);
vbg_heartbeat_host_config(gdev, false);
vbg_req_free(gdev->guest_heartbeat_req,
sizeof(*gdev->guest_heartbeat_req));
}
/**
* Applies a change to the bit usage tracker.
* Return: true if the mask changed, false if not.
* @tracker: The bit usage tracker.
* @changed: The bits to change.
* @previous: The previous value of the bits.
*/
static bool vbg_track_bit_usage(struct vbg_bit_usage_tracker *tracker,
u32 changed, u32 previous)
{
bool global_change = false;
while (changed) {
u32 bit = ffs(changed) - 1;
u32 bitmask = BIT(bit);
if (bitmask & previous) {
tracker->per_bit_usage[bit] -= 1;
if (tracker->per_bit_usage[bit] == 0) {
global_change = true;
tracker->mask &= ~bitmask;
}
} else {
tracker->per_bit_usage[bit] += 1;
if (tracker->per_bit_usage[bit] == 1) {
global_change = true;
tracker->mask |= bitmask;
}
}
changed &= ~bitmask;
}
return global_change;
}
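/*
 * Example (illustrative): if two sessions each set bit 0,
 * per_bit_usage[0] goes 0 -> 1 -> 2 and tracker->mask gains BIT(0) only on
 * the first increment; the function reports a change again (clearing the
 * mask bit) only when the last user drops the bit.
 */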
/**
 * Init and termination worker for resetting the event filter on the host.
* Return: 0 or negative errno value.
* @gdev: The Guest extension device.
* @fixed_events: Fixed events (init time).
*/
static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
u32 fixed_events)
{
struct vmmdev_mask *req;
int rc;
req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
VBG_KERNEL_REQUEST);
if (!req)
return -ENOMEM;
req->not_mask = U32_MAX & ~fixed_events;
req->or_mask = fixed_events;
rc = vbg_req_perform(gdev, req);
if (rc < 0)
vbg_err("%s error, rc: %d\n", __func__, rc);
vbg_req_free(req, sizeof(*req));
return vbg_status_code_to_errno(rc);
}
/**
* Changes the event filter mask for the given session.
*
* This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to
* do session cleanup. Takes the session mutex.
*
* Return: 0 or negative errno value.
* @gdev: The Guest extension device.
* @session: The session.
* @or_mask: The events to add.
* @not_mask: The events to remove.
* @session_termination: Set if we're called by the session cleanup code.
* This tweaks the error handling so we perform
* proper session cleanup even if the host
* misbehaves.
*/
static int vbg_set_session_event_filter(struct vbg_dev *gdev,
struct vbg_session *session,
u32 or_mask, u32 not_mask,
bool session_termination)
{
struct vmmdev_mask *req;
u32 changed, previous;
int rc, ret = 0;
/*
	 * Allocate a request buffer before taking the session mutex; when
* the session is being terminated the requestor is the kernel,
* as we're cleaning up.
*/
req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
session_termination ? VBG_KERNEL_REQUEST :
session->requestor);
if (!req) {
if (!session_termination)
return -ENOMEM;
/* Ignore allocation failure, we must do session cleanup. */
}
mutex_lock(&gdev->session_mutex);
/* Apply the changes to the session mask. */
previous = session->event_filter;
session->event_filter |= or_mask;
session->event_filter &= ~not_mask;
/* If anything actually changed, update the global usage counters. */
changed = previous ^ session->event_filter;
if (!changed)
goto out;
vbg_track_bit_usage(&gdev->event_filter_tracker, changed, previous);
or_mask = gdev->fixed_events | gdev->event_filter_tracker.mask;
if (gdev->event_filter_host == or_mask || !req)
goto out;
gdev->event_filter_host = or_mask;
req->or_mask = or_mask;
req->not_mask = ~or_mask;
rc = vbg_req_perform(gdev, req);
if (rc < 0) {
ret = vbg_status_code_to_errno(rc);
/* Failed, roll back (unless it's session termination time). */
gdev->event_filter_host = U32_MAX;
if (session_termination)
goto out;
vbg_track_bit_usage(&gdev->event_filter_tracker, changed,
session->event_filter);
session->event_filter = previous;
}
out:
mutex_unlock(&gdev->session_mutex);
vbg_req_free(req, sizeof(*req));
return ret;
}
/**
 * Init and termination worker for setting the guest capabilities to zero
 * on the host.
* Return: 0 or negative errno value.
* @gdev: The Guest extension device.
*/
static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
{
struct vmmdev_mask *req;
int rc;
req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
VBG_KERNEL_REQUEST);
if (!req)
return -ENOMEM;
req->not_mask = U32_MAX;
req->or_mask = 0;
rc = vbg_req_perform(gdev, req);
if (rc < 0)
vbg_err("%s error, rc: %d\n", __func__, rc);
vbg_req_free(req, sizeof(*req));
return vbg_status_code_to_errno(rc);
}
/**
* Set guest capabilities on the host.
 * Must be called with gdev->session_mutex held.
* Return: 0 or negative errno value.
* @gdev: The Guest extension device.
* @session: The session.
* @session_termination: Set if we're called by the session cleanup code.
*/
static int vbg_set_host_capabilities(struct vbg_dev *gdev,
struct vbg_session *session,
bool session_termination)
{
struct vmmdev_mask *req;
u32 caps;
int rc;
WARN_ON(!mutex_is_locked(&gdev->session_mutex));
caps = gdev->acquired_guest_caps | gdev->set_guest_caps_tracker.mask;
if (gdev->guest_caps_host == caps)
return 0;
/* On termination the requestor is the kernel, as we're cleaning up. */
req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
session_termination ? VBG_KERNEL_REQUEST :
session->requestor);
if (!req) {
gdev->guest_caps_host = U32_MAX;
return -ENOMEM;
}
req->or_mask = caps;
req->not_mask = ~caps;
rc = vbg_req_perform(gdev, req);
vbg_req_free(req, sizeof(*req));
gdev->guest_caps_host = (rc >= 0) ? caps : U32_MAX;
return vbg_status_code_to_errno(rc);
}
/**
* Acquire (get exclusive access) guest capabilities for a session.
* Takes the session mutex.
* Return: 0 or negative errno value.
* @gdev: The Guest extension device.
* @session: The session.
* @flags: Flags (VBGL_IOC_AGC_FLAGS_XXX).
* @or_mask: The capabilities to add.
* @not_mask: The capabilities to remove.
* @session_termination: Set if we're called by the session cleanup code.
* This tweaks the error handling so we perform
* proper session cleanup even if the host
* misbehaves.
*/
static int vbg_acquire_session_capabilities(struct vbg_dev *gdev,
struct vbg_session *session,
u32 or_mask, u32 not_mask,
u32 flags, bool session_termination)
{
unsigned long irqflags;
bool wakeup = false;
int ret = 0;
mutex_lock(&gdev->session_mutex);
if (gdev->set_guest_caps_tracker.mask & or_mask) {
vbg_err("%s error: cannot acquire caps which are currently set\n",
__func__);
ret = -EINVAL;
goto out;
}
/*
* Mark any caps in the or_mask as now being in acquire-mode. Note
* once caps are in acquire_mode they always stay in this mode.
* This impacts event handling, so we take the event-lock.
*/
spin_lock_irqsave(&gdev->event_spinlock, irqflags);
gdev->acquire_mode_guest_caps |= or_mask;
spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
/* If we only have to switch the caps to acquire mode, we're done. */
if (flags & VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE)
goto out;
not_mask &= ~or_mask; /* or_mask takes priority over not_mask */
not_mask &= session->acquired_guest_caps;
or_mask &= ~session->acquired_guest_caps;
if (or_mask == 0 && not_mask == 0)
goto out;
if (gdev->acquired_guest_caps & or_mask) {
ret = -EBUSY;
goto out;
}
gdev->acquired_guest_caps |= or_mask;
gdev->acquired_guest_caps &= ~not_mask;
/* session->acquired_guest_caps impacts event handling, take the lock */
spin_lock_irqsave(&gdev->event_spinlock, irqflags);
session->acquired_guest_caps |= or_mask;
session->acquired_guest_caps &= ~not_mask;
spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
ret = vbg_set_host_capabilities(gdev, session, session_termination);
/* Roll back on failure, unless it's session termination time. */
if (ret < 0 && !session_termination) {
gdev->acquired_guest_caps &= ~or_mask;
gdev->acquired_guest_caps |= not_mask;
spin_lock_irqsave(&gdev->event_spinlock, irqflags);
session->acquired_guest_caps &= ~or_mask;
session->acquired_guest_caps |= not_mask;
spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
}
/*
* If we added a capability, check if that means some other thread in
* our session should be unblocked because there are events pending
* (the result of vbg_get_allowed_event_mask_for_session() may change).
*
* HACK ALERT! When the seamless support capability is added we generate
* a seamless change event so that the ring-3 client can sync with
* the seamless state.
*/
if (ret == 0 && or_mask != 0) {
spin_lock_irqsave(&gdev->event_spinlock, irqflags);
if (or_mask & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
gdev->pending_events |=
VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
if (gdev->pending_events)
wakeup = true;
spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
if (wakeup)
wake_up(&gdev->event_wq);
}
out:
mutex_unlock(&gdev->session_mutex);
return ret;
}
/**
* Sets the guest capabilities for a session. Takes the session mutex.
* Return: 0 or negative errno value.
* @gdev: The Guest extension device.
* @session: The session.
* @or_mask: The capabilities to add.
* @not_mask: The capabilities to remove.
* @session_termination: Set if we're called by the session cleanup code.
* This tweaks the error handling so we perform
* proper session cleanup even if the host
* misbehaves.
*/
static int vbg_set_session_capabilities(struct vbg_dev *gdev,
struct vbg_session *session,
u32 or_mask, u32 not_mask,
bool session_termination)
{
u32 changed, previous;
int ret = 0;
mutex_lock(&gdev->session_mutex);
if (gdev->acquire_mode_guest_caps & or_mask) {
vbg_err("%s error: cannot set caps which are in acquire_mode\n",
__func__);
ret = -EBUSY;
goto out;
}
/* Apply the changes to the session mask. */
previous = session->set_guest_caps;
session->set_guest_caps |= or_mask;
session->set_guest_caps &= ~not_mask;
/* If anything actually changed, update the global usage counters. */
changed = previous ^ session->set_guest_caps;
if (!changed)
goto out;
vbg_track_bit_usage(&gdev->set_guest_caps_tracker, changed, previous);
ret = vbg_set_host_capabilities(gdev, session, session_termination);
/* Roll back on failure, unless it's session termination time. */
if (ret < 0 && !session_termination) {
vbg_track_bit_usage(&gdev->set_guest_caps_tracker, changed,
session->set_guest_caps);
session->set_guest_caps = previous;
}
out:
mutex_unlock(&gdev->session_mutex);
return ret;
}
/**
 * vbg_query_host_version - get the host feature mask and version information.
* Return: 0 or negative errno value.
* @gdev: The Guest extension device.
*/
static int vbg_query_host_version(struct vbg_dev *gdev)
{
struct vmmdev_host_version *req;
int rc, ret;
req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION,
VBG_KERNEL_REQUEST);
if (!req)
return -ENOMEM;
rc = vbg_req_perform(gdev, req);
ret = vbg_status_code_to_errno(rc);
if (ret) {
vbg_err("%s error: %d\n", __func__, rc);
goto out;
}
snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u",
req->major, req->minor, req->build, req->revision);
gdev->host_features = req->features;
vbg_info("vboxguest: host-version: %s %#x\n", gdev->host_version,
gdev->host_features);
if (!(req->features & VMMDEV_HVF_HGCM_PHYS_PAGE_LIST)) {
vbg_err("vboxguest: Error host too old (does not support page-lists)\n");
ret = -ENODEV;
}
out:
vbg_req_free(req, sizeof(*req));
return ret;
}
/**
* Initializes the VBoxGuest device extension when the
* device driver is loaded.
*
 * The native code locates the VMMDev on the PCI bus and retrieves
 * the MMIO and I/O port ranges; this function will take care of
* mapping the MMIO memory (if present). Upon successful return
* the native code should set up the interrupt handler.
*
* Return: 0 or negative errno value.
*
* @gdev: The Guest extension device.
* @fixed_events: Events that will be enabled upon init and no client
* will ever be allowed to mask.
*/
int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
{
int ret = -ENOMEM;
gdev->fixed_events = fixed_events | VMMDEV_EVENT_HGCM;
gdev->event_filter_host = U32_MAX; /* forces a report */
gdev->guest_caps_host = U32_MAX; /* forces a report */
init_waitqueue_head(&gdev->event_wq);
init_waitqueue_head(&gdev->hgcm_wq);
spin_lock_init(&gdev->event_spinlock);
mutex_init(&gdev->session_mutex);
mutex_init(&gdev->cancel_req_mutex);
timer_setup(&gdev->heartbeat_timer, vbg_heartbeat_timer, 0);
INIT_WORK(&gdev->mem_balloon.work, vbg_balloon_work);
gdev->mem_balloon.get_req =
vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ,
VBG_KERNEL_REQUEST);
gdev->mem_balloon.change_req =
vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
VMMDEVREQ_CHANGE_MEMBALLOON,
VBG_KERNEL_REQUEST);
gdev->cancel_req =
vbg_req_alloc(sizeof(*(gdev->cancel_req)),
VMMDEVREQ_HGCM_CANCEL2,
VBG_KERNEL_REQUEST);
gdev->ack_events_req =
vbg_req_alloc(sizeof(*gdev->ack_events_req),
VMMDEVREQ_ACKNOWLEDGE_EVENTS,
VBG_KERNEL_REQUEST);
gdev->mouse_status_req =
vbg_req_alloc(sizeof(*gdev->mouse_status_req),
VMMDEVREQ_GET_MOUSE_STATUS,
VBG_KERNEL_REQUEST);
if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
!gdev->cancel_req || !gdev->ack_events_req ||
!gdev->mouse_status_req)
goto err_free_reqs;
ret = vbg_query_host_version(gdev);
if (ret)
goto err_free_reqs;
ret = vbg_report_guest_info(gdev);
if (ret) {
vbg_err("vboxguest: vbg_report_guest_info error: %d\n", ret);
goto err_free_reqs;
}
ret = vbg_reset_host_event_filter(gdev, gdev->fixed_events);
if (ret) {
vbg_err("vboxguest: Error setting fixed event filter: %d\n",
ret);
goto err_free_reqs;
}
ret = vbg_reset_host_capabilities(gdev);
if (ret) {
vbg_err("vboxguest: Error clearing guest capabilities: %d\n",
ret);
goto err_free_reqs;
}
ret = vbg_core_set_mouse_status(gdev, 0);
if (ret) {
vbg_err("vboxguest: Error clearing mouse status: %d\n", ret);
goto err_free_reqs;
}
/* These may fail without requiring the driver init to fail. */
vbg_guest_mappings_init(gdev);
vbg_heartbeat_init(gdev);
/* All Done! */
ret = vbg_report_driver_status(gdev, true);
if (ret < 0)
vbg_err("vboxguest: Error reporting driver status: %d\n", ret);
return 0;
err_free_reqs:
vbg_req_free(gdev->mouse_status_req,
sizeof(*gdev->mouse_status_req));
vbg_req_free(gdev->ack_events_req,
sizeof(*gdev->ack_events_req));
vbg_req_free(gdev->cancel_req,
sizeof(*gdev->cancel_req));
vbg_req_free(gdev->mem_balloon.change_req,
sizeof(*gdev->mem_balloon.change_req));
vbg_req_free(gdev->mem_balloon.get_req,
sizeof(*gdev->mem_balloon.get_req));
return ret;
}
/**
* Call this on exit to clean-up vboxguest-core managed resources.
*
 * The native code should call this before the driver is unloaded,
* but don't call this on shutdown.
* @gdev: The Guest extension device.
*/
void vbg_core_exit(struct vbg_dev *gdev)
{
vbg_heartbeat_exit(gdev);
vbg_guest_mappings_exit(gdev);
/* Clear the host flags (mouse status etc). */
vbg_reset_host_event_filter(gdev, 0);
vbg_reset_host_capabilities(gdev);
vbg_core_set_mouse_status(gdev, 0);
vbg_req_free(gdev->mouse_status_req,
sizeof(*gdev->mouse_status_req));
vbg_req_free(gdev->ack_events_req,
sizeof(*gdev->ack_events_req));
vbg_req_free(gdev->cancel_req,
sizeof(*gdev->cancel_req));
vbg_req_free(gdev->mem_balloon.change_req,
sizeof(*gdev->mem_balloon.change_req));
vbg_req_free(gdev->mem_balloon.get_req,
sizeof(*gdev->mem_balloon.get_req));
}
/**
* Creates a VBoxGuest user session.
*
* vboxguest_linux.c calls this when userspace opens the char-device.
* Return: A pointer to the new session or an ERR_PTR on error.
* @gdev: The Guest extension device.
* @requestor: VMMDEV_REQUESTOR_* flags
*/
struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor)
{
struct vbg_session *session;
session = kzalloc(sizeof(*session), GFP_KERNEL);
if (!session)
return ERR_PTR(-ENOMEM);
session->gdev = gdev;
session->requestor = requestor;
return session;
}
/**
* Closes a VBoxGuest session.
* @session: The session to close (and free).
*/
void vbg_core_close_session(struct vbg_session *session)
{
struct vbg_dev *gdev = session->gdev;
int i, rc;
vbg_acquire_session_capabilities(gdev, session, 0, U32_MAX, 0, true);
vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true);
vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true);
for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
if (!session->hgcm_client_ids[i])
continue;
/* requestor is kernel here, as we're cleaning up. */
vbg_hgcm_disconnect(gdev, VBG_KERNEL_REQUEST,
session->hgcm_client_ids[i], &rc);
}
kfree(session);
}
static int vbg_ioctl_chk(struct vbg_ioctl_hdr *hdr, size_t in_size,
size_t out_size)
{
if (hdr->size_in != (sizeof(*hdr) + in_size) ||
hdr->size_out != (sizeof(*hdr) + out_size))
return -EINVAL;
return 0;
}
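/*
 * Example (illustrative): a request carrying input payload but only a
 * header-sized reply, such as the HGCM disconnect ioctl below, is
 * validated with vbg_ioctl_chk(&hdr, sizeof(u.in), 0).
 */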
static int vbg_ioctl_driver_version_info(
struct vbg_ioctl_driver_version_info *info)
{
const u16 vbg_maj_version = VBG_IOC_VERSION >> 16;
u16 min_maj_version, req_maj_version;
if (vbg_ioctl_chk(&info->hdr, sizeof(info->u.in), sizeof(info->u.out)))
return -EINVAL;
req_maj_version = info->u.in.req_version >> 16;
min_maj_version = info->u.in.min_version >> 16;
if (info->u.in.min_version > info->u.in.req_version ||
min_maj_version != req_maj_version)
return -EINVAL;
if (info->u.in.min_version <= VBG_IOC_VERSION &&
min_maj_version == vbg_maj_version) {
info->u.out.session_version = VBG_IOC_VERSION;
} else {
info->u.out.session_version = U32_MAX;
info->hdr.rc = VERR_VERSION_MISMATCH;
}
info->u.out.driver_version = VBG_IOC_VERSION;
info->u.out.driver_revision = 0;
info->u.out.reserved1 = 0;
info->u.out.reserved2 = 0;
return 0;
}
/* Must be called with the event_lock held */
static u32 vbg_get_allowed_event_mask_for_session(struct vbg_dev *gdev,
struct vbg_session *session)
{
u32 acquire_mode_caps = gdev->acquire_mode_guest_caps;
u32 session_acquired_caps = session->acquired_guest_caps;
u32 allowed_events = VMMDEV_EVENT_VALID_EVENT_MASK;
if ((acquire_mode_caps & VMMDEV_GUEST_SUPPORTS_GRAPHICS) &&
!(session_acquired_caps & VMMDEV_GUEST_SUPPORTS_GRAPHICS))
allowed_events &= ~VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;
if ((acquire_mode_caps & VMMDEV_GUEST_SUPPORTS_SEAMLESS) &&
!(session_acquired_caps & VMMDEV_GUEST_SUPPORTS_SEAMLESS))
allowed_events &= ~VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
return allowed_events;
}
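/*
 * Example (illustrative): once VMMDEV_GUEST_SUPPORTS_SEAMLESS is in
 * acquire mode, a session that has not acquired that capability no longer
 * sees VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST; only the acquiring
 * session is woken for it.
 */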
static bool vbg_wait_event_cond(struct vbg_dev *gdev,
struct vbg_session *session,
u32 event_mask)
{
unsigned long flags;
bool wakeup;
u32 events;
spin_lock_irqsave(&gdev->event_spinlock, flags);
events = gdev->pending_events & event_mask;
events &= vbg_get_allowed_event_mask_for_session(gdev, session);
wakeup = events || session->cancel_waiters;
spin_unlock_irqrestore(&gdev->event_spinlock, flags);
return wakeup;
}
/* Must be called with the event_lock held */
static u32 vbg_consume_events_locked(struct vbg_dev *gdev,
struct vbg_session *session,
u32 event_mask)
{
u32 events = gdev->pending_events & event_mask;
events &= vbg_get_allowed_event_mask_for_session(gdev, session);
gdev->pending_events &= ~events;
return events;
}
static int vbg_ioctl_wait_for_events(struct vbg_dev *gdev,
struct vbg_session *session,
struct vbg_ioctl_wait_for_events *wait)
{
u32 timeout_ms = wait->u.in.timeout_ms;
u32 event_mask = wait->u.in.events;
unsigned long flags;
long timeout;
int ret = 0;
if (vbg_ioctl_chk(&wait->hdr, sizeof(wait->u.in), sizeof(wait->u.out)))
return -EINVAL;
if (timeout_ms == U32_MAX)
timeout = MAX_SCHEDULE_TIMEOUT;
else
timeout = msecs_to_jiffies(timeout_ms);
wait->u.out.events = 0;
do {
timeout = wait_event_interruptible_timeout(
gdev->event_wq,
vbg_wait_event_cond(gdev, session, event_mask),
timeout);
spin_lock_irqsave(&gdev->event_spinlock, flags);
if (timeout < 0 || session->cancel_waiters) {
ret = -EINTR;
} else if (timeout == 0) {
ret = -ETIMEDOUT;
} else {
wait->u.out.events =
vbg_consume_events_locked(gdev, session, event_mask);
}
spin_unlock_irqrestore(&gdev->event_spinlock, flags);
/*
* Someone else may have consumed the event(s) first, in
* which case we go back to waiting.
*/
} while (ret == 0 && wait->u.out.events == 0);
return ret;
}
static int vbg_ioctl_interrupt_all_wait_events(struct vbg_dev *gdev,
struct vbg_session *session,
struct vbg_ioctl_hdr *hdr)
{
unsigned long flags;
if (hdr->size_in != sizeof(*hdr) || hdr->size_out != sizeof(*hdr))
return -EINVAL;
spin_lock_irqsave(&gdev->event_spinlock, flags);
session->cancel_waiters = true;
spin_unlock_irqrestore(&gdev->event_spinlock, flags);
wake_up(&gdev->event_wq);
return 0;
}
/**
* Checks if the VMM request is allowed in the context of the given session.
* Return: 0 or negative errno value.
* @gdev: The Guest extension device.
* @session: The calling session.
* @req: The request.
*/
static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
const struct vmmdev_request_header *req)
{
const struct vmmdev_guest_status *guest_status;
bool trusted_apps_only;
switch (req->request_type) {
/* Trusted users apps only. */
case VMMDEVREQ_QUERY_CREDENTIALS:
case VMMDEVREQ_REPORT_CREDENTIALS_JUDGEMENT:
case VMMDEVREQ_REGISTER_SHARED_MODULE:
case VMMDEVREQ_UNREGISTER_SHARED_MODULE:
case VMMDEVREQ_WRITE_COREDUMP:
case VMMDEVREQ_GET_CPU_HOTPLUG_REQ:
case VMMDEVREQ_SET_CPU_HOTPLUG_STATUS:
case VMMDEVREQ_CHECK_SHARED_MODULES:
case VMMDEVREQ_GET_PAGE_SHARING_STATUS:
case VMMDEVREQ_DEBUG_IS_PAGE_SHARED:
case VMMDEVREQ_REPORT_GUEST_STATS:
case VMMDEVREQ_REPORT_GUEST_USER_STATE:
case VMMDEVREQ_GET_STATISTICS_CHANGE_REQ:
trusted_apps_only = true;
break;
/* Anyone. */
case VMMDEVREQ_GET_MOUSE_STATUS:
case VMMDEVREQ_SET_MOUSE_STATUS:
case VMMDEVREQ_SET_POINTER_SHAPE:
case VMMDEVREQ_GET_HOST_VERSION:
case VMMDEVREQ_IDLE:
case VMMDEVREQ_GET_HOST_TIME:
case VMMDEVREQ_SET_POWER_STATUS:
case VMMDEVREQ_ACKNOWLEDGE_EVENTS:
case VMMDEVREQ_CTL_GUEST_FILTER_MASK:
case VMMDEVREQ_REPORT_GUEST_STATUS:
case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ:
case VMMDEVREQ_VIDEMODE_SUPPORTED:
case VMMDEVREQ_GET_HEIGHT_REDUCTION:
case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ2:
case VMMDEVREQ_VIDEMODE_SUPPORTED2:
case VMMDEVREQ_VIDEO_ACCEL_ENABLE:
case VMMDEVREQ_VIDEO_ACCEL_FLUSH:
case VMMDEVREQ_VIDEO_SET_VISIBLE_REGION:
case VMMDEVREQ_VIDEO_UPDATE_MONITOR_POSITIONS:
case VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX:
case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ_MULTI:
case VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ:
case VMMDEVREQ_GET_VRDPCHANGE_REQ:
case VMMDEVREQ_LOG_STRING:
case VMMDEVREQ_GET_SESSION_ID:
trusted_apps_only = false;
break;
/* Depends on the request parameters... */
case VMMDEVREQ_REPORT_GUEST_CAPABILITIES:
guest_status = (const struct vmmdev_guest_status *)req;
switch (guest_status->facility) {
case VBOXGUEST_FACILITY_TYPE_ALL:
case VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER:
vbg_err("Denying userspace vmm report guest cap. call facility %#08x\n",
guest_status->facility);
return -EPERM;
case VBOXGUEST_FACILITY_TYPE_VBOX_SERVICE:
trusted_apps_only = true;
break;
case VBOXGUEST_FACILITY_TYPE_VBOX_TRAY_CLIENT:
case VBOXGUEST_FACILITY_TYPE_SEAMLESS:
case VBOXGUEST_FACILITY_TYPE_GRAPHICS:
default:
trusted_apps_only = false;
break;
}
break;
/* Anything else is not allowed. */
default:
vbg_err("Denying userspace vmm call type %#08x\n",
req->request_type);
return -EPERM;
}
if (trusted_apps_only &&
(session->requestor & VMMDEV_REQUESTOR_USER_DEVICE)) {
vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
req->request_type);
return -EPERM;
}
return 0;
}
static int vbg_ioctl_vmmrequest(struct vbg_dev *gdev,
struct vbg_session *session, void *data)
{
struct vbg_ioctl_hdr *hdr = data;
int ret;
if (hdr->size_in != hdr->size_out)
return -EINVAL;
if (hdr->size_in > VMMDEV_MAX_VMMDEVREQ_SIZE)
return -E2BIG;
if (hdr->type == VBG_IOCTL_HDR_TYPE_DEFAULT)
return -EINVAL;
ret = vbg_req_allowed(gdev, session, data);
if (ret < 0)
return ret;
vbg_req_perform(gdev, data);
WARN_ON(hdr->rc == VINF_HGCM_ASYNC_EXECUTE);
return 0;
}
static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
struct vbg_session *session,
struct vbg_ioctl_hgcm_connect *conn)
{
u32 client_id;
int i, ret;
if (vbg_ioctl_chk(&conn->hdr, sizeof(conn->u.in), sizeof(conn->u.out)))
return -EINVAL;
/* Find a free place in the sessions clients array and claim it */
mutex_lock(&gdev->session_mutex);
for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
if (!session->hgcm_client_ids[i]) {
session->hgcm_client_ids[i] = U32_MAX;
break;
}
}
mutex_unlock(&gdev->session_mutex);
if (i >= ARRAY_SIZE(session->hgcm_client_ids))
return -EMFILE;
ret = vbg_hgcm_connect(gdev, session->requestor, &conn->u.in.loc,
&client_id, &conn->hdr.rc);
mutex_lock(&gdev->session_mutex);
if (ret == 0 && conn->hdr.rc >= 0) {
conn->u.out.client_id = client_id;
session->hgcm_client_ids[i] = client_id;
} else {
conn->u.out.client_id = 0;
session->hgcm_client_ids[i] = 0;
}
mutex_unlock(&gdev->session_mutex);
return ret;
}
static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
struct vbg_session *session,
struct vbg_ioctl_hgcm_disconnect *disconn)
{
u32 client_id;
int i, ret;
if (vbg_ioctl_chk(&disconn->hdr, sizeof(disconn->u.in), 0))
return -EINVAL;
client_id = disconn->u.in.client_id;
if (client_id == 0 || client_id == U32_MAX)
return -EINVAL;
mutex_lock(&gdev->session_mutex);
for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
if (session->hgcm_client_ids[i] == client_id) {
session->hgcm_client_ids[i] = U32_MAX;
break;
}
}
mutex_unlock(&gdev->session_mutex);
if (i >= ARRAY_SIZE(session->hgcm_client_ids))
return -EINVAL;
ret = vbg_hgcm_disconnect(gdev, session->requestor, client_id,
&disconn->hdr.rc);
mutex_lock(&gdev->session_mutex);
if (ret == 0 && disconn->hdr.rc >= 0)
session->hgcm_client_ids[i] = 0;
else
session->hgcm_client_ids[i] = client_id;
mutex_unlock(&gdev->session_mutex);
return ret;
}
static bool vbg_param_valid(enum vmmdev_hgcm_function_parameter_type type)
{
switch (type) {
case VMMDEV_HGCM_PARM_TYPE_32BIT:
case VMMDEV_HGCM_PARM_TYPE_64BIT:
case VMMDEV_HGCM_PARM_TYPE_LINADDR:
case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
return true;
default:
return false;
}
}
static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
struct vbg_session *session, bool f32bit,
struct vbg_ioctl_hgcm_call *call)
{
size_t actual_size;
u32 client_id;
int i, ret;
if (call->hdr.size_in < sizeof(*call))
return -EINVAL;
if (call->hdr.size_in != call->hdr.size_out)
return -EINVAL;
if (call->parm_count > VMMDEV_HGCM_MAX_PARMS)
return -E2BIG;
client_id = call->client_id;
if (client_id == 0 || client_id == U32_MAX)
return -EINVAL;
actual_size = sizeof(*call);
if (f32bit)
actual_size += call->parm_count *
sizeof(struct vmmdev_hgcm_function_parameter32);
else
actual_size += call->parm_count *
sizeof(struct vmmdev_hgcm_function_parameter);
if (call->hdr.size_in < actual_size) {
vbg_debug("VBG_IOCTL_HGCM_CALL: hdr.size_in %d required size is %zd\n",
call->hdr.size_in, actual_size);
return -EINVAL;
}
call->hdr.size_out = actual_size;
/* Validate parameter types */
if (f32bit) {
struct vmmdev_hgcm_function_parameter32 *parm =
VBG_IOCTL_HGCM_CALL_PARMS32(call);
for (i = 0; i < call->parm_count; i++)
if (!vbg_param_valid(parm[i].type))
return -EINVAL;
} else {
struct vmmdev_hgcm_function_parameter *parm =
VBG_IOCTL_HGCM_CALL_PARMS(call);
for (i = 0; i < call->parm_count; i++)
if (!vbg_param_valid(parm[i].type))
return -EINVAL;
}
/*
* Validate the client id.
*/
mutex_lock(&gdev->session_mutex);
for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++)
if (session->hgcm_client_ids[i] == client_id)
break;
mutex_unlock(&gdev->session_mutex);
if (i >= ARRAY_SIZE(session->hgcm_client_ids)) {
vbg_debug("VBG_IOCTL_HGCM_CALL: INVALID handle. u32Client=%#08x\n",
client_id);
return -EINVAL;
}
if (IS_ENABLED(CONFIG_COMPAT) && f32bit)
ret = vbg_hgcm_call32(gdev, session->requestor, client_id,
call->function, call->timeout_ms,
VBG_IOCTL_HGCM_CALL_PARMS32(call),
call->parm_count, &call->hdr.rc);
else
ret = vbg_hgcm_call(gdev, session->requestor, client_id,
call->function, call->timeout_ms,
VBG_IOCTL_HGCM_CALL_PARMS(call),
call->parm_count, &call->hdr.rc);
if (ret == -E2BIG) {
/* E2BIG needs to be reported through the hdr.rc field. */
call->hdr.rc = VERR_OUT_OF_RANGE;
ret = 0;
}
if (ret && ret != -EINTR && ret != -ETIMEDOUT)
vbg_err("VBG_IOCTL_HGCM_CALL error: %d\n", ret);
return ret;
}
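/*
 * Worked example (illustrative): the size check above scales with the
 * parameter count. For a 64-bit caller passing three parameters, the
 * required size is sizeof(struct vbg_ioctl_hgcm_call) +
 * 3 * sizeof(struct vmmdev_hgcm_function_parameter); any smaller
 * hdr.size_in is rejected before the parameter array is dereferenced.
 * The same arithmetic as a hypothetical helper:
 */
static size_t example_hgcm_call_size(size_t parm_size, u32 parm_count)
{
	/* fixed header followed by parm_count fixed-size records */
	return sizeof(struct vbg_ioctl_hgcm_call) +
	       (size_t)parm_count * parm_size;
}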
static int vbg_ioctl_log(struct vbg_ioctl_log *log)
{
if (log->hdr.size_out != sizeof(log->hdr))
return -EINVAL;
vbg_info("%.*s", (int)(log->hdr.size_in - sizeof(log->hdr)),
log->u.in.msg);
return 0;
}
static int vbg_ioctl_change_filter_mask(struct vbg_dev *gdev,
struct vbg_session *session,
struct vbg_ioctl_change_filter *filter)
{
u32 or_mask, not_mask;
if (vbg_ioctl_chk(&filter->hdr, sizeof(filter->u.in), 0))
return -EINVAL;
or_mask = filter->u.in.or_mask;
not_mask = filter->u.in.not_mask;
if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
return -EINVAL;
return vbg_set_session_event_filter(gdev, session, or_mask, not_mask,
false);
}
static int vbg_ioctl_acquire_guest_capabilities(struct vbg_dev *gdev,
struct vbg_session *session,
struct vbg_ioctl_acquire_guest_caps *caps)
{
u32 flags, or_mask, not_mask;
if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), 0))
return -EINVAL;
flags = caps->u.in.flags;
or_mask = caps->u.in.or_mask;
not_mask = caps->u.in.not_mask;
if (flags & ~VBGL_IOC_AGC_FLAGS_VALID_MASK)
return -EINVAL;
if ((or_mask | not_mask) & ~VMMDEV_GUEST_CAPABILITIES_MASK)
return -EINVAL;
return vbg_acquire_session_capabilities(gdev, session, or_mask,
not_mask, flags, false);
}
static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
struct vbg_session *session, struct vbg_ioctl_set_guest_caps *caps)
{
u32 or_mask, not_mask;
int ret;
if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), sizeof(caps->u.out)))
return -EINVAL;
or_mask = caps->u.in.or_mask;
not_mask = caps->u.in.not_mask;
if ((or_mask | not_mask) & ~VMMDEV_GUEST_CAPABILITIES_MASK)
return -EINVAL;
ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask,
false);
if (ret)
return ret;
caps->u.out.session_caps = session->set_guest_caps;
caps->u.out.global_caps = gdev->guest_caps_host;
return 0;
}
static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
struct vbg_ioctl_check_balloon *balloon_info)
{
if (vbg_ioctl_chk(&balloon_info->hdr, 0, sizeof(balloon_info->u.out)))
return -EINVAL;
balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks;
/*
* Under Linux we handle VMMDEV_EVENT_BALLOON_CHANGE_REQUEST
* events entirely in the kernel, see vbg_core_isr().
*/
balloon_info->u.out.handle_in_r3 = false;
return 0;
}
static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
struct vbg_session *session,
struct vbg_ioctl_write_coredump *dump)
{
struct vmmdev_write_core_dump *req;
if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
return -EINVAL;
req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP,
session->requestor);
if (!req)
return -ENOMEM;
req->flags = dump->u.in.flags;
dump->hdr.rc = vbg_req_perform(gdev, req);
vbg_req_free(req, sizeof(*req));
return 0;
}
/**
 * vbg_core_ioctl - Common IOCtl for user to kernel communication.
 * @session: The client session.
 * @req: The requested function.
 * @data: The i/o data buffer, minimum size sizeof(struct vbg_ioctl_hdr).
 *
 * Return: 0 or negative errno value.
 */
int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
{
unsigned int req_no_size = req & ~IOCSIZE_MASK;
struct vbg_dev *gdev = session->gdev;
struct vbg_ioctl_hdr *hdr = data;
bool f32bit = false;
hdr->rc = VINF_SUCCESS;
if (!hdr->size_out)
hdr->size_out = hdr->size_in;
/*
* hdr->version and hdr->size_in / hdr->size_out minimum size are
* already checked by vbg_misc_device_ioctl().
*/
/* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT */
if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) ||
req == VBG_IOCTL_VMMDEV_REQUEST_BIG ||
req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT)
return vbg_ioctl_vmmrequest(gdev, session, data);
if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT)
return -EINVAL;
/* Fixed size requests. */
switch (req) {
case VBG_IOCTL_DRIVER_VERSION_INFO:
return vbg_ioctl_driver_version_info(data);
case VBG_IOCTL_HGCM_CONNECT:
return vbg_ioctl_hgcm_connect(gdev, session, data);
case VBG_IOCTL_HGCM_DISCONNECT:
return vbg_ioctl_hgcm_disconnect(gdev, session, data);
case VBG_IOCTL_WAIT_FOR_EVENTS:
return vbg_ioctl_wait_for_events(gdev, session, data);
case VBG_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS:
return vbg_ioctl_interrupt_all_wait_events(gdev, session, data);
case VBG_IOCTL_CHANGE_FILTER_MASK:
return vbg_ioctl_change_filter_mask(gdev, session, data);
case VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES:
return vbg_ioctl_acquire_guest_capabilities(gdev, session, data);
case VBG_IOCTL_CHANGE_GUEST_CAPABILITIES:
return vbg_ioctl_change_guest_capabilities(gdev, session, data);
case VBG_IOCTL_CHECK_BALLOON:
return vbg_ioctl_check_balloon(gdev, data);
case VBG_IOCTL_WRITE_CORE_DUMP:
return vbg_ioctl_write_core_dump(gdev, session, data);
}
/* Variable sized requests. */
switch (req_no_size) {
#ifdef CONFIG_COMPAT
case VBG_IOCTL_HGCM_CALL_32(0):
f32bit = true;
fallthrough;
#endif
case VBG_IOCTL_HGCM_CALL(0):
return vbg_ioctl_hgcm_call(gdev, session, f32bit, data);
case VBG_IOCTL_LOG(0):
case VBG_IOCTL_LOG_ALT(0):
return vbg_ioctl_log(data);
}
vbg_err_ratelimited("Userspace made an unknown ioctl req %#08x\n", req);
return -ENOTTY;
}
/**
 * vbg_core_set_mouse_status - Report guest supported mouse features to the host.
 * @gdev: The Guest extension device.
 * @features: The set of features to report to the host.
 *
 * Return: 0 or negative errno value.
 */
int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
{
struct vmmdev_mouse_status *req;
int rc;
req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS,
VBG_KERNEL_REQUEST);
if (!req)
return -ENOMEM;
req->mouse_features = features;
req->pointer_pos_x = 0;
req->pointer_pos_y = 0;
rc = vbg_req_perform(gdev, req);
if (rc < 0)
vbg_err("%s error, rc: %d\n", __func__, rc);
vbg_req_free(req, sizeof(*req));
return vbg_status_code_to_errno(rc);
}
/** Core interrupt service routine. */
irqreturn_t vbg_core_isr(int irq, void *dev_id)
{
struct vbg_dev *gdev = dev_id;
struct vmmdev_events *req = gdev->ack_events_req;
bool mouse_position_changed = false;
unsigned long flags;
u32 events = 0;
int rc;
if (!gdev->mmio->V.V1_04.have_events)
return IRQ_NONE;
/* Get and acknowledge events. */
req->header.rc = VERR_INTERNAL_ERROR;
req->events = 0;
rc = vbg_req_perform(gdev, req);
if (rc < 0) {
vbg_err("Error performing events req, rc: %d\n", rc);
return IRQ_NONE;
}
events = req->events;
if (events & VMMDEV_EVENT_MOUSE_POSITION_CHANGED) {
mouse_position_changed = true;
events &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
}
if (events & VMMDEV_EVENT_HGCM) {
wake_up(&gdev->hgcm_wq);
events &= ~VMMDEV_EVENT_HGCM;
}
if (events & VMMDEV_EVENT_BALLOON_CHANGE_REQUEST) {
schedule_work(&gdev->mem_balloon.work);
events &= ~VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
}
if (events) {
spin_lock_irqsave(&gdev->event_spinlock, flags);
gdev->pending_events |= events;
spin_unlock_irqrestore(&gdev->event_spinlock, flags);
wake_up(&gdev->event_wq);
}
if (mouse_position_changed)
vbg_linux_mouse_event(gdev);
return IRQ_HANDLED;
}
| linux-master | drivers/virt/vboxguest/vboxguest_core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ACRN Hypervisor Service Module (HSM)
*
* Copyright (C) 2020 Intel Corporation. All rights reserved.
*
* Authors:
* Fengwei Yin <[email protected]>
* Yakui Zhao <[email protected]>
*/
#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/acrn.h>
#include <asm/hypervisor.h>
#include "acrn_drv.h"
/*
* When /dev/acrn_hsm is opened, a 'struct acrn_vm' object is created to
* represent a VM instance and continues to be associated with the opened file
* descriptor. All ioctl operations on this file descriptor will be targeted to
* the VM instance. Release of this file descriptor will destroy the object.
*/
static int acrn_dev_open(struct inode *inode, struct file *filp)
{
struct acrn_vm *vm;
vm = kzalloc(sizeof(*vm), GFP_KERNEL);
if (!vm)
return -ENOMEM;
vm->vmid = ACRN_INVALID_VMID;
filp->private_data = vm;
return 0;
}
static int pmcmd_ioctl(u64 cmd, void __user *uptr)
{
struct acrn_pstate_data *px_data;
struct acrn_cstate_data *cx_data;
u64 *pm_info;
int ret = 0;
switch (cmd & PMCMD_TYPE_MASK) {
case ACRN_PMCMD_GET_PX_CNT:
case ACRN_PMCMD_GET_CX_CNT:
pm_info = kmalloc(sizeof(u64), GFP_KERNEL);
if (!pm_info)
return -ENOMEM;
ret = hcall_get_cpu_state(cmd, virt_to_phys(pm_info));
if (ret < 0) {
kfree(pm_info);
break;
}
if (copy_to_user(uptr, pm_info, sizeof(u64)))
ret = -EFAULT;
kfree(pm_info);
break;
case ACRN_PMCMD_GET_PX_DATA:
px_data = kmalloc(sizeof(*px_data), GFP_KERNEL);
if (!px_data)
return -ENOMEM;
ret = hcall_get_cpu_state(cmd, virt_to_phys(px_data));
if (ret < 0) {
kfree(px_data);
break;
}
if (copy_to_user(uptr, px_data, sizeof(*px_data)))
ret = -EFAULT;
kfree(px_data);
break;
case ACRN_PMCMD_GET_CX_DATA:
cx_data = kmalloc(sizeof(*cx_data), GFP_KERNEL);
if (!cx_data)
return -ENOMEM;
ret = hcall_get_cpu_state(cmd, virt_to_phys(cx_data));
if (ret < 0) {
kfree(cx_data);
break;
}
if (copy_to_user(uptr, cx_data, sizeof(*cx_data)))
ret = -EFAULT;
kfree(cx_data);
break;
default:
break;
}
return ret;
}
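/*
 * Illustrative sketch of the pattern used above: the hypervisor fills
 * the buffer through its physical address, so the bounce buffer must be
 * a kernel linear-mapping allocation (kmalloc), never the user pointer
 * itself. 'do_hcall' is a hypothetical stand-in for the real hypercall.
 */
static int example_bounce_to_user(void __user *uptr, size_t len,
				  int (*do_hcall)(u64 pa))
{
	void *buf = kmalloc(len, GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;
	ret = do_hcall(virt_to_phys(buf));	/* hypervisor writes buf */
	if (ret >= 0 && copy_to_user(uptr, buf, len))
		ret = -EFAULT;
	kfree(buf);
	return ret;
}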
/*
 * HSM relies on the hypercall layer of the ACRN hypervisor to sanity
 * check the input parameters.
 */
static long acrn_dev_ioctl(struct file *filp, unsigned int cmd,
unsigned long ioctl_param)
{
struct acrn_vm *vm = filp->private_data;
struct acrn_vm_creation *vm_param;
struct acrn_vcpu_regs *cpu_regs;
struct acrn_ioreq_notify notify;
struct acrn_ptdev_irq *irq_info;
struct acrn_ioeventfd ioeventfd;
struct acrn_vm_memmap memmap;
struct acrn_mmiodev *mmiodev;
struct acrn_msi_entry *msi;
struct acrn_pcidev *pcidev;
struct acrn_irqfd irqfd;
struct acrn_vdev *vdev;
struct page *page;
u64 cstate_cmd;
int i, ret = 0;
if (vm->vmid == ACRN_INVALID_VMID && cmd != ACRN_IOCTL_CREATE_VM) {
dev_dbg(acrn_dev.this_device,
"ioctl 0x%x: Invalid VM state!\n", cmd);
return -EINVAL;
}
switch (cmd) {
case ACRN_IOCTL_CREATE_VM:
vm_param = memdup_user((void __user *)ioctl_param,
sizeof(struct acrn_vm_creation));
if (IS_ERR(vm_param))
return PTR_ERR(vm_param);
if ((vm_param->reserved0 | vm_param->reserved1) != 0) {
kfree(vm_param);
return -EINVAL;
}
vm = acrn_vm_create(vm, vm_param);
if (!vm) {
ret = -EINVAL;
kfree(vm_param);
break;
}
if (copy_to_user((void __user *)ioctl_param, vm_param,
sizeof(struct acrn_vm_creation))) {
acrn_vm_destroy(vm);
ret = -EFAULT;
}
kfree(vm_param);
break;
case ACRN_IOCTL_START_VM:
ret = hcall_start_vm(vm->vmid);
if (ret < 0)
dev_dbg(acrn_dev.this_device,
"Failed to start VM %u!\n", vm->vmid);
break;
case ACRN_IOCTL_PAUSE_VM:
ret = hcall_pause_vm(vm->vmid);
if (ret < 0)
dev_dbg(acrn_dev.this_device,
"Failed to pause VM %u!\n", vm->vmid);
break;
case ACRN_IOCTL_RESET_VM:
ret = hcall_reset_vm(vm->vmid);
if (ret < 0)
dev_dbg(acrn_dev.this_device,
"Failed to restart VM %u!\n", vm->vmid);
break;
case ACRN_IOCTL_DESTROY_VM:
ret = acrn_vm_destroy(vm);
break;
case ACRN_IOCTL_SET_VCPU_REGS:
cpu_regs = memdup_user((void __user *)ioctl_param,
sizeof(struct acrn_vcpu_regs));
if (IS_ERR(cpu_regs))
return PTR_ERR(cpu_regs);
for (i = 0; i < ARRAY_SIZE(cpu_regs->reserved); i++)
if (cpu_regs->reserved[i]) {
kfree(cpu_regs);
return -EINVAL;
}
for (i = 0; i < ARRAY_SIZE(cpu_regs->vcpu_regs.reserved_32); i++)
if (cpu_regs->vcpu_regs.reserved_32[i]) {
kfree(cpu_regs);
return -EINVAL;
}
for (i = 0; i < ARRAY_SIZE(cpu_regs->vcpu_regs.reserved_64); i++)
if (cpu_regs->vcpu_regs.reserved_64[i]) {
kfree(cpu_regs);
return -EINVAL;
}
for (i = 0; i < ARRAY_SIZE(cpu_regs->vcpu_regs.gdt.reserved); i++)
if (cpu_regs->vcpu_regs.gdt.reserved[i] |
cpu_regs->vcpu_regs.idt.reserved[i]) {
kfree(cpu_regs);
return -EINVAL;
}
ret = hcall_set_vcpu_regs(vm->vmid, virt_to_phys(cpu_regs));
if (ret < 0)
dev_dbg(acrn_dev.this_device,
"Failed to set regs state of VM%u!\n",
vm->vmid);
kfree(cpu_regs);
break;
case ACRN_IOCTL_SET_MEMSEG:
if (copy_from_user(&memmap, (void __user *)ioctl_param,
sizeof(memmap)))
return -EFAULT;
ret = acrn_vm_memseg_map(vm, &memmap);
break;
case ACRN_IOCTL_UNSET_MEMSEG:
if (copy_from_user(&memmap, (void __user *)ioctl_param,
sizeof(memmap)))
return -EFAULT;
ret = acrn_vm_memseg_unmap(vm, &memmap);
break;
case ACRN_IOCTL_ASSIGN_MMIODEV:
mmiodev = memdup_user((void __user *)ioctl_param,
sizeof(struct acrn_mmiodev));
if (IS_ERR(mmiodev))
return PTR_ERR(mmiodev);
ret = hcall_assign_mmiodev(vm->vmid, virt_to_phys(mmiodev));
if (ret < 0)
dev_dbg(acrn_dev.this_device,
"Failed to assign MMIO device!\n");
kfree(mmiodev);
break;
case ACRN_IOCTL_DEASSIGN_MMIODEV:
mmiodev = memdup_user((void __user *)ioctl_param,
sizeof(struct acrn_mmiodev));
if (IS_ERR(mmiodev))
return PTR_ERR(mmiodev);
ret = hcall_deassign_mmiodev(vm->vmid, virt_to_phys(mmiodev));
if (ret < 0)
dev_dbg(acrn_dev.this_device,
"Failed to deassign MMIO device!\n");
kfree(mmiodev);
break;
case ACRN_IOCTL_ASSIGN_PCIDEV:
pcidev = memdup_user((void __user *)ioctl_param,
sizeof(struct acrn_pcidev));
if (IS_ERR(pcidev))
return PTR_ERR(pcidev);
ret = hcall_assign_pcidev(vm->vmid, virt_to_phys(pcidev));
if (ret < 0)
dev_dbg(acrn_dev.this_device,
"Failed to assign pci device!\n");
kfree(pcidev);
break;
case ACRN_IOCTL_DEASSIGN_PCIDEV:
pcidev = memdup_user((void __user *)ioctl_param,
sizeof(struct acrn_pcidev));
if (IS_ERR(pcidev))
return PTR_ERR(pcidev);
ret = hcall_deassign_pcidev(vm->vmid, virt_to_phys(pcidev));
if (ret < 0)
dev_dbg(acrn_dev.this_device,
"Failed to deassign pci device!\n");
kfree(pcidev);
break;
case ACRN_IOCTL_CREATE_VDEV:
vdev = memdup_user((void __user *)ioctl_param,
sizeof(struct acrn_vdev));
if (IS_ERR(vdev))
return PTR_ERR(vdev);
ret = hcall_create_vdev(vm->vmid, virt_to_phys(vdev));
if (ret < 0)
dev_dbg(acrn_dev.this_device,
"Failed to create virtual device!\n");
kfree(vdev);
break;
case ACRN_IOCTL_DESTROY_VDEV:
vdev = memdup_user((void __user *)ioctl_param,
sizeof(struct acrn_vdev));
if (IS_ERR(vdev))
return PTR_ERR(vdev);
ret = hcall_destroy_vdev(vm->vmid, virt_to_phys(vdev));
if (ret < 0)
dev_dbg(acrn_dev.this_device,
"Failed to destroy virtual device!\n");
kfree(vdev);
break;
case ACRN_IOCTL_SET_PTDEV_INTR:
irq_info = memdup_user((void __user *)ioctl_param,
sizeof(struct acrn_ptdev_irq));
if (IS_ERR(irq_info))
return PTR_ERR(irq_info);
ret = hcall_set_ptdev_intr(vm->vmid, virt_to_phys(irq_info));
if (ret < 0)
dev_dbg(acrn_dev.this_device,
"Failed to configure intr for ptdev!\n");
kfree(irq_info);
break;
case ACRN_IOCTL_RESET_PTDEV_INTR:
irq_info = memdup_user((void __user *)ioctl_param,
sizeof(struct acrn_ptdev_irq));
if (IS_ERR(irq_info))
return PTR_ERR(irq_info);
ret = hcall_reset_ptdev_intr(vm->vmid, virt_to_phys(irq_info));
if (ret < 0)
dev_dbg(acrn_dev.this_device,
"Failed to reset intr for ptdev!\n");
kfree(irq_info);
break;
case ACRN_IOCTL_SET_IRQLINE:
ret = hcall_set_irqline(vm->vmid, ioctl_param);
if (ret < 0)
dev_dbg(acrn_dev.this_device,
"Failed to set interrupt line!\n");
break;
case ACRN_IOCTL_INJECT_MSI:
msi = memdup_user((void __user *)ioctl_param,
sizeof(struct acrn_msi_entry));
if (IS_ERR(msi))
return PTR_ERR(msi);
ret = hcall_inject_msi(vm->vmid, virt_to_phys(msi));
if (ret < 0)
dev_dbg(acrn_dev.this_device,
"Failed to inject MSI!\n");
kfree(msi);
break;
case ACRN_IOCTL_VM_INTR_MONITOR:
ret = pin_user_pages_fast(ioctl_param, 1,
FOLL_WRITE | FOLL_LONGTERM, &page);
if (unlikely(ret != 1)) {
dev_dbg(acrn_dev.this_device,
"Failed to pin intr hdr buffer!\n");
return -EFAULT;
}
ret = hcall_vm_intr_monitor(vm->vmid, page_to_phys(page));
if (ret < 0) {
unpin_user_page(page);
dev_dbg(acrn_dev.this_device,
"Failed to monitor intr data!\n");
return ret;
}
if (vm->monitor_page)
unpin_user_page(vm->monitor_page);
vm->monitor_page = page;
break;
case ACRN_IOCTL_CREATE_IOREQ_CLIENT:
if (vm->default_client)
return -EEXIST;
if (!acrn_ioreq_client_create(vm, NULL, NULL, true, "acrndm"))
ret = -EINVAL;
break;
case ACRN_IOCTL_DESTROY_IOREQ_CLIENT:
if (vm->default_client)
acrn_ioreq_client_destroy(vm->default_client);
break;
case ACRN_IOCTL_ATTACH_IOREQ_CLIENT:
if (vm->default_client)
ret = acrn_ioreq_client_wait(vm->default_client);
else
ret = -ENODEV;
break;
case ACRN_IOCTL_NOTIFY_REQUEST_FINISH:
if (copy_from_user(¬ify, (void __user *)ioctl_param,
sizeof(struct acrn_ioreq_notify)))
return -EFAULT;
if (notify.reserved != 0)
return -EINVAL;
ret = acrn_ioreq_request_default_complete(vm, notify.vcpu);
break;
case ACRN_IOCTL_CLEAR_VM_IOREQ:
acrn_ioreq_request_clear(vm);
break;
case ACRN_IOCTL_PM_GET_CPU_STATE:
if (copy_from_user(&cstate_cmd, (void __user *)ioctl_param,
sizeof(cstate_cmd)))
return -EFAULT;
ret = pmcmd_ioctl(cstate_cmd, (void __user *)ioctl_param);
break;
case ACRN_IOCTL_IOEVENTFD:
if (copy_from_user(&ioeventfd, (void __user *)ioctl_param,
sizeof(ioeventfd)))
return -EFAULT;
if (ioeventfd.reserved != 0)
return -EINVAL;
ret = acrn_ioeventfd_config(vm, &ioeventfd);
break;
case ACRN_IOCTL_IRQFD:
if (copy_from_user(&irqfd, (void __user *)ioctl_param,
sizeof(irqfd)))
return -EFAULT;
ret = acrn_irqfd_config(vm, &irqfd);
break;
default:
dev_dbg(acrn_dev.this_device, "Unknown IOCTL 0x%x!\n", cmd);
ret = -ENOTTY;
}
return ret;
}
static int acrn_dev_release(struct inode *inode, struct file *filp)
{
struct acrn_vm *vm = filp->private_data;
acrn_vm_destroy(vm);
kfree(vm);
return 0;
}
static ssize_t remove_cpu_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
u64 cpu, lapicid;
int ret;
if (kstrtoull(buf, 0, &cpu) < 0)
return -EINVAL;
if (cpu >= num_possible_cpus() || cpu == 0 || !cpu_is_hotpluggable(cpu))
return -EINVAL;
if (cpu_online(cpu))
remove_cpu(cpu);
lapicid = cpu_data(cpu).apicid;
dev_dbg(dev, "Try to remove cpu %lld with lapicid %lld\n", cpu, lapicid);
ret = hcall_sos_remove_cpu(lapicid);
if (ret < 0) {
dev_err(dev, "Failed to remove cpu %lld!\n", cpu);
goto fail_remove;
}
return count;
fail_remove:
add_cpu(cpu);
return ret;
}
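/*
 * Illustrative usage (sysfs path assumed from the misc device name and
 * the attribute registered below): from the Service VM shell,
 *
 *	echo 3 > /sys/class/misc/acrn_hsm/remove_cpu
 *
 * offlines CPU 3 in Linux, then asks the hypervisor to withdraw it; on
 * hypercall failure the CPU is brought back online.
 */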
static DEVICE_ATTR_WO(remove_cpu);
static umode_t acrn_attr_visible(struct kobject *kobj, struct attribute *a, int n)
{
if (a == &dev_attr_remove_cpu.attr)
return IS_ENABLED(CONFIG_HOTPLUG_CPU) ? a->mode : 0;
return a->mode;
}
static struct attribute *acrn_attrs[] = {
&dev_attr_remove_cpu.attr,
NULL
};
static struct attribute_group acrn_attr_group = {
.attrs = acrn_attrs,
.is_visible = acrn_attr_visible,
};
static const struct attribute_group *acrn_attr_groups[] = {
&acrn_attr_group,
NULL
};
static const struct file_operations acrn_fops = {
.owner = THIS_MODULE,
.open = acrn_dev_open,
.release = acrn_dev_release,
.unlocked_ioctl = acrn_dev_ioctl,
};
struct miscdevice acrn_dev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "acrn_hsm",
.fops = &acrn_fops,
.groups = acrn_attr_groups,
};
static int __init hsm_init(void)
{
int ret;
if (x86_hyper_type != X86_HYPER_ACRN)
return -ENODEV;
if (!(cpuid_eax(ACRN_CPUID_FEATURES) & ACRN_FEATURE_PRIVILEGED_VM))
return -EPERM;
ret = misc_register(&acrn_dev);
if (ret) {
pr_err("Create misc dev failed!\n");
return ret;
}
ret = acrn_ioreq_intr_setup();
if (ret) {
pr_err("Setup I/O request handler failed!\n");
misc_deregister(&acrn_dev);
return ret;
}
return 0;
}
static void __exit hsm_exit(void)
{
acrn_ioreq_intr_remove();
misc_deregister(&acrn_dev);
}
module_init(hsm_init);
module_exit(hsm_exit);
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ACRN Hypervisor Service Module (HSM)");
| linux-master | drivers/virt/acrn/hsm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ACRN_HSM: Virtual Machine management
*
* Copyright (C) 2020 Intel Corporation. All rights reserved.
*
* Authors:
* Jason Chen CJ <[email protected]>
* Yakui Zhao <[email protected]>
*/
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include "acrn_drv.h"
/* List of VMs */
LIST_HEAD(acrn_vm_list);
/*
 * acrn_vm_list is read in a worker thread which dispatches I/O requests
 * and is written to in the VM creation ioctl. Use the rwlock mechanism
 * to protect it.
 */
DEFINE_RWLOCK(acrn_vm_list_lock);
struct acrn_vm *acrn_vm_create(struct acrn_vm *vm,
struct acrn_vm_creation *vm_param)
{
int ret;
ret = hcall_create_vm(virt_to_phys(vm_param));
if (ret < 0 || vm_param->vmid == ACRN_INVALID_VMID) {
dev_err(acrn_dev.this_device,
"Failed to create VM! Error: %d\n", ret);
return NULL;
}
mutex_init(&vm->regions_mapping_lock);
INIT_LIST_HEAD(&vm->ioreq_clients);
spin_lock_init(&vm->ioreq_clients_lock);
vm->vmid = vm_param->vmid;
vm->vcpu_num = vm_param->vcpu_num;
if (acrn_ioreq_init(vm, vm_param->ioreq_buf) < 0) {
hcall_destroy_vm(vm_param->vmid);
vm->vmid = ACRN_INVALID_VMID;
return NULL;
}
write_lock_bh(&acrn_vm_list_lock);
list_add(&vm->list, &acrn_vm_list);
write_unlock_bh(&acrn_vm_list_lock);
acrn_ioeventfd_init(vm);
acrn_irqfd_init(vm);
dev_dbg(acrn_dev.this_device, "VM %u created.\n", vm->vmid);
return vm;
}
int acrn_vm_destroy(struct acrn_vm *vm)
{
int ret;
if (vm->vmid == ACRN_INVALID_VMID ||
test_and_set_bit(ACRN_VM_FLAG_DESTROYED, &vm->flags))
return 0;
ret = hcall_destroy_vm(vm->vmid);
if (ret < 0) {
dev_err(acrn_dev.this_device,
"Failed to destroy VM %u\n", vm->vmid);
clear_bit(ACRN_VM_FLAG_DESTROYED, &vm->flags);
return ret;
}
/* Remove from global VM list */
write_lock_bh(&acrn_vm_list_lock);
list_del_init(&vm->list);
write_unlock_bh(&acrn_vm_list_lock);
acrn_ioeventfd_deinit(vm);
acrn_irqfd_deinit(vm);
acrn_ioreq_deinit(vm);
	if (vm->monitor_page) {
		/* The page was pinned with pin_user_pages_fast(), so unpin it */
		unpin_user_page(vm->monitor_page);
		vm->monitor_page = NULL;
	}
acrn_vm_all_ram_unmap(vm);
dev_dbg(acrn_dev.this_device, "VM %u destroyed.\n", vm->vmid);
vm->vmid = ACRN_INVALID_VMID;
return 0;
}
/**
 * acrn_msi_inject() - Inject an MSI interrupt into a User VM
* @vm: User VM
* @msi_addr: The MSI address
* @msi_data: The MSI data
*
* Return: 0 on success, <0 on error
*/
int acrn_msi_inject(struct acrn_vm *vm, u64 msi_addr, u64 msi_data)
{
struct acrn_msi_entry *msi;
int ret;
/* might be used in interrupt context, so use GFP_ATOMIC */
msi = kzalloc(sizeof(*msi), GFP_ATOMIC);
if (!msi)
return -ENOMEM;
/*
* msi_addr: addr[19:12] with dest vcpu id
* msi_data: data[7:0] with vector
*/
msi->msi_addr = msi_addr;
msi->msi_data = msi_data;
ret = hcall_inject_msi(vm->vmid, virt_to_phys(msi));
if (ret < 0)
dev_err(acrn_dev.this_device,
"Failed to inject MSI to VM %u!\n", vm->vmid);
kfree(msi);
return ret;
}
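/*
 * Illustrative helpers (not part of the driver): composing msi_addr and
 * msi_data per the layout noted above -- destination vcpu id in
 * addr[19:12], vector in data[7:0]. Leaving all other bits zero is an
 * assumption of this sketch.
 */
static inline u64 example_msi_addr(u32 dest_vcpu)
{
	return ((u64)dest_vcpu & 0xff) << 12;	/* addr[19:12] */
}

static inline u64 example_msi_data(u8 vector)
{
	return vector;				/* data[7:0] */
}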
| linux-master | drivers/virt/acrn/vm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ACRN HSM eventfd - use eventfd objects to signal expected I/O requests
*
* Copyright (C) 2020 Intel Corporation. All rights reserved.
*
* Authors:
* Shuo Liu <[email protected]>
* Yakui Zhao <[email protected]>
*/
#include <linux/eventfd.h>
#include <linux/slab.h>
#include "acrn_drv.h"
/**
* struct hsm_ioeventfd - Properties of HSM ioeventfd
* @list: Entry within &acrn_vm.ioeventfds of ioeventfds of a VM
* @eventfd: Eventfd of the HSM ioeventfd
* @addr: Address of I/O range
* @data: Data for matching
* @length: Length of I/O range
* @type: Type of I/O range (ACRN_IOREQ_TYPE_MMIO/ACRN_IOREQ_TYPE_PORTIO)
* @wildcard: Data matching or not
*/
struct hsm_ioeventfd {
struct list_head list;
struct eventfd_ctx *eventfd;
u64 addr;
u64 data;
int length;
int type;
bool wildcard;
};
static inline int ioreq_type_from_flags(int flags)
{
return flags & ACRN_IOEVENTFD_FLAG_PIO ?
ACRN_IOREQ_TYPE_PORTIO : ACRN_IOREQ_TYPE_MMIO;
}
static void acrn_ioeventfd_shutdown(struct acrn_vm *vm, struct hsm_ioeventfd *p)
{
lockdep_assert_held(&vm->ioeventfds_lock);
eventfd_ctx_put(p->eventfd);
list_del(&p->list);
kfree(p);
}
static bool hsm_ioeventfd_is_conflict(struct acrn_vm *vm,
struct hsm_ioeventfd *ioeventfd)
{
struct hsm_ioeventfd *p;
lockdep_assert_held(&vm->ioeventfds_lock);
/* If either one is a wildcard, the data matching is skipped. */
list_for_each_entry(p, &vm->ioeventfds, list)
if (p->eventfd == ioeventfd->eventfd &&
p->addr == ioeventfd->addr &&
p->type == ioeventfd->type &&
(p->wildcard || ioeventfd->wildcard ||
p->data == ioeventfd->data))
return true;
return false;
}
/*
 * Assign an eventfd to a VM and create an HSM ioeventfd associated with
 * the eventfd. The properties of the HSM ioeventfd are built from a
 * &struct acrn_ioeventfd.
 */
static int acrn_ioeventfd_assign(struct acrn_vm *vm,
struct acrn_ioeventfd *args)
{
struct eventfd_ctx *eventfd;
struct hsm_ioeventfd *p;
int ret;
/* Check for range overflow */
if (args->addr + args->len < args->addr)
return -EINVAL;
/*
 * Currently, acrn_ioeventfd is used to support vhost. Accesses of
 * width 1, 2, 4 or 8 bytes cover vhost's requirements.
 */
if (!(args->len == 1 || args->len == 2 ||
args->len == 4 || args->len == 8))
return -EINVAL;
eventfd = eventfd_ctx_fdget(args->fd);
if (IS_ERR(eventfd))
return PTR_ERR(eventfd);
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p) {
ret = -ENOMEM;
goto fail;
}
INIT_LIST_HEAD(&p->list);
p->addr = args->addr;
p->length = args->len;
p->eventfd = eventfd;
p->type = ioreq_type_from_flags(args->flags);
/*
 * ACRN_IOEVENTFD_FLAG_DATAMATCH is set by virtio 1.0 support, where a
 * write to a virtqueue's notification register carries the queue index
 * that must be matched. Without the flag there is no data matching
 * requirement and the ioeventfd acts as a wildcard.
 */
if (args->flags & ACRN_IOEVENTFD_FLAG_DATAMATCH)
p->data = args->data;
else
p->wildcard = true;
mutex_lock(&vm->ioeventfds_lock);
if (hsm_ioeventfd_is_conflict(vm, p)) {
ret = -EEXIST;
goto unlock_fail;
}
/* register the I/O range into ioreq client */
ret = acrn_ioreq_range_add(vm->ioeventfd_client, p->type,
p->addr, p->addr + p->length - 1);
if (ret < 0)
goto unlock_fail;
list_add_tail(&p->list, &vm->ioeventfds);
mutex_unlock(&vm->ioeventfds_lock);
return 0;
unlock_fail:
mutex_unlock(&vm->ioeventfds_lock);
kfree(p);
fail:
eventfd_ctx_put(eventfd);
return ret;
}
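/*
 * Illustrative user-space sketch: registering a 4-byte MMIO doorbell
 * through the ACRN_IOCTL_IOEVENTFD ioctl dispatched in hsm.c. Field
 * names follow the uses above; the device node, header path and the
 * doorbell address are assumptions of this example.
 */
#if 0	/* user-space example, not kernel code */
#include <fcntl.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/acrn.h>

static int register_doorbell(void)
{
	int hsm_fd = open("/dev/acrn_hsm", O_RDWR);
	int efd = eventfd(0, EFD_NONBLOCK);
	struct acrn_ioeventfd args = {
		.fd = efd,
		.flags = ACRN_IOEVENTFD_FLAG_DATAMATCH,
		.addr = 0xd0000000,	/* assumed doorbell GPA */
		.len = 4,
		.data = 1,		/* match virtqueue index 1 */
	};

	return ioctl(hsm_fd, ACRN_IOCTL_IOEVENTFD, &args);
}
#endif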
static int acrn_ioeventfd_deassign(struct acrn_vm *vm,
struct acrn_ioeventfd *args)
{
struct hsm_ioeventfd *p;
struct eventfd_ctx *eventfd;
eventfd = eventfd_ctx_fdget(args->fd);
if (IS_ERR(eventfd))
return PTR_ERR(eventfd);
mutex_lock(&vm->ioeventfds_lock);
list_for_each_entry(p, &vm->ioeventfds, list) {
if (p->eventfd != eventfd)
continue;
acrn_ioreq_range_del(vm->ioeventfd_client, p->type,
p->addr, p->addr + p->length - 1);
acrn_ioeventfd_shutdown(vm, p);
break;
}
mutex_unlock(&vm->ioeventfds_lock);
eventfd_ctx_put(eventfd);
return 0;
}
static struct hsm_ioeventfd *hsm_ioeventfd_match(struct acrn_vm *vm, u64 addr,
u64 data, int len, int type)
{
struct hsm_ioeventfd *p = NULL;
lockdep_assert_held(&vm->ioeventfds_lock);
list_for_each_entry(p, &vm->ioeventfds, list) {
if (p->type == type && p->addr == addr && p->length >= len &&
(p->wildcard || p->data == data))
return p;
}
return NULL;
}
static int acrn_ioeventfd_handler(struct acrn_ioreq_client *client,
struct acrn_io_request *req)
{
struct hsm_ioeventfd *p;
u64 addr, val;
int size;
if (req->type == ACRN_IOREQ_TYPE_MMIO) {
/*
 * I/O requests are dispatched by range check only, so an
 * acrn_ioreq_client needs to process both READ and WRITE accesses
 * of the same range. READ accesses are safe to ignore here
 * because virtio PCI devices write the notify registers for
 * notification.
 */
if (req->reqs.mmio_request.direction == ACRN_IOREQ_DIR_READ) {
/* reads do nothing and return 0 */
req->reqs.mmio_request.value = 0;
return 0;
}
addr = req->reqs.mmio_request.address;
size = req->reqs.mmio_request.size;
val = req->reqs.mmio_request.value;
} else {
if (req->reqs.pio_request.direction == ACRN_IOREQ_DIR_READ) {
/* reads do nothing and return 0 */
req->reqs.pio_request.value = 0;
return 0;
}
addr = req->reqs.pio_request.address;
size = req->reqs.pio_request.size;
val = req->reqs.pio_request.value;
}
mutex_lock(&client->vm->ioeventfds_lock);
p = hsm_ioeventfd_match(client->vm, addr, val, size, req->type);
if (p)
eventfd_signal(p->eventfd, 1);
mutex_unlock(&client->vm->ioeventfds_lock);
return 0;
}
int acrn_ioeventfd_config(struct acrn_vm *vm, struct acrn_ioeventfd *args)
{
int ret;
if (args->flags & ACRN_IOEVENTFD_FLAG_DEASSIGN)
ret = acrn_ioeventfd_deassign(vm, args);
else
ret = acrn_ioeventfd_assign(vm, args);
return ret;
}
int acrn_ioeventfd_init(struct acrn_vm *vm)
{
char name[ACRN_NAME_LEN];
mutex_init(&vm->ioeventfds_lock);
INIT_LIST_HEAD(&vm->ioeventfds);
snprintf(name, sizeof(name), "ioeventfd-%u", vm->vmid);
vm->ioeventfd_client = acrn_ioreq_client_create(vm,
acrn_ioeventfd_handler,
NULL, false, name);
if (!vm->ioeventfd_client) {
dev_err(acrn_dev.this_device, "Failed to create ioeventfd ioreq client!\n");
return -EINVAL;
}
dev_dbg(acrn_dev.this_device, "VM %u ioeventfd init.\n", vm->vmid);
return 0;
}
void acrn_ioeventfd_deinit(struct acrn_vm *vm)
{
struct hsm_ioeventfd *p, *next;
dev_dbg(acrn_dev.this_device, "VM %u ioeventfd deinit.\n", vm->vmid);
acrn_ioreq_client_destroy(vm->ioeventfd_client);
mutex_lock(&vm->ioeventfds_lock);
list_for_each_entry_safe(p, next, &vm->ioeventfds, list)
acrn_ioeventfd_shutdown(vm, p);
mutex_unlock(&vm->ioeventfds_lock);
}
| linux-master | drivers/virt/acrn/ioeventfd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ACRN: Memory mapping management
*
* Copyright (C) 2020 Intel Corporation. All rights reserved.
*
* Authors:
* Fei Li <[email protected]>
* Shuo Liu <[email protected]>
*/
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include "acrn_drv.h"
static int modify_region(struct acrn_vm *vm, struct vm_memory_region_op *region)
{
struct vm_memory_region_batch *regions;
int ret;
regions = kzalloc(sizeof(*regions), GFP_KERNEL);
if (!regions)
return -ENOMEM;
regions->vmid = vm->vmid;
regions->regions_num = 1;
regions->regions_gpa = virt_to_phys(region);
ret = hcall_set_memory_regions(virt_to_phys(regions));
if (ret < 0)
dev_dbg(acrn_dev.this_device,
"Failed to set memory region for VM[%u]!\n", vm->vmid);
kfree(regions);
return ret;
}
/**
* acrn_mm_region_add() - Set up the EPT mapping of a memory region.
* @vm: User VM.
* @user_gpa: A GPA of User VM.
* @service_gpa: A GPA of Service VM.
* @size: Size of the region.
* @mem_type: Combination of ACRN_MEM_TYPE_*.
* @mem_access_right: Combination of ACRN_MEM_ACCESS_*.
*
* Return: 0 on success, <0 on error.
*/
int acrn_mm_region_add(struct acrn_vm *vm, u64 user_gpa, u64 service_gpa,
u64 size, u32 mem_type, u32 mem_access_right)
{
struct vm_memory_region_op *region;
int ret = 0;
region = kzalloc(sizeof(*region), GFP_KERNEL);
if (!region)
return -ENOMEM;
region->type = ACRN_MEM_REGION_ADD;
region->user_vm_pa = user_gpa;
region->service_vm_pa = service_gpa;
region->size = size;
region->attr = ((mem_type & ACRN_MEM_TYPE_MASK) |
(mem_access_right & ACRN_MEM_ACCESS_RIGHT_MASK));
ret = modify_region(vm, region);
dev_dbg(acrn_dev.this_device,
"%s: user-GPA[%pK] service-GPA[%pK] size[0x%llx].\n",
__func__, (void *)user_gpa, (void *)service_gpa, size);
kfree(region);
return ret;
}
/**
 * acrn_mm_region_del() - Delete the EPT mapping of a memory region.
* @vm: User VM.
* @user_gpa: A GPA of the User VM.
* @size: Size of the region.
*
* Return: 0 on success, <0 for error.
*/
int acrn_mm_region_del(struct acrn_vm *vm, u64 user_gpa, u64 size)
{
struct vm_memory_region_op *region;
int ret = 0;
region = kzalloc(sizeof(*region), GFP_KERNEL);
if (!region)
return -ENOMEM;
region->type = ACRN_MEM_REGION_DEL;
region->user_vm_pa = user_gpa;
region->service_vm_pa = 0UL;
region->size = size;
region->attr = 0U;
ret = modify_region(vm, region);
dev_dbg(acrn_dev.this_device, "%s: user-GPA[%pK] size[0x%llx].\n",
__func__, (void *)user_gpa, size);
kfree(region);
return ret;
}
int acrn_vm_memseg_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
{
int ret;
if (memmap->type == ACRN_MEMMAP_RAM)
return acrn_vm_ram_map(vm, memmap);
if (memmap->type != ACRN_MEMMAP_MMIO) {
dev_dbg(acrn_dev.this_device,
"Invalid memmap type: %u\n", memmap->type);
return -EINVAL;
}
ret = acrn_mm_region_add(vm, memmap->user_vm_pa,
memmap->service_vm_pa, memmap->len,
ACRN_MEM_TYPE_UC, memmap->attr);
if (ret < 0)
dev_dbg(acrn_dev.this_device,
"Add memory region failed, VM[%u]!\n", vm->vmid);
return ret;
}
int acrn_vm_memseg_unmap(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
{
int ret;
if (memmap->type != ACRN_MEMMAP_MMIO) {
dev_dbg(acrn_dev.this_device,
"Invalid memmap type: %u\n", memmap->type);
return -EINVAL;
}
ret = acrn_mm_region_del(vm, memmap->user_vm_pa, memmap->len);
if (ret < 0)
dev_dbg(acrn_dev.this_device,
"Del memory region failed, VM[%u]!\n", vm->vmid);
return ret;
}
/**
* acrn_vm_ram_map() - Create a RAM EPT mapping of User VM.
* @vm: The User VM pointer
* @memmap: Info of the EPT mapping
*
* Return: 0 on success, <0 for error.
*/
int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
{
struct vm_memory_region_batch *regions_info;
int nr_pages, i = 0, order, nr_regions = 0;
struct vm_memory_mapping *region_mapping;
struct vm_memory_region_op *vm_region;
struct page **pages = NULL, *page;
void *remap_vaddr;
int ret, pinned;
u64 user_vm_pa;
unsigned long pfn;
struct vm_area_struct *vma;
if (!vm || !memmap)
return -EINVAL;
mmap_read_lock(current->mm);
vma = vma_lookup(current->mm, memmap->vma_base);
if (vma && ((vma->vm_flags & VM_PFNMAP) != 0)) {
if ((memmap->vma_base + memmap->len) > vma->vm_end) {
mmap_read_unlock(current->mm);
return -EINVAL;
}
ret = follow_pfn(vma, memmap->vma_base, &pfn);
mmap_read_unlock(current->mm);
if (ret < 0) {
dev_dbg(acrn_dev.this_device,
"Failed to lookup PFN at VMA:%pK.\n", (void *)memmap->vma_base);
return ret;
}
return acrn_mm_region_add(vm, memmap->user_vm_pa,
PFN_PHYS(pfn), memmap->len,
ACRN_MEM_TYPE_WB, memmap->attr);
}
mmap_read_unlock(current->mm);
/* Get the number of pages in the map region */
nr_pages = memmap->len >> PAGE_SHIFT;
pages = vzalloc(array_size(nr_pages, sizeof(*pages)));
if (!pages)
return -ENOMEM;
/* Pin the pages of the user memory map region */
pinned = pin_user_pages_fast(memmap->vma_base,
nr_pages, FOLL_WRITE | FOLL_LONGTERM,
pages);
if (pinned < 0) {
ret = pinned;
goto free_pages;
} else if (pinned != nr_pages) {
ret = -EFAULT;
goto put_pages;
}
/* Create a kernel map for the map region */
remap_vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
if (!remap_vaddr) {
ret = -ENOMEM;
goto put_pages;
}
/* Record Service VM va <-> User VM pa mapping */
mutex_lock(&vm->regions_mapping_lock);
region_mapping = &vm->regions_mapping[vm->regions_mapping_count];
if (vm->regions_mapping_count < ACRN_MEM_MAPPING_MAX) {
region_mapping->pages = pages;
region_mapping->npages = nr_pages;
region_mapping->size = memmap->len;
region_mapping->service_vm_va = remap_vaddr;
region_mapping->user_vm_pa = memmap->user_vm_pa;
vm->regions_mapping_count++;
} else {
dev_warn(acrn_dev.this_device,
"Run out of memory mapping slots!\n");
ret = -ENOMEM;
mutex_unlock(&vm->regions_mapping_lock);
goto unmap_no_count;
}
mutex_unlock(&vm->regions_mapping_lock);
/* Calculate count of vm_memory_region_op */
while (i < nr_pages) {
page = pages[i];
VM_BUG_ON_PAGE(PageTail(page), page);
order = compound_order(page);
nr_regions++;
i += 1 << order;
}
/* Prepare the vm_memory_region_batch */
regions_info = kzalloc(struct_size(regions_info, regions_op,
nr_regions), GFP_KERNEL);
if (!regions_info) {
ret = -ENOMEM;
goto unmap_kernel_map;
}
/* Fill each vm_memory_region_op */
vm_region = regions_info->regions_op;
regions_info->vmid = vm->vmid;
regions_info->regions_num = nr_regions;
regions_info->regions_gpa = virt_to_phys(vm_region);
user_vm_pa = memmap->user_vm_pa;
i = 0;
while (i < nr_pages) {
u32 region_size;
page = pages[i];
VM_BUG_ON_PAGE(PageTail(page), page);
order = compound_order(page);
region_size = PAGE_SIZE << order;
vm_region->type = ACRN_MEM_REGION_ADD;
vm_region->user_vm_pa = user_vm_pa;
vm_region->service_vm_pa = page_to_phys(page);
vm_region->size = region_size;
vm_region->attr = (ACRN_MEM_TYPE_WB & ACRN_MEM_TYPE_MASK) |
(memmap->attr & ACRN_MEM_ACCESS_RIGHT_MASK);
vm_region++;
user_vm_pa += region_size;
i += 1 << order;
}
/* Inform the ACRN Hypervisor to set up EPT mappings */
ret = hcall_set_memory_regions(virt_to_phys(regions_info));
if (ret < 0) {
dev_dbg(acrn_dev.this_device,
"Failed to set regions, VM[%u]!\n", vm->vmid);
goto unset_region;
}
kfree(regions_info);
dev_dbg(acrn_dev.this_device,
"%s: VM[%u] service-GVA[%pK] user-GPA[%pK] size[0x%llx]\n",
__func__, vm->vmid,
remap_vaddr, (void *)memmap->user_vm_pa, memmap->len);
return ret;
unset_region:
kfree(regions_info);
unmap_kernel_map:
mutex_lock(&vm->regions_mapping_lock);
vm->regions_mapping_count--;
mutex_unlock(&vm->regions_mapping_lock);
unmap_no_count:
vunmap(remap_vaddr);
put_pages:
for (i = 0; i < pinned; i++)
unpin_user_page(pages[i]);
free_pages:
vfree(pages);
return ret;
}
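/*
 * Worked example (illustrative): suppose pin_user_pages_fast() returned
 * 1024 pages where pages[0] heads a 2 MiB compound page (order 9) and
 * the remaining 512 entries are plain 4 KiB pages. The counting loop
 * above advances i by 512 once and by 1 another 512 times, so
 * nr_regions = 513: the hypervisor is handed one 2 MiB mapping plus
 * 512 4 KiB mappings instead of 1024 separate entries.
 */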
/**
* acrn_vm_all_ram_unmap() - Destroy a RAM EPT mapping of User VM.
* @vm: The User VM
*/
void acrn_vm_all_ram_unmap(struct acrn_vm *vm)
{
struct vm_memory_mapping *region_mapping;
int i, j;
mutex_lock(&vm->regions_mapping_lock);
for (i = 0; i < vm->regions_mapping_count; i++) {
region_mapping = &vm->regions_mapping[i];
vunmap(region_mapping->service_vm_va);
for (j = 0; j < region_mapping->npages; j++)
unpin_user_page(region_mapping->pages[j]);
vfree(region_mapping->pages);
}
mutex_unlock(&vm->regions_mapping_lock);
}
| linux-master | drivers/virt/acrn/mm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ACRN HSM irqfd: use eventfd objects to inject virtual interrupts
*
* Copyright (C) 2020 Intel Corporation. All rights reserved.
*
* Authors:
* Shuo Liu <[email protected]>
* Yakui Zhao <[email protected]>
*/
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include "acrn_drv.h"
static LIST_HEAD(acrn_irqfd_clients);
/**
* struct hsm_irqfd - Properties of HSM irqfd
* @vm: Associated VM pointer
* @wait: Entry of wait-queue
* @shutdown: Async shutdown work
* @eventfd: Associated eventfd
* @list: Entry within &acrn_vm.irqfds of irqfds of a VM
* @pt: Structure for select/poll on the associated eventfd
* @msi: MSI data
*/
struct hsm_irqfd {
struct acrn_vm *vm;
wait_queue_entry_t wait;
struct work_struct shutdown;
struct eventfd_ctx *eventfd;
struct list_head list;
poll_table pt;
struct acrn_msi_entry msi;
};
static void acrn_irqfd_inject(struct hsm_irqfd *irqfd)
{
struct acrn_vm *vm = irqfd->vm;
acrn_msi_inject(vm, irqfd->msi.msi_addr,
irqfd->msi.msi_data);
}
static void hsm_irqfd_shutdown(struct hsm_irqfd *irqfd)
{
u64 cnt;
lockdep_assert_held(&irqfd->vm->irqfds_lock);
/* remove from wait queue */
list_del_init(&irqfd->list);
eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);
eventfd_ctx_put(irqfd->eventfd);
kfree(irqfd);
}
static void hsm_irqfd_shutdown_work(struct work_struct *work)
{
struct hsm_irqfd *irqfd;
struct acrn_vm *vm;
irqfd = container_of(work, struct hsm_irqfd, shutdown);
vm = irqfd->vm;
mutex_lock(&vm->irqfds_lock);
if (!list_empty(&irqfd->list))
hsm_irqfd_shutdown(irqfd);
mutex_unlock(&vm->irqfds_lock);
}
/* Called with wqh->lock held and interrupts disabled */
static int hsm_irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode,
int sync, void *key)
{
unsigned long poll_bits = (unsigned long)key;
struct hsm_irqfd *irqfd;
struct acrn_vm *vm;
irqfd = container_of(wait, struct hsm_irqfd, wait);
vm = irqfd->vm;
if (poll_bits & POLLIN)
/* An event has been signaled, inject an interrupt */
acrn_irqfd_inject(irqfd);
if (poll_bits & POLLHUP)
/* Defer shutdown to a work item since wqh->lock is held here */
queue_work(vm->irqfd_wq, &irqfd->shutdown);
return 0;
}
static void hsm_irqfd_poll_func(struct file *file, wait_queue_head_t *wqh,
poll_table *pt)
{
struct hsm_irqfd *irqfd;
irqfd = container_of(pt, struct hsm_irqfd, pt);
add_wait_queue(wqh, &irqfd->wait);
}
/*
 * Assign an eventfd to a VM and create an HSM irqfd associated with the
 * eventfd. The properties of the HSM irqfd are built from a &struct
 * acrn_irqfd.
 */
static int acrn_irqfd_assign(struct acrn_vm *vm, struct acrn_irqfd *args)
{
struct eventfd_ctx *eventfd = NULL;
struct hsm_irqfd *irqfd, *tmp;
__poll_t events;
struct fd f;
int ret = 0;
irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
if (!irqfd)
return -ENOMEM;
irqfd->vm = vm;
memcpy(&irqfd->msi, &args->msi, sizeof(args->msi));
INIT_LIST_HEAD(&irqfd->list);
INIT_WORK(&irqfd->shutdown, hsm_irqfd_shutdown_work);
f = fdget(args->fd);
if (!f.file) {
ret = -EBADF;
goto out;
}
eventfd = eventfd_ctx_fileget(f.file);
if (IS_ERR(eventfd)) {
ret = PTR_ERR(eventfd);
goto fail;
}
irqfd->eventfd = eventfd;
/*
 * Install custom wake-up handling so we are notified whenever the
 * underlying eventfd is signaled.
 */
init_waitqueue_func_entry(&irqfd->wait, hsm_irqfd_wakeup);
init_poll_funcptr(&irqfd->pt, hsm_irqfd_poll_func);
mutex_lock(&vm->irqfds_lock);
list_for_each_entry(tmp, &vm->irqfds, list) {
if (irqfd->eventfd != tmp->eventfd)
continue;
ret = -EBUSY;
mutex_unlock(&vm->irqfds_lock);
goto fail;
}
list_add_tail(&irqfd->list, &vm->irqfds);
mutex_unlock(&vm->irqfds_lock);
/* Check for a pending event at this stage */
events = vfs_poll(f.file, &irqfd->pt);
if (events & EPOLLIN)
acrn_irqfd_inject(irqfd);
fdput(f);
return 0;
fail:
if (eventfd && !IS_ERR(eventfd))
eventfd_ctx_put(eventfd);
fdput(f);
out:
kfree(irqfd);
return ret;
}
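/*
 * Illustrative user-space sketch (companion to the ioeventfd example in
 * ioeventfd.c): binding an eventfd to an MSI via ACRN_IOCTL_IRQFD, so
 * that signaling 'efd' injects the interrupt. The MSI encoding follows
 * the acrn_msi_inject() comment in vm.c; names outside this file are
 * assumptions of the example.
 */
#if 0	/* user-space example, not kernel code */
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/acrn.h>

static int bind_irqfd(int hsm_fd, unsigned int dest_vcpu, unsigned char vector)
{
	int efd = eventfd(0, EFD_NONBLOCK);
	struct acrn_irqfd args = {
		.fd = efd,
		.flags = 0,
		.msi = {
			.msi_addr = (unsigned long long)(dest_vcpu & 0xff) << 12,
			.msi_data = vector,
		},
	};

	return ioctl(hsm_fd, ACRN_IOCTL_IRQFD, &args);
}
#endif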
static int acrn_irqfd_deassign(struct acrn_vm *vm,
struct acrn_irqfd *args)
{
struct hsm_irqfd *irqfd, *tmp;
struct eventfd_ctx *eventfd;
eventfd = eventfd_ctx_fdget(args->fd);
if (IS_ERR(eventfd))
return PTR_ERR(eventfd);
mutex_lock(&vm->irqfds_lock);
list_for_each_entry_safe(irqfd, tmp, &vm->irqfds, list) {
if (irqfd->eventfd == eventfd) {
hsm_irqfd_shutdown(irqfd);
break;
}
}
mutex_unlock(&vm->irqfds_lock);
eventfd_ctx_put(eventfd);
return 0;
}
int acrn_irqfd_config(struct acrn_vm *vm, struct acrn_irqfd *args)
{
int ret;
if (args->flags & ACRN_IRQFD_FLAG_DEASSIGN)
ret = acrn_irqfd_deassign(vm, args);
else
ret = acrn_irqfd_assign(vm, args);
return ret;
}
int acrn_irqfd_init(struct acrn_vm *vm)
{
INIT_LIST_HEAD(&vm->irqfds);
mutex_init(&vm->irqfds_lock);
vm->irqfd_wq = alloc_workqueue("acrn_irqfd-%u", 0, 0, vm->vmid);
if (!vm->irqfd_wq)
return -ENOMEM;
dev_dbg(acrn_dev.this_device, "VM %u irqfd init.\n", vm->vmid);
return 0;
}
void acrn_irqfd_deinit(struct acrn_vm *vm)
{
struct hsm_irqfd *irqfd, *next;
dev_dbg(acrn_dev.this_device, "VM %u irqfd deinit.\n", vm->vmid);
destroy_workqueue(vm->irqfd_wq);
mutex_lock(&vm->irqfds_lock);
list_for_each_entry_safe(irqfd, next, &vm->irqfds, list)
hsm_irqfd_shutdown(irqfd);
mutex_unlock(&vm->irqfds_lock);
}
| linux-master | drivers/virt/acrn/irqfd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ACRN_HSM: Handle I/O requests
*
* Copyright (C) 2020 Intel Corporation. All rights reserved.
*
* Authors:
* Jason Chen CJ <[email protected]>
* Fengwei Yin <[email protected]>
*/
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/acrn.h>
#include "acrn_drv.h"
static void ioreq_pause(void);
static void ioreq_resume(void);
static void ioreq_dispatcher(struct work_struct *work);
static struct workqueue_struct *ioreq_wq;
static DECLARE_WORK(ioreq_work, ioreq_dispatcher);
static inline bool has_pending_request(struct acrn_ioreq_client *client)
{
return !bitmap_empty(client->ioreqs_map, ACRN_IO_REQUEST_MAX);
}
static inline bool is_destroying(struct acrn_ioreq_client *client)
{
return test_bit(ACRN_IOREQ_CLIENT_DESTROYING, &client->flags);
}
static int ioreq_complete_request(struct acrn_vm *vm, u16 vcpu,
struct acrn_io_request *acrn_req)
{
bool polling_mode;
int ret = 0;
polling_mode = acrn_req->completion_polling;
/* smp_store_release() makes sure the writes are done before marking completion */
smp_store_release(&acrn_req->processed, ACRN_IOREQ_STATE_COMPLETE);
/*
* To fulfill the requirement of real-time in several industry
* scenarios, like automotive, ACRN can run under the partition mode,
* in which User VMs and Service VM are bound to dedicated CPU cores.
* Polling mode of handling the I/O request is introduced to achieve a
* faster I/O request handling. In polling mode, the hypervisor polls
* I/O request's completion. Once an I/O request is marked as
* ACRN_IOREQ_STATE_COMPLETE, hypervisor resumes from the polling point
* to continue the I/O request flow. Thus, the completion notification
* from HSM of I/O request is not needed. Please note,
* completion_polling needs to be read before the I/O request being
* marked as ACRN_IOREQ_STATE_COMPLETE to avoid racing with the
* hypervisor.
*/
if (!polling_mode) {
ret = hcall_notify_req_finish(vm->vmid, vcpu);
if (ret < 0)
dev_err(acrn_dev.this_device,
"Notify I/O request finished failed!\n");
}
return ret;
}
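/*
 * Illustrative sketch of the acquire/release pairing used for the
 * 'processed' state: the completion above publishes with
 * smp_store_release() and the dispatcher in acrn_ioreq_dispatch() reads
 * with smp_load_acquire(), so every payload write made before
 * completion is visible to the next observer of the state.
 */
static void example_publish(struct acrn_io_request *req, u32 state)
{
	/* ... payload writes first ... */
	smp_store_release(&req->processed, state);	/* then the state */
}

static bool example_observe(struct acrn_io_request *req, u32 state)
{
	/* payload reads after this are ordered behind the acquire */
	return smp_load_acquire(&req->processed) == state;
}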
static int acrn_ioreq_complete_request(struct acrn_ioreq_client *client,
u16 vcpu,
struct acrn_io_request *acrn_req)
{
int ret;
if (vcpu >= client->vm->vcpu_num)
return -EINVAL;
clear_bit(vcpu, client->ioreqs_map);
if (!acrn_req) {
acrn_req = (struct acrn_io_request *)client->vm->ioreq_buf;
acrn_req += vcpu;
}
ret = ioreq_complete_request(client->vm, vcpu, acrn_req);
return ret;
}
int acrn_ioreq_request_default_complete(struct acrn_vm *vm, u16 vcpu)
{
int ret = 0;
spin_lock_bh(&vm->ioreq_clients_lock);
if (vm->default_client)
ret = acrn_ioreq_complete_request(vm->default_client,
vcpu, NULL);
spin_unlock_bh(&vm->ioreq_clients_lock);
return ret;
}
/**
* acrn_ioreq_range_add() - Add an iorange monitored by an ioreq client
* @client: The ioreq client
* @type: Type (ACRN_IOREQ_TYPE_MMIO or ACRN_IOREQ_TYPE_PORTIO)
* @start: Start address of iorange
* @end: End address of iorange
*
* Return: 0 on success, <0 on error
*/
int acrn_ioreq_range_add(struct acrn_ioreq_client *client,
u32 type, u64 start, u64 end)
{
struct acrn_ioreq_range *range;
if (end < start) {
dev_err(acrn_dev.this_device,
"Invalid IO range [0x%llx,0x%llx]\n", start, end);
return -EINVAL;
}
range = kzalloc(sizeof(*range), GFP_KERNEL);
if (!range)
return -ENOMEM;
range->type = type;
range->start = start;
range->end = end;
write_lock_bh(&client->range_lock);
list_add(&range->list, &client->range_list);
write_unlock_bh(&client->range_lock);
return 0;
}
/**
 * acrn_ioreq_range_del() - Delete an iorange monitored by an ioreq client
* @client: The ioreq client
* @type: Type (ACRN_IOREQ_TYPE_MMIO or ACRN_IOREQ_TYPE_PORTIO)
* @start: Start address of iorange
* @end: End address of iorange
*/
void acrn_ioreq_range_del(struct acrn_ioreq_client *client,
u32 type, u64 start, u64 end)
{
struct acrn_ioreq_range *range;
write_lock_bh(&client->range_lock);
list_for_each_entry(range, &client->range_list, list) {
if (type == range->type &&
start == range->start &&
end == range->end) {
list_del(&range->list);
kfree(range);
break;
}
}
write_unlock_bh(&client->range_lock);
}
/*
 * ioreq_task() is the execution entity of the handler thread of an I/O
 * client. The handler callback of the I/O client is called within the
 * handler thread.
 */
static int ioreq_task(void *data)
{
struct acrn_ioreq_client *client = data;
struct acrn_io_request *req;
unsigned long *ioreqs_map;
int vcpu, ret;
/*
* Lockless access to ioreqs_map is safe, because
* 1) set_bit() and clear_bit() are atomic operations.
 * 2) I/O requests arrive serialized. The access flow of ioreqs_map is:
* set_bit() - in ioreq_work handler
* Handler callback handles corresponding I/O request
* clear_bit() - in handler thread (include ACRN userspace)
* Mark corresponding I/O request completed
* Loop again if a new I/O request occurs
*/
ioreqs_map = client->ioreqs_map;
while (!kthread_should_stop()) {
acrn_ioreq_client_wait(client);
while (has_pending_request(client)) {
vcpu = find_first_bit(ioreqs_map, client->vm->vcpu_num);
req = client->vm->ioreq_buf->req_slot + vcpu;
ret = client->handler(client, req);
if (ret < 0) {
dev_err(acrn_dev.this_device,
"IO handle failure: %d\n", ret);
break;
}
acrn_ioreq_complete_request(client, vcpu, req);
}
}
return 0;
}
/*
 * For the non-default I/O clients, give them a chance to complete their
 * current I/O requests if there are any. For the default I/O client, it
 * is safe to clear all pending I/O requests because the clearing request
 * comes from ACRN userspace.
 */
void acrn_ioreq_request_clear(struct acrn_vm *vm)
{
struct acrn_ioreq_client *client;
bool has_pending = false;
unsigned long vcpu;
int retry = 10;
/*
 * I/O requests of this VM will be completed directly in
 * acrn_ioreq_dispatch if the ACRN_VM_FLAG_CLEARING_IOREQ flag is set.
 */
set_bit(ACRN_VM_FLAG_CLEARING_IOREQ, &vm->flags);
/*
 * acrn_ioreq_request_clear is only called in the VM reset case. Simply
 * wait 100ms in total for the I/O requests' completion.
 */
do {
spin_lock_bh(&vm->ioreq_clients_lock);
list_for_each_entry(client, &vm->ioreq_clients, list) {
has_pending = has_pending_request(client);
if (has_pending)
break;
}
spin_unlock_bh(&vm->ioreq_clients_lock);
if (has_pending)
schedule_timeout_interruptible(HZ / 100);
} while (has_pending && --retry > 0);
if (retry == 0)
dev_warn(acrn_dev.this_device,
"%s cannot flush pending request!\n", client->name);
/* Clear all ioreqs belonging to the default client */
spin_lock_bh(&vm->ioreq_clients_lock);
client = vm->default_client;
if (client) {
for_each_set_bit(vcpu, client->ioreqs_map, ACRN_IO_REQUEST_MAX)
acrn_ioreq_complete_request(client, vcpu, NULL);
}
spin_unlock_bh(&vm->ioreq_clients_lock);
/* Clear ACRN_VM_FLAG_CLEARING_IOREQ flag after the clearing */
clear_bit(ACRN_VM_FLAG_CLEARING_IOREQ, &vm->flags);
}
int acrn_ioreq_client_wait(struct acrn_ioreq_client *client)
{
if (client->is_default) {
/*
* In the default client, a user space thread waits on the
* waitqueue. The is_destroying() check is used to notify user
* space the client is going to be destroyed.
*/
wait_event_interruptible(client->wq,
has_pending_request(client) ||
is_destroying(client));
if (is_destroying(client))
return -ENODEV;
} else {
wait_event_interruptible(client->wq,
has_pending_request(client) ||
kthread_should_stop());
}
return 0;
}
static bool is_cfg_addr(struct acrn_io_request *req)
{
return ((req->type == ACRN_IOREQ_TYPE_PORTIO) &&
(req->reqs.pio_request.address == 0xcf8));
}
static bool is_cfg_data(struct acrn_io_request *req)
{
return ((req->type == ACRN_IOREQ_TYPE_PORTIO) &&
((req->reqs.pio_request.address >= 0xcfc) &&
(req->reqs.pio_request.address < (0xcfc + 4))));
}
/* The low 8 bits of a supported pci_reg address. */
#define PCI_LOWREG_MASK 0xFC
/* The high 4 bits of a supported pci_reg address */
#define PCI_HIGHREG_MASK 0xF00
/* Max number of supported functions */
#define PCI_FUNCMAX 7
/* Max number of supported slots */
#define PCI_SLOTMAX 31
/* Max number of supported buses */
#define PCI_BUSMAX 255
#define CONF1_ENABLE 0x80000000UL
/*
 * A PCI configuration space access via PIO 0xCF8 and 0xCFC normally takes
 * the following two steps:
 * 1) write the address into the 0xCF8 port
 * 2) access the data in/from 0xCFC
 * This function combines such paired PCI configuration space I/O requests
 * into one ACRN_IOREQ_TYPE_PCICFG type I/O request and continues the
 * processing.
 */
static bool handle_cf8cfc(struct acrn_vm *vm,
struct acrn_io_request *req, u16 vcpu)
{
int offset, pci_cfg_addr, pci_reg;
bool is_handled = false;
if (is_cfg_addr(req)) {
WARN_ON(req->reqs.pio_request.size != 4);
if (req->reqs.pio_request.direction == ACRN_IOREQ_DIR_WRITE)
vm->pci_conf_addr = req->reqs.pio_request.value;
else
req->reqs.pio_request.value = vm->pci_conf_addr;
is_handled = true;
} else if (is_cfg_data(req)) {
if (!(vm->pci_conf_addr & CONF1_ENABLE)) {
if (req->reqs.pio_request.direction ==
ACRN_IOREQ_DIR_READ)
req->reqs.pio_request.value = 0xffffffff;
is_handled = true;
} else {
offset = req->reqs.pio_request.address - 0xcfc;
req->type = ACRN_IOREQ_TYPE_PCICFG;
pci_cfg_addr = vm->pci_conf_addr;
req->reqs.pci_request.bus =
(pci_cfg_addr >> 16) & PCI_BUSMAX;
req->reqs.pci_request.dev =
(pci_cfg_addr >> 11) & PCI_SLOTMAX;
req->reqs.pci_request.func =
(pci_cfg_addr >> 8) & PCI_FUNCMAX;
pci_reg = (pci_cfg_addr & PCI_LOWREG_MASK) +
((pci_cfg_addr >> 16) & PCI_HIGHREG_MASK);
req->reqs.pci_request.reg = pci_reg + offset;
}
}
if (is_handled)
ioreq_complete_request(vm, vcpu, req);
return is_handled;
}
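/*
 * Worked example (illustrative): a guest write of 0x80001810 to 0xCF8
 * has CONF1_ENABLE set and decodes, with the masks above, to bus 0
 * ((0x80001810 >> 16) & PCI_BUSMAX), device 3 ((>> 11) & PCI_SLOTMAX),
 * function 0 ((>> 8) & PCI_FUNCMAX) and register 0x10 (BAR0). A 4-byte
 * access to 0xCFC is then rewritten above into a single
 * ACRN_IOREQ_TYPE_PCICFG request for 0:3.0 reg 0x10. The decode as a
 * hypothetical helper:
 */
static void example_conf1_decode(u32 cf8, u32 *bus, u32 *dev,
				 u32 *func, u32 *reg)
{
	*bus  = (cf8 >> 16) & PCI_BUSMAX;
	*dev  = (cf8 >> 11) & PCI_SLOTMAX;
	*func = (cf8 >> 8)  & PCI_FUNCMAX;
	*reg  = (cf8 & PCI_LOWREG_MASK) | ((cf8 >> 16) & PCI_HIGHREG_MASK);
}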
static bool acrn_in_range(struct acrn_ioreq_range *range,
struct acrn_io_request *req)
{
bool ret = false;
if (range->type == req->type) {
switch (req->type) {
case ACRN_IOREQ_TYPE_MMIO:
if (req->reqs.mmio_request.address >= range->start &&
(req->reqs.mmio_request.address +
req->reqs.mmio_request.size - 1) <= range->end)
ret = true;
break;
case ACRN_IOREQ_TYPE_PORTIO:
if (req->reqs.pio_request.address >= range->start &&
(req->reqs.pio_request.address +
req->reqs.pio_request.size - 1) <= range->end)
ret = true;
break;
default:
break;
}
}
return ret;
}
static struct acrn_ioreq_client *find_ioreq_client(struct acrn_vm *vm,
struct acrn_io_request *req)
{
struct acrn_ioreq_client *client, *found = NULL;
struct acrn_ioreq_range *range;
lockdep_assert_held(&vm->ioreq_clients_lock);
list_for_each_entry(client, &vm->ioreq_clients, list) {
read_lock_bh(&client->range_lock);
list_for_each_entry(range, &client->range_list, list) {
if (acrn_in_range(range, req)) {
found = client;
break;
}
}
read_unlock_bh(&client->range_lock);
if (found)
break;
}
return found ? found : vm->default_client;
}
/**
* acrn_ioreq_client_create() - Create an ioreq client
* @vm: The VM that this client belongs to
 * @handler: The ioreq_handler of the ioreq client. acrn_hsm will create
 *           a kernel thread and call the handler to handle I/O requests.
* @priv: Private data for the handler
* @is_default: If it is the default client
* @name: The name of ioreq client
*
* Return: acrn_ioreq_client pointer on success, NULL on error
*/
struct acrn_ioreq_client *acrn_ioreq_client_create(struct acrn_vm *vm,
ioreq_handler_t handler,
void *priv, bool is_default,
const char *name)
{
struct acrn_ioreq_client *client;
if (!handler && !is_default) {
dev_dbg(acrn_dev.this_device,
"Cannot create non-default client w/o handler!\n");
return NULL;
}
client = kzalloc(sizeof(*client), GFP_KERNEL);
if (!client)
return NULL;
client->handler = handler;
client->vm = vm;
client->priv = priv;
client->is_default = is_default;
if (name)
strncpy(client->name, name, sizeof(client->name) - 1);
rwlock_init(&client->range_lock);
INIT_LIST_HEAD(&client->range_list);
init_waitqueue_head(&client->wq);
if (client->handler) {
client->thread = kthread_run(ioreq_task, client, "VM%u-%s",
client->vm->vmid, client->name);
if (IS_ERR(client->thread)) {
kfree(client);
return NULL;
}
}
spin_lock_bh(&vm->ioreq_clients_lock);
if (is_default)
vm->default_client = client;
else
list_add(&client->list, &vm->ioreq_clients);
spin_unlock_bh(&vm->ioreq_clients_lock);
dev_dbg(acrn_dev.this_device, "Created ioreq client %s.\n", name);
return client;
}
/**
* acrn_ioreq_client_destroy() - Destroy an ioreq client
* @client: The ioreq client
*/
void acrn_ioreq_client_destroy(struct acrn_ioreq_client *client)
{
struct acrn_ioreq_range *range, *next;
struct acrn_vm *vm = client->vm;
dev_dbg(acrn_dev.this_device,
"Destroy ioreq client %s.\n", client->name);
ioreq_pause();
set_bit(ACRN_IOREQ_CLIENT_DESTROYING, &client->flags);
if (client->is_default)
wake_up_interruptible(&client->wq);
else
kthread_stop(client->thread);
spin_lock_bh(&vm->ioreq_clients_lock);
if (client->is_default)
vm->default_client = NULL;
else
list_del(&client->list);
spin_unlock_bh(&vm->ioreq_clients_lock);
write_lock_bh(&client->range_lock);
list_for_each_entry_safe(range, next, &client->range_list, list) {
list_del(&range->list);
kfree(range);
}
write_unlock_bh(&client->range_lock);
kfree(client);
ioreq_resume();
}
static int acrn_ioreq_dispatch(struct acrn_vm *vm)
{
struct acrn_ioreq_client *client;
struct acrn_io_request *req;
int i;
for (i = 0; i < vm->vcpu_num; i++) {
req = vm->ioreq_buf->req_slot + i;
/* smp_load_acquire() orders the read of 'processed' in acrn_io_request */
if (smp_load_acquire(&req->processed) ==
ACRN_IOREQ_STATE_PENDING) {
/* Complete the IO request directly in clearing stage */
if (test_bit(ACRN_VM_FLAG_CLEARING_IOREQ, &vm->flags)) {
ioreq_complete_request(vm, i, req);
continue;
}
if (handle_cf8cfc(vm, req, i))
continue;
spin_lock_bh(&vm->ioreq_clients_lock);
client = find_ioreq_client(vm, req);
if (!client) {
dev_err(acrn_dev.this_device,
"Failed to find ioreq client!\n");
spin_unlock_bh(&vm->ioreq_clients_lock);
return -EINVAL;
}
if (!client->is_default)
req->kernel_handled = 1;
else
req->kernel_handled = 0;
/*
* smp_store_release() makes sure the writes above are visible
* before 'processed' becomes ACRN_IOREQ_STATE_PROCESSING.
*/
smp_store_release(&req->processed,
ACRN_IOREQ_STATE_PROCESSING);
set_bit(i, client->ioreqs_map);
wake_up_interruptible(&client->wq);
spin_unlock_bh(&vm->ioreq_clients_lock);
}
}
return 0;
}
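/*
* Request lifecycle sketch (informational summary of the code above):
* the hypervisor marks a slot ACRN_IOREQ_STATE_PENDING; dispatch picks
* a client, flips the slot to ACRN_IOREQ_STATE_PROCESSING, sets the
* slot bit in client->ioreqs_map and wakes the client; the client's
* handler (or userspace, for the default client) emulates the access;
* ioreq_complete_request() then hands the slot back to the hypervisor.
*/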
static void ioreq_dispatcher(struct work_struct *work)
{
struct acrn_vm *vm;
read_lock(&acrn_vm_list_lock);
list_for_each_entry(vm, &acrn_vm_list, list) {
if (!vm->ioreq_buf)
break;
acrn_ioreq_dispatch(vm);
}
read_unlock(&acrn_vm_list_lock);
}
static void ioreq_intr_handler(void)
{
queue_work(ioreq_wq, &ioreq_work);
}
static void ioreq_pause(void)
{
/* Flush and unarm the handler to ensure no I/O requests are pending */
acrn_remove_intr_handler();
drain_workqueue(ioreq_wq);
}
static void ioreq_resume(void)
{
/* Schedule after enabling in case other clients missed the interrupt */
acrn_setup_intr_handler(ioreq_intr_handler);
queue_work(ioreq_wq, &ioreq_work);
}
int acrn_ioreq_intr_setup(void)
{
acrn_setup_intr_handler(ioreq_intr_handler);
ioreq_wq = alloc_ordered_workqueue("ioreq_wq",
WQ_HIGHPRI | WQ_MEM_RECLAIM);
if (!ioreq_wq) {
dev_err(acrn_dev.this_device, "Failed to alloc workqueue!\n");
acrn_remove_intr_handler();
return -ENOMEM;
}
return 0;
}
void acrn_ioreq_intr_remove(void)
{
if (ioreq_wq)
destroy_workqueue(ioreq_wq);
acrn_remove_intr_handler();
}
int acrn_ioreq_init(struct acrn_vm *vm, u64 buf_vma)
{
struct acrn_ioreq_buffer *set_buffer;
struct page *page;
int ret;
if (vm->ioreq_buf)
return -EEXIST;
set_buffer = kzalloc(sizeof(*set_buffer), GFP_KERNEL);
if (!set_buffer)
return -ENOMEM;
ret = pin_user_pages_fast(buf_vma, 1,
FOLL_WRITE | FOLL_LONGTERM, &page);
if (unlikely(ret != 1) || !page) {
dev_err(acrn_dev.this_device, "Failed to pin ioreq page!\n");
ret = -EFAULT;
goto free_buf;
}
vm->ioreq_buf = page_address(page);
vm->ioreq_page = page;
set_buffer->ioreq_buf = page_to_phys(page);
ret = hcall_set_ioreq_buffer(vm->vmid, virt_to_phys(set_buffer));
if (ret < 0) {
dev_err(acrn_dev.this_device, "Failed to init ioreq buffer!\n");
unpin_user_page(page);
vm->ioreq_buf = NULL;
goto free_buf;
}
dev_dbg(acrn_dev.this_device,
"Init ioreq buffer %pK!\n", vm->ioreq_buf);
ret = 0;
free_buf:
kfree(set_buffer);
return ret;
}
void acrn_ioreq_deinit(struct acrn_vm *vm)
{
struct acrn_ioreq_client *client, *next;
dev_dbg(acrn_dev.this_device,
"Deinit ioreq buffer %pK!\n", vm->ioreq_buf);
/* Destroy all clients belonging to this VM */
list_for_each_entry_safe(client, next, &vm->ioreq_clients, list)
acrn_ioreq_client_destroy(client);
if (vm->default_client)
acrn_ioreq_client_destroy(vm->default_client);
if (vm->ioreq_buf && vm->ioreq_page) {
unpin_user_page(vm->ioreq_page);
vm->ioreq_buf = NULL;
}
}
| linux-master | drivers/virt/acrn/ioreq.c |
/*
* Runtime PM support code
*
* Copyright (C) 2009-2010 Magnus Damm
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_clock.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/sh_clk.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
static struct dev_pm_domain default_pm_domain = {
.ops = {
USE_PM_CLK_RUNTIME_OPS
USE_PLATFORM_PM_SLEEP_OPS
},
};
static struct pm_clk_notifier_block platform_bus_notifier = {
.pm_domain = &default_pm_domain,
.con_ids = { NULL, },
};
static int __init sh_pm_runtime_init(void)
{
pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
return 0;
}
core_initcall(sh_pm_runtime_init);
| linux-master | drivers/sh/pm_runtime.c |
/*
* Support for virtual IRQ subgroups.
*
* Copyright (C) 2010 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#define pr_fmt(fmt) "intc: " fmt
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include "internals.h"
static struct intc_map_entry intc_irq_xlate[INTC_NR_IRQS];
struct intc_virq_list {
unsigned int irq;
struct intc_virq_list *next;
};
#define for_each_virq(entry, head) \
for (entry = head; entry; entry = entry->next)
/*
* Tags for the radix tree
*/
#define INTC_TAG_VIRQ_NEEDS_ALLOC 0
void intc_irq_xlate_set(unsigned int irq, intc_enum id, struct intc_desc_int *d)
{
unsigned long flags;
raw_spin_lock_irqsave(&intc_big_lock, flags);
intc_irq_xlate[irq].enum_id = id;
intc_irq_xlate[irq].desc = d;
raw_spin_unlock_irqrestore(&intc_big_lock, flags);
}
struct intc_map_entry *intc_irq_xlate_get(unsigned int irq)
{
return intc_irq_xlate + irq;
}
int intc_irq_lookup(const char *chipname, intc_enum enum_id)
{
struct intc_map_entry *ptr;
struct intc_desc_int *d;
int irq = -1;
list_for_each_entry(d, &intc_list, list) {
int tagged;
if (strcmp(d->chip.name, chipname) != 0)
continue;
/*
* Catch early lookups for subgroup VIRQs that have not
* yet been allocated an IRQ. This already includes a
* fast-path out if the tree is untagged, so there is no
* need to explicitly test the root tree.
*/
tagged = radix_tree_tag_get(&d->tree, enum_id,
INTC_TAG_VIRQ_NEEDS_ALLOC);
if (unlikely(tagged))
break;
ptr = radix_tree_lookup(&d->tree, enum_id);
if (ptr) {
irq = ptr - intc_irq_xlate;
break;
}
}
return irq;
}
EXPORT_SYMBOL_GPL(intc_irq_lookup);
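/*
* Example (illustrative; the controller name and enum value are
* hypothetical):
*
*	int irq = intc_irq_lookup("sh7372-intcs", 0x0f00);
*
*	if (irq > 0)
*		ret = request_irq(irq, my_isr, 0, "mydev", NULL);
*/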
static int add_virq_to_pirq(unsigned int irq, unsigned int virq)
{
struct intc_virq_list *entry;
struct intc_virq_list **last = NULL;
/* scan for duplicates */
for_each_virq(entry, irq_get_handler_data(irq)) {
if (entry->irq == virq)
return 0;
last = &entry->next;
}
entry = kzalloc(sizeof(struct intc_virq_list), GFP_ATOMIC);
if (!entry)
return -ENOMEM;
entry->irq = virq;
if (last)
*last = entry;
else
irq_set_handler_data(irq, entry);
return 0;
}
static void intc_virq_handler(struct irq_desc *desc)
{
unsigned int irq = irq_desc_get_irq(desc);
struct irq_data *data = irq_desc_get_irq_data(desc);
struct irq_chip *chip = irq_data_get_irq_chip(data);
struct intc_virq_list *entry, *vlist = irq_data_get_irq_handler_data(data);
struct intc_desc_int *d = get_intc_desc(irq);
chip->irq_mask_ack(data);
for_each_virq(entry, vlist) {
unsigned long addr, handle;
struct irq_desc *vdesc = irq_to_desc(entry->irq);
if (vdesc) {
handle = (unsigned long)irq_desc_get_handler_data(vdesc);
addr = INTC_REG(d, _INTC_ADDR_E(handle), 0);
if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0))
generic_handle_irq_desc(vdesc);
}
}
chip->irq_unmask(data);
}
static unsigned long __init intc_subgroup_data(struct intc_subgroup *subgroup,
struct intc_desc_int *d,
unsigned int index)
{
unsigned int fn = REG_FN_TEST_BASE + (subgroup->reg_width >> 3) - 1;
return _INTC_MK(fn, MODE_ENABLE_REG, intc_get_reg(d, subgroup->reg),
0, 1, (subgroup->reg_width - 1) - index);
}
static void __init intc_subgroup_init_one(struct intc_desc *desc,
struct intc_desc_int *d,
struct intc_subgroup *subgroup)
{
struct intc_map_entry *mapped;
unsigned int pirq;
unsigned long flags;
int i;
mapped = radix_tree_lookup(&d->tree, subgroup->parent_id);
if (!mapped) {
WARN_ON(1);
return;
}
pirq = mapped - intc_irq_xlate;
raw_spin_lock_irqsave(&d->lock, flags);
for (i = 0; i < ARRAY_SIZE(subgroup->enum_ids); i++) {
struct intc_subgroup_entry *entry;
int err;
if (!subgroup->enum_ids[i])
continue;
entry = kmalloc(sizeof(*entry), GFP_NOWAIT);
if (!entry)
break;
entry->pirq = pirq;
entry->enum_id = subgroup->enum_ids[i];
entry->handle = intc_subgroup_data(subgroup, d, i);
err = radix_tree_insert(&d->tree, entry->enum_id, entry);
if (unlikely(err < 0))
break;
radix_tree_tag_set(&d->tree, entry->enum_id,
INTC_TAG_VIRQ_NEEDS_ALLOC);
}
raw_spin_unlock_irqrestore(&d->lock, flags);
}
void __init intc_subgroup_init(struct intc_desc *desc, struct intc_desc_int *d)
{
int i;
if (!desc->hw.subgroups)
return;
for (i = 0; i < desc->hw.nr_subgroups; i++)
intc_subgroup_init_one(desc, d, desc->hw.subgroups + i);
}
static void __init intc_subgroup_map(struct intc_desc_int *d)
{
struct intc_subgroup_entry *entries[32];
unsigned long flags;
unsigned int nr_found;
int i;
raw_spin_lock_irqsave(&d->lock, flags);
restart:
nr_found = radix_tree_gang_lookup_tag_slot(&d->tree,
(void ***)entries, 0, ARRAY_SIZE(entries),
INTC_TAG_VIRQ_NEEDS_ALLOC);
for (i = 0; i < nr_found; i++) {
struct intc_subgroup_entry *entry;
int irq;
entry = radix_tree_deref_slot((void **)entries[i]);
if (unlikely(!entry))
continue;
if (radix_tree_deref_retry(entry))
goto restart;
irq = irq_alloc_desc(numa_node_id());
if (unlikely(irq < 0)) {
pr_err("no more free IRQs, bailing..\n");
break;
}
activate_irq(irq);
pr_info("Setting up a chained VIRQ from %d -> %d\n",
irq, entry->pirq);
intc_irq_xlate_set(irq, entry->enum_id, d);
irq_set_chip_and_handler_name(irq, irq_get_chip(entry->pirq),
handle_simple_irq, "virq");
irq_set_chip_data(irq, irq_get_chip_data(entry->pirq));
irq_set_handler_data(irq, (void *)entry->handle);
/*
* Set the virtual IRQ as non-threadable.
*/
irq_set_nothread(irq);
/* Set handler data before installing the handler */
add_virq_to_pirq(entry->pirq, irq);
irq_set_chained_handler(entry->pirq, intc_virq_handler);
radix_tree_tag_clear(&d->tree, entry->enum_id,
INTC_TAG_VIRQ_NEEDS_ALLOC);
radix_tree_replace_slot(&d->tree, (void **)entries[i],
&intc_irq_xlate[irq]);
}
raw_spin_unlock_irqrestore(&d->lock, flags);
}
void __init intc_finalize(void)
{
struct intc_desc_int *d;
list_for_each_entry(d, &intc_list, list)
if (radix_tree_tagged(&d->tree, INTC_TAG_VIRQ_NEEDS_ALLOC))
intc_subgroup_map(d);
}
| linux-master | drivers/sh/intc/virq.c |
/*
* IRQ domain support for SH INTC subsystem
*
* Copyright (C) 2012 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#define pr_fmt(fmt) "intc: " fmt
#include <linux/irqdomain.h>
#include <linux/sh_intc.h>
#include <linux/export.h>
#include "internals.h"
/**
* intc_evt_xlate() - Generic xlate for vectored IRQs.
*
* This takes care of exception vector to hwirq translation by way of
* evt2irq().
*
* Note: For platforms that use a flat vector space without INTEVT this
* basically just mimics irq_domain_xlate_onecell() by way of a nopped
* out evt2irq() implementation.
*/
static int intc_evt_xlate(struct irq_domain *d, struct device_node *ctrlr,
const u32 *intspec, unsigned int intsize,
unsigned long *out_hwirq, unsigned int *out_type)
{
if (WARN_ON(intsize < 1))
return -EINVAL;
*out_hwirq = evt2irq(intspec[0]);
*out_type = IRQ_TYPE_NONE;
return 0;
}
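/*
* Worked example (illustrative): on SH parts evt2irq() is typically
* ((evt) >> 5) - 16, so an INTEVT vector of 0x600 translates to
* hwirq 32; flat vector space platforms make evt2irq() a no-op and
* this then behaves like irq_domain_xlate_onecell().
*/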
static const struct irq_domain_ops intc_evt_ops = {
.xlate = intc_evt_xlate,
};
void __init intc_irq_domain_init(struct intc_desc_int *d,
struct intc_hw_desc *hw)
{
unsigned int irq_base, irq_end;
/*
* Quick linear revmap check
*/
irq_base = evt2irq(hw->vectors[0].vect);
irq_end = evt2irq(hw->vectors[hw->nr_vectors - 1].vect);
/*
* Linear domains have a hard-wired assertion that IRQs start at
* 0 in order to make some performance optimizations. Lamely
* restrict the linear case to these conditions here, taking the
* tree penalty for linear cases with non-zero hwirq bases.
*/
if (irq_base == 0 && irq_end == (irq_base + hw->nr_vectors - 1))
d->domain = irq_domain_add_linear(NULL, hw->nr_vectors,
&intc_evt_ops, NULL);
else
d->domain = irq_domain_add_tree(NULL, &intc_evt_ops, NULL);
BUG_ON(!d->domain);
}
| linux-master | drivers/sh/intc/irqdomain.c |
/*
* IRQ chip definitions for INTC IRQs.
*
* Copyright (C) 2007, 2008 Magnus Damm
* Copyright (C) 2009 - 2012 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/cpumask.h>
#include <linux/bsearch.h>
#include <linux/io.h>
#include "internals.h"
void _intc_enable(struct irq_data *data, unsigned long handle)
{
unsigned int irq = data->irq;
struct intc_desc_int *d = get_intc_desc(irq);
unsigned long addr;
unsigned int cpu;
for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
#ifdef CONFIG_SMP
if (!cpumask_test_cpu(cpu, irq_data_get_affinity_mask(data)))
continue;
#endif
addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
intc_enable_fns[_INTC_MODE(handle)](addr, handle,
intc_reg_fns[_INTC_FN(handle)], irq);
}
intc_balancing_enable(irq);
}
static void intc_enable(struct irq_data *data)
{
_intc_enable(data, (unsigned long)irq_data_get_irq_chip_data(data));
}
static void intc_disable(struct irq_data *data)
{
unsigned int irq = data->irq;
struct intc_desc_int *d = get_intc_desc(irq);
unsigned long handle = (unsigned long)irq_data_get_irq_chip_data(data);
unsigned long addr;
unsigned int cpu;
intc_balancing_disable(irq);
for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
#ifdef CONFIG_SMP
if (!cpumask_test_cpu(cpu, irq_data_get_affinity_mask(data)))
continue;
#endif
addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
intc_disable_fns[_INTC_MODE(handle)](addr, handle,
intc_reg_fns[_INTC_FN(handle)], irq);
}
}
#ifdef CONFIG_SMP
/*
* This is held with the irq desc lock held, so we don't require any
* additional locking here at the intc desc level. The affinity mask is
* later tested in the enable/disable paths.
*/
static int intc_set_affinity(struct irq_data *data,
const struct cpumask *cpumask,
bool force)
{
if (!cpumask_intersects(cpumask, cpu_online_mask))
return -1;
irq_data_update_affinity(data, cpumask);
return IRQ_SET_MASK_OK_NOCOPY;
}
#endif
static void intc_mask_ack(struct irq_data *data)
{
unsigned int irq = data->irq;
struct intc_desc_int *d = get_intc_desc(irq);
unsigned long handle = intc_get_ack_handle(irq);
void __iomem *addr;
intc_disable(data);
/* read register and write zero only to the associated bit */
if (handle) {
unsigned int value;
addr = (void __iomem *)INTC_REG(d, _INTC_ADDR_D(handle), 0);
value = intc_set_field_from_handle(0, 1, handle);
switch (_INTC_FN(handle)) {
case REG_FN_MODIFY_BASE + 0: /* 8bit */
__raw_readb(addr);
__raw_writeb(0xff ^ value, addr);
break;
case REG_FN_MODIFY_BASE + 1: /* 16bit */
__raw_readw(addr);
__raw_writew(0xffff ^ value, addr);
break;
case REG_FN_MODIFY_BASE + 3: /* 32bit */
__raw_readl(addr);
__raw_writel(0xffffffff ^ value, addr);
break;
default:
BUG();
break;
}
}
}
static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
unsigned int nr_hp,
unsigned int irq)
{
struct intc_handle_int key;
key.irq = irq;
key.handle = 0;
return bsearch(&key, hp, nr_hp, sizeof(*hp), intc_handle_int_cmp);
}
int intc_set_priority(unsigned int irq, unsigned int prio)
{
struct intc_desc_int *d = get_intc_desc(irq);
struct irq_data *data = irq_get_irq_data(irq);
struct intc_handle_int *ihp;
if (!intc_get_prio_level(irq) || prio <= 1)
return -EINVAL;
ihp = intc_find_irq(d->prio, d->nr_prio, irq);
if (ihp) {
if (prio >= (1 << _INTC_WIDTH(ihp->handle)))
return -EINVAL;
intc_set_prio_level(irq, prio);
/*
* only set secondary masking method directly
* primary masking method is using intc_prio_level[irq]
* priority level will be set during next enable()
*/
if (_INTC_FN(ihp->handle) != REG_FN_ERR)
_intc_enable(data, ihp->handle);
}
return 0;
}
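/*
* Example usage (illustrative): raise an IRQ above the default level.
* The level must be greater than 1 and must fit the priority field
* width checked above:
*
*	if (intc_set_priority(irq, 3) < 0)
*		pr_warn("cannot set priority for irq %d\n", irq);
*/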
#define SENSE_VALID_FLAG 0x80
#define VALID(x) ((x) | SENSE_VALID_FLAG)
static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
[IRQ_TYPE_EDGE_FALLING] = VALID(0),
[IRQ_TYPE_EDGE_RISING] = VALID(1),
[IRQ_TYPE_LEVEL_LOW] = VALID(2),
/* SH7706, SH7707 and SH7709 do not support high level triggered */
#if !defined(CONFIG_CPU_SUBTYPE_SH7706) && \
!defined(CONFIG_CPU_SUBTYPE_SH7707) && \
!defined(CONFIG_CPU_SUBTYPE_SH7709)
[IRQ_TYPE_LEVEL_HIGH] = VALID(3),
#endif
#if defined(CONFIG_ARM) /* all recent SH-Mobile / R-Mobile ARM support this */
[IRQ_TYPE_EDGE_BOTH] = VALID(4),
#endif
};
static int intc_set_type(struct irq_data *data, unsigned int type)
{
unsigned int irq = data->irq;
struct intc_desc_int *d = get_intc_desc(irq);
unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK];
struct intc_handle_int *ihp;
unsigned long addr;
if (!value)
return -EINVAL;
value &= ~SENSE_VALID_FLAG;
ihp = intc_find_irq(d->sense, d->nr_sense, irq);
if (ihp) {
/* PINT has 2-bit sense registers, should fail on EDGE_BOTH */
if (value >= (1 << _INTC_WIDTH(ihp->handle)))
return -EINVAL;
addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value);
}
return 0;
}
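/*
* intc_set_type() is not called directly; it is reached through the
* genirq core, e.g. (illustrative):
*
*	irq_set_irq_type(irq, IRQ_TYPE_EDGE_FALLING);
*/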
struct irq_chip intc_irq_chip = {
.irq_mask = intc_disable,
.irq_unmask = intc_enable,
.irq_mask_ack = intc_mask_ack,
.irq_enable = intc_enable,
.irq_disable = intc_disable,
.irq_set_type = intc_set_type,
#ifdef CONFIG_SMP
.irq_set_affinity = intc_set_affinity,
#endif
.flags = IRQCHIP_SKIP_SET_WAKE,
};
| linux-master | drivers/sh/intc/chip.c |
/*
* Shared interrupt handling code for IPR and INTC2 types of IRQs.
*
* Copyright (C) 2007, 2008 Magnus Damm
* Copyright (C) 2009, 2010 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include "internals.h"
static unsigned long ack_handle[INTC_NR_IRQS];
static intc_enum __init intc_grp_id(struct intc_desc *desc,
intc_enum enum_id)
{
struct intc_group *g = desc->hw.groups;
unsigned int i, j;
for (i = 0; g && enum_id && i < desc->hw.nr_groups; i++) {
g = desc->hw.groups + i;
for (j = 0; g->enum_ids[j]; j++) {
if (g->enum_ids[j] != enum_id)
continue;
return g->enum_id;
}
}
return 0;
}
static unsigned int __init _intc_mask_data(struct intc_desc *desc,
struct intc_desc_int *d,
intc_enum enum_id,
unsigned int *reg_idx,
unsigned int *fld_idx)
{
struct intc_mask_reg *mr = desc->hw.mask_regs;
unsigned int fn, mode;
unsigned long reg_e, reg_d;
while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) {
mr = desc->hw.mask_regs + *reg_idx;
for (; *fld_idx < ARRAY_SIZE(mr->enum_ids); (*fld_idx)++) {
if (mr->enum_ids[*fld_idx] != enum_id)
continue;
if (mr->set_reg && mr->clr_reg) {
fn = REG_FN_WRITE_BASE;
mode = MODE_DUAL_REG;
reg_e = mr->clr_reg;
reg_d = mr->set_reg;
} else {
fn = REG_FN_MODIFY_BASE;
if (mr->set_reg) {
mode = MODE_ENABLE_REG;
reg_e = mr->set_reg;
reg_d = mr->set_reg;
} else {
mode = MODE_MASK_REG;
reg_e = mr->clr_reg;
reg_d = mr->clr_reg;
}
}
fn += (mr->reg_width >> 3) - 1;
return _INTC_MK(fn, mode,
intc_get_reg(d, reg_e),
intc_get_reg(d, reg_d),
1,
(mr->reg_width - 1) - *fld_idx);
}
*fld_idx = 0;
(*reg_idx)++;
}
return 0;
}
unsigned int __init
intc_get_mask_handle(struct intc_desc *desc, struct intc_desc_int *d,
intc_enum enum_id, int do_grps)
{
unsigned int i = 0;
unsigned int j = 0;
unsigned int ret;
ret = _intc_mask_data(desc, d, enum_id, &i, &j);
if (ret)
return ret;
if (do_grps)
return intc_get_mask_handle(desc, d, intc_grp_id(desc, enum_id), 0);
return 0;
}
static unsigned int __init _intc_prio_data(struct intc_desc *desc,
struct intc_desc_int *d,
intc_enum enum_id,
unsigned int *reg_idx,
unsigned int *fld_idx)
{
struct intc_prio_reg *pr = desc->hw.prio_regs;
unsigned int fn, n, mode, bit;
unsigned long reg_e, reg_d;
while (pr && enum_id && *reg_idx < desc->hw.nr_prio_regs) {
pr = desc->hw.prio_regs + *reg_idx;
for (; *fld_idx < ARRAY_SIZE(pr->enum_ids); (*fld_idx)++) {
if (pr->enum_ids[*fld_idx] != enum_id)
continue;
if (pr->set_reg && pr->clr_reg) {
fn = REG_FN_WRITE_BASE;
mode = MODE_PCLR_REG;
reg_e = pr->set_reg;
reg_d = pr->clr_reg;
} else {
fn = REG_FN_MODIFY_BASE;
mode = MODE_PRIO_REG;
if (!pr->set_reg)
BUG();
reg_e = pr->set_reg;
reg_d = pr->set_reg;
}
fn += (pr->reg_width >> 3) - 1;
n = *fld_idx + 1;
BUG_ON(n * pr->field_width > pr->reg_width);
bit = pr->reg_width - (n * pr->field_width);
return _INTC_MK(fn, mode,
intc_get_reg(d, reg_e),
intc_get_reg(d, reg_d),
pr->field_width, bit);
}
*fld_idx = 0;
(*reg_idx)++;
}
return 0;
}
unsigned int __init
intc_get_prio_handle(struct intc_desc *desc, struct intc_desc_int *d,
intc_enum enum_id, int do_grps)
{
unsigned int i = 0;
unsigned int j = 0;
unsigned int ret;
ret = _intc_prio_data(desc, d, enum_id, &i, &j);
if (ret)
return ret;
if (do_grps)
return intc_get_prio_handle(desc, d, intc_grp_id(desc, enum_id), 0);
return 0;
}
static unsigned int intc_ack_data(struct intc_desc *desc,
struct intc_desc_int *d, intc_enum enum_id)
{
struct intc_mask_reg *mr = desc->hw.ack_regs;
unsigned int i, j, fn, mode;
unsigned long reg_e, reg_d;
for (i = 0; mr && enum_id && i < desc->hw.nr_ack_regs; i++) {
mr = desc->hw.ack_regs + i;
for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
if (mr->enum_ids[j] != enum_id)
continue;
fn = REG_FN_MODIFY_BASE;
mode = MODE_ENABLE_REG;
reg_e = mr->set_reg;
reg_d = mr->set_reg;
fn += (mr->reg_width >> 3) - 1;
return _INTC_MK(fn, mode,
intc_get_reg(d, reg_e),
intc_get_reg(d, reg_d),
1,
(mr->reg_width - 1) - j);
}
}
return 0;
}
static void intc_enable_disable(struct intc_desc_int *d,
unsigned long handle, int do_enable)
{
unsigned long addr;
unsigned int cpu;
unsigned long (*fn)(unsigned long, unsigned long,
unsigned long (*)(unsigned long, unsigned long,
unsigned long),
unsigned int);
if (do_enable) {
for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
fn = intc_enable_noprio_fns[_INTC_MODE(handle)];
fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
}
} else {
for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
fn = intc_disable_fns[_INTC_MODE(handle)];
fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
}
}
}
void __init intc_enable_disable_enum(struct intc_desc *desc,
struct intc_desc_int *d,
intc_enum enum_id, int enable)
{
unsigned int i, j, data;
/* go through and enable/disable all mask bits */
i = j = 0;
do {
data = _intc_mask_data(desc, d, enum_id, &i, &j);
if (data)
intc_enable_disable(d, data, enable);
j++;
} while (data);
/* go through and enable/disable all priority fields */
i = j = 0;
do {
data = _intc_prio_data(desc, d, enum_id, &i, &j);
if (data)
intc_enable_disable(d, data, enable);
j++;
} while (data);
}
unsigned int __init
intc_get_sense_handle(struct intc_desc *desc, struct intc_desc_int *d,
intc_enum enum_id)
{
struct intc_sense_reg *sr = desc->hw.sense_regs;
unsigned int i, j, fn, bit;
for (i = 0; sr && enum_id && i < desc->hw.nr_sense_regs; i++) {
sr = desc->hw.sense_regs + i;
for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) {
if (sr->enum_ids[j] != enum_id)
continue;
fn = REG_FN_MODIFY_BASE;
fn += (sr->reg_width >> 3) - 1;
BUG_ON((j + 1) * sr->field_width > sr->reg_width);
bit = sr->reg_width - ((j + 1) * sr->field_width);
return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg),
0, sr->field_width, bit);
}
}
return 0;
}
void intc_set_ack_handle(unsigned int irq, struct intc_desc *desc,
struct intc_desc_int *d, intc_enum id)
{
unsigned long flags;
/*
* Nothing to do for this IRQ.
*/
if (!desc->hw.ack_regs)
return;
raw_spin_lock_irqsave(&intc_big_lock, flags);
ack_handle[irq] = intc_ack_data(desc, d, id);
raw_spin_unlock_irqrestore(&intc_big_lock, flags);
}
unsigned long intc_get_ack_handle(unsigned int irq)
{
return ack_handle[irq];
}
| linux-master | drivers/sh/intc/handle.c |
/*
* Common INTC2 register accessors
*
* Copyright (C) 2007, 2008 Magnus Damm
* Copyright (C) 2009, 2010 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/io.h>
#include "internals.h"
unsigned long intc_phys_to_virt(struct intc_desc_int *d, unsigned long address)
{
struct intc_window *window;
int k;
/* scan through physical windows and convert address */
for (k = 0; k < d->nr_windows; k++) {
window = d->window + k;
if (address < window->phys)
continue;
if (address >= (window->phys + window->size))
continue;
address -= window->phys;
address += (unsigned long)window->virt;
return address;
}
/* no windows defined, register must be 1:1 mapped virt:phys */
return address;
}
unsigned int intc_get_reg(struct intc_desc_int *d, unsigned long address)
{
unsigned int k;
address = intc_phys_to_virt(d, address);
for (k = 0; k < d->nr_reg; k++) {
if (d->reg[k] == address)
return k;
}
BUG();
return 0;
}
unsigned int intc_set_field_from_handle(unsigned int value,
unsigned int field_value,
unsigned int handle)
{
unsigned int width = _INTC_WIDTH(handle);
unsigned int shift = _INTC_SHIFT(handle);
value &= ~(((1 << width) - 1) << shift);
value |= field_value << shift;
return value;
}
unsigned long intc_get_field_from_handle(unsigned int value, unsigned int handle)
{
unsigned int width = _INTC_WIDTH(handle);
unsigned int shift = _INTC_SHIFT(handle);
unsigned int mask = ((1 << width) - 1) << shift;
return (value & mask) >> shift;
}
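/*
* Worked example (illustrative): for a handle encoding width 4 and
* shift 8, intc_set_field_from_handle(0, 0x5, h) yields 0x500, and
* intc_get_field_from_handle(0x1520, h) recovers 0x5.
*/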
static unsigned long test_8(unsigned long addr, unsigned long h,
unsigned long ignore)
{
void __iomem *ptr = (void __iomem *)addr;
return intc_get_field_from_handle(__raw_readb(ptr), h);
}
static unsigned long test_16(unsigned long addr, unsigned long h,
unsigned long ignore)
{
void __iomem *ptr = (void __iomem *)addr;
return intc_get_field_from_handle(__raw_readw(ptr), h);
}
static unsigned long test_32(unsigned long addr, unsigned long h,
unsigned long ignore)
{
void __iomem *ptr = (void __iomem *)addr;
return intc_get_field_from_handle(__raw_readl(ptr), h);
}
static unsigned long write_8(unsigned long addr, unsigned long h,
unsigned long data)
{
void __iomem *ptr = (void __iomem *)addr;
__raw_writeb(intc_set_field_from_handle(0, data, h), ptr);
(void)__raw_readb(ptr); /* Defeat write posting */
return 0;
}
static unsigned long write_16(unsigned long addr, unsigned long h,
unsigned long data)
{
void __iomem *ptr = (void __iomem *)addr;
__raw_writew(intc_set_field_from_handle(0, data, h), ptr);
(void)__raw_readw(ptr); /* Defeat write posting */
return 0;
}
static unsigned long write_32(unsigned long addr, unsigned long h,
unsigned long data)
{
void __iomem *ptr = (void __iomem *)addr;
__raw_writel(intc_set_field_from_handle(0, data, h), ptr);
(void)__raw_readl(ptr); /* Defeat write posting */
return 0;
}
static unsigned long modify_8(unsigned long addr, unsigned long h,
unsigned long data)
{
void __iomem *ptr = (void __iomem *)addr;
unsigned long flags;
unsigned int value;
local_irq_save(flags);
value = intc_set_field_from_handle(__raw_readb(ptr), data, h);
__raw_writeb(value, ptr);
(void)__raw_readb(ptr); /* Defeat write posting */
local_irq_restore(flags);
return 0;
}
static unsigned long modify_16(unsigned long addr, unsigned long h,
unsigned long data)
{
void __iomem *ptr = (void __iomem *)addr;
unsigned long flags;
unsigned int value;
local_irq_save(flags);
value = intc_set_field_from_handle(__raw_readw(ptr), data, h);
__raw_writew(value, ptr);
(void)__raw_readw(ptr); /* Defeat write posting */
local_irq_restore(flags);
return 0;
}
static unsigned long modify_32(unsigned long addr, unsigned long h,
unsigned long data)
{
void __iomem *ptr = (void __iomem *)addr;
unsigned long flags;
unsigned int value;
local_irq_save(flags);
value = intc_set_field_from_handle(__raw_readl(ptr), data, h);
__raw_writel(value, ptr);
(void)__raw_readl(ptr); /* Defeat write posting */
local_irq_restore(flags);
return 0;
}
static unsigned long intc_mode_field(unsigned long addr,
unsigned long handle,
unsigned long (*fn)(unsigned long,
unsigned long,
unsigned long),
unsigned int irq)
{
return fn(addr, handle, ((1 << _INTC_WIDTH(handle)) - 1));
}
static unsigned long intc_mode_zero(unsigned long addr,
unsigned long handle,
unsigned long (*fn)(unsigned long,
unsigned long,
unsigned long),
unsigned int irq)
{
return fn(addr, handle, 0);
}
static unsigned long intc_mode_prio(unsigned long addr,
unsigned long handle,
unsigned long (*fn)(unsigned long,
unsigned long,
unsigned long),
unsigned int irq)
{
return fn(addr, handle, intc_get_prio_level(irq));
}
unsigned long (*intc_reg_fns[])(unsigned long addr,
unsigned long h,
unsigned long data) = {
[REG_FN_TEST_BASE + 0] = test_8,
[REG_FN_TEST_BASE + 1] = test_16,
[REG_FN_TEST_BASE + 3] = test_32,
[REG_FN_WRITE_BASE + 0] = write_8,
[REG_FN_WRITE_BASE + 1] = write_16,
[REG_FN_WRITE_BASE + 3] = write_32,
[REG_FN_MODIFY_BASE + 0] = modify_8,
[REG_FN_MODIFY_BASE + 1] = modify_16,
[REG_FN_MODIFY_BASE + 3] = modify_32,
};
unsigned long (*intc_enable_fns[])(unsigned long addr,
unsigned long handle,
unsigned long (*fn)(unsigned long,
unsigned long,
unsigned long),
unsigned int irq) = {
[MODE_ENABLE_REG] = intc_mode_field,
[MODE_MASK_REG] = intc_mode_zero,
[MODE_DUAL_REG] = intc_mode_field,
[MODE_PRIO_REG] = intc_mode_prio,
[MODE_PCLR_REG] = intc_mode_prio,
};
unsigned long (*intc_disable_fns[])(unsigned long addr,
unsigned long handle,
unsigned long (*fn)(unsigned long,
unsigned long,
unsigned long),
unsigned int irq) = {
[MODE_ENABLE_REG] = intc_mode_zero,
[MODE_MASK_REG] = intc_mode_field,
[MODE_DUAL_REG] = intc_mode_field,
[MODE_PRIO_REG] = intc_mode_zero,
[MODE_PCLR_REG] = intc_mode_field,
};
unsigned long (*intc_enable_noprio_fns[])(unsigned long addr,
unsigned long handle,
unsigned long (*fn)(unsigned long,
unsigned long,
unsigned long),
unsigned int irq) = {
[MODE_ENABLE_REG] = intc_mode_field,
[MODE_MASK_REG] = intc_mode_zero,
[MODE_DUAL_REG] = intc_mode_field,
[MODE_PRIO_REG] = intc_mode_field,
[MODE_PCLR_REG] = intc_mode_field,
};
| linux-master | drivers/sh/intc/access.c |
/*
* Support for virtual IRQ subgroups debugfs mapping.
*
* Copyright (C) 2010 Paul Mundt
*
* Modelled after arch/powerpc/kernel/irq.c.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include "internals.h"
static int intc_irq_xlate_show(struct seq_file *m, void *priv)
{
int i;
seq_printf(m, "%-5s %-7s %-15s\n", "irq", "enum", "chip name");
for (i = 1; i < nr_irqs; i++) {
struct intc_map_entry *entry = intc_irq_xlate_get(i);
struct intc_desc_int *desc = entry->desc;
if (!desc)
continue;
seq_printf(m, "%5d ", i);
seq_printf(m, "0x%05x ", entry->enum_id);
seq_printf(m, "%-15s\n", desc->chip.name);
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(intc_irq_xlate);
static int __init intc_irq_xlate_init(void)
{
/*
* XXX.. use arch_debugfs_dir here when all of the intc users are
* converted.
*/
if (debugfs_create_file("intc_irq_xlate", S_IRUGO, NULL, NULL,
&intc_irq_xlate_fops) == NULL)
return -ENOMEM;
return 0;
}
fs_initcall(intc_irq_xlate_init);
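/*
* Sample output (illustrative values):
*
*	# cat /sys/kernel/debug/intc_irq_xlate
*	irq   enum    chip name
*	   16 0x00200 sh7750-intc
*	   17 0x00220 sh7750-intc
*/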
| linux-master | drivers/sh/intc/virq-debugfs.c |
/*
* Support for hardware-managed IRQ auto-distribution.
*
* Copyright (C) 2010 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include "internals.h"
static unsigned long dist_handle[INTC_NR_IRQS];
void intc_balancing_enable(unsigned int irq)
{
struct intc_desc_int *d = get_intc_desc(irq);
unsigned long handle = dist_handle[irq];
unsigned long addr;
if (irq_balancing_disabled(irq) || !handle)
return;
addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
intc_reg_fns[_INTC_FN(handle)](addr, handle, 1);
}
void intc_balancing_disable(unsigned int irq)
{
struct intc_desc_int *d = get_intc_desc(irq);
unsigned long handle = dist_handle[irq];
unsigned long addr;
if (irq_balancing_disabled(irq) || !handle)
return;
addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
intc_reg_fns[_INTC_FN(handle)](addr, handle, 0);
}
static unsigned int intc_dist_data(struct intc_desc *desc,
struct intc_desc_int *d,
intc_enum enum_id)
{
struct intc_mask_reg *mr = desc->hw.mask_regs;
unsigned int i, j, fn, mode;
unsigned long reg_e, reg_d;
for (i = 0; mr && enum_id && i < desc->hw.nr_mask_regs; i++) {
mr = desc->hw.mask_regs + i;
/*
* Skip this entry if there's no auto-distribution
* register associated with it.
*/
if (!mr->dist_reg)
continue;
for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
if (mr->enum_ids[j] != enum_id)
continue;
fn = REG_FN_MODIFY_BASE;
mode = MODE_ENABLE_REG;
reg_e = mr->dist_reg;
reg_d = mr->dist_reg;
fn += (mr->reg_width >> 3) - 1;
return _INTC_MK(fn, mode,
intc_get_reg(d, reg_e),
intc_get_reg(d, reg_d),
1,
(mr->reg_width - 1) - j);
}
}
/*
* It's possible we've gotten here with no distribution options
* available for the IRQ in question, so we just skip over those.
*/
return 0;
}
void intc_set_dist_handle(unsigned int irq, struct intc_desc *desc,
struct intc_desc_int *d, intc_enum id)
{
unsigned long flags;
/*
* Nothing to do for this IRQ.
*/
if (!desc->hw.mask_regs)
return;
raw_spin_lock_irqsave(&intc_big_lock, flags);
dist_handle[irq] = intc_dist_data(desc, d, id);
raw_spin_unlock_irqrestore(&intc_big_lock, flags);
}
| linux-master | drivers/sh/intc/balancing.c |
/*
* Shared interrupt handling code for IPR and INTC2 types of IRQs.
*
* Copyright (C) 2007, 2008 Magnus Damm
* Copyright (C) 2009 - 2012 Paul Mundt
*
* Based on intc2.c and ipr.c
*
* Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi
* Copyright (C) 2000 Kazumoto Kojima
* Copyright (C) 2001 David J. Mckay ([email protected])
* Copyright (C) 2003 Takashi Kusuda <[email protected]>
* Copyright (C) 2005, 2006 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#define pr_fmt(fmt) "intc: " fmt
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/interrupt.h>
#include <linux/sh_intc.h>
#include <linux/irqdomain.h>
#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/radix-tree.h>
#include <linux/export.h>
#include <linux/sort.h>
#include "internals.h"
LIST_HEAD(intc_list);
DEFINE_RAW_SPINLOCK(intc_big_lock);
static unsigned int nr_intc_controllers;
/*
* Default priority level
* - this needs to be at least 2 for 5-bit priorities on 7780
*/
static unsigned int default_prio_level = 2; /* 2 - 16 */
static unsigned int intc_prio_level[INTC_NR_IRQS]; /* for now */
unsigned int intc_get_dfl_prio_level(void)
{
return default_prio_level;
}
unsigned int intc_get_prio_level(unsigned int irq)
{
return intc_prio_level[irq];
}
void intc_set_prio_level(unsigned int irq, unsigned int level)
{
unsigned long flags;
raw_spin_lock_irqsave(&intc_big_lock, flags);
intc_prio_level[irq] = level;
raw_spin_unlock_irqrestore(&intc_big_lock, flags);
}
static void intc_redirect_irq(struct irq_desc *desc)
{
generic_handle_irq((unsigned int)irq_desc_get_handler_data(desc));
}
static void __init intc_register_irq(struct intc_desc *desc,
struct intc_desc_int *d,
intc_enum enum_id,
unsigned int irq)
{
struct intc_handle_int *hp;
struct irq_data *irq_data;
unsigned int data[2], primary;
unsigned long flags;
raw_spin_lock_irqsave(&intc_big_lock, flags);
radix_tree_insert(&d->tree, enum_id, intc_irq_xlate_get(irq));
raw_spin_unlock_irqrestore(&intc_big_lock, flags);
/*
* Prefer single interrupt source bitmap over other combinations:
*
* 1. bitmap, single interrupt source
* 2. priority, single interrupt source
* 3. bitmap, multiple interrupt sources (groups)
* 4. priority, multiple interrupt sources (groups)
*/
data[0] = intc_get_mask_handle(desc, d, enum_id, 0);
data[1] = intc_get_prio_handle(desc, d, enum_id, 0);
primary = 0;
if (!data[0] && data[1])
primary = 1;
if (!data[0] && !data[1])
pr_warn("missing unique irq mask for irq %d (vect 0x%04x)\n",
irq, irq2evt(irq));
data[0] = data[0] ? data[0] : intc_get_mask_handle(desc, d, enum_id, 1);
data[1] = data[1] ? data[1] : intc_get_prio_handle(desc, d, enum_id, 1);
if (!data[primary])
primary ^= 1;
BUG_ON(!data[primary]); /* must have primary masking method */
irq_data = irq_get_irq_data(irq);
disable_irq_nosync(irq);
irq_set_chip_and_handler_name(irq, &d->chip, handle_level_irq,
"level");
irq_set_chip_data(irq, (void *)data[primary]);
/*
* set priority level
*/
intc_set_prio_level(irq, intc_get_dfl_prio_level());
/* enable secondary masking method if present */
if (data[!primary])
_intc_enable(irq_data, data[!primary]);
/* add irq to d->prio list if priority is available */
if (data[1]) {
hp = d->prio + d->nr_prio;
hp->irq = irq;
hp->handle = data[1];
if (primary) {
/*
* only secondary priority should access registers, so
* set _INTC_FN(h) = REG_FN_ERR for intc_set_priority()
*/
hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0);
hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0);
}
d->nr_prio++;
}
/* add irq to d->sense list if sense is available */
data[0] = intc_get_sense_handle(desc, d, enum_id);
if (data[0]) {
(d->sense + d->nr_sense)->irq = irq;
(d->sense + d->nr_sense)->handle = data[0];
d->nr_sense++;
}
/* irq should be disabled by default */
d->chip.irq_mask(irq_data);
intc_set_ack_handle(irq, desc, d, enum_id);
intc_set_dist_handle(irq, desc, d, enum_id);
activate_irq(irq);
}
static unsigned int __init save_reg(struct intc_desc_int *d,
unsigned int cnt,
unsigned long value,
unsigned int smp)
{
if (value) {
value = intc_phys_to_virt(d, value);
d->reg[cnt] = value;
#ifdef CONFIG_SMP
d->smp[cnt] = smp;
#endif
return 1;
}
return 0;
}
static bool __init intc_map(struct irq_domain *domain, int irq)
{
if (!irq_to_desc(irq) && irq_alloc_desc_at(irq, NUMA_NO_NODE) != irq) {
pr_err("uname to allocate IRQ %d\n", irq);
return false;
}
if (irq_domain_associate(domain, irq, irq)) {
pr_err("domain association failure\n");
return false;
}
return true;
}
int __init register_intc_controller(struct intc_desc *desc)
{
unsigned int i, k, smp;
struct intc_hw_desc *hw = &desc->hw;
struct intc_desc_int *d;
struct resource *res;
pr_info("Registered controller '%s' with %u IRQs\n",
desc->name, hw->nr_vectors);
d = kzalloc(sizeof(*d), GFP_NOWAIT);
if (!d)
goto err0;
INIT_LIST_HEAD(&d->list);
list_add_tail(&d->list, &intc_list);
raw_spin_lock_init(&d->lock);
INIT_RADIX_TREE(&d->tree, GFP_ATOMIC);
d->index = nr_intc_controllers;
if (desc->num_resources) {
d->nr_windows = desc->num_resources;
d->window = kcalloc(d->nr_windows, sizeof(*d->window),
GFP_NOWAIT);
if (!d->window)
goto err1;
for (k = 0; k < d->nr_windows; k++) {
res = desc->resource + k;
WARN_ON(resource_type(res) != IORESOURCE_MEM);
d->window[k].phys = res->start;
d->window[k].size = resource_size(res);
d->window[k].virt = ioremap(res->start,
resource_size(res));
if (!d->window[k].virt)
goto err2;
}
}
d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0;
#ifdef CONFIG_INTC_BALANCING
if (d->nr_reg)
d->nr_reg += hw->nr_mask_regs;
#endif
d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0;
d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0;
d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0;
d->nr_reg += hw->subgroups ? hw->nr_subgroups : 0;
d->reg = kcalloc(d->nr_reg, sizeof(*d->reg), GFP_NOWAIT);
if (!d->reg)
goto err2;
#ifdef CONFIG_SMP
d->smp = kcalloc(d->nr_reg, sizeof(*d->smp), GFP_NOWAIT);
if (!d->smp)
goto err3;
#endif
k = 0;
if (hw->mask_regs) {
for (i = 0; i < hw->nr_mask_regs; i++) {
smp = IS_SMP(hw->mask_regs[i]);
k += save_reg(d, k, hw->mask_regs[i].set_reg, smp);
k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp);
#ifdef CONFIG_INTC_BALANCING
k += save_reg(d, k, hw->mask_regs[i].dist_reg, 0);
#endif
}
}
if (hw->prio_regs) {
d->prio = kcalloc(hw->nr_vectors, sizeof(*d->prio),
GFP_NOWAIT);
if (!d->prio)
goto err4;
for (i = 0; i < hw->nr_prio_regs; i++) {
smp = IS_SMP(hw->prio_regs[i]);
k += save_reg(d, k, hw->prio_regs[i].set_reg, smp);
k += save_reg(d, k, hw->prio_regs[i].clr_reg, smp);
}
sort(d->prio, hw->nr_prio_regs, sizeof(*d->prio),
intc_handle_int_cmp, NULL);
}
if (hw->sense_regs) {
d->sense = kcalloc(hw->nr_vectors, sizeof(*d->sense),
GFP_NOWAIT);
if (!d->sense)
goto err5;
for (i = 0; i < hw->nr_sense_regs; i++)
k += save_reg(d, k, hw->sense_regs[i].reg, 0);
sort(d->sense, hw->nr_sense_regs, sizeof(*d->sense),
intc_handle_int_cmp, NULL);
}
if (hw->subgroups)
for (i = 0; i < hw->nr_subgroups; i++)
if (hw->subgroups[i].reg)
k += save_reg(d, k, hw->subgroups[i].reg, 0);
memcpy(&d->chip, &intc_irq_chip, sizeof(struct irq_chip));
d->chip.name = desc->name;
if (hw->ack_regs)
for (i = 0; i < hw->nr_ack_regs; i++)
k += save_reg(d, k, hw->ack_regs[i].set_reg, 0);
else
d->chip.irq_mask_ack = d->chip.irq_disable;
/* disable bits matching force_disable before registering irqs */
if (desc->force_disable)
intc_enable_disable_enum(desc, d, desc->force_disable, 0);
/* disable bits matching force_enable before registering irqs */
if (desc->force_enable)
intc_enable_disable_enum(desc, d, desc->force_enable, 0);
BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */
intc_irq_domain_init(d, hw);
/* register the vectors one by one */
for (i = 0; i < hw->nr_vectors; i++) {
struct intc_vect *vect = hw->vectors + i;
unsigned int irq = evt2irq(vect->vect);
if (!vect->enum_id)
continue;
if (!intc_map(d->domain, irq))
continue;
intc_irq_xlate_set(irq, vect->enum_id, d);
intc_register_irq(desc, d, vect->enum_id, irq);
for (k = i + 1; k < hw->nr_vectors; k++) {
struct intc_vect *vect2 = hw->vectors + k;
unsigned int irq2 = evt2irq(vect2->vect);
if (vect->enum_id != vect2->enum_id)
continue;
/*
* In the case of multi-evt handling and sparse
* IRQ support, each vector still needs to have
* its own backing irq_desc.
*/
if (!intc_map(d->domain, irq2))
continue;
vect2->enum_id = 0;
/* redirect this interrupt to the first one */
irq_set_chip(irq2, &dummy_irq_chip);
irq_set_chained_handler_and_data(irq2,
intc_redirect_irq,
(void *)irq);
}
}
intc_subgroup_init(desc, d);
/* enable bits matching force_enable after registering irqs */
if (desc->force_enable)
intc_enable_disable_enum(desc, d, desc->force_enable, 1);
d->skip_suspend = desc->skip_syscore_suspend;
nr_intc_controllers++;
return 0;
err5:
kfree(d->prio);
err4:
#ifdef CONFIG_SMP
kfree(d->smp);
err3:
#endif
kfree(d->reg);
err2:
for (k = 0; k < d->nr_windows; k++)
if (d->window[k].virt)
iounmap(d->window[k].virt);
kfree(d->window);
err1:
kfree(d);
err0:
pr_err("unable to allocate INTC memory\n");
return -ENOMEM;
}
static int intc_suspend(void)
{
struct intc_desc_int *d;
list_for_each_entry(d, &intc_list, list) {
int irq;
if (d->skip_suspend)
continue;
/* enable wakeup irqs belonging to this intc controller */
for_each_active_irq(irq) {
struct irq_data *data;
struct irq_chip *chip;
data = irq_get_irq_data(irq);
chip = irq_data_get_irq_chip(data);
if (chip != &d->chip)
continue;
if (irqd_is_wakeup_set(data))
chip->irq_enable(data);
}
}
return 0;
}
static void intc_resume(void)
{
struct intc_desc_int *d;
list_for_each_entry(d, &intc_list, list) {
int irq;
if (d->skip_suspend)
continue;
for_each_active_irq(irq) {
struct irq_data *data;
struct irq_chip *chip;
data = irq_get_irq_data(irq);
chip = irq_data_get_irq_chip(data);
/*
* This will catch the redirect and VIRQ cases
* due to the dummy_irq_chip being inserted.
*/
if (chip != &d->chip)
continue;
if (irqd_irq_disabled(data))
chip->irq_disable(data);
else
chip->irq_enable(data);
}
}
}
struct syscore_ops intc_syscore_ops = {
.suspend = intc_suspend,
.resume = intc_resume,
};
struct bus_type intc_subsys = {
.name = "intc",
.dev_name = "intc",
};
static ssize_t
show_intc_name(struct device *dev, struct device_attribute *attr, char *buf)
{
struct intc_desc_int *d;
d = container_of(dev, struct intc_desc_int, dev);
return sprintf(buf, "%s\n", d->chip.name);
}
static DEVICE_ATTR(name, S_IRUGO, show_intc_name, NULL);
static int __init register_intc_devs(void)
{
struct intc_desc_int *d;
int error;
register_syscore_ops(&intc_syscore_ops);
error = subsys_system_register(&intc_subsys, NULL);
if (!error) {
list_for_each_entry(d, &intc_list, list) {
d->dev.id = d->index;
d->dev.bus = &intc_subsys;
error = device_register(&d->dev);
if (error == 0)
error = device_create_file(&d->dev,
&dev_attr_name);
if (error)
break;
}
}
if (error)
pr_err("device registration error\n");
return error;
}
device_initcall(register_intc_devs);
| linux-master | drivers/sh/intc/core.c |
/*
* Support for hardware-assisted userspace interrupt masking.
*
* Copyright (C) 2010 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#define pr_fmt(fmt) "intc: " fmt
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/stat.h>
#include <linux/sizes.h>
#include "internals.h"
static void __iomem *uimask;
static ssize_t
show_intc_userimask(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", (__raw_readl(uimask) >> 4) & 0xf);
}
static ssize_t
store_intc_userimask(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned long level;
level = simple_strtoul(buf, NULL, 10);
/*
* Minimal acceptable IRQ levels are in the 2 - 16 range, but
* these are chomped so as to not interfere with normal IRQs.
*
* Level 1 is a special case on some CPUs in that it's not
* directly settable, but given that USERIMASK cuts off below a
* certain level, we don't care about this limitation here.
* Level 0 on the other hand equates to user masking disabled.
*
* We use the default priority level as a cut off so that only
* special case opt-in IRQs can be mangled.
*/
if (level >= intc_get_dfl_prio_level())
return -EINVAL;
__raw_writel(0xa5 << 24 | level << 4, uimask);
return count;
}
static DEVICE_ATTR(userimask, S_IRUSR | S_IWUSR,
show_intc_userimask, store_intc_userimask);
static int __init userimask_sysdev_init(void)
{
struct device *dev_root;
int ret = 0;
if (unlikely(!uimask))
return -ENXIO;
dev_root = bus_get_dev_root(&intc_subsys);
if (dev_root) {
ret = device_create_file(dev_root, &dev_attr_userimask);
put_device(dev_root);
}
return ret;
}
late_initcall(userimask_sysdev_init);
int register_intc_userimask(unsigned long addr)
{
if (unlikely(uimask))
return -EBUSY;
uimask = ioremap(addr, SZ_4K);
if (unlikely(!uimask))
return -ENOMEM;
pr_info("userimask support registered for levels 0 -> %d\n",
intc_get_dfl_prio_level() - 1);
return 0;
}
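/*
* Userspace usage sketch (illustrative; the path assumes the "intc"
* subsys device registered in core.c):
*
*	# echo 2 > /sys/devices/system/intc/userimask
*
* sets the userspace masking cut-off to priority level 2; writing 0
* disables user masking again.
*/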
| linux-master | drivers/sh/intc/userimask.c |
/*
* drivers/sh/superhyway/superhyway-sysfs.c
*
* SuperHyway Bus sysfs interface
*
* Copyright (C) 2004, 2005 Paul Mundt <[email protected]>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/superhyway.h>
#define superhyway_ro_attr(name, fmt, field) \
static ssize_t name##_show(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct superhyway_device *s = to_superhyway_device(dev); \
return sprintf(buf, fmt, s->field); \
} \
static DEVICE_ATTR_RO(name);
/* VCR flags */
superhyway_ro_attr(perr_flags, "0x%02x\n", vcr.perr_flags);
superhyway_ro_attr(merr_flags, "0x%02x\n", vcr.merr_flags);
superhyway_ro_attr(mod_vers, "0x%04x\n", vcr.mod_vers);
superhyway_ro_attr(mod_id, "0x%04x\n", vcr.mod_id);
superhyway_ro_attr(bot_mb, "0x%02x\n", vcr.bot_mb);
superhyway_ro_attr(top_mb, "0x%02x\n", vcr.top_mb);
/* Misc */
superhyway_ro_attr(resource, "0x%08lx\n", resource[0].start);
static struct attribute *superhyway_dev_attrs[] = {
&dev_attr_perr_flags.attr,
&dev_attr_merr_flags.attr,
&dev_attr_mod_vers.attr,
&dev_attr_mod_id.attr,
&dev_attr_bot_mb.attr,
&dev_attr_top_mb.attr,
&dev_attr_resource.attr,
NULL,
};
static const struct attribute_group superhyway_dev_group = {
.attrs = superhyway_dev_attrs,
};
const struct attribute_group *superhyway_dev_groups[] = {
&superhyway_dev_group,
NULL,
};
| linux-master | drivers/sh/superhyway/superhyway-sysfs.c |
/*
* drivers/sh/superhyway/superhyway.c
*
* SuperHyway Bus Driver
*
* Copyright (C) 2004, 2005 Paul Mundt <[email protected]>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/superhyway.h>
#include <linux/string.h>
#include <linux/slab.h>
static int superhyway_devices;
static struct device superhyway_bus_device = {
.init_name = "superhyway",
};
static void superhyway_device_release(struct device *dev)
{
struct superhyway_device *sdev = to_superhyway_device(dev);
kfree(sdev->resource);
kfree(sdev);
}
/**
* superhyway_add_device - Add a SuperHyway module
* @base: Physical address where module is mapped.
* @sdev: SuperHyway device to add, or NULL to allocate a new one.
* @bus: Bus where SuperHyway module resides.
*
* This is responsible for adding a new SuperHyway module. This sets up a new
* struct superhyway_device for the module being added if @sdev == NULL.
*
* Devices are initially added in the order that they are scanned (from the
* top-down of the memory map), and are assigned an ID based on the order that
* they are added. Any manual addition of a module will thus get the ID after
* the devices already discovered regardless of where it resides in memory.
*
* Further work can and should be done in superhyway_scan_bus(), to be sure
* that any new modules are properly discovered and subsequently registered.
*/
int superhyway_add_device(unsigned long base, struct superhyway_device *sdev,
struct superhyway_bus *bus)
{
struct superhyway_device *dev = sdev;
if (!dev) {
dev = kzalloc(sizeof(struct superhyway_device), GFP_KERNEL);
if (!dev)
return -ENOMEM;
}
dev->bus = bus;
superhyway_read_vcr(dev, base, &dev->vcr);
if (!dev->resource) {
dev->resource = kzalloc(sizeof(struct resource), GFP_KERNEL);
if (!dev->resource) {
kfree(dev);
return -ENOMEM;
}
dev->resource->name = dev->name;
dev->resource->start = base;
dev->resource->end = dev->resource->start + 0x01000000;
}
dev->dev.parent = &superhyway_bus_device;
dev->dev.bus = &superhyway_bus_type;
dev->dev.release = superhyway_device_release;
dev->id.id = dev->vcr.mod_id;
sprintf(dev->name, "SuperHyway device %04x", dev->id.id);
dev_set_name(&dev->dev, "%02x", superhyway_devices);
superhyway_devices++;
return device_register(&dev->dev);
}
int superhyway_add_devices(struct superhyway_bus *bus,
struct superhyway_device **devices,
int nr_devices)
{
int i, ret = 0;
for (i = 0; i < nr_devices; i++) {
struct superhyway_device *dev = devices[i];
ret |= superhyway_add_device(dev->resource[0].start, dev, bus);
}
return ret;
}
static int __init superhyway_init(void)
{
struct superhyway_bus *bus;
int ret;
ret = device_register(&superhyway_bus_device);
if (unlikely(ret))
return ret;
for (bus = superhyway_channels; bus->ops; bus++)
ret |= superhyway_scan_bus(bus);
return ret;
}
postcore_initcall(superhyway_init);
static const struct superhyway_device_id *
superhyway_match_id(const struct superhyway_device_id *ids,
struct superhyway_device *dev)
{
while (ids->id) {
if (ids->id == dev->id.id)
return ids;
ids++;
}
return NULL;
}
static int superhyway_device_probe(struct device *dev)
{
struct superhyway_device *shyway_dev = to_superhyway_device(dev);
struct superhyway_driver *shyway_drv = to_superhyway_driver(dev->driver);
if (shyway_drv && shyway_drv->probe) {
const struct superhyway_device_id *id;
id = superhyway_match_id(shyway_drv->id_table, shyway_dev);
if (id)
return shyway_drv->probe(shyway_dev, id);
}
return -ENODEV;
}
static void superhyway_device_remove(struct device *dev)
{
struct superhyway_device *shyway_dev = to_superhyway_device(dev);
struct superhyway_driver *shyway_drv = to_superhyway_driver(dev->driver);
if (shyway_drv->remove)
shyway_drv->remove(shyway_dev);
}
/**
* superhyway_register_driver - Register a new SuperHyway driver
* @drv: SuperHyway driver to register.
*
* This registers the passed in @drv. Any devices matching the id table will
* automatically be populated and handed off to the driver's specified probe
* routine.
*/
int superhyway_register_driver(struct superhyway_driver *drv)
{
drv->drv.name = drv->name;
drv->drv.bus = &superhyway_bus_type;
return driver_register(&drv->drv);
}
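/*
* Example driver registration (illustrative; the ID value and the
* callbacks are hypothetical):
*
*	static const struct superhyway_device_id mydev_ids[] = {
*		{ .id = 0x004a },
*		{ 0 },
*	};
*
*	static struct superhyway_driver mydev_driver = {
*		.name		= "mydev",
*		.id_table	= mydev_ids,
*		.probe		= mydev_probe,
*	};
*
*	superhyway_register_driver(&mydev_driver);
*/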
/**
* superhyway_unregister_driver - Unregister a SuperHyway driver
* @drv: SuperHyway driver to unregister.
*
* This cleans up after superhyway_register_driver(), and should be invoked in
* the exit path of any module drivers.
*/
void superhyway_unregister_driver(struct superhyway_driver *drv)
{
driver_unregister(&drv->drv);
}
static int superhyway_bus_match(struct device *dev, struct device_driver *drv)
{
struct superhyway_device *shyway_dev = to_superhyway_device(dev);
struct superhyway_driver *shyway_drv = to_superhyway_driver(drv);
const struct superhyway_device_id *ids = shyway_drv->id_table;
if (!ids)
return -EINVAL;
if (superhyway_match_id(ids, shyway_dev))
return 1;
return -ENODEV;
}
struct bus_type superhyway_bus_type = {
.name = "superhyway",
.match = superhyway_bus_match,
#ifdef CONFIG_SYSFS
.dev_groups = superhyway_dev_groups,
#endif
.probe = superhyway_device_probe,
.remove = superhyway_device_remove,
};
static int __init superhyway_bus_init(void)
{
return bus_register(&superhyway_bus_type);
}
static void __exit superhyway_bus_exit(void)
{
device_unregister(&superhyway_bus_device);
bus_unregister(&superhyway_bus_type);
}
core_initcall(superhyway_bus_init);
module_exit(superhyway_bus_exit);
EXPORT_SYMBOL(superhyway_bus_type);
EXPORT_SYMBOL(superhyway_add_device);
EXPORT_SYMBOL(superhyway_add_devices);
EXPORT_SYMBOL(superhyway_register_driver);
EXPORT_SYMBOL(superhyway_unregister_driver);
MODULE_LICENSE("GPL");
| linux-master | drivers/sh/superhyway/superhyway.c |
/*
* Core maple bus functionality
*
* Copyright (C) 2007 - 2009 Adrian McMenamin
* Copyright (C) 2001 - 2008 Paul Mundt
* Copyright (C) 2000 - 2001 YAEGASHI Takeshi
* Copyright (C) 2001 M. R. Brown
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/maple.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <mach/dma.h>
#include <mach/sysasic.h>
MODULE_AUTHOR("Adrian McMenamin <[email protected]>");
MODULE_DESCRIPTION("Maple bus driver for Dreamcast");
MODULE_LICENSE("GPL v2");
static void maple_dma_handler(struct work_struct *work);
static void maple_vblank_handler(struct work_struct *work);
static DECLARE_WORK(maple_dma_process, maple_dma_handler);
static DECLARE_WORK(maple_vblank_process, maple_vblank_handler);
static LIST_HEAD(maple_waitq);
static LIST_HEAD(maple_sentq);
/* mutex to protect queue of waiting packets */
static DEFINE_MUTEX(maple_wlist_lock);
static struct maple_driver maple_unsupported_device;
static struct device maple_bus;
static int subdevice_map[MAPLE_PORTS];
static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr;
static unsigned long maple_pnp_time;
static int started, scanning, fullscan;
static struct kmem_cache *maple_queue_cache;
struct maple_device_specify {
int port;
int unit;
};
static bool checked[MAPLE_PORTS];
static bool empty[MAPLE_PORTS];
static struct maple_device *baseunits[MAPLE_PORTS];
/**
* maple_driver_register - register a maple driver
* @drv: maple driver to be registered.
*
* Registers the passed in @drv, while updating the bus type.
* Devices with matching function IDs will be automatically probed.
*/
int maple_driver_register(struct maple_driver *drv)
{
if (!drv)
return -EINVAL;
drv->drv.bus = &maple_bus_type;
return driver_register(&drv->drv);
}
EXPORT_SYMBOL_GPL(maple_driver_register);
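/*
* Example (illustrative; the probe callback is a placeholder):
*
*	static struct maple_driver mydev_driver = {
*		.function	= MAPLE_FUNC_CONTROLLER,
*		.drv = {
*			.name	= "maple_mydev",
*			.probe	= mydev_probe,
*		},
*	};
*
*	maple_driver_register(&mydev_driver);
*/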
/**
* maple_driver_unregister - unregister a maple driver.
* @drv: maple driver to unregister.
*
* Cleans up after maple_driver_register(). To be invoked in the exit
* path of any module drivers.
*/
void maple_driver_unregister(struct maple_driver *drv)
{
driver_unregister(&drv->drv);
}
EXPORT_SYMBOL_GPL(maple_driver_unregister);
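/*
 * Example (illustrative sketch only, not part of this file): a minimal
 * maple driver module pairing the register/unregister calls above. The
 * driver name is hypothetical; MAPLE_FUNC_CONTROLLER stands in for
 * whatever function code the real hardware advertises.
 *
 *	static struct maple_driver example_maple_driver = {
 *		.function = MAPLE_FUNC_CONTROLLER,
 *		.drv = {
 *			.name = "example_maple",
 *		},
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return maple_driver_register(&example_maple_driver);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		maple_driver_unregister(&example_maple_driver);
 *	}
 *
 *	module_init(example_init);
 *	module_exit(example_exit);
 */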
/* set hardware registers to enable next round of dma */
static void maple_dma_reset(void)
{
__raw_writel(MAPLE_MAGIC, MAPLE_RESET);
/* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */
__raw_writel(1, MAPLE_TRIGTYPE);
/*
* Maple system register
* bits 31 - 16 timeout in units of 20nsec
* bit 12 hard trigger - set 0 to keep responding to VBLANK
* bits 9 - 8 set 00 for 2 Mbps, 01 for 1 Mbps
* bits 3 - 0 delay (in 1.3ms) between VBLANK and start of DMA
* max delay is 11
*/
__raw_writel(MAPLE_2MBPS | MAPLE_TIMEOUT(0xFFFF), MAPLE_SPEED);
__raw_writel(virt_to_phys(maple_sendbuf), MAPLE_DMAADDR);
__raw_writel(1, MAPLE_ENABLE);
}
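/*
 * Worked example for the MAPLE_SPEED write above, assuming
 * MAPLE_TIMEOUT() shifts its argument into bits 31 - 16 as the register
 * description implies: a count of 0xFFFF programs the maximum timeout
 * of 65535 * 20 ns, roughly 1.3 ms, while MAPLE_2MBPS leaves bits 9 - 8
 * at 00 for the 2 Mbps rate.
 */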
/**
* maple_getcond_callback - setup handling MAPLE_COMMAND_GETCOND
* @dev: device responding
* @callback: handler callback
* @interval: interval in jiffies between callbacks
* @function: the function code for the device
*/
void maple_getcond_callback(struct maple_device *dev,
void (*callback) (struct mapleq *mq),
unsigned long interval, unsigned long function)
{
dev->callback = callback;
dev->interval = interval;
dev->function = cpu_to_be32(function);
dev->when = jiffies;
}
EXPORT_SYMBOL_GPL(maple_getcond_callback);
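/*
 * Example (hypothetical caller, for illustration): an input driver that
 * wants the controller condition fetched roughly fifty times a second
 * would register its handler like this; the callback name is assumed.
 *
 *	maple_getcond_callback(mdev, example_button_handler, HZ / 50,
 *			       MAPLE_FUNC_CONTROLLER);
 */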
static int maple_dma_done(void)
{
return (__raw_readl(MAPLE_STATE) & 1) == 0;
}
static void maple_release_device(struct device *dev)
{
struct maple_device *mdev;
struct mapleq *mq;
mdev = to_maple_dev(dev);
mq = mdev->mq;
kmem_cache_free(maple_queue_cache, mq->recvbuf);
kfree(mq);
kfree(mdev);
}
/**
* maple_add_packet - add a single instruction to the maple bus queue
* @mdev: maple device
* @function: function on device being queried
* @command: maple command to add
* @length: length of command string (in 32 bit words)
* @data: remainder of command string
*/
int maple_add_packet(struct maple_device *mdev, u32 function, u32 command,
size_t length, void *data)
{
int ret = 0;
void *sendbuf = NULL;
if (length) {
sendbuf = kcalloc(length, 4, GFP_KERNEL);
if (!sendbuf) {
ret = -ENOMEM;
goto out;
}
((__be32 *)sendbuf)[0] = cpu_to_be32(function);
}
mdev->mq->command = command;
mdev->mq->length = length;
if (length > 1)
memcpy(sendbuf + 4, data, (length - 1) * 4);
mdev->mq->sendbuf = sendbuf;
mutex_lock(&maple_wlist_lock);
list_add_tail(&mdev->mq->list, &maple_waitq);
mutex_unlock(&maple_wlist_lock);
out:
return ret;
}
EXPORT_SYMBOL_GPL(maple_add_packet);
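/*
 * Example (mirrors how this file itself queues requests): ask a device
 * for its current condition. A length of 1 covers just the function
 * word, so no extra payload is passed.
 *
 *	atomic_set(&mdev->busy, 1);
 *	ret = maple_add_packet(mdev, be32_to_cpu(mdev->devinfo.function),
 *			       MAPLE_COMMAND_GETCOND, 1, NULL);
 */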
static struct mapleq *maple_allocq(struct maple_device *mdev)
{
struct mapleq *mq;
mq = kzalloc(sizeof(*mq), GFP_KERNEL);
if (!mq)
goto failed_nomem;
INIT_LIST_HEAD(&mq->list);
mq->dev = mdev;
mq->recvbuf = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL);
if (!mq->recvbuf)
goto failed_p2;
mq->recvbuf->buf = &((mq->recvbuf->bufx)[0]);
return mq;
failed_p2:
kfree(mq);
failed_nomem:
dev_err(&mdev->dev, "could not allocate memory for device (%d, %d)\n",
mdev->port, mdev->unit);
return NULL;
}
static struct maple_device *maple_alloc_dev(int port, int unit)
{
struct maple_device *mdev;
/* zero this out to avoid kobj subsystem
* thinking it has already been registered */
mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
if (!mdev)
return NULL;
mdev->port = port;
mdev->unit = unit;
mdev->mq = maple_allocq(mdev);
if (!mdev->mq) {
kfree(mdev);
return NULL;
}
mdev->dev.bus = &maple_bus_type;
mdev->dev.parent = &maple_bus;
init_waitqueue_head(&mdev->maple_wait);
return mdev;
}
static void maple_free_dev(struct maple_device *mdev)
{
kmem_cache_free(maple_queue_cache, mdev->mq->recvbuf);
kfree(mdev->mq);
kfree(mdev);
}
/* process the command queue into a maple command block
 * terminating command has bit 31 of first long set to 0
*/
static void maple_build_block(struct mapleq *mq)
{
int port, unit, from, to, len;
unsigned long *lsendbuf = mq->sendbuf;
port = mq->dev->port & 3;
unit = mq->dev->unit;
len = mq->length;
from = port << 6;
to = (port << 6) | (unit > 0 ? (1 << (unit - 1)) & 0x1f : 0x20);
*maple_lastptr &= 0x7fffffff;
maple_lastptr = maple_sendptr;
*maple_sendptr++ = (port << 16) | len | 0x80000000;
*maple_sendptr++ = virt_to_phys(mq->recvbuf->buf);
*maple_sendptr++ =
mq->command | (to << 8) | (from << 16) | (len << 24);
while (len-- > 0)
*maple_sendptr++ = *lsendbuf++;
}
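/*
 * Worked example of the addressing above: unit 2 on port 1 gives
 * from = 1 << 6 = 0x40 and to = (1 << 6) | (1 << (2 - 1)) = 0x42,
 * while unit 0 (the port itself) takes the dedicated 0x20 bit instead.
 */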
/* build up command queue */
static void maple_send(void)
{
int i, maple_packets = 0;
struct mapleq *mq, *nmq;
if (!maple_dma_done())
return;
/* disable DMA */
__raw_writel(0, MAPLE_ENABLE);
if (!list_empty(&maple_sentq))
goto finish;
mutex_lock(&maple_wlist_lock);
if (list_empty(&maple_waitq)) {
mutex_unlock(&maple_wlist_lock);
goto finish;
}
maple_lastptr = maple_sendbuf;
maple_sendptr = maple_sendbuf;
list_for_each_entry_safe(mq, nmq, &maple_waitq, list) {
maple_build_block(mq);
list_del_init(&mq->list);
list_add_tail(&mq->list, &maple_sentq);
if (maple_packets++ > MAPLE_MAXPACKETS)
break;
}
mutex_unlock(&maple_wlist_lock);
if (maple_packets > 0) {
for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
__flush_purge_region(maple_sendbuf + i * PAGE_SIZE,
PAGE_SIZE);
}
finish:
maple_dma_reset();
}
/* check if there is a driver registered likely to match this device */
static int maple_check_matching_driver(struct device_driver *driver,
void *devptr)
{
struct maple_driver *maple_drv;
struct maple_device *mdev;
mdev = devptr;
maple_drv = to_maple_driver(driver);
if (mdev->devinfo.function & cpu_to_be32(maple_drv->function))
return 1;
return 0;
}
static void maple_detach_driver(struct maple_device *mdev)
{
device_unregister(&mdev->dev);
}
/* process initial MAPLE_COMMAND_DEVINFO for each device or port */
static void maple_attach_driver(struct maple_device *mdev)
{
char *p, *recvbuf;
unsigned long function;
int matched, error;
recvbuf = mdev->mq->recvbuf->buf;
/* copy the data as individual elements in
* case of memory optimisation */
memcpy(&mdev->devinfo.function, recvbuf + 4, 4);
memcpy(&mdev->devinfo.function_data[0], recvbuf + 8, 12);
memcpy(&mdev->devinfo.area_code, recvbuf + 20, 1);
memcpy(&mdev->devinfo.connector_direction, recvbuf + 21, 1);
memcpy(&mdev->devinfo.product_name[0], recvbuf + 22, 30);
memcpy(&mdev->devinfo.standby_power, recvbuf + 112, 2);
memcpy(&mdev->devinfo.max_power, recvbuf + 114, 2);
memcpy(mdev->product_name, mdev->devinfo.product_name, 30);
mdev->product_name[30] = '\0';
memcpy(mdev->product_licence, mdev->devinfo.product_licence, 60);
mdev->product_licence[60] = '\0';
for (p = mdev->product_name + 29; mdev->product_name <= p; p--)
if (*p == ' ')
*p = '\0';
else
break;
for (p = mdev->product_licence + 59; mdev->product_licence <= p; p--)
if (*p == ' ')
*p = '\0';
else
break;
function = be32_to_cpu(mdev->devinfo.function);
dev_info(&mdev->dev, "detected %s: function 0x%lX: at (%d, %d)\n",
mdev->product_name, function, mdev->port, mdev->unit);
if (function > 0x200) {
/* Do this silently - as not a real device */
function = 0;
mdev->driver = &maple_unsupported_device;
dev_set_name(&mdev->dev, "%d:0.port", mdev->port);
} else {
matched =
bus_for_each_drv(&maple_bus_type, NULL, mdev,
maple_check_matching_driver);
if (matched == 0) {
/* Driver does not exist yet */
dev_info(&mdev->dev, "no driver found\n");
mdev->driver = &maple_unsupported_device;
}
dev_set_name(&mdev->dev, "%d:0%d.%lX", mdev->port,
mdev->unit, function);
}
mdev->function = function;
mdev->dev.release = &maple_release_device;
atomic_set(&mdev->busy, 0);
error = device_register(&mdev->dev);
if (error) {
dev_warn(&mdev->dev, "could not register device at"
" (%d, %d), with error 0x%X\n", mdev->unit,
mdev->port, error);
maple_free_dev(mdev);
mdev = NULL;
return;
}
}
/*
* if device has been registered for the given
* port and unit then return 1 - allows identification
* of which devices need to be attached or detached
*/
static int check_maple_device(struct device *device, void *portptr)
{
struct maple_device_specify *ds;
struct maple_device *mdev;
ds = portptr;
mdev = to_maple_dev(device);
if (mdev->port == ds->port && mdev->unit == ds->unit)
return 1;
return 0;
}
static int setup_maple_commands(struct device *device, void *ignored)
{
int add;
struct maple_device *mdev = to_maple_dev(device);
if (mdev->interval > 0 && atomic_read(&mdev->busy) == 0 &&
time_after(jiffies, mdev->when)) {
/* bounce if we cannot add */
add = maple_add_packet(mdev,
be32_to_cpu(mdev->devinfo.function),
MAPLE_COMMAND_GETCOND, 1, NULL);
if (!add)
mdev->when = jiffies + mdev->interval;
} else {
if (time_after(jiffies, maple_pnp_time))
/* Ensure we don't have block reads and devinfo
* calls interfering with one another - so flag the
* device as busy */
if (atomic_read(&mdev->busy) == 0) {
atomic_set(&mdev->busy, 1);
maple_add_packet(mdev, 0,
MAPLE_COMMAND_DEVINFO, 0, NULL);
}
}
return 0;
}
/* VBLANK bottom half - implemented via workqueue */
static void maple_vblank_handler(struct work_struct *work)
{
int x, locking;
struct maple_device *mdev;
if (!maple_dma_done())
return;
__raw_writel(0, MAPLE_ENABLE);
if (!list_empty(&maple_sentq))
goto finish;
/*
* Set up essential commands - to fetch data and
* check devices are still present
*/
bus_for_each_dev(&maple_bus_type, NULL, NULL,
setup_maple_commands);
if (time_after(jiffies, maple_pnp_time)) {
/*
		 * Scan the empty ports - bus is flaky and may have
		 * mis-reported emptiness
*/
for (x = 0; x < MAPLE_PORTS; x++) {
if (checked[x] && empty[x]) {
mdev = baseunits[x];
if (!mdev)
break;
atomic_set(&mdev->busy, 1);
locking = maple_add_packet(mdev, 0,
MAPLE_COMMAND_DEVINFO, 0, NULL);
if (!locking)
break;
}
}
maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL;
}
finish:
maple_send();
}
/* handle devices added via hotplugs - placing them on queue for DEVINFO */
static void maple_map_subunits(struct maple_device *mdev, int submask)
{
int retval, k, devcheck;
struct maple_device *mdev_add;
struct maple_device_specify ds;
ds.port = mdev->port;
for (k = 0; k < 5; k++) {
ds.unit = k + 1;
retval =
bus_for_each_dev(&maple_bus_type, NULL, &ds,
check_maple_device);
if (retval) {
submask = submask >> 1;
continue;
}
devcheck = submask & 0x01;
if (devcheck) {
mdev_add = maple_alloc_dev(mdev->port, k + 1);
if (!mdev_add)
return;
atomic_set(&mdev_add->busy, 1);
maple_add_packet(mdev_add, 0, MAPLE_COMMAND_DEVINFO,
0, NULL);
/* mark that we are checking sub devices */
scanning = 1;
}
submask = submask >> 1;
}
}
/* mark a device as removed */
static void maple_clean_submap(struct maple_device *mdev)
{
int killbit;
killbit = (mdev->unit > 0 ? (1 << (mdev->unit - 1)) & 0x1f : 0x20);
killbit = ~killbit;
killbit &= 0xFF;
subdevice_map[mdev->port] = subdevice_map[mdev->port] & killbit;
}
/* handle empty port or hotplug removal */
static void maple_response_none(struct maple_device *mdev)
{
maple_clean_submap(mdev);
if (likely(mdev->unit != 0)) {
/*
* Block devices play up
* and give the impression they have
* been removed even when still in place or
* trip the mtd layer when they have
* really gone - this code traps that eventuality
* and ensures we aren't overloaded with useless
* error messages
*/
if (mdev->can_unload) {
if (!mdev->can_unload(mdev)) {
atomic_set(&mdev->busy, 2);
wake_up(&mdev->maple_wait);
return;
}
}
dev_info(&mdev->dev, "detaching device at (%d, %d)\n",
mdev->port, mdev->unit);
maple_detach_driver(mdev);
return;
} else {
if (!started || !fullscan) {
if (checked[mdev->port] == false) {
checked[mdev->port] = true;
empty[mdev->port] = true;
dev_info(&mdev->dev, "no devices"
" to port %d\n", mdev->port);
}
return;
}
}
/* Some hardware devices generate false detach messages on unit 0 */
atomic_set(&mdev->busy, 0);
}
/* preprocess hotplugs or scans */
static void maple_response_devinfo(struct maple_device *mdev,
char *recvbuf)
{
char submask;
if (!started || (scanning == 2) || !fullscan) {
if ((mdev->unit == 0) && (checked[mdev->port] == false)) {
checked[mdev->port] = true;
maple_attach_driver(mdev);
} else {
if (mdev->unit != 0)
maple_attach_driver(mdev);
if (mdev->unit == 0) {
empty[mdev->port] = false;
maple_attach_driver(mdev);
}
}
}
if (mdev->unit == 0) {
submask = recvbuf[2] & 0x1F;
if (submask ^ subdevice_map[mdev->port]) {
maple_map_subunits(mdev, submask);
subdevice_map[mdev->port] = submask;
}
}
}
static void maple_response_fileerr(struct maple_device *mdev, void *recvbuf)
{
if (mdev->fileerr_handler) {
mdev->fileerr_handler(mdev, recvbuf);
return;
} else
dev_warn(&mdev->dev, "device at (%d, %d) reports"
"file error 0x%X\n", mdev->port, mdev->unit,
((int *)recvbuf)[1]);
}
static void maple_port_rescan(void)
{
int i;
struct maple_device *mdev;
fullscan = 1;
for (i = 0; i < MAPLE_PORTS; i++) {
if (checked[i] == false) {
fullscan = 0;
mdev = baseunits[i];
maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO,
0, NULL);
}
}
}
/* maple dma end bottom half - implemented via workqueue */
static void maple_dma_handler(struct work_struct *work)
{
struct mapleq *mq, *nmq;
struct maple_device *mdev;
char *recvbuf;
enum maple_code code;
if (!maple_dma_done())
return;
__raw_writel(0, MAPLE_ENABLE);
if (!list_empty(&maple_sentq)) {
list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
mdev = mq->dev;
recvbuf = mq->recvbuf->buf;
__flush_invalidate_region(sh_cacheop_vaddr(recvbuf),
0x400);
code = recvbuf[0];
kfree(mq->sendbuf);
list_del_init(&mq->list);
switch (code) {
case MAPLE_RESPONSE_NONE:
maple_response_none(mdev);
break;
case MAPLE_RESPONSE_DEVINFO:
maple_response_devinfo(mdev, recvbuf);
atomic_set(&mdev->busy, 0);
break;
case MAPLE_RESPONSE_DATATRF:
if (mdev->callback)
mdev->callback(mq);
atomic_set(&mdev->busy, 0);
wake_up(&mdev->maple_wait);
break;
case MAPLE_RESPONSE_FILEERR:
maple_response_fileerr(mdev, recvbuf);
atomic_set(&mdev->busy, 0);
wake_up(&mdev->maple_wait);
break;
case MAPLE_RESPONSE_AGAIN:
case MAPLE_RESPONSE_BADCMD:
case MAPLE_RESPONSE_BADFUNC:
dev_warn(&mdev->dev, "non-fatal error"
" 0x%X at (%d, %d)\n", code,
mdev->port, mdev->unit);
atomic_set(&mdev->busy, 0);
break;
case MAPLE_RESPONSE_ALLINFO:
dev_notice(&mdev->dev, "extended"
" device information request for (%d, %d)"
" but call is not supported\n", mdev->port,
mdev->unit);
atomic_set(&mdev->busy, 0);
break;
case MAPLE_RESPONSE_OK:
atomic_set(&mdev->busy, 0);
wake_up(&mdev->maple_wait);
break;
default:
break;
}
}
/* if scanning is 1 then we have subdevices to check */
if (scanning == 1) {
maple_send();
scanning = 2;
} else
scanning = 0;
	/* check if we have actually tested all ports yet */
if (!fullscan)
maple_port_rescan();
/* mark that we have been through the first scan */
started = 1;
}
maple_send();
}
static irqreturn_t maple_dma_interrupt(int irq, void *dev_id)
{
/* Load everything into the bottom half */
schedule_work(&maple_dma_process);
return IRQ_HANDLED;
}
static irqreturn_t maple_vblank_interrupt(int irq, void *dev_id)
{
schedule_work(&maple_vblank_process);
return IRQ_HANDLED;
}
static int maple_set_dma_interrupt_handler(void)
{
return request_irq(HW_EVENT_MAPLE_DMA, maple_dma_interrupt,
IRQF_SHARED, "maple bus DMA", &maple_unsupported_device);
}
static int maple_set_vblank_interrupt_handler(void)
{
return request_irq(HW_EVENT_VSYNC, maple_vblank_interrupt,
IRQF_SHARED, "maple bus VBLANK", &maple_unsupported_device);
}
static int maple_get_dma_buffer(void)
{
maple_sendbuf =
(void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
MAPLE_DMA_PAGES);
if (!maple_sendbuf)
return -ENOMEM;
return 0;
}
static int maple_match_bus_driver(struct device *devptr,
struct device_driver *drvptr)
{
struct maple_driver *maple_drv = to_maple_driver(drvptr);
struct maple_device *maple_dev = to_maple_dev(devptr);
/* Trap empty port case */
if (maple_dev->devinfo.function == 0xFFFFFFFF)
return 0;
else if (maple_dev->devinfo.function &
cpu_to_be32(maple_drv->function))
return 1;
return 0;
}
static void maple_bus_release(struct device *dev)
{
}
static struct maple_driver maple_unsupported_device = {
.drv = {
.name = "maple_unsupported_device",
.bus = &maple_bus_type,
},
};
/*
* maple_bus_type - core maple bus structure
*/
struct bus_type maple_bus_type = {
.name = "maple",
.match = maple_match_bus_driver,
};
EXPORT_SYMBOL_GPL(maple_bus_type);
static struct device maple_bus = {
.init_name = "maple",
.release = maple_bus_release,
};
static int __init maple_bus_init(void)
{
int retval, i;
struct maple_device *mdev[MAPLE_PORTS];
__raw_writel(0, MAPLE_ENABLE);
retval = device_register(&maple_bus);
if (retval)
goto cleanup;
retval = bus_register(&maple_bus_type);
if (retval)
goto cleanup_device;
retval = driver_register(&maple_unsupported_device.drv);
if (retval)
goto cleanup_bus;
/* allocate memory for maple bus dma */
retval = maple_get_dma_buffer();
if (retval) {
dev_err(&maple_bus, "failed to allocate DMA buffers\n");
goto cleanup_basic;
}
/* set up DMA interrupt handler */
retval = maple_set_dma_interrupt_handler();
if (retval) {
dev_err(&maple_bus, "bus failed to grab maple "
"DMA IRQ\n");
goto cleanup_dma;
}
/* set up VBLANK interrupt handler */
retval = maple_set_vblank_interrupt_handler();
if (retval) {
dev_err(&maple_bus, "bus failed to grab VBLANK IRQ\n");
goto cleanup_irq;
}
maple_queue_cache = KMEM_CACHE(maple_buffer, SLAB_HWCACHE_ALIGN);
if (!maple_queue_cache) {
retval = -ENOMEM;
goto cleanup_bothirqs;
}
INIT_LIST_HEAD(&maple_waitq);
INIT_LIST_HEAD(&maple_sentq);
/* setup maple ports */
for (i = 0; i < MAPLE_PORTS; i++) {
checked[i] = false;
empty[i] = false;
mdev[i] = maple_alloc_dev(i, 0);
if (!mdev[i]) {
while (i-- > 0)
maple_free_dev(mdev[i]);
retval = -ENOMEM;
goto cleanup_cache;
}
baseunits[i] = mdev[i];
atomic_set(&mdev[i]->busy, 1);
maple_add_packet(mdev[i], 0, MAPLE_COMMAND_DEVINFO, 0, NULL);
subdevice_map[i] = 0;
}
maple_pnp_time = jiffies + HZ;
/* prepare initial queue */
maple_send();
dev_info(&maple_bus, "bus core now registered\n");
return 0;
cleanup_cache:
kmem_cache_destroy(maple_queue_cache);
cleanup_bothirqs:
free_irq(HW_EVENT_VSYNC, 0);
cleanup_irq:
free_irq(HW_EVENT_MAPLE_DMA, 0);
cleanup_dma:
free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES);
cleanup_basic:
driver_unregister(&maple_unsupported_device.drv);
cleanup_bus:
bus_unregister(&maple_bus_type);
cleanup_device:
device_unregister(&maple_bus);
cleanup:
printk(KERN_ERR "Maple bus registration failed\n");
return retval;
}
/* Push init to later to ensure hardware gets detected */
fs_initcall(maple_bus_init);
| linux-master | drivers/sh/maple/maple.c |
/*
* Helper routines for SuperH Clock Pulse Generator blocks (CPG).
*
* Copyright (C) 2010 Magnus Damm
* Copyright (C) 2010 - 2012 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/sh_clk.h>
#define CPG_CKSTP_BIT BIT(8)
static unsigned int sh_clk_read(struct clk *clk)
{
if (clk->flags & CLK_ENABLE_REG_8BIT)
return ioread8(clk->mapped_reg);
else if (clk->flags & CLK_ENABLE_REG_16BIT)
return ioread16(clk->mapped_reg);
return ioread32(clk->mapped_reg);
}
static void sh_clk_write(int value, struct clk *clk)
{
if (clk->flags & CLK_ENABLE_REG_8BIT)
iowrite8(value, clk->mapped_reg);
else if (clk->flags & CLK_ENABLE_REG_16BIT)
iowrite16(value, clk->mapped_reg);
else
iowrite32(value, clk->mapped_reg);
}
static int sh_clk_mstp_enable(struct clk *clk)
{
sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
if (clk->status_reg) {
unsigned int (*read)(const void __iomem *addr);
int i;
void __iomem *mapped_status = (phys_addr_t)clk->status_reg -
(phys_addr_t)clk->enable_reg + clk->mapped_reg;
if (clk->flags & CLK_ENABLE_REG_8BIT)
read = ioread8;
else if (clk->flags & CLK_ENABLE_REG_16BIT)
read = ioread16;
else
read = ioread32;
for (i = 1000;
(read(mapped_status) & (1 << clk->enable_bit)) && i;
i--)
cpu_relax();
if (!i) {
pr_err("cpg: failed to enable %p[%d]\n",
clk->enable_reg, clk->enable_bit);
return -ETIMEDOUT;
}
}
return 0;
}
static void sh_clk_mstp_disable(struct clk *clk)
{
sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
}
static struct sh_clk_ops sh_clk_mstp_clk_ops = {
.enable = sh_clk_mstp_enable,
.disable = sh_clk_mstp_disable,
.recalc = followparent_recalc,
};
int __init sh_clk_mstp_register(struct clk *clks, int nr)
{
struct clk *clkp;
int ret = 0;
int k;
for (k = 0; !ret && (k < nr); k++) {
clkp = clks + k;
clkp->ops = &sh_clk_mstp_clk_ops;
ret |= clk_register(clkp);
}
return ret;
}
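/*
 * Example (illustrative sketch): platform setup code typically declares
 * an array of MSTP gate clocks and registers them in one call. The
 * parent clock, register address and bit below are hypothetical, and
 * SH_CLK_MSTP32() is assumed to carry its usual meaning from
 * <linux/sh_clk.h>.
 *
 *	static struct clk mstp_clks[] = {
 *		[0] = SH_CLK_MSTP32(&peripheral_clk, MSTPCR0, 0, 0),
 *	};
 *
 *	ret = sh_clk_mstp_register(mstp_clks, ARRAY_SIZE(mstp_clks));
 */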
/*
* Div/mult table lookup helpers
*/
static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
{
return clk->priv;
}
static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
{
return clk_to_div_table(clk)->div_mult_table;
}
/*
* Common div ops
*/
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
return clk_rate_table_round(clk, clk->freq_table, rate);
}
static unsigned long sh_clk_div_recalc(struct clk *clk)
{
struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
unsigned int idx;
clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
table, clk->arch_flags ? &clk->arch_flags : NULL);
idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;
return clk->freq_table[idx].frequency;
}
static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
{
struct clk_div_table *dt = clk_to_div_table(clk);
unsigned long value;
int idx;
idx = clk_rate_table_find(clk, clk->freq_table, rate);
if (idx < 0)
return idx;
value = sh_clk_read(clk);
value &= ~(clk->div_mask << clk->enable_bit);
value |= (idx << clk->enable_bit);
sh_clk_write(value, clk);
/* XXX: Should use a post-change notifier */
if (dt->kick)
dt->kick(clk);
return 0;
}
static int sh_clk_div_enable(struct clk *clk)
{
if (clk->div_mask == SH_CLK_DIV6_MSK) {
int ret = sh_clk_div_set_rate(clk, clk->rate);
if (ret < 0)
return ret;
}
sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
return 0;
}
static void sh_clk_div_disable(struct clk *clk)
{
unsigned int val;
val = sh_clk_read(clk);
val |= CPG_CKSTP_BIT;
/*
* div6 clocks require the divisor field to be non-zero or the
* above CKSTP toggle silently fails. Ensure that the divisor
* array is reset to its initial state on disable.
*/
if (clk->flags & CLK_MASK_DIV_ON_DISABLE)
val |= clk->div_mask;
sh_clk_write(val, clk);
}
static struct sh_clk_ops sh_clk_div_clk_ops = {
.recalc = sh_clk_div_recalc,
.set_rate = sh_clk_div_set_rate,
.round_rate = sh_clk_div_round_rate,
};
static struct sh_clk_ops sh_clk_div_enable_clk_ops = {
.recalc = sh_clk_div_recalc,
.set_rate = sh_clk_div_set_rate,
.round_rate = sh_clk_div_round_rate,
.enable = sh_clk_div_enable,
.disable = sh_clk_div_disable,
};
static int __init sh_clk_init_parent(struct clk *clk)
{
u32 val;
if (clk->parent)
return 0;
if (!clk->parent_table || !clk->parent_num)
return 0;
if (!clk->src_width) {
pr_err("sh_clk_init_parent: cannot select parent clock\n");
return -EINVAL;
}
val = (sh_clk_read(clk) >> clk->src_shift);
val &= (1 << clk->src_width) - 1;
if (val >= clk->parent_num) {
pr_err("sh_clk_init_parent: parent table size failed\n");
return -EINVAL;
}
clk_reparent(clk, clk->parent_table[val]);
if (!clk->parent) {
pr_err("sh_clk_init_parent: unable to set parent");
return -EINVAL;
}
return 0;
}
static int __init sh_clk_div_register_ops(struct clk *clks, int nr,
struct clk_div_table *table, struct sh_clk_ops *ops)
{
struct clk *clkp;
void *freq_table;
int nr_divs = table->div_mult_table->nr_divisors;
int freq_table_size = sizeof(struct cpufreq_frequency_table);
int ret = 0;
int k;
freq_table_size *= (nr_divs + 1);
freq_table = kcalloc(nr, freq_table_size, GFP_KERNEL);
if (!freq_table) {
pr_err("%s: unable to alloc memory\n", __func__);
return -ENOMEM;
}
for (k = 0; !ret && (k < nr); k++) {
clkp = clks + k;
clkp->ops = ops;
clkp->priv = table;
clkp->freq_table = freq_table + (k * freq_table_size);
clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
ret = clk_register(clkp);
if (ret == 0)
ret = sh_clk_init_parent(clkp);
}
return ret;
}
/*
* div6 support
*/
static int sh_clk_div6_divisors[64] = {
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};
static struct clk_div_mult_table div6_div_mult_table = {
.divisors = sh_clk_div6_divisors,
.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};
static struct clk_div_table sh_clk_div6_table = {
.div_mult_table = &div6_div_mult_table,
};
static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
{
struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
u32 value;
int ret, i;
if (!clk->parent_table || !clk->parent_num)
return -EINVAL;
/* Search the parent */
for (i = 0; i < clk->parent_num; i++)
if (clk->parent_table[i] == parent)
break;
if (i == clk->parent_num)
return -ENODEV;
ret = clk_reparent(clk, parent);
if (ret < 0)
return ret;
value = sh_clk_read(clk) &
~(((1 << clk->src_width) - 1) << clk->src_shift);
sh_clk_write(value | (i << clk->src_shift), clk);
/* Rebuild the frequency table */
clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
table, NULL);
return 0;
}
static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
.recalc = sh_clk_div_recalc,
.round_rate = sh_clk_div_round_rate,
.set_rate = sh_clk_div_set_rate,
.enable = sh_clk_div_enable,
.disable = sh_clk_div_disable,
.set_parent = sh_clk_div6_set_parent,
};
int __init sh_clk_div6_register(struct clk *clks, int nr)
{
return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
&sh_clk_div_enable_clk_ops);
}
int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
{
return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
&sh_clk_div6_reparent_clk_ops);
}
/*
* div4 support
*/
static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
u32 value;
int ret;
/* we really need a better way to determine parent index, but for
* now assume internal parent comes with CLK_ENABLE_ON_INIT set,
* no CLK_ENABLE_ON_INIT means external clock...
*/
if (parent->flags & CLK_ENABLE_ON_INIT)
value = sh_clk_read(clk) & ~(1 << 7);
else
value = sh_clk_read(clk) | (1 << 7);
ret = clk_reparent(clk, parent);
if (ret < 0)
return ret;
sh_clk_write(value, clk);
	/* Rebuild the frequency table */
clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
table, &clk->arch_flags);
return 0;
}
static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
.recalc = sh_clk_div_recalc,
.set_rate = sh_clk_div_set_rate,
.round_rate = sh_clk_div_round_rate,
.enable = sh_clk_div_enable,
.disable = sh_clk_div_disable,
.set_parent = sh_clk_div4_set_parent,
};
int __init sh_clk_div4_register(struct clk *clks, int nr,
struct clk_div4_table *table)
{
return sh_clk_div_register_ops(clks, nr, table, &sh_clk_div_clk_ops);
}
int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
struct clk_div4_table *table)
{
return sh_clk_div_register_ops(clks, nr, table,
&sh_clk_div_enable_clk_ops);
}
int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
struct clk_div4_table *table)
{
return sh_clk_div_register_ops(clks, nr, table,
&sh_clk_div4_reparent_clk_ops);
}
/* FSI-DIV */
static unsigned long fsidiv_recalc(struct clk *clk)
{
u32 value;
value = __raw_readl(clk->mapping->base);
value >>= 16;
if (value < 2)
return clk->parent->rate;
return clk->parent->rate / value;
}
static long fsidiv_round_rate(struct clk *clk, unsigned long rate)
{
return clk_rate_div_range_round(clk, 1, 0xffff, rate);
}
static void fsidiv_disable(struct clk *clk)
{
__raw_writel(0, clk->mapping->base);
}
static int fsidiv_enable(struct clk *clk)
{
u32 value;
value = __raw_readl(clk->mapping->base) >> 16;
if (value < 2)
return 0;
__raw_writel((value << 16) | 0x3, clk->mapping->base);
return 0;
}
static int fsidiv_set_rate(struct clk *clk, unsigned long rate)
{
int idx;
idx = (clk->parent->rate / rate) & 0xffff;
if (idx < 2)
__raw_writel(0, clk->mapping->base);
else
__raw_writel(idx << 16, clk->mapping->base);
return 0;
}
static struct sh_clk_ops fsidiv_clk_ops = {
.recalc = fsidiv_recalc,
.round_rate = fsidiv_round_rate,
.set_rate = fsidiv_set_rate,
.enable = fsidiv_enable,
.disable = fsidiv_disable,
};
int __init sh_clk_fsidiv_register(struct clk *clks, int nr)
{
struct clk_mapping *map;
int i;
for (i = 0; i < nr; i++) {
map = kzalloc(sizeof(struct clk_mapping), GFP_KERNEL);
if (!map) {
pr_err("%s: unable to alloc memory\n", __func__);
return -ENOMEM;
}
/* clks[i].enable_reg came from SH_CLK_FSIDIV() */
map->phys = (phys_addr_t)clks[i].enable_reg;
map->len = 8;
clks[i].enable_reg = 0; /* remove .enable_reg */
clks[i].ops = &fsidiv_clk_ops;
clks[i].mapping = map;
clk_register(&clks[i]);
}
return 0;
}
| linux-master | drivers/sh/clk/cpg.c |
/*
* SuperH clock framework
*
* Copyright (C) 2005 - 2010 Paul Mundt
*
* This clock framework is derived from the OMAP version by:
*
* Copyright (C) 2004 - 2008 Nokia Corporation
* Written by Tuukka Tikkanen <[email protected]>
*
* Modified for omap shared clock framework by Tony Lindgren <[email protected]>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#define pr_fmt(fmt) "clock: " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/syscore_ops.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/sh_clk.h>
static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);
/* clock disable operations are not passed on to hardware during boot */
static int allow_disable;
void clk_rate_table_build(struct clk *clk,
struct cpufreq_frequency_table *freq_table,
int nr_freqs,
struct clk_div_mult_table *src_table,
unsigned long *bitmap)
{
unsigned long mult, div;
unsigned long freq;
int i;
clk->nr_freqs = nr_freqs;
for (i = 0; i < nr_freqs; i++) {
div = 1;
mult = 1;
if (src_table->divisors && i < src_table->nr_divisors)
div = src_table->divisors[i];
if (src_table->multipliers && i < src_table->nr_multipliers)
mult = src_table->multipliers[i];
if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
freq = CPUFREQ_ENTRY_INVALID;
else
freq = clk->parent->rate * mult / div;
freq_table[i].driver_data = i;
freq_table[i].frequency = freq;
}
/* Termination entry */
freq_table[i].driver_data = i;
freq_table[i].frequency = CPUFREQ_TABLE_END;
}
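/*
 * Worked example: with a 48 MHz parent and src_table divisors
 * { 1, 2, 3, 4 } (multipliers absent, so mult stays 1), the loop above
 * produces 48, 24, 16 and 12 MHz entries, followed by the
 * CPUFREQ_TABLE_END terminator.
 */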
struct clk_rate_round_data;
struct clk_rate_round_data {
unsigned long rate;
unsigned int min, max;
long (*func)(unsigned int, struct clk_rate_round_data *);
void *arg;
};
#define for_each_frequency(pos, r, freq) \
for (pos = r->min, freq = r->func(pos, r); \
pos <= r->max; pos++, freq = r->func(pos, r)) \
if (unlikely(freq == 0)) \
; \
else
static long clk_rate_round_helper(struct clk_rate_round_data *rounder)
{
unsigned long rate_error, rate_error_prev = ~0UL;
unsigned long highest, lowest, freq;
long rate_best_fit = -ENOENT;
int i;
highest = 0;
lowest = ~0UL;
for_each_frequency(i, rounder, freq) {
if (freq > highest)
highest = freq;
if (freq < lowest)
lowest = freq;
rate_error = abs(freq - rounder->rate);
if (rate_error < rate_error_prev) {
rate_best_fit = freq;
rate_error_prev = rate_error;
}
if (rate_error == 0)
break;
}
if (rounder->rate >= highest)
rate_best_fit = highest;
if (rounder->rate <= lowest)
rate_best_fit = lowest;
return rate_best_fit;
}
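/*
 * Worked example of the best-fit search above: asked to round 20 MHz
 * against candidates { 48, 24, 12 } MHz, |24 - 20| = 4 beats
 * |12 - 20| = 8, so 24 MHz wins; requests above 48 MHz or below 12 MHz
 * clamp to the highest or lowest candidate respectively.
 */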
static long clk_rate_table_iter(unsigned int pos,
struct clk_rate_round_data *rounder)
{
struct cpufreq_frequency_table *freq_table = rounder->arg;
unsigned long freq = freq_table[pos].frequency;
if (freq == CPUFREQ_ENTRY_INVALID)
freq = 0;
return freq;
}
long clk_rate_table_round(struct clk *clk,
struct cpufreq_frequency_table *freq_table,
unsigned long rate)
{
struct clk_rate_round_data table_round = {
.min = 0,
.max = clk->nr_freqs - 1,
.func = clk_rate_table_iter,
.arg = freq_table,
.rate = rate,
};
if (clk->nr_freqs < 1)
return -ENOSYS;
return clk_rate_round_helper(&table_round);
}
static long clk_rate_div_range_iter(unsigned int pos,
struct clk_rate_round_data *rounder)
{
return clk_get_rate(rounder->arg) / pos;
}
long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
unsigned int div_max, unsigned long rate)
{
struct clk_rate_round_data div_range_round = {
.min = div_min,
.max = div_max,
.func = clk_rate_div_range_iter,
.arg = clk_get_parent(clk),
.rate = rate,
};
return clk_rate_round_helper(&div_range_round);
}
static long clk_rate_mult_range_iter(unsigned int pos,
struct clk_rate_round_data *rounder)
{
return clk_get_rate(rounder->arg) * pos;
}
long clk_rate_mult_range_round(struct clk *clk, unsigned int mult_min,
unsigned int mult_max, unsigned long rate)
{
struct clk_rate_round_data mult_range_round = {
.min = mult_min,
.max = mult_max,
.func = clk_rate_mult_range_iter,
.arg = clk_get_parent(clk),
.rate = rate,
};
return clk_rate_round_helper(&mult_range_round);
}
int clk_rate_table_find(struct clk *clk,
struct cpufreq_frequency_table *freq_table,
unsigned long rate)
{
struct cpufreq_frequency_table *pos;
int idx;
cpufreq_for_each_valid_entry_idx(pos, freq_table, idx)
if (pos->frequency == rate)
return idx;
return -ENOENT;
}
/* Used for clocks that always have same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
return clk->parent ? clk->parent->rate : 0;
}
int clk_reparent(struct clk *child, struct clk *parent)
{
list_del_init(&child->sibling);
if (parent)
list_add(&child->sibling, &parent->children);
child->parent = parent;
return 0;
}
/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
struct clk *clkp;
list_for_each_entry(clkp, &tclk->children, sibling) {
if (clkp->ops && clkp->ops->recalc)
clkp->rate = clkp->ops->recalc(clkp);
propagate_rate(clkp);
}
}
static void __clk_disable(struct clk *clk)
{
if (WARN(!clk->usecount, "Trying to disable clock %p with 0 usecount\n",
clk))
return;
if (!(--clk->usecount)) {
if (likely(allow_disable && clk->ops && clk->ops->disable))
clk->ops->disable(clk);
if (likely(clk->parent))
__clk_disable(clk->parent);
}
}
void clk_disable(struct clk *clk)
{
unsigned long flags;
if (!clk)
return;
spin_lock_irqsave(&clock_lock, flags);
__clk_disable(clk);
spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);
static int __clk_enable(struct clk *clk)
{
int ret = 0;
if (clk->usecount++ == 0) {
if (clk->parent) {
ret = __clk_enable(clk->parent);
if (unlikely(ret))
goto err;
}
if (clk->ops && clk->ops->enable) {
ret = clk->ops->enable(clk);
if (ret) {
if (clk->parent)
__clk_disable(clk->parent);
goto err;
}
}
}
return ret;
err:
clk->usecount--;
return ret;
}
int clk_enable(struct clk *clk)
{
unsigned long flags;
int ret;
if (!clk)
return 0;
spin_lock_irqsave(&clock_lock, flags);
ret = __clk_enable(clk);
spin_unlock_irqrestore(&clock_lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
static LIST_HEAD(root_clks);
/**
* recalculate_root_clocks - recalculate and propagate all root clocks
*
* Recalculates all root clocks (clocks with no parent), which if the
* clock's .recalc is set correctly, should also propagate their rates.
* Called at init.
*/
void recalculate_root_clocks(void)
{
struct clk *clkp;
list_for_each_entry(clkp, &root_clks, sibling) {
if (clkp->ops && clkp->ops->recalc)
clkp->rate = clkp->ops->recalc(clkp);
propagate_rate(clkp);
}
}
static struct clk_mapping dummy_mapping;
static struct clk *lookup_root_clock(struct clk *clk)
{
while (clk->parent)
clk = clk->parent;
return clk;
}
static int clk_establish_mapping(struct clk *clk)
{
struct clk_mapping *mapping = clk->mapping;
/*
* Propagate mappings.
*/
if (!mapping) {
struct clk *clkp;
/*
* dummy mapping for root clocks with no specified ranges
*/
if (!clk->parent) {
clk->mapping = &dummy_mapping;
goto out;
}
/*
* If we're on a child clock and it provides no mapping of its
* own, inherit the mapping from its root clock.
*/
clkp = lookup_root_clock(clk);
mapping = clkp->mapping;
BUG_ON(!mapping);
}
/*
* Establish initial mapping.
*/
if (!mapping->base && mapping->phys) {
kref_init(&mapping->ref);
mapping->base = ioremap(mapping->phys, mapping->len);
if (unlikely(!mapping->base))
return -ENXIO;
} else if (mapping->base) {
/*
* Bump the refcount for an existing mapping
*/
kref_get(&mapping->ref);
}
clk->mapping = mapping;
out:
clk->mapped_reg = clk->mapping->base;
clk->mapped_reg += (phys_addr_t)clk->enable_reg - clk->mapping->phys;
return 0;
}
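/*
 * Worked example of the offset math above (addresses are illustrative
 * only): with a mapping whose phys starts at 0xe6150000 and an
 * enable_reg of 0xe6150130, mapped_reg ends up pointing 0x130 bytes
 * into the ioremap()ed window.
 */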
static void clk_destroy_mapping(struct kref *kref)
{
struct clk_mapping *mapping;
mapping = container_of(kref, struct clk_mapping, ref);
iounmap(mapping->base);
}
static void clk_teardown_mapping(struct clk *clk)
{
struct clk_mapping *mapping = clk->mapping;
/* Nothing to do */
if (mapping == &dummy_mapping)
goto out;
kref_put(&mapping->ref, clk_destroy_mapping);
clk->mapping = NULL;
out:
clk->mapped_reg = NULL;
}
int clk_register(struct clk *clk)
{
int ret;
if (IS_ERR_OR_NULL(clk))
return -EINVAL;
/*
* trap out already registered clocks
*/
if (clk->node.next || clk->node.prev)
return 0;
mutex_lock(&clock_list_sem);
INIT_LIST_HEAD(&clk->children);
clk->usecount = 0;
ret = clk_establish_mapping(clk);
if (unlikely(ret))
goto out_unlock;
if (clk->parent)
list_add(&clk->sibling, &clk->parent->children);
else
list_add(&clk->sibling, &root_clks);
list_add(&clk->node, &clock_list);
#ifdef CONFIG_SH_CLK_CPG_LEGACY
if (clk->ops && clk->ops->init)
clk->ops->init(clk);
#endif
out_unlock:
mutex_unlock(&clock_list_sem);
return ret;
}
EXPORT_SYMBOL_GPL(clk_register);
void clk_unregister(struct clk *clk)
{
mutex_lock(&clock_list_sem);
list_del(&clk->sibling);
list_del(&clk->node);
clk_teardown_mapping(clk);
mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);
void clk_enable_init_clocks(void)
{
struct clk *clkp;
list_for_each_entry(clkp, &clock_list, node)
if (clkp->flags & CLK_ENABLE_ON_INIT)
clk_enable(clkp);
}
unsigned long clk_get_rate(struct clk *clk)
{
if (!clk)
return 0;
return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);
int clk_set_rate(struct clk *clk, unsigned long rate)
{
int ret = -EOPNOTSUPP;
unsigned long flags;
if (!clk)
return 0;
spin_lock_irqsave(&clock_lock, flags);
if (likely(clk->ops && clk->ops->set_rate)) {
ret = clk->ops->set_rate(clk, rate);
if (ret != 0)
goto out_unlock;
} else {
clk->rate = rate;
ret = 0;
}
if (clk->ops && clk->ops->recalc)
clk->rate = clk->ops->recalc(clk);
propagate_rate(clk);
out_unlock:
spin_unlock_irqrestore(&clock_lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
int clk_set_parent(struct clk *clk, struct clk *parent)
{
unsigned long flags;
int ret = -EINVAL;
if (!parent || !clk)
return ret;
if (clk->parent == parent)
return 0;
spin_lock_irqsave(&clock_lock, flags);
if (clk->usecount == 0) {
if (clk->ops->set_parent)
ret = clk->ops->set_parent(clk, parent);
else
ret = clk_reparent(clk, parent);
if (ret == 0) {
if (clk->ops->recalc)
clk->rate = clk->ops->recalc(clk);
pr_debug("set parent of %p to %p (new rate %ld)\n",
clk, clk->parent, clk->rate);
propagate_rate(clk);
}
} else
ret = -EBUSY;
spin_unlock_irqrestore(&clock_lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);
struct clk *clk_get_parent(struct clk *clk)
{
if (!clk)
return NULL;
return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);
long clk_round_rate(struct clk *clk, unsigned long rate)
{
if (!clk)
return 0;
if (likely(clk->ops && clk->ops->round_rate)) {
unsigned long flags, rounded;
spin_lock_irqsave(&clock_lock, flags);
rounded = clk->ops->round_rate(clk, rate);
spin_unlock_irqrestore(&clock_lock, flags);
return rounded;
}
return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);
#ifdef CONFIG_PM
static void clks_core_resume(void)
{
struct clk *clkp;
list_for_each_entry(clkp, &clock_list, node) {
if (likely(clkp->usecount && clkp->ops)) {
unsigned long rate = clkp->rate;
if (likely(clkp->ops->set_parent))
clkp->ops->set_parent(clkp,
clkp->parent);
if (likely(clkp->ops->set_rate))
clkp->ops->set_rate(clkp, rate);
else if (likely(clkp->ops->recalc))
clkp->rate = clkp->ops->recalc(clkp);
}
}
}
static struct syscore_ops clks_syscore_ops = {
.resume = clks_core_resume,
};
static int __init clk_syscore_init(void)
{
register_syscore_ops(&clks_syscore_ops);
return 0;
}
subsys_initcall(clk_syscore_init);
#endif
static int __init clk_late_init(void)
{
unsigned long flags;
struct clk *clk;
/* disable all clocks with zero use count */
mutex_lock(&clock_list_sem);
spin_lock_irqsave(&clock_lock, flags);
list_for_each_entry(clk, &clock_list, node)
if (!clk->usecount && clk->ops && clk->ops->disable)
clk->ops->disable(clk);
/* from now on allow clock disable operations */
allow_disable = 1;
spin_unlock_irqrestore(&clock_lock, flags);
mutex_unlock(&clock_list_sem);
return 0;
}
late_initcall(clk_late_init);
| linux-master | drivers/sh/clk/core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* USB Skeleton driver - 2.2
*
* Copyright (C) 2001-2004 Greg Kroah-Hartman ([email protected])
*
* This driver is based on the 2.6.3 version of drivers/usb/usb-skeleton.c
* but has been rewritten to be easier to read and use.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/mutex.h>
/* Define these values to match your devices */
#define USB_SKEL_VENDOR_ID 0xfff0
#define USB_SKEL_PRODUCT_ID 0xfff0
/* table of devices that work with this driver */
static const struct usb_device_id skel_table[] = {
{ USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, skel_table);
/* Get a minor range for your devices from the usb maintainer */
#define USB_SKEL_MINOR_BASE 192
/* our private defines. if this grows any larger, use your own .h file */
#define MAX_TRANSFER (PAGE_SIZE - 512)
/*
* MAX_TRANSFER is chosen so that the VM is not stressed by
* allocations > PAGE_SIZE and the number of packets in a page
 * is an integer. 512 is the largest possible packet on EHCI.
*/
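/*
 * Worked example: with 4 KiB pages this evaluates to 4096 - 512 = 3584
 * bytes, i.e. exactly seven maximum-size 512 byte EHCI packets per
 * transfer buffer.
 */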
#define WRITES_IN_FLIGHT 8
/* arbitrarily chosen */
/* Structure to hold all of our device specific stuff */
struct usb_skel {
struct usb_device *udev; /* the usb device for this device */
struct usb_interface *interface; /* the interface for this device */
struct semaphore limit_sem; /* limiting the number of writes in progress */
struct usb_anchor submitted; /* in case we need to retract our submissions */
struct urb *bulk_in_urb; /* the urb to read data with */
unsigned char *bulk_in_buffer; /* the buffer to receive data */
size_t bulk_in_size; /* the size of the receive buffer */
size_t bulk_in_filled; /* number of bytes in the buffer */
size_t bulk_in_copied; /* already copied to user space */
__u8 bulk_in_endpointAddr; /* the address of the bulk in endpoint */
__u8 bulk_out_endpointAddr; /* the address of the bulk out endpoint */
int errors; /* the last request tanked */
bool ongoing_read; /* a read is going on */
spinlock_t err_lock; /* lock for errors */
struct kref kref;
struct mutex io_mutex; /* synchronize I/O with disconnect */
unsigned long disconnected:1;
wait_queue_head_t bulk_in_wait; /* to wait for an ongoing read */
};
#define to_skel_dev(d) container_of(d, struct usb_skel, kref)
static struct usb_driver skel_driver;
static void skel_draw_down(struct usb_skel *dev);
static void skel_delete(struct kref *kref)
{
struct usb_skel *dev = to_skel_dev(kref);
usb_free_urb(dev->bulk_in_urb);
usb_put_intf(dev->interface);
usb_put_dev(dev->udev);
kfree(dev->bulk_in_buffer);
kfree(dev);
}
static int skel_open(struct inode *inode, struct file *file)
{
struct usb_skel *dev;
struct usb_interface *interface;
int subminor;
int retval = 0;
subminor = iminor(inode);
interface = usb_find_interface(&skel_driver, subminor);
if (!interface) {
pr_err("%s - error, can't find device for minor %d\n",
__func__, subminor);
retval = -ENODEV;
goto exit;
}
dev = usb_get_intfdata(interface);
if (!dev) {
retval = -ENODEV;
goto exit;
}
retval = usb_autopm_get_interface(interface);
if (retval)
goto exit;
/* increment our usage count for the device */
kref_get(&dev->kref);
/* save our object in the file's private structure */
file->private_data = dev;
exit:
return retval;
}
static int skel_release(struct inode *inode, struct file *file)
{
struct usb_skel *dev;
dev = file->private_data;
if (dev == NULL)
return -ENODEV;
/* allow the device to be autosuspended */
usb_autopm_put_interface(dev->interface);
/* decrement the count on our device */
kref_put(&dev->kref, skel_delete);
return 0;
}
static int skel_flush(struct file *file, fl_owner_t id)
{
struct usb_skel *dev;
int res;
dev = file->private_data;
if (dev == NULL)
return -ENODEV;
/* wait for io to stop */
mutex_lock(&dev->io_mutex);
skel_draw_down(dev);
/* read out errors, leave subsequent opens a clean slate */
spin_lock_irq(&dev->err_lock);
res = dev->errors ? (dev->errors == -EPIPE ? -EPIPE : -EIO) : 0;
dev->errors = 0;
spin_unlock_irq(&dev->err_lock);
mutex_unlock(&dev->io_mutex);
return res;
}
static void skel_read_bulk_callback(struct urb *urb)
{
struct usb_skel *dev;
unsigned long flags;
dev = urb->context;
spin_lock_irqsave(&dev->err_lock, flags);
/* sync/async unlink faults aren't errors */
if (urb->status) {
if (!(urb->status == -ENOENT ||
urb->status == -ECONNRESET ||
urb->status == -ESHUTDOWN))
dev_err(&dev->interface->dev,
"%s - nonzero write bulk status received: %d\n",
__func__, urb->status);
dev->errors = urb->status;
} else {
dev->bulk_in_filled = urb->actual_length;
}
dev->ongoing_read = 0;
spin_unlock_irqrestore(&dev->err_lock, flags);
wake_up_interruptible(&dev->bulk_in_wait);
}
static int skel_do_read_io(struct usb_skel *dev, size_t count)
{
int rv;
/* prepare a read */
usb_fill_bulk_urb(dev->bulk_in_urb,
dev->udev,
usb_rcvbulkpipe(dev->udev,
dev->bulk_in_endpointAddr),
dev->bulk_in_buffer,
min(dev->bulk_in_size, count),
skel_read_bulk_callback,
dev);
/* tell everybody to leave the URB alone */
spin_lock_irq(&dev->err_lock);
dev->ongoing_read = 1;
spin_unlock_irq(&dev->err_lock);
	/* submitting a fresh read, so there is no buffered data to deliver yet */
dev->bulk_in_filled = 0;
dev->bulk_in_copied = 0;
/* do it */
rv = usb_submit_urb(dev->bulk_in_urb, GFP_KERNEL);
if (rv < 0) {
dev_err(&dev->interface->dev,
"%s - failed submitting read urb, error %d\n",
__func__, rv);
rv = (rv == -ENOMEM) ? rv : -EIO;
spin_lock_irq(&dev->err_lock);
dev->ongoing_read = 0;
spin_unlock_irq(&dev->err_lock);
}
return rv;
}
static ssize_t skel_read(struct file *file, char __user *buffer, size_t count,
loff_t *ppos)
{
struct usb_skel *dev;
int rv;
bool ongoing_io;
dev = file->private_data;
if (!count)
return 0;
/* no concurrent readers */
rv = mutex_lock_interruptible(&dev->io_mutex);
if (rv < 0)
return rv;
if (dev->disconnected) { /* disconnect() was called */
rv = -ENODEV;
goto exit;
}
/* if IO is under way, we must not touch things */
retry:
spin_lock_irq(&dev->err_lock);
ongoing_io = dev->ongoing_read;
spin_unlock_irq(&dev->err_lock);
if (ongoing_io) {
/* nonblocking IO shall not wait */
if (file->f_flags & O_NONBLOCK) {
rv = -EAGAIN;
goto exit;
}
/*
* IO may take forever
* hence wait in an interruptible state
*/
rv = wait_event_interruptible(dev->bulk_in_wait, (!dev->ongoing_read));
if (rv < 0)
goto exit;
}
/* errors must be reported */
rv = dev->errors;
if (rv < 0) {
/* any error is reported once */
dev->errors = 0;
/* to preserve notifications about reset */
rv = (rv == -EPIPE) ? rv : -EIO;
/* report it */
goto exit;
}
/*
* if the buffer is filled we may satisfy the read
* else we need to start IO
*/
if (dev->bulk_in_filled) {
/* we had read data */
size_t available = dev->bulk_in_filled - dev->bulk_in_copied;
size_t chunk = min(available, count);
if (!available) {
/*
* all data has been used
* actual IO needs to be done
*/
rv = skel_do_read_io(dev, count);
if (rv < 0)
goto exit;
else
goto retry;
}
/*
* data is available
* chunk tells us how much shall be copied
*/
if (copy_to_user(buffer,
dev->bulk_in_buffer + dev->bulk_in_copied,
chunk))
rv = -EFAULT;
else
rv = chunk;
dev->bulk_in_copied += chunk;
/*
* if we are asked for more than we have,
* we start IO but don't wait
*/
if (available < count)
skel_do_read_io(dev, count - chunk);
} else {
/* no data in the buffer */
rv = skel_do_read_io(dev, count);
if (rv < 0)
goto exit;
else
goto retry;
}
exit:
mutex_unlock(&dev->io_mutex);
return rv;
}
static void skel_write_bulk_callback(struct urb *urb)
{
struct usb_skel *dev;
unsigned long flags;
dev = urb->context;
/* sync/async unlink faults aren't errors */
if (urb->status) {
if (!(urb->status == -ENOENT ||
urb->status == -ECONNRESET ||
urb->status == -ESHUTDOWN))
dev_err(&dev->interface->dev,
"%s - nonzero write bulk status received: %d\n",
__func__, urb->status);
spin_lock_irqsave(&dev->err_lock, flags);
dev->errors = urb->status;
spin_unlock_irqrestore(&dev->err_lock, flags);
}
/* free up our allocated buffer */
usb_free_coherent(urb->dev, urb->transfer_buffer_length,
urb->transfer_buffer, urb->transfer_dma);
up(&dev->limit_sem);
}
static ssize_t skel_write(struct file *file, const char __user *user_buffer,
size_t count, loff_t *ppos)
{
struct usb_skel *dev;
int retval = 0;
struct urb *urb = NULL;
char *buf = NULL;
size_t writesize = min_t(size_t, count, MAX_TRANSFER);
dev = file->private_data;
/* verify that we actually have some data to write */
if (count == 0)
goto exit;
/*
* limit the number of URBs in flight to stop a user from using up all
* RAM
*/
if (!(file->f_flags & O_NONBLOCK)) {
if (down_interruptible(&dev->limit_sem)) {
retval = -ERESTARTSYS;
goto exit;
}
} else {
if (down_trylock(&dev->limit_sem)) {
retval = -EAGAIN;
goto exit;
}
}
spin_lock_irq(&dev->err_lock);
retval = dev->errors;
if (retval < 0) {
/* any error is reported once */
dev->errors = 0;
/* to preserve notifications about reset */
retval = (retval == -EPIPE) ? retval : -EIO;
}
spin_unlock_irq(&dev->err_lock);
if (retval < 0)
goto error;
/* create a urb, and a buffer for it, and copy the data to the urb */
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
retval = -ENOMEM;
goto error;
}
buf = usb_alloc_coherent(dev->udev, writesize, GFP_KERNEL,
&urb->transfer_dma);
if (!buf) {
retval = -ENOMEM;
goto error;
}
if (copy_from_user(buf, user_buffer, writesize)) {
retval = -EFAULT;
goto error;
}
/* this lock makes sure we don't submit URBs to gone devices */
mutex_lock(&dev->io_mutex);
if (dev->disconnected) { /* disconnect() was called */
mutex_unlock(&dev->io_mutex);
retval = -ENODEV;
goto error;
}
/* initialize the urb properly */
usb_fill_bulk_urb(urb, dev->udev,
usb_sndbulkpipe(dev->udev, dev->bulk_out_endpointAddr),
buf, writesize, skel_write_bulk_callback, dev);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(urb, &dev->submitted);
/* send the data out the bulk port */
retval = usb_submit_urb(urb, GFP_KERNEL);
mutex_unlock(&dev->io_mutex);
if (retval) {
dev_err(&dev->interface->dev,
"%s - failed submitting write urb, error %d\n",
__func__, retval);
goto error_unanchor;
}
/*
* release our reference to this urb, the USB core will eventually free
* it entirely
*/
usb_free_urb(urb);
return writesize;
error_unanchor:
usb_unanchor_urb(urb);
error:
if (urb) {
usb_free_coherent(dev->udev, writesize, buf, urb->transfer_dma);
usb_free_urb(urb);
}
up(&dev->limit_sem);
exit:
return retval;
}
static const struct file_operations skel_fops = {
.owner = THIS_MODULE,
.read = skel_read,
.write = skel_write,
.open = skel_open,
.release = skel_release,
.flush = skel_flush,
.llseek = noop_llseek,
};
/*
* usb class driver info in order to get a minor number from the usb core,
* and to have the device registered with the driver core
*/
static struct usb_class_driver skel_class = {
.name = "skel%d",
.fops = &skel_fops,
.minor_base = USB_SKEL_MINOR_BASE,
};
static int skel_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_skel *dev;
struct usb_endpoint_descriptor *bulk_in, *bulk_out;
int retval;
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
kref_init(&dev->kref);
sema_init(&dev->limit_sem, WRITES_IN_FLIGHT);
mutex_init(&dev->io_mutex);
spin_lock_init(&dev->err_lock);
init_usb_anchor(&dev->submitted);
init_waitqueue_head(&dev->bulk_in_wait);
dev->udev = usb_get_dev(interface_to_usbdev(interface));
dev->interface = usb_get_intf(interface);
/* set up the endpoint information */
/* use only the first bulk-in and bulk-out endpoints */
retval = usb_find_common_endpoints(interface->cur_altsetting,
&bulk_in, &bulk_out, NULL, NULL);
if (retval) {
dev_err(&interface->dev,
"Could not find both bulk-in and bulk-out endpoints\n");
goto error;
}
dev->bulk_in_size = usb_endpoint_maxp(bulk_in);
dev->bulk_in_endpointAddr = bulk_in->bEndpointAddress;
dev->bulk_in_buffer = kmalloc(dev->bulk_in_size, GFP_KERNEL);
if (!dev->bulk_in_buffer) {
retval = -ENOMEM;
goto error;
}
dev->bulk_in_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->bulk_in_urb) {
retval = -ENOMEM;
goto error;
}
dev->bulk_out_endpointAddr = bulk_out->bEndpointAddress;
/* save our data pointer in this interface device */
usb_set_intfdata(interface, dev);
/* we can register the device now, as it is ready */
retval = usb_register_dev(interface, &skel_class);
if (retval) {
/* something prevented us from registering this driver */
dev_err(&interface->dev,
"Not able to get a minor for this device.\n");
usb_set_intfdata(interface, NULL);
goto error;
}
/* let the user know what node this device is now attached to */
dev_info(&interface->dev,
"USB Skeleton device now attached to USBSkel-%d",
interface->minor);
return 0;
error:
/* this frees allocated memory */
kref_put(&dev->kref, skel_delete);
return retval;
}
static void skel_disconnect(struct usb_interface *interface)
{
struct usb_skel *dev;
int minor = interface->minor;
dev = usb_get_intfdata(interface);
/* give back our minor */
usb_deregister_dev(interface, &skel_class);
/* prevent more I/O from starting */
mutex_lock(&dev->io_mutex);
dev->disconnected = 1;
mutex_unlock(&dev->io_mutex);
usb_kill_urb(dev->bulk_in_urb);
usb_kill_anchored_urbs(&dev->submitted);
/* decrement our usage count */
kref_put(&dev->kref, skel_delete);
dev_info(&interface->dev, "USB Skeleton #%d now disconnected", minor);
}
static void skel_draw_down(struct usb_skel *dev)
{
int time;
time = usb_wait_anchor_empty_timeout(&dev->submitted, 1000);
if (!time)
usb_kill_anchored_urbs(&dev->submitted);
usb_kill_urb(dev->bulk_in_urb);
}
static int skel_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usb_skel *dev = usb_get_intfdata(intf);
if (!dev)
return 0;
skel_draw_down(dev);
return 0;
}
static int skel_resume(struct usb_interface *intf)
{
return 0;
}
static int skel_pre_reset(struct usb_interface *intf)
{
struct usb_skel *dev = usb_get_intfdata(intf);
mutex_lock(&dev->io_mutex);
skel_draw_down(dev);
return 0;
}
static int skel_post_reset(struct usb_interface *intf)
{
struct usb_skel *dev = usb_get_intfdata(intf);
/* we are sure no URBs are active - no locking needed */
dev->errors = -EPIPE;
mutex_unlock(&dev->io_mutex);
return 0;
}
static struct usb_driver skel_driver = {
.name = "skeleton",
.probe = skel_probe,
.disconnect = skel_disconnect,
.suspend = skel_suspend,
.resume = skel_resume,
.pre_reset = skel_pre_reset,
.post_reset = skel_post_reset,
.id_table = skel_table,
.supports_autosuspend = 1,
};
module_usb_driver(skel_driver);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/usb-skeleton.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* API for creating and destroying USB onboard hub platform devices
*
* Copyright (c) 2022, Google LLC
*/
#include <linux/device.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/of.h>
#include <linux/usb/onboard_hub.h>
#include "onboard_usb_hub.h"
struct pdev_list_entry {
struct platform_device *pdev;
struct list_head node;
};
static bool of_is_onboard_usb_hub(const struct device_node *np)
{
return !!of_match_node(onboard_hub_match, np);
}
/**
* onboard_hub_create_pdevs -- create platform devices for onboard USB hubs
* @parent_hub : parent hub to scan for connected onboard hubs
* @pdev_list : list of onboard hub platform devices owned by the parent hub
*
* Creates a platform device for each supported onboard hub that is connected to
* the given parent hub. The platform device is in charge of initializing the
* hub (enable regulators, take the hub out of reset, ...) and can optionally
* control whether the hub remains powered during system suspend or not.
*
* To keep track of the platform devices they are added to a list that is owned
* by the parent hub.
*
* Some background about the logic in this function, which can be a bit hard
* to follow:
*
* Root hubs don't have dedicated device tree nodes, but use the node of their
* HCD. The primary and secondary HCD are usually represented by a single DT
* node. That means the root hubs of the primary and secondary HCD share the
* same device tree node (the HCD node). As a result this function can be called
* twice with the same DT node for root hubs. We only want to create a single
* platform device for each physical onboard hub, hence for root hubs the loop
* is only executed for the root hub of the primary HCD. Since the function
* scans through all child nodes it still creates pdevs for onboard hubs
* connected to the root hub of the secondary HCD if needed.
*
* Further there must be only one platform device for onboard hubs with a peer
* hub (the hub is a single physical device). To achieve this two measures are
* taken: pdevs for onboard hubs with a peer are only created when the function
* is called on behalf of the parent hub that is connected to the primary HCD
* (directly or through other hubs). For onboard hubs connected to root hubs
* the function processes the nodes of both peers. A platform device is only
* created if the peer hub doesn't have one already.
*/
void onboard_hub_create_pdevs(struct usb_device *parent_hub, struct list_head *pdev_list)
{
int i;
struct usb_hcd *hcd = bus_to_hcd(parent_hub->bus);
struct device_node *np, *npc;
struct platform_device *pdev;
struct pdev_list_entry *pdle;
if (!parent_hub->dev.of_node)
return;
if (!parent_hub->parent && !usb_hcd_is_primary_hcd(hcd))
return;
for (i = 1; i <= parent_hub->maxchild; i++) {
np = usb_of_get_device_node(parent_hub, i);
if (!np)
continue;
if (!of_is_onboard_usb_hub(np))
goto node_put;
npc = of_parse_phandle(np, "peer-hub", 0);
if (npc) {
if (!usb_hcd_is_primary_hcd(hcd)) {
of_node_put(npc);
goto node_put;
}
pdev = of_find_device_by_node(npc);
of_node_put(npc);
if (pdev) {
put_device(&pdev->dev);
goto node_put;
}
}
pdev = of_platform_device_create(np, NULL, &parent_hub->dev);
if (!pdev) {
dev_err(&parent_hub->dev,
"failed to create platform device for onboard hub '%pOF'\n", np);
goto node_put;
}
pdle = kzalloc(sizeof(*pdle), GFP_KERNEL);
if (!pdle) {
of_platform_device_destroy(&pdev->dev, NULL);
goto node_put;
}
pdle->pdev = pdev;
list_add(&pdle->node, pdev_list);
node_put:
of_node_put(np);
}
}
EXPORT_SYMBOL_GPL(onboard_hub_create_pdevs);
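/*
 * For illustration only -- a hypothetical device tree fragment showing the
 * "peer-hub" arrangement the lookup above expects. Node names, unit
 * addresses and compatible strings are made up for this sketch; the real
 * binding documents the exact strings.
 *
 *	hub_2_0: hub@1 {
 *		compatible = "usbbda,5411";
 *		reg = <1>;
 *		peer-hub = <&hub_3_0>;
 *	};
 *
 *	hub_3_0: hub@2 {
 *		compatible = "usbbda,411";
 *		reg = <2>;
 *		peer-hub = <&hub_2_0>;
 *	};
 */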
/**
* onboard_hub_destroy_pdevs -- free resources of onboard hub platform devices
* @pdev_list : list of onboard hub platform devices
*
* Destroys the platform devices in the given list and frees the memory associated
* with the list entry.
*/
void onboard_hub_destroy_pdevs(struct list_head *pdev_list)
{
struct pdev_list_entry *pdle, *tmp;
list_for_each_entry_safe(pdle, tmp, pdev_list, node) {
list_del(&pdle->node);
of_platform_device_destroy(&pdle->pdev->dev, NULL);
kfree(pdle);
}
}
EXPORT_SYMBOL_GPL(onboard_hub_destroy_pdevs);
| linux-master | drivers/usb/misc/onboard_usb_hub_pdevs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* chaoskey - driver for ChaosKey device from Altus Metrum.
*
* This device provides true random numbers using a noise source based
* on a reverse-biased p-n junction in avalanche breakdown. More
* details can be found at http://chaoskey.org
*
* The driver connects to the kernel hardware RNG interface to provide
* entropy for /dev/random and other kernel activities. It also offers
* a separate /dev/ entry to allow for direct access to the random
* bit stream.
*
* Copyright © 2015 Keith Packard <[email protected]>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/wait.h>
#include <linux/hw_random.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
static struct usb_driver chaoskey_driver;
static struct usb_class_driver chaoskey_class;
static int chaoskey_rng_read(struct hwrng *rng, void *data,
size_t max, bool wait);
#define usb_dbg(usb_if, format, arg...) \
dev_dbg(&(usb_if)->dev, format, ## arg)
#define usb_err(usb_if, format, arg...) \
dev_err(&(usb_if)->dev, format, ## arg)
/* Version Information */
#define DRIVER_AUTHOR "Keith Packard, [email protected]"
#define DRIVER_DESC "Altus Metrum ChaosKey driver"
#define DRIVER_SHORT "chaoskey"
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
#define CHAOSKEY_VENDOR_ID 0x1d50 /* OpenMoko */
#define CHAOSKEY_PRODUCT_ID 0x60c6 /* ChaosKey */
#define ALEA_VENDOR_ID 0x12d8 /* Araneus */
#define ALEA_PRODUCT_ID 0x0001 /* Alea I */
#define CHAOSKEY_BUF_LEN 64 /* max size of USB full speed packet */
#define NAK_TIMEOUT (HZ) /* normal stall/wait timeout */
#define ALEA_FIRST_TIMEOUT (HZ*3) /* first stall/wait timeout for Alea */
#ifdef CONFIG_USB_DYNAMIC_MINORS
#define USB_CHAOSKEY_MINOR_BASE 0
#else
/* IOWARRIOR_MINOR_BASE + 16, not official yet */
#define USB_CHAOSKEY_MINOR_BASE 224
#endif
static const struct usb_device_id chaoskey_table[] = {
{ USB_DEVICE(CHAOSKEY_VENDOR_ID, CHAOSKEY_PRODUCT_ID) },
{ USB_DEVICE(ALEA_VENDOR_ID, ALEA_PRODUCT_ID) },
{ },
};
MODULE_DEVICE_TABLE(usb, chaoskey_table);
static void chaos_read_callback(struct urb *urb);
/* Driver-local specific stuff */
struct chaoskey {
struct usb_interface *interface;
char in_ep;
struct mutex lock;
struct mutex rng_lock;
int open; /* open count */
bool present; /* device not disconnected */
bool reading; /* ongoing IO */
bool reads_started; /* track first read for Alea */
int size; /* size of buf */
int valid; /* bytes of buf read */
int used; /* bytes of buf consumed */
char *name; /* product + serial */
struct hwrng hwrng; /* Embedded struct for hwrng */
int hwrng_registered; /* registered with hwrng API */
wait_queue_head_t wait_q; /* for timeouts */
struct urb *urb; /* for performing IO */
char *buf;
};
static void chaoskey_free(struct chaoskey *dev)
{
if (dev) {
usb_dbg(dev->interface, "free");
usb_free_urb(dev->urb);
kfree(dev->name);
kfree(dev->buf);
usb_put_intf(dev->interface);
kfree(dev);
}
}
static int chaoskey_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(interface);
struct usb_host_interface *altsetting = interface->cur_altsetting;
struct usb_endpoint_descriptor *epd;
int in_ep;
struct chaoskey *dev;
int result = -ENOMEM;
int size;
int res;
usb_dbg(interface, "probe %s-%s", udev->product, udev->serial);
/* Find the first bulk IN endpoint and its packet size */
res = usb_find_bulk_in_endpoint(altsetting, &epd);
if (res) {
usb_dbg(interface, "no IN endpoint found");
return res;
}
in_ep = usb_endpoint_num(epd);
size = usb_endpoint_maxp(epd);
/* Validate endpoint and size */
if (size <= 0) {
usb_dbg(interface, "invalid size (%d)", size);
return -ENODEV;
}
if (size > CHAOSKEY_BUF_LEN) {
usb_dbg(interface, "size reduced from %d to %d\n",
size, CHAOSKEY_BUF_LEN);
size = CHAOSKEY_BUF_LEN;
}
/* Looks good, allocate and initialize */
dev = kzalloc(sizeof(struct chaoskey), GFP_KERNEL);
if (dev == NULL)
goto out;
dev->interface = usb_get_intf(interface);
dev->buf = kmalloc(size, GFP_KERNEL);
if (dev->buf == NULL)
goto out;
dev->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->urb)
goto out;
usb_fill_bulk_urb(dev->urb,
udev,
usb_rcvbulkpipe(udev, in_ep),
dev->buf,
size,
chaos_read_callback,
dev);
/* Construct a name using the product and serial values. Each
* device needs a unique name for the hwrng code
*/
if (udev->product && udev->serial) {
dev->name = kasprintf(GFP_KERNEL, "%s-%s", udev->product,
udev->serial);
if (dev->name == NULL)
goto out;
}
dev->in_ep = in_ep;
if (le16_to_cpu(udev->descriptor.idVendor) != ALEA_VENDOR_ID)
dev->reads_started = true;
dev->size = size;
dev->present = true;
init_waitqueue_head(&dev->wait_q);
mutex_init(&dev->lock);
mutex_init(&dev->rng_lock);
usb_set_intfdata(interface, dev);
result = usb_register_dev(interface, &chaoskey_class);
if (result) {
usb_err(interface, "Unable to allocate minor number.");
goto out;
}
dev->hwrng.name = dev->name ? dev->name : chaoskey_driver.name;
dev->hwrng.read = chaoskey_rng_read;
dev->hwrng_registered = (hwrng_register(&dev->hwrng) == 0);
if (!dev->hwrng_registered)
usb_err(interface, "Unable to register with hwrng");
usb_enable_autosuspend(udev);
usb_dbg(interface, "chaoskey probe success, size %d", dev->size);
return 0;
out:
usb_set_intfdata(interface, NULL);
chaoskey_free(dev);
return result;
}
static void chaoskey_disconnect(struct usb_interface *interface)
{
struct chaoskey *dev;
usb_dbg(interface, "disconnect");
dev = usb_get_intfdata(interface);
if (!dev) {
usb_dbg(interface, "disconnect failed - no dev");
return;
}
if (dev->hwrng_registered)
hwrng_unregister(&dev->hwrng);
usb_deregister_dev(interface, &chaoskey_class);
usb_set_intfdata(interface, NULL);
mutex_lock(&dev->lock);
dev->present = false;
usb_poison_urb(dev->urb);
if (!dev->open) {
mutex_unlock(&dev->lock);
chaoskey_free(dev);
} else
mutex_unlock(&dev->lock);
usb_dbg(interface, "disconnect done");
}
static int chaoskey_open(struct inode *inode, struct file *file)
{
struct chaoskey *dev;
struct usb_interface *interface;
/* get the interface from minor number and driver information */
interface = usb_find_interface(&chaoskey_driver, iminor(inode));
if (!interface)
return -ENODEV;
usb_dbg(interface, "open");
dev = usb_get_intfdata(interface);
if (!dev) {
usb_dbg(interface, "open (dev)");
return -ENODEV;
}
file->private_data = dev;
mutex_lock(&dev->lock);
++dev->open;
mutex_unlock(&dev->lock);
usb_dbg(interface, "open success");
return 0;
}
static int chaoskey_release(struct inode *inode, struct file *file)
{
struct chaoskey *dev = file->private_data;
struct usb_interface *interface;
if (dev == NULL)
return -ENODEV;
interface = dev->interface;
usb_dbg(interface, "release");
mutex_lock(&dev->lock);
usb_dbg(interface, "open count at release is %d", dev->open);
if (dev->open <= 0) {
usb_dbg(interface, "invalid open count (%d)", dev->open);
mutex_unlock(&dev->lock);
return -ENODEV;
}
--dev->open;
if (!dev->present) {
if (dev->open == 0) {
mutex_unlock(&dev->lock);
chaoskey_free(dev);
} else
mutex_unlock(&dev->lock);
} else
mutex_unlock(&dev->lock);
usb_dbg(interface, "release success");
return 0;
}
static void chaos_read_callback(struct urb *urb)
{
struct chaoskey *dev = urb->context;
int status = urb->status;
usb_dbg(dev->interface, "callback status (%d)", status);
if (status == 0)
dev->valid = urb->actual_length;
else
dev->valid = 0;
dev->used = 0;
/* must be seen first before validity is announced */
smp_wmb();
dev->reading = false;
wake_up(&dev->wait_q);
}
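/*
 * Ordering note: the smp_wmb() above publishes the new ->valid and ->used
 * values before ->reading is cleared; waiters in _chaoskey_fill() test
 * !dev->reading under wait_event_interruptible_timeout(), whose wait/wake
 * machinery supplies the pairing ordering on the reader side.
 */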
/* Fill the buffer. Called with dev->lock held
*/
static int _chaoskey_fill(struct chaoskey *dev)
{
DEFINE_WAIT(wait);
int result;
bool started;
usb_dbg(dev->interface, "fill");
/* Return immediately if someone called before the buffer was
* empty */
if (dev->valid != dev->used) {
usb_dbg(dev->interface, "not empty yet (valid %d used %d)",
dev->valid, dev->used);
return 0;
}
/* Bail if the device has been removed */
if (!dev->present) {
usb_dbg(dev->interface, "device not present");
return -ENODEV;
}
/* Make sure the device is awake */
result = usb_autopm_get_interface(dev->interface);
if (result) {
usb_dbg(dev->interface, "wakeup failed (result %d)", result);
return result;
}
dev->reading = true;
result = usb_submit_urb(dev->urb, GFP_KERNEL);
if (result < 0) {
result = usb_translate_errors(result);
dev->reading = false;
goto out;
}
/* The first read on the Alea takes a little under 2 seconds.
* Reads after the first read take only a few microseconds
* though. Presumably the entropy-generating circuit needs
* time to ramp up. So, we wait longer on the first read.
*/
started = dev->reads_started;
dev->reads_started = true;
result = wait_event_interruptible_timeout(
dev->wait_q,
!dev->reading,
(started ? NAK_TIMEOUT : ALEA_FIRST_TIMEOUT) );
if (result < 0) {
usb_kill_urb(dev->urb);
goto out;
}
if (result == 0) {
result = -ETIMEDOUT;
usb_kill_urb(dev->urb);
} else {
result = dev->valid;
}
out:
/* Let the device go back to sleep eventually */
usb_autopm_put_interface(dev->interface);
usb_dbg(dev->interface, "read %d bytes", dev->valid);
return result;
}
static ssize_t chaoskey_read(struct file *file,
char __user *buffer,
size_t count,
loff_t *ppos)
{
struct chaoskey *dev;
ssize_t read_count = 0;
int this_time;
int result = 0;
unsigned long remain;
dev = file->private_data;
if (dev == NULL || !dev->present)
return -ENODEV;
usb_dbg(dev->interface, "read %zu", count);
while (count > 0) {
/* Grab the rng_lock briefly to ensure that the hwrng interface
* gets priority over other user access
*/
result = mutex_lock_interruptible(&dev->rng_lock);
if (result)
goto bail;
mutex_unlock(&dev->rng_lock);
result = mutex_lock_interruptible(&dev->lock);
if (result)
goto bail;
if (dev->valid == dev->used) {
result = _chaoskey_fill(dev);
if (result < 0) {
mutex_unlock(&dev->lock);
goto bail;
}
}
this_time = dev->valid - dev->used;
if (this_time > count)
this_time = count;
remain = copy_to_user(buffer, dev->buf + dev->used, this_time);
if (remain) {
result = -EFAULT;
/* Consume the bytes that were copied so we don't leak
* data to user space
*/
dev->used += this_time - remain;
mutex_unlock(&dev->lock);
goto bail;
}
count -= this_time;
read_count += this_time;
buffer += this_time;
dev->used += this_time;
mutex_unlock(&dev->lock);
}
bail:
if (read_count) {
usb_dbg(dev->interface, "read %zu bytes", read_count);
return read_count;
}
usb_dbg(dev->interface, "empty read, result %d", result);
if (result == -ETIMEDOUT)
result = -EAGAIN;
return result;
}
static int chaoskey_rng_read(struct hwrng *rng, void *data,
size_t max, bool wait)
{
struct chaoskey *dev = container_of(rng, struct chaoskey, hwrng);
int this_time;
usb_dbg(dev->interface, "rng_read max %zu wait %d", max, wait);
if (!dev->present) {
usb_dbg(dev->interface, "device not present");
return 0;
}
/* Hold the rng_lock until we acquire the device lock so that
* this operation gets priority over other user access to the
* device
*/
mutex_lock(&dev->rng_lock);
mutex_lock(&dev->lock);
mutex_unlock(&dev->rng_lock);
/* Try to fill the buffer if empty. It doesn't actually matter
* if _chaoskey_fill works; we'll just return zero bytes as
* the buffer will still be empty
*/
if (dev->valid == dev->used)
(void) _chaoskey_fill(dev);
this_time = dev->valid - dev->used;
if (this_time > max)
this_time = max;
memcpy(data, dev->buf + dev->used, this_time);
dev->used += this_time;
mutex_unlock(&dev->lock);
usb_dbg(dev->interface, "rng_read this_time %d\n", this_time);
return this_time;
}
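/*
 * Locking pattern worth noting: both read paths briefly serialize on
 * rng_lock before taking dev->lock, but only the hwrng path holds rng_lock
 * across the dev->lock acquisition. A userspace reader looping in
 * chaoskey_read() therefore cannot starve the kernel entropy path: as soon
 * as chaoskey_rng_read() grabs rng_lock, the reader's next iteration blocks
 * on rng_lock until the hwrng request holds the device lock.
 */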
#ifdef CONFIG_PM
static int chaoskey_suspend(struct usb_interface *interface,
pm_message_t message)
{
usb_dbg(interface, "suspend");
return 0;
}
static int chaoskey_resume(struct usb_interface *interface)
{
struct chaoskey *dev;
struct usb_device *udev = interface_to_usbdev(interface);
usb_dbg(interface, "resume");
dev = usb_get_intfdata(interface);
/*
 * We may have lost power, in which case a device that needs a long
 * time for its first request (the Alea) needs the extended timeout
 * again.
 */
if (le16_to_cpu(udev->descriptor.idVendor) == ALEA_VENDOR_ID)
dev->reads_started = false;
return 0;
}
#else
#define chaoskey_suspend NULL
#define chaoskey_resume NULL
#endif
/* file operation pointers */
static const struct file_operations chaoskey_fops = {
.owner = THIS_MODULE,
.read = chaoskey_read,
.open = chaoskey_open,
.release = chaoskey_release,
.llseek = default_llseek,
};
/* class driver information */
static struct usb_class_driver chaoskey_class = {
.name = "chaoskey%d",
.fops = &chaoskey_fops,
.minor_base = USB_CHAOSKEY_MINOR_BASE,
};
/* usb specific object needed to register this driver with the usb subsystem */
static struct usb_driver chaoskey_driver = {
.name = DRIVER_SHORT,
.probe = chaoskey_probe,
.disconnect = chaoskey_disconnect,
.suspend = chaoskey_suspend,
.resume = chaoskey_resume,
.reset_resume = chaoskey_resume,
.id_table = chaoskey_table,
.supports_autosuspend = 1,
};
module_usb_driver(chaoskey_driver);
| linux-master | drivers/usb/misc/chaoskey.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for onboard USB hubs
*
* Copyright (c) 2022, Google LLC
*/
#include <linux/device.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/sysfs.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/onboard_hub.h>
#include <linux/workqueue.h>
#include "onboard_usb_hub.h"
/*
* Use generic names, as the actual names might differ between hubs. If a new
* hub requires more than the currently supported supplies, add a new one here.
*/
static const char * const supply_names[] = {
"vdd",
"vdd2",
};
#define MAX_SUPPLIES ARRAY_SIZE(supply_names)
static void onboard_hub_attach_usb_driver(struct work_struct *work);
static struct usb_device_driver onboard_hub_usbdev_driver;
static DECLARE_WORK(attach_usb_driver_work, onboard_hub_attach_usb_driver);
/************************** Platform driver **************************/
struct usbdev_node {
struct usb_device *udev;
struct list_head list;
};
struct onboard_hub {
struct regulator_bulk_data supplies[MAX_SUPPLIES];
struct device *dev;
const struct onboard_hub_pdata *pdata;
struct gpio_desc *reset_gpio;
bool always_powered_in_suspend;
bool is_powered_on;
bool going_away;
struct list_head udev_list;
struct mutex lock;
};
static int onboard_hub_power_on(struct onboard_hub *hub)
{
int err;
err = regulator_bulk_enable(hub->pdata->num_supplies, hub->supplies);
if (err) {
dev_err(hub->dev, "failed to enable supplies: %d\n", err);
return err;
}
fsleep(hub->pdata->reset_us);
gpiod_set_value_cansleep(hub->reset_gpio, 0);
hub->is_powered_on = true;
return 0;
}
static int onboard_hub_power_off(struct onboard_hub *hub)
{
int err;
gpiod_set_value_cansleep(hub->reset_gpio, 1);
err = regulator_bulk_disable(hub->pdata->num_supplies, hub->supplies);
if (err) {
dev_err(hub->dev, "failed to disable supplies: %d\n", err);
return err;
}
hub->is_powered_on = false;
return 0;
}
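/*
 * Reset polarity note: the reset GPIO is requested with GPIOD_OUT_HIGH in
 * probe, i.e. the hub starts out held in reset. onboard_hub_power_on()
 * releases reset only after the supplies are enabled and the per-chip
 * delay (pdata->reset_us) has elapsed; onboard_hub_power_off() asserts
 * reset again before cutting the supplies.
 */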
static int __maybe_unused onboard_hub_suspend(struct device *dev)
{
struct onboard_hub *hub = dev_get_drvdata(dev);
struct usbdev_node *node;
bool power_off = true;
if (hub->always_powered_in_suspend)
return 0;
mutex_lock(&hub->lock);
list_for_each_entry(node, &hub->udev_list, list) {
if (!device_may_wakeup(node->udev->bus->controller))
continue;
if (usb_wakeup_enabled_descendants(node->udev)) {
power_off = false;
break;
}
}
mutex_unlock(&hub->lock);
if (!power_off)
return 0;
return onboard_hub_power_off(hub);
}
static int __maybe_unused onboard_hub_resume(struct device *dev)
{
struct onboard_hub *hub = dev_get_drvdata(dev);
if (hub->is_powered_on)
return 0;
return onboard_hub_power_on(hub);
}
static inline void get_udev_link_name(const struct usb_device *udev, char *buf, size_t size)
{
snprintf(buf, size, "usb_dev.%s", dev_name(&udev->dev));
}
static int onboard_hub_add_usbdev(struct onboard_hub *hub, struct usb_device *udev)
{
struct usbdev_node *node;
char link_name[64];
int err;
mutex_lock(&hub->lock);
if (hub->going_away) {
err = -EINVAL;
goto error;
}
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node) {
err = -ENOMEM;
goto error;
}
node->udev = udev;
list_add(&node->list, &hub->udev_list);
mutex_unlock(&hub->lock);
get_udev_link_name(udev, link_name, sizeof(link_name));
WARN_ON(sysfs_create_link(&hub->dev->kobj, &udev->dev.kobj, link_name));
return 0;
error:
mutex_unlock(&hub->lock);
return err;
}
static void onboard_hub_remove_usbdev(struct onboard_hub *hub, const struct usb_device *udev)
{
struct usbdev_node *node;
char link_name[64];
get_udev_link_name(udev, link_name, sizeof(link_name));
sysfs_remove_link(&hub->dev->kobj, link_name);
mutex_lock(&hub->lock);
list_for_each_entry(node, &hub->udev_list, list) {
if (node->udev == udev) {
list_del(&node->list);
kfree(node);
break;
}
}
mutex_unlock(&hub->lock);
}
static ssize_t always_powered_in_suspend_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
const struct onboard_hub *hub = dev_get_drvdata(dev);
return sysfs_emit(buf, "%d\n", hub->always_powered_in_suspend);
}
static ssize_t always_powered_in_suspend_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct onboard_hub *hub = dev_get_drvdata(dev);
bool val;
int ret;
ret = kstrtobool(buf, &val);
if (ret < 0)
return ret;
hub->always_powered_in_suspend = val;
return count;
}
static DEVICE_ATTR_RW(always_powered_in_suspend);
static struct attribute *onboard_hub_attrs[] = {
&dev_attr_always_powered_in_suspend.attr,
NULL,
};
ATTRIBUTE_GROUPS(onboard_hub);
static void onboard_hub_attach_usb_driver(struct work_struct *work)
{
int err;
err = driver_attach(&onboard_hub_usbdev_driver.drvwrap.driver);
if (err)
pr_err("Failed to attach USB driver: %d\n", err);
}
static int onboard_hub_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id;
struct device *dev = &pdev->dev;
struct onboard_hub *hub;
unsigned int i;
int err;
hub = devm_kzalloc(dev, sizeof(*hub), GFP_KERNEL);
if (!hub)
return -ENOMEM;
of_id = of_match_device(onboard_hub_match, &pdev->dev);
if (!of_id)
return -ENODEV;
hub->pdata = of_id->data;
if (!hub->pdata)
return -EINVAL;
if (hub->pdata->num_supplies > MAX_SUPPLIES)
return dev_err_probe(dev, -EINVAL, "max %zu supplies supported!\n",
MAX_SUPPLIES);
for (i = 0; i < hub->pdata->num_supplies; i++)
hub->supplies[i].supply = supply_names[i];
err = devm_regulator_bulk_get(dev, hub->pdata->num_supplies, hub->supplies);
if (err) {
dev_err(dev, "Failed to get regulator supplies: %d\n", err);
return err;
}
hub->reset_gpio = devm_gpiod_get_optional(dev, "reset",
GPIOD_OUT_HIGH);
if (IS_ERR(hub->reset_gpio))
return dev_err_probe(dev, PTR_ERR(hub->reset_gpio), "failed to get reset GPIO\n");
hub->dev = dev;
mutex_init(&hub->lock);
INIT_LIST_HEAD(&hub->udev_list);
dev_set_drvdata(dev, hub);
err = onboard_hub_power_on(hub);
if (err)
return err;
/*
* The USB driver might have been detached from the USB devices by
* onboard_hub_remove() (e.g. through an 'unbind' by userspace),
* make sure to re-attach it if needed.
*
* This needs to be done deferred to avoid self-deadlocks on systems
* with nested onboard hubs.
*/
schedule_work(&attach_usb_driver_work);
return 0;
}
static void onboard_hub_remove(struct platform_device *pdev)
{
struct onboard_hub *hub = dev_get_drvdata(&pdev->dev);
struct usbdev_node *node;
struct usb_device *udev;
hub->going_away = true;
mutex_lock(&hub->lock);
/* unbind the USB devices to avoid dangling references to this device */
while (!list_empty(&hub->udev_list)) {
node = list_first_entry(&hub->udev_list, struct usbdev_node, list);
udev = node->udev;
/*
* Unbinding the driver will call onboard_hub_remove_usbdev(),
* which acquires hub->lock. We must release the lock first.
*/
get_device(&udev->dev);
mutex_unlock(&hub->lock);
device_release_driver(&udev->dev);
put_device(&udev->dev);
mutex_lock(&hub->lock);
}
mutex_unlock(&hub->lock);
onboard_hub_power_off(hub);
}
MODULE_DEVICE_TABLE(of, onboard_hub_match);
static const struct dev_pm_ops __maybe_unused onboard_hub_pm_ops = {
SET_LATE_SYSTEM_SLEEP_PM_OPS(onboard_hub_suspend, onboard_hub_resume)
};
static struct platform_driver onboard_hub_driver = {
.probe = onboard_hub_probe,
.remove_new = onboard_hub_remove,
.driver = {
.name = "onboard-usb-hub",
.of_match_table = onboard_hub_match,
.pm = pm_ptr(&onboard_hub_pm_ops),
.dev_groups = onboard_hub_groups,
},
};
/************************** USB driver **************************/
#define VENDOR_ID_CYPRESS 0x04b4
#define VENDOR_ID_GENESYS 0x05e3
#define VENDOR_ID_MICROCHIP 0x0424
#define VENDOR_ID_REALTEK 0x0bda
#define VENDOR_ID_TI 0x0451
#define VENDOR_ID_VIA 0x2109
/*
* Returns the onboard_hub platform device that is associated with the USB
* device passed as parameter.
*/
static struct onboard_hub *_find_onboard_hub(struct device *dev)
{
struct platform_device *pdev;
struct device_node *np;
struct onboard_hub *hub;
pdev = of_find_device_by_node(dev->of_node);
if (!pdev) {
np = of_parse_phandle(dev->of_node, "peer-hub", 0);
if (!np) {
dev_err(dev, "failed to find device node for peer hub\n");
return ERR_PTR(-EINVAL);
}
pdev = of_find_device_by_node(np);
of_node_put(np);
if (!pdev)
return ERR_PTR(-ENODEV);
}
hub = dev_get_drvdata(&pdev->dev);
put_device(&pdev->dev);
/*
* The presence of drvdata ('hub') indicates that the platform driver
* finished probing. This handles the case where (conceivably) we could
* be running at the exact same time as the platform driver's probe. If
* we detect the race we request probe deferral and we'll come back and
* try again.
*/
if (!hub)
return ERR_PTR(-EPROBE_DEFER);
return hub;
}
static int onboard_hub_usbdev_probe(struct usb_device *udev)
{
struct device *dev = &udev->dev;
struct onboard_hub *hub;
int err;
/* ignore supported hubs without device tree node */
if (!dev->of_node)
return -ENODEV;
hub = _find_onboard_hub(dev);
if (IS_ERR(hub))
return PTR_ERR(hub);
dev_set_drvdata(dev, hub);
err = onboard_hub_add_usbdev(hub, udev);
if (err)
return err;
return 0;
}
static void onboard_hub_usbdev_disconnect(struct usb_device *udev)
{
struct onboard_hub *hub = dev_get_drvdata(&udev->dev);
onboard_hub_remove_usbdev(hub, udev);
}
static const struct usb_device_id onboard_hub_id_table[] = {
{ USB_DEVICE(VENDOR_ID_CYPRESS, 0x6504) }, /* CYUSB33{0,1,2}x/CYUSB230x 3.0 */
{ USB_DEVICE(VENDOR_ID_CYPRESS, 0x6506) }, /* CYUSB33{0,1,2}x/CYUSB230x 2.0 */
{ USB_DEVICE(VENDOR_ID_GENESYS, 0x0608) }, /* Genesys Logic GL850G USB 2.0 */
{ USB_DEVICE(VENDOR_ID_GENESYS, 0x0610) }, /* Genesys Logic GL852G USB 2.0 */
{ USB_DEVICE(VENDOR_ID_GENESYS, 0x0620) }, /* Genesys Logic GL3523 USB 3.1 */
{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2514) }, /* USB2514B USB 2.0 */
{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2517) }, /* USB2517 USB 2.0 */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x0411) }, /* RTS5411 USB 3.1 */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x5411) }, /* RTS5411 USB 2.1 */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x0414) }, /* RTS5414 USB 3.2 */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x5414) }, /* RTS5414 USB 2.1 */
{ USB_DEVICE(VENDOR_ID_TI, 0x8140) }, /* TI USB8041 3.0 */
{ USB_DEVICE(VENDOR_ID_TI, 0x8142) }, /* TI USB8041 2.0 */
{ USB_DEVICE(VENDOR_ID_VIA, 0x0817) }, /* VIA VL817 3.1 */
{ USB_DEVICE(VENDOR_ID_VIA, 0x2817) }, /* VIA VL817 2.0 */
{}
};
MODULE_DEVICE_TABLE(usb, onboard_hub_id_table);
static struct usb_device_driver onboard_hub_usbdev_driver = {
.name = "onboard-usb-hub",
.probe = onboard_hub_usbdev_probe,
.disconnect = onboard_hub_usbdev_disconnect,
.generic_subclass = 1,
.supports_autosuspend = 1,
.id_table = onboard_hub_id_table,
};
static int __init onboard_hub_init(void)
{
int ret;
ret = usb_register_device_driver(&onboard_hub_usbdev_driver, THIS_MODULE);
if (ret)
return ret;
ret = platform_driver_register(&onboard_hub_driver);
if (ret)
usb_deregister_device_driver(&onboard_hub_usbdev_driver);
return ret;
}
module_init(onboard_hub_init);
static void __exit onboard_hub_exit(void)
{
usb_deregister_device_driver(&onboard_hub_usbdev_driver);
platform_driver_unregister(&onboard_hub_driver);
cancel_work_sync(&attach_usb_driver_work);
}
module_exit(onboard_hub_exit);
MODULE_AUTHOR("Matthias Kaehlcke <[email protected]>");
MODULE_DESCRIPTION("Driver for discrete onboard USB hubs");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/misc/onboard_usb_hub.c |
// SPDX-License-Identifier: GPL-2.0
/*****************************************************************************
* USBLCD Kernel Driver *
* Version 1.05 *
* (C) 2005 Georges Toth <[email protected]> *
* *
* This file is licensed under the GPL. See COPYING in the package. *
* Based on usb-skeleton.c 2.0 by Greg Kroah-Hartman ([email protected]) *
* *
* *
* 28.02.05 Complete rewrite of the original usblcd.c driver, *
* based on usb_skeleton.c. *
* This new driver allows more than one USB-LCD to be connected *
* and controlled, at once *
*****************************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#define DRIVER_VERSION "USBLCD Driver Version 1.05"
#define USBLCD_MINOR 144
#define IOCTL_GET_HARD_VERSION 1
#define IOCTL_GET_DRV_VERSION 2
static const struct usb_device_id id_table[] = {
{ .idVendor = 0x10D2, .match_flags = USB_DEVICE_ID_MATCH_VENDOR, },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
struct usb_lcd {
struct usb_device *udev; /* init: probe_lcd */
struct usb_interface *interface; /* the interface for
this device */
unsigned char *bulk_in_buffer; /* the buffer to receive
data */
size_t bulk_in_size; /* the size of the
receive buffer */
__u8 bulk_in_endpointAddr; /* the address of the
bulk in endpoint */
__u8 bulk_out_endpointAddr; /* the address of the
bulk out endpoint */
struct kref kref;
struct semaphore limit_sem; /* to stop writes at
full throttle from
using up all RAM */
struct usb_anchor submitted; /* URBs to wait for
before suspend */
struct rw_semaphore io_rwsem;
unsigned long disconnected:1;
};
#define to_lcd_dev(d) container_of(d, struct usb_lcd, kref)
#define USB_LCD_CONCURRENT_WRITES 5
static struct usb_driver lcd_driver;
static void lcd_delete(struct kref *kref)
{
struct usb_lcd *dev = to_lcd_dev(kref);
usb_put_dev(dev->udev);
kfree(dev->bulk_in_buffer);
kfree(dev);
}
static int lcd_open(struct inode *inode, struct file *file)
{
struct usb_lcd *dev;
struct usb_interface *interface;
int subminor, r;
subminor = iminor(inode);
interface = usb_find_interface(&lcd_driver, subminor);
if (!interface) {
pr_err("USBLCD: %s - error, can't find device for minor %d\n",
__func__, subminor);
return -ENODEV;
}
dev = usb_get_intfdata(interface);
/* increment our usage count for the device */
kref_get(&dev->kref);
/* grab a power reference */
r = usb_autopm_get_interface(interface);
if (r < 0) {
kref_put(&dev->kref, lcd_delete);
return r;
}
/* save our object in the file's private structure */
file->private_data = dev;
return 0;
}
static int lcd_release(struct inode *inode, struct file *file)
{
struct usb_lcd *dev;
dev = file->private_data;
if (dev == NULL)
return -ENODEV;
/* decrement the count on our device */
usb_autopm_put_interface(dev->interface);
kref_put(&dev->kref, lcd_delete);
return 0;
}
static ssize_t lcd_read(struct file *file, char __user * buffer,
size_t count, loff_t *ppos)
{
struct usb_lcd *dev;
int retval = 0;
int bytes_read;
dev = file->private_data;
down_read(&dev->io_rwsem);
if (dev->disconnected) {
retval = -ENODEV;
goto out_up_io;
}
/* do a blocking bulk read to get data from the device */
retval = usb_bulk_msg(dev->udev,
usb_rcvbulkpipe(dev->udev,
dev->bulk_in_endpointAddr),
dev->bulk_in_buffer,
min(dev->bulk_in_size, count),
&bytes_read, 10000);
/* if the read was successful, copy the data to userspace */
if (!retval) {
if (copy_to_user(buffer, dev->bulk_in_buffer, bytes_read))
retval = -EFAULT;
else
retval = bytes_read;
}
out_up_io:
up_read(&dev->io_rwsem);
return retval;
}
static long lcd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct usb_lcd *dev;
u16 bcdDevice;
char buf[30];
dev = file->private_data;
if (dev == NULL)
return -ENODEV;
switch (cmd) {
case IOCTL_GET_HARD_VERSION:
bcdDevice = le16_to_cpu((dev->udev)->descriptor.bcdDevice);
sprintf(buf, "%1d%1d.%1d%1d",
(bcdDevice & 0xF000)>>12,
(bcdDevice & 0xF00)>>8,
(bcdDevice & 0xF0)>>4,
(bcdDevice & 0xF));
if (copy_to_user((void __user *)arg, buf, strlen(buf)) != 0)
return -EFAULT;
break;
case IOCTL_GET_DRV_VERSION:
strscpy(buf, DRIVER_VERSION, sizeof(buf));
if (copy_to_user((void __user *)arg, buf, strlen(buf)) != 0)
return -EFAULT;
break;
default:
return -ENOTTY;
}
return 0;
}
static void lcd_write_bulk_callback(struct urb *urb)
{
struct usb_lcd *dev;
int status = urb->status;
dev = urb->context;
/* sync/async unlink faults aren't errors */
if (status &&
!(status == -ENOENT ||
status == -ECONNRESET ||
status == -ESHUTDOWN)) {
dev_dbg(&dev->interface->dev,
"nonzero write bulk status received: %d\n", status);
}
/* free up our allocated buffer */
usb_free_coherent(urb->dev, urb->transfer_buffer_length,
urb->transfer_buffer, urb->transfer_dma);
up(&dev->limit_sem);
}
static ssize_t lcd_write(struct file *file, const char __user * user_buffer,
size_t count, loff_t *ppos)
{
struct usb_lcd *dev;
int retval = 0, r;
struct urb *urb = NULL;
char *buf = NULL;
dev = file->private_data;
/* verify that we actually have some data to write */
if (count == 0)
goto exit;
r = down_interruptible(&dev->limit_sem);
if (r < 0)
return -EINTR;
down_read(&dev->io_rwsem);
if (dev->disconnected) {
retval = -ENODEV;
goto err_up_io;
}
/* create a urb, and a buffer for it, and copy the data to the urb */
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
retval = -ENOMEM;
goto err_up_io;
}
buf = usb_alloc_coherent(dev->udev, count, GFP_KERNEL,
&urb->transfer_dma);
if (!buf) {
retval = -ENOMEM;
goto error;
}
if (copy_from_user(buf, user_buffer, count)) {
retval = -EFAULT;
goto error;
}
/* initialize the urb properly */
usb_fill_bulk_urb(urb, dev->udev,
usb_sndbulkpipe(dev->udev,
dev->bulk_out_endpointAddr),
buf, count, lcd_write_bulk_callback, dev);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(urb, &dev->submitted);
/* send the data out the bulk port */
retval = usb_submit_urb(urb, GFP_KERNEL);
if (retval) {
dev_err(&dev->udev->dev,
"%s - failed submitting write urb, error %d\n",
__func__, retval);
goto error_unanchor;
}
/* release our reference to this urb,
the USB core will eventually free it entirely */
usb_free_urb(urb);
up_read(&dev->io_rwsem);
exit:
return count;
error_unanchor:
usb_unanchor_urb(urb);
error:
usb_free_coherent(dev->udev, count, buf, urb->transfer_dma);
usb_free_urb(urb);
err_up_io:
up_read(&dev->io_rwsem);
up(&dev->limit_sem);
return retval;
}
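/*
 * Write path summary: limit_sem caps the number of in-flight write URBs at
 * USB_LCD_CONCURRENT_WRITES; the completion handler releases one slot via
 * up(). The anchor lets suspend and disconnect wait for, or kill, whatever
 * is still queued, and io_rwsem (held for reading here) lets disconnect
 * take it for writing to fence off new I/O before marking the device gone.
 */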
static const struct file_operations lcd_fops = {
.owner = THIS_MODULE,
.read = lcd_read,
.write = lcd_write,
.open = lcd_open,
.unlocked_ioctl = lcd_ioctl,
.release = lcd_release,
.llseek = noop_llseek,
};
/*
* usb class driver info in order to get a minor number from the usb core,
* and to have the device registered with the driver core
*/
static struct usb_class_driver lcd_class = {
.name = "lcd%d",
.fops = &lcd_fops,
.minor_base = USBLCD_MINOR,
};
static int lcd_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_lcd *dev = NULL;
struct usb_endpoint_descriptor *bulk_in, *bulk_out;
int i;
int retval;
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
kref_init(&dev->kref);
sema_init(&dev->limit_sem, USB_LCD_CONCURRENT_WRITES);
init_rwsem(&dev->io_rwsem);
init_usb_anchor(&dev->submitted);
dev->udev = usb_get_dev(interface_to_usbdev(interface));
dev->interface = interface;
if (le16_to_cpu(dev->udev->descriptor.idProduct) != 0x0001) {
dev_warn(&interface->dev, "USBLCD model not supported.\n");
retval = -ENODEV;
goto error;
}
/* set up the endpoint information */
/* use only the first bulk-in and bulk-out endpoints */
retval = usb_find_common_endpoints(interface->cur_altsetting,
&bulk_in, &bulk_out, NULL, NULL);
if (retval) {
dev_err(&interface->dev,
"Could not find both bulk-in and bulk-out endpoints\n");
goto error;
}
dev->bulk_in_size = usb_endpoint_maxp(bulk_in);
dev->bulk_in_endpointAddr = bulk_in->bEndpointAddress;
dev->bulk_in_buffer = kmalloc(dev->bulk_in_size, GFP_KERNEL);
if (!dev->bulk_in_buffer) {
retval = -ENOMEM;
goto error;
}
dev->bulk_out_endpointAddr = bulk_out->bEndpointAddress;
/* save our data pointer in this interface device */
usb_set_intfdata(interface, dev);
/* we can register the device now, as it is ready */
retval = usb_register_dev(interface, &lcd_class);
if (retval) {
/* something prevented us from registering this driver */
dev_err(&interface->dev,
"Not able to get a minor for this device.\n");
goto error;
}
i = le16_to_cpu(dev->udev->descriptor.bcdDevice);
dev_info(&interface->dev, "USBLCD Version %1d%1d.%1d%1d found "
"at address %d\n", (i & 0xF000)>>12, (i & 0xF00)>>8,
(i & 0xF0)>>4, (i & 0xF), dev->udev->devnum);
/* let the user know what node this device is now attached to */
dev_info(&interface->dev, "USB LCD device now attached to USBLCD-%d\n",
interface->minor);
return 0;
error:
kref_put(&dev->kref, lcd_delete);
return retval;
}
static void lcd_draw_down(struct usb_lcd *dev)
{
int time;
time = usb_wait_anchor_empty_timeout(&dev->submitted, 1000);
if (!time)
usb_kill_anchored_urbs(&dev->submitted);
}
static int lcd_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usb_lcd *dev = usb_get_intfdata(intf);
if (!dev)
return 0;
lcd_draw_down(dev);
return 0;
}
static int lcd_resume(struct usb_interface *intf)
{
return 0;
}
static void lcd_disconnect(struct usb_interface *interface)
{
struct usb_lcd *dev = usb_get_intfdata(interface);
int minor = interface->minor;
/* give back our minor */
usb_deregister_dev(interface, &lcd_class);
down_write(&dev->io_rwsem);
dev->disconnected = 1;
up_write(&dev->io_rwsem);
usb_kill_anchored_urbs(&dev->submitted);
/* decrement our usage count */
kref_put(&dev->kref, lcd_delete);
dev_info(&interface->dev, "USB LCD #%d now disconnected\n", minor);
}
static struct usb_driver lcd_driver = {
.name = "usblcd",
.probe = lcd_probe,
.disconnect = lcd_disconnect,
.suspend = lcd_suspend,
.resume = lcd_resume,
.id_table = id_table,
.supports_autosuspend = 1,
};
module_usb_driver(lcd_driver);
MODULE_AUTHOR("Georges Toth <[email protected]>");
MODULE_DESCRIPTION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/misc/usblcd.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* PlayStation 2 Trance Vibrator driver
*
* Copyright (C) 2006 Sam Hocevar <[email protected]>
*/
/* Standard include files */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb.h>
#define DRIVER_AUTHOR "Sam Hocevar, [email protected]"
#define DRIVER_DESC "PlayStation 2 Trance Vibrator driver"
#define TRANCEVIBRATOR_VENDOR_ID 0x0b49 /* ASCII Corporation */
#define TRANCEVIBRATOR_PRODUCT_ID 0x064f /* Trance Vibrator */
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(TRANCEVIBRATOR_VENDOR_ID, TRANCEVIBRATOR_PRODUCT_ID) },
{ },
};
MODULE_DEVICE_TABLE (usb, id_table);
/* Driver-local specific stuff */
struct trancevibrator {
struct usb_device *udev;
unsigned int speed;
};
static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_interface *intf = to_usb_interface(dev);
struct trancevibrator *tv = usb_get_intfdata(intf);
return sprintf(buf, "%d\n", tv->speed);
}
static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_interface *intf = to_usb_interface(dev);
struct trancevibrator *tv = usb_get_intfdata(intf);
int temp, retval, old;
retval = kstrtoint(buf, 10, &temp);
if (retval)
return retval;
if (temp > 255)
temp = 255;
else if (temp < 0)
temp = 0;
old = tv->speed;
tv->speed = temp;
dev_dbg(&tv->udev->dev, "speed = %d\n", tv->speed);
/* Set speed */
retval = usb_control_msg(tv->udev, usb_sndctrlpipe(tv->udev, 0),
0x01, /* vendor request: set speed */
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
tv->speed, /* speed value */
0, NULL, 0, USB_CTRL_SET_TIMEOUT);
if (retval) {
tv->speed = old;
dev_dbg(&tv->udev->dev, "retval = %d\n", retval);
return retval;
}
return count;
}
static DEVICE_ATTR_RW(speed);
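/*
 * Usage sketch (the exact sysfs path is illustrative): the attribute is
 * attached to the bound interface, so something like
 *
 *	echo 255 > /sys/bus/usb/devices/1-2:1.0/speed
 *
 * requests maximum vibration; values outside 0..255 are clamped before
 * being sent in wValue of the vendor request above.
 */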
static struct attribute *tv_attrs[] = {
&dev_attr_speed.attr,
NULL,
};
ATTRIBUTE_GROUPS(tv);
static int tv_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(interface);
struct trancevibrator *dev;
int retval;
dev = kzalloc(sizeof(struct trancevibrator), GFP_KERNEL);
if (!dev) {
retval = -ENOMEM;
goto error;
}
dev->udev = usb_get_dev(udev);
usb_set_intfdata(interface, dev);
return 0;
error:
kfree(dev);
return retval;
}
static void tv_disconnect(struct usb_interface *interface)
{
struct trancevibrator *dev;
dev = usb_get_intfdata (interface);
usb_set_intfdata(interface, NULL);
usb_put_dev(dev->udev);
kfree(dev);
}
/* USB subsystem object */
static struct usb_driver tv_driver = {
.name = "trancevibrator",
.probe = tv_probe,
.disconnect = tv_disconnect,
.id_table = id_table,
.dev_groups = tv_groups,
};
module_usb_driver(tv_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/misc/trancevibrator.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Broadcom */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/gpio/consumer.h>
struct out_pin {
u32 enable_mask;
u32 value_mask;
u32 changed_mask;
u32 clr_changed_mask;
struct gpio_desc *gpiod;
const char *name;
};
struct in_pin {
u32 enable_mask;
u32 value_mask;
struct gpio_desc *gpiod;
const char *name;
struct brcmstb_usb_pinmap_data *pdata;
};
struct brcmstb_usb_pinmap_data {
void __iomem *regs;
int in_count;
struct in_pin *in_pins;
int out_count;
struct out_pin *out_pins;
};
static void pinmap_set(void __iomem *reg, u32 mask)
{
u32 val;
val = readl(reg);
val |= mask;
writel(val, reg);
}
static void pinmap_unset(void __iomem *reg, u32 mask)
{
u32 val;
val = readl(reg);
val &= ~mask;
writel(val, reg);
}
static void sync_in_pin(struct in_pin *pin)
{
u32 val;
val = gpiod_get_value(pin->gpiod);
if (val)
pinmap_set(pin->pdata->regs, pin->value_mask);
else
pinmap_unset(pin->pdata->regs, pin->value_mask);
}
/*
* Interrupt from override register, propagate from override bit
* to GPIO.
*/
static irqreturn_t brcmstb_usb_pinmap_ovr_isr(int irq, void *dev_id)
{
struct brcmstb_usb_pinmap_data *pdata = dev_id;
struct out_pin *pout;
u32 val;
u32 bit;
int x;
pr_debug("%s: reg: 0x%x\n", __func__, readl(pdata->regs));
pout = pdata->out_pins;
for (x = 0; x < pdata->out_count; x++) {
val = readl(pdata->regs);
if (val & pout->changed_mask) {
pinmap_set(pdata->regs, pout->clr_changed_mask);
pinmap_unset(pdata->regs, pout->clr_changed_mask);
bit = val & pout->value_mask;
gpiod_set_value(pout->gpiod, bit ? 1 : 0);
pr_debug("%s: %s bit changed state to %d\n",
__func__, pout->name, bit ? 1 : 0);
}
}
return IRQ_HANDLED;
}
/*
* Interrupt from GPIO, propagate from GPIO to override bit.
*/
static irqreturn_t brcmstb_usb_pinmap_gpio_isr(int irq, void *dev_id)
{
struct in_pin *pin = dev_id;
pr_debug("%s: %s pin changed state\n", __func__, pin->name);
sync_in_pin(pin);
return IRQ_HANDLED;
}
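/*
 * The two handlers above are the two halves of the mapping: the override
 * ISR pushes changes in the register's value bits out to the GPIOs
 * (pulsing clr_changed_mask to ack the per-pin "changed" latch), while the
 * GPIO ISR pulls pin level changes back into the register's value bits.
 */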
static void get_pin_counts(struct device_node *dn, int *in_count,
int *out_count)
{
int in;
int out;
*in_count = 0;
*out_count = 0;
in = of_property_count_strings(dn, "brcm,in-functions");
if (in < 0)
return;
out = of_property_count_strings(dn, "brcm,out-functions");
if (out < 0)
return;
*in_count = in;
*out_count = out;
}
static int parse_pins(struct device *dev, struct device_node *dn,
struct brcmstb_usb_pinmap_data *pdata)
{
struct out_pin *pout;
struct in_pin *pin;
int index;
int res;
int x;
pin = pdata->in_pins;
for (x = 0, index = 0; x < pdata->in_count; x++) {
pin->gpiod = devm_gpiod_get_index(dev, "in", x, GPIOD_IN);
if (IS_ERR(pin->gpiod)) {
dev_err(dev, "Error getting gpio %s\n", pin->name);
return PTR_ERR(pin->gpiod);
}
res = of_property_read_string_index(dn, "brcm,in-functions", x,
&pin->name);
if (res < 0) {
dev_err(dev, "Error getting brcm,in-functions for %s\n",
pin->name);
return res;
}
res = of_property_read_u32_index(dn, "brcm,in-masks", index++,
&pin->enable_mask);
if (res < 0) {
dev_err(dev, "Error getting 1st brcm,in-masks for %s\n",
pin->name);
return res;
}
res = of_property_read_u32_index(dn, "brcm,in-masks", index++,
&pin->value_mask);
if (res < 0) {
dev_err(dev, "Error getting 2nd brcm,in-masks for %s\n",
pin->name);
return res;
}
pin->pdata = pdata;
pin++;
}
pout = pdata->out_pins;
for (x = 0, index = 0; x < pdata->out_count; x++) {
pout->gpiod = devm_gpiod_get_index(dev, "out", x,
GPIOD_OUT_HIGH);
if (IS_ERR(pout->gpiod)) {
dev_err(dev, "Error getting gpio %s\n", pin->name);
return PTR_ERR(pout->gpiod);
}
res = of_property_read_string_index(dn, "brcm,out-functions", x,
&pout->name);
if (res < 0) {
dev_err(dev, "Error getting brcm,out-functions for %s\n",
pout->name);
return res;
}
res = of_property_read_u32_index(dn, "brcm,out-masks", index++,
&pout->enable_mask);
if (res < 0) {
dev_err(dev, "Error getting 1st brcm,out-masks for %s\n",
pout->name);
return res;
}
res = of_property_read_u32_index(dn, "brcm,out-masks", index++,
&pout->value_mask);
if (res < 0) {
dev_err(dev, "Error getting 2nd brcm,out-masks for %s\n",
pout->name);
return res;
}
res = of_property_read_u32_index(dn, "brcm,out-masks", index++,
&pout->changed_mask);
if (res < 0) {
dev_err(dev, "Error getting 3rd brcm,out-masks for %s\n",
pout->name);
return res;
}
res = of_property_read_u32_index(dn, "brcm,out-masks", index++,
&pout->clr_changed_mask);
if (res < 0) {
dev_err(dev, "Error getting 4th out-masks for %s\n",
pout->name);
return res;
}
pout++;
}
return 0;
}
static void sync_all_pins(struct brcmstb_usb_pinmap_data *pdata)
{
struct out_pin *pout;
struct in_pin *pin;
int val;
int x;
/*
* Enable the override, clear any changed condition and
* propagate the state to the GPIO for all out pins.
*/
pout = pdata->out_pins;
for (x = 0; x < pdata->out_count; x++) {
pinmap_set(pdata->regs, pout->enable_mask);
pinmap_set(pdata->regs, pout->clr_changed_mask);
pinmap_unset(pdata->regs, pout->clr_changed_mask);
val = readl(pdata->regs) & pout->value_mask;
gpiod_set_value(pout->gpiod, val ? 1 : 0);
pout++;
}
/* sync and enable all in pins. */
pin = pdata->in_pins;
for (x = 0; x < pdata->in_count; x++) {
sync_in_pin(pin);
pinmap_set(pdata->regs, pin->enable_mask);
pin++;
}
}
static int __init brcmstb_usb_pinmap_probe(struct platform_device *pdev)
{
struct device_node *dn = pdev->dev.of_node;
struct brcmstb_usb_pinmap_data *pdata;
struct in_pin *pin;
struct resource *r;
int out_count;
int in_count;
int err;
int irq;
int x;
get_pin_counts(dn, &in_count, &out_count);
if ((in_count + out_count) == 0)
return -EINVAL;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r)
return -EINVAL;
pdata = devm_kzalloc(&pdev->dev,
sizeof(*pdata) +
(sizeof(struct in_pin) * in_count) +
(sizeof(struct out_pin) * out_count), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
pdata->in_count = in_count;
pdata->out_count = out_count;
pdata->in_pins = (struct in_pin *)(pdata + 1);
pdata->out_pins = (struct out_pin *)(pdata->in_pins + in_count);
pdata->regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
if (!pdata->regs)
return -ENOMEM;
platform_set_drvdata(pdev, pdata);
err = parse_pins(&pdev->dev, dn, pdata);
if (err)
return err;
sync_all_pins(pdata);
if (out_count) {
/* Enable interrupt for out pins */
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
err = devm_request_irq(&pdev->dev, irq,
brcmstb_usb_pinmap_ovr_isr,
IRQF_TRIGGER_RISING,
pdev->name, pdata);
if (err < 0) {
dev_err(&pdev->dev, "Error requesting IRQ\n");
return err;
}
}
for (x = 0, pin = pdata->in_pins; x < pdata->in_count; x++, pin++) {
irq = gpiod_to_irq(pin->gpiod);
if (irq < 0) {
dev_err(&pdev->dev, "Error getting IRQ for %s pin\n",
pin->name);
return irq;
}
err = devm_request_irq(&pdev->dev, irq,
brcmstb_usb_pinmap_gpio_isr,
IRQF_SHARED | IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING,
pdev->name, pin);
if (err < 0) {
dev_err(&pdev->dev, "Error requesting IRQ for %s pin\n",
pin->name);
return err;
}
}
dev_dbg(&pdev->dev, "Driver probe succeeded\n");
dev_dbg(&pdev->dev, "In pin count: %d, out pin count: %d\n",
pdata->in_count, pdata->out_count);
return 0;
}
static const struct of_device_id brcmstb_usb_pinmap_of_match[] = {
{ .compatible = "brcm,usb-pinmap" },
{ },
};
static struct platform_driver brcmstb_usb_pinmap_driver = {
.driver = {
.name = "brcm-usb-pinmap",
.of_match_table = brcmstb_usb_pinmap_of_match,
},
};
static int __init brcmstb_usb_pinmap_init(void)
{
return platform_driver_probe(&brcmstb_usb_pinmap_driver,
brcmstb_usb_pinmap_probe);
}
module_init(brcmstb_usb_pinmap_init);
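/*
 * platform_driver_probe() is used (rather than module_platform_driver())
 * because the probe routine is __init: the driver can only bind to devices
 * present at init time, and deferred probing is deliberately unavailable.
 */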
MODULE_AUTHOR("Al Cooper <[email protected]>");
MODULE_DESCRIPTION("Broadcom USB Pinmap Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/misc/brcmstb-usb-pinmap.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Fast-charge control for Apple "MFi" devices
*
* Copyright (C) 2019 Bastien Nocera <[email protected]>
*/
/* Standard include files */
#include <linux/module.h>
#include <linux/power_supply.h>
#include <linux/slab.h>
#include <linux/usb.h>
MODULE_AUTHOR("Bastien Nocera <[email protected]>");
MODULE_DESCRIPTION("Fast-charge control for Apple \"MFi\" devices");
MODULE_LICENSE("GPL");
#define TRICKLE_CURRENT_MA 0
#define FAST_CURRENT_MA 2500
#define APPLE_VENDOR_ID 0x05ac /* Apple */
/* The product ID is defined as starting with 0x12nn, as per the
* "Choosing an Apple Device USB Configuration" section in
* release R9 (2012) of the "MFi Accessory Hardware Specification"
*
* To distinguish an Apple device, a USB host can check the device
* descriptor of attached USB devices for the following fields:
* ■ Vendor ID: 0x05AC
* ■ Product ID: 0x12nn
*
* Those checks will be done in .match() and .probe().
*/
static const struct usb_device_id mfi_fc_id_table[] = {
{ .idVendor = APPLE_VENDOR_ID,
.match_flags = USB_DEVICE_ID_MATCH_VENDOR },
{},
};
MODULE_DEVICE_TABLE(usb, mfi_fc_id_table);
/* Driver-local specific stuff */
struct mfi_device {
struct usb_device *udev;
struct power_supply *battery;
int charge_type;
};
static int apple_mfi_fc_set_charge_type(struct mfi_device *mfi,
const union power_supply_propval *val)
{
int current_ma;
int retval;
__u8 request_type;
if (mfi->charge_type == val->intval) {
dev_dbg(&mfi->udev->dev, "charge type %d already set\n",
mfi->charge_type);
return 0;
}
switch (val->intval) {
case POWER_SUPPLY_CHARGE_TYPE_TRICKLE:
current_ma = TRICKLE_CURRENT_MA;
break;
case POWER_SUPPLY_CHARGE_TYPE_FAST:
current_ma = FAST_CURRENT_MA;
break;
default:
return -EINVAL;
}
request_type = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
retval = usb_control_msg(mfi->udev, usb_sndctrlpipe(mfi->udev, 0),
0x40, /* Vendor-defined power request */
request_type,
current_ma, /* wValue, current offset */
current_ma, /* wIndex, current offset */
NULL, 0, USB_CTRL_GET_TIMEOUT);
if (retval) {
dev_dbg(&mfi->udev->dev, "retval = %d\n", retval);
return retval;
}
mfi->charge_type = val->intval;
return 0;
}
static int apple_mfi_fc_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
struct mfi_device *mfi = power_supply_get_drvdata(psy);
dev_dbg(&mfi->udev->dev, "prop: %d\n", psp);
switch (psp) {
case POWER_SUPPLY_PROP_CHARGE_TYPE:
val->intval = mfi->charge_type;
break;
case POWER_SUPPLY_PROP_SCOPE:
val->intval = POWER_SUPPLY_SCOPE_DEVICE;
break;
default:
return -ENODATA;
}
return 0;
}
static int apple_mfi_fc_set_property(struct power_supply *psy,
enum power_supply_property psp,
const union power_supply_propval *val)
{
struct mfi_device *mfi = power_supply_get_drvdata(psy);
int ret;
dev_dbg(&mfi->udev->dev, "prop: %d\n", psp);
ret = pm_runtime_get_sync(&mfi->udev->dev);
if (ret < 0) {
pm_runtime_put_noidle(&mfi->udev->dev);
return ret;
}
switch (psp) {
case POWER_SUPPLY_PROP_CHARGE_TYPE:
ret = apple_mfi_fc_set_charge_type(mfi, val);
break;
default:
ret = -EINVAL;
}
pm_runtime_mark_last_busy(&mfi->udev->dev);
pm_runtime_put_autosuspend(&mfi->udev->dev);
return ret;
}
static int apple_mfi_fc_property_is_writeable(struct power_supply *psy,
enum power_supply_property psp)
{
switch (psp) {
case POWER_SUPPLY_PROP_CHARGE_TYPE:
return 1;
default:
return 0;
}
}
static enum power_supply_property apple_mfi_fc_properties[] = {
POWER_SUPPLY_PROP_CHARGE_TYPE,
POWER_SUPPLY_PROP_SCOPE
};
static const struct power_supply_desc apple_mfi_fc_desc = {
.name = "apple_mfi_fastcharge",
.type = POWER_SUPPLY_TYPE_BATTERY,
.properties = apple_mfi_fc_properties,
.num_properties = ARRAY_SIZE(apple_mfi_fc_properties),
.get_property = apple_mfi_fc_get_property,
.set_property = apple_mfi_fc_set_property,
.property_is_writeable = apple_mfi_fc_property_is_writeable
};
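/*
 * Usage sketch (path is illustrative): once the supply is registered,
 * fast charge can be requested through the power-supply class, e.g.
 *
 *	echo Fast > /sys/class/power_supply/apple_mfi_fastcharge/charge_type
 *
 * which reaches apple_mfi_fc_set_charge_type() with
 * POWER_SUPPLY_CHARGE_TYPE_FAST.
 */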
static bool mfi_fc_match(struct usb_device *udev)
{
int idProduct;
idProduct = le16_to_cpu(udev->descriptor.idProduct);
/* See comment above mfi_fc_id_table[] */
return (idProduct >= 0x1200 && idProduct <= 0x12ff);
}
static int mfi_fc_probe(struct usb_device *udev)
{
struct power_supply_config battery_cfg = {};
struct mfi_device *mfi = NULL;
int err;
if (!mfi_fc_match(udev))
return -ENODEV;
mfi = kzalloc(sizeof(struct mfi_device), GFP_KERNEL);
if (!mfi)
return -ENOMEM;
battery_cfg.drv_data = mfi;
mfi->charge_type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
mfi->battery = power_supply_register(&udev->dev,
&apple_mfi_fc_desc,
&battery_cfg);
if (IS_ERR(mfi->battery)) {
dev_err(&udev->dev, "Can't register battery\n");
err = PTR_ERR(mfi->battery);
kfree(mfi);
return err;
}
mfi->udev = usb_get_dev(udev);
dev_set_drvdata(&udev->dev, mfi);
return 0;
}
static void mfi_fc_disconnect(struct usb_device *udev)
{
struct mfi_device *mfi;
mfi = dev_get_drvdata(&udev->dev);
if (mfi->battery)
power_supply_unregister(mfi->battery);
dev_set_drvdata(&udev->dev, NULL);
usb_put_dev(mfi->udev);
kfree(mfi);
}
static struct usb_device_driver mfi_fc_driver = {
.name = "apple-mfi-fastcharge",
.probe = mfi_fc_probe,
.disconnect = mfi_fc_disconnect,
.id_table = mfi_fc_id_table,
.match = mfi_fc_match,
.generic_subclass = 1,
};
static int __init mfi_fc_driver_init(void)
{
return usb_register_device_driver(&mfi_fc_driver, THIS_MODULE);
}
static void __exit mfi_fc_driver_exit(void)
{
usb_deregister_device_driver(&mfi_fc_driver);
}
module_init(mfi_fc_driver_init);
module_exit(mfi_fc_driver_exit);
| linux-master | drivers/usb/misc/apple-mfi-fastcharge.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for loading USB isight firmware
*
* Copyright (C) 2008 Matthew Garrett <[email protected]>
*
* The USB isight cameras in recent Apples are roughly compatible with the USB
* video class specification, and can be driven by uvcvideo. However, they
* need firmware to be loaded beforehand. After firmware loading, the device
* detaches from the USB bus and reattaches with a new device ID. It can then
* be claimed by the uvc driver.
*
* The firmware is non-free and must be extracted by the user. Tools to do this
* are available at http://bersace03.free.fr/ift/
*
* The isight firmware loading was reverse engineered by Johannes Berg
* <[email protected]>, and this driver is based on code by Ronald
* Bultje <[email protected]>
*/
#include <linux/usb.h>
#include <linux/firmware.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
static const struct usb_device_id id_table[] = {
{USB_DEVICE(0x05ac, 0x8300)},
{},
};
MODULE_DEVICE_TABLE(usb, id_table);
static int isight_firmware_load(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *dev = interface_to_usbdev(intf);
int llen, len, req, ret = 0;
const struct firmware *firmware;
unsigned char *buf = kmalloc(50, GFP_KERNEL);
unsigned char data[4];
const u8 *ptr;
if (!buf)
return -ENOMEM;
if (request_firmware(&firmware, "isight.fw", &dev->dev) != 0) {
printk(KERN_ERR "Unable to load isight firmware\n");
ret = -ENODEV;
goto out;
}
ptr = firmware->data;
buf[0] = 0x01;
if (usb_control_msg
(dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, buf, 1,
300) != 1) {
printk(KERN_ERR
"Failed to initialise isight firmware loader\n");
ret = -ENODEV;
goto out;
}
while (ptr+4 <= firmware->data+firmware->size) {
memcpy(data, ptr, 4);
len = (data[0] << 8 | data[1]);
req = (data[2] << 8 | data[3]);
ptr += 4;
if (len == 0x8001)
break; /* success */
else if (len == 0)
continue;
for (; len > 0; req += 50) {
llen = min(len, 50);
len -= llen;
if (ptr+llen > firmware->data+firmware->size) {
printk(KERN_ERR
"Malformed isight firmware");
ret = -ENODEV;
goto out;
}
memcpy(buf, ptr, llen);
ptr += llen;
if (usb_control_msg
(dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, req, 0,
buf, llen, 300) != llen) {
printk(KERN_ERR
"Failed to load isight firmware\n");
ret = -ENODEV;
goto out;
}
}
}
buf[0] = 0x00;
if (usb_control_msg
(dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, buf, 1,
300) != 1) {
printk(KERN_ERR "isight firmware loading completion failed\n");
ret = -ENODEV;
}
out:
kfree(buf);
release_firmware(firmware);
return ret;
}
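/*
 * Firmware image layout, as implied by the parser above: a sequence of
 * records, each opening with a big-endian 16-bit payload length followed
 * by a big-endian 16-bit request value (register address). Payloads are
 * streamed in chunks of at most 50 bytes, advancing the request value by
 * 50 per chunk. Length 0x8001 terminates the image; length 0 is a no-op.
 */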
MODULE_FIRMWARE("isight.fw");
static void isight_firmware_disconnect(struct usb_interface *intf)
{
}
static struct usb_driver isight_firmware_driver = {
.name = "isight_firmware",
.probe = isight_firmware_load,
.disconnect = isight_firmware_disconnect,
.id_table = id_table,
};
module_usb_driver(isight_firmware_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Matthew Garrett <[email protected]>");
| linux-master | drivers/usb/misc/isight_firmware.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Generic USB driver for report based interrupt in/out devices
* like LD Didactic's USB devices. LD Didactic's USB devices are
 * HID devices which do not use HID report definitions (they use
 * raw interrupt in and out reports only for communication).
*
* This driver uses a ring buffer for time critical reading of
* interrupt in reports and provides read and write methods for
* raw interrupt reports (similar to the Windows HID driver).
* Devices based on the book USB COMPLETE by Jan Axelson may need
 * such compatibility with the Windows HID driver.
*
* Copyright (C) 2005 Michael Hund <[email protected]>
*
* Derived from Lego USB Tower driver
* Copyright (C) 2003 David Glance <[email protected]>
* 2001-2004 Juergen Stuber <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/input.h>
#include <linux/usb.h>
#include <linux/poll.h>
/* Define these values to match your devices */
#define USB_VENDOR_ID_LD 0x0f11 /* USB Vendor ID of LD Didactic GmbH */
#define USB_DEVICE_ID_LD_CASSY 0x1000 /* USB Product ID of CASSY-S modules with 8 bytes endpoint size */
#define USB_DEVICE_ID_LD_CASSY2 0x1001 /* USB Product ID of CASSY-S modules with 64 bytes endpoint size */
#define USB_DEVICE_ID_LD_POCKETCASSY 0x1010 /* USB Product ID of Pocket-CASSY */
#define USB_DEVICE_ID_LD_POCKETCASSY2 0x1011 /* USB Product ID of Pocket-CASSY 2 (reserved) */
#define USB_DEVICE_ID_LD_MOBILECASSY 0x1020 /* USB Product ID of Mobile-CASSY */
#define USB_DEVICE_ID_LD_MOBILECASSY2 0x1021 /* USB Product ID of Mobile-CASSY 2 (reserved) */
#define USB_DEVICE_ID_LD_MICROCASSYVOLTAGE 0x1031 /* USB Product ID of Micro-CASSY Voltage */
#define USB_DEVICE_ID_LD_MICROCASSYCURRENT 0x1032 /* USB Product ID of Micro-CASSY Current */
#define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033 /* USB Product ID of Micro-CASSY Time (reserved) */
#define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035 /* USB Product ID of Micro-CASSY Temperature */
#define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038 /* USB Product ID of Micro-CASSY pH */
#define USB_DEVICE_ID_LD_POWERANALYSERCASSY 0x1040 /* USB Product ID of Power Analyser CASSY */
#define USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY 0x1042 /* USB Product ID of Converter Controller CASSY */
#define USB_DEVICE_ID_LD_MACHINETESTCASSY 0x1043 /* USB Product ID of Machine Test CASSY */
#define USB_DEVICE_ID_LD_JWM 0x1080 /* USB Product ID of Joule and Wattmeter */
#define USB_DEVICE_ID_LD_DMMP 0x1081 /* USB Product ID of Digital Multimeter P (reserved) */
#define USB_DEVICE_ID_LD_UMIP 0x1090 /* USB Product ID of UMI P */
#define USB_DEVICE_ID_LD_UMIC 0x10A0 /* USB Product ID of UMI C */
#define USB_DEVICE_ID_LD_UMIB 0x10B0 /* USB Product ID of UMI B */
#define USB_DEVICE_ID_LD_XRAY 0x1100 /* USB Product ID of X-Ray Apparatus 55481 */
#define USB_DEVICE_ID_LD_XRAY2 0x1101 /* USB Product ID of X-Ray Apparatus 554800 */
#define USB_DEVICE_ID_LD_XRAYCT 0x1110 /* USB Product ID of X-Ray Apparatus CT 554821*/
#define USB_DEVICE_ID_LD_VIDEOCOM 0x1200 /* USB Product ID of VideoCom */
#define USB_DEVICE_ID_LD_MOTOR 0x1210 /* USB Product ID of Motor (reserved) */
#define USB_DEVICE_ID_LD_COM3LAB 0x2000 /* USB Product ID of COM3LAB */
#define USB_DEVICE_ID_LD_TELEPORT 0x2010 /* USB Product ID of Terminal Adapter */
#define USB_DEVICE_ID_LD_NETWORKANALYSER 0x2020 /* USB Product ID of Network Analyser */
#define USB_DEVICE_ID_LD_POWERCONTROL 0x2030 /* USB Product ID of Converter Control Unit */
#define USB_DEVICE_ID_LD_MACHINETEST 0x2040 /* USB Product ID of Machine Test System */
#define USB_DEVICE_ID_LD_MOSTANALYSER 0x2050 /* USB Product ID of MOST Protocol Analyser */
#define USB_DEVICE_ID_LD_MOSTANALYSER2 0x2051 /* USB Product ID of MOST Protocol Analyser 2 */
#define USB_DEVICE_ID_LD_ABSESP 0x2060 /* USB Product ID of ABS ESP */
#define USB_DEVICE_ID_LD_AUTODATABUS 0x2070 /* USB Product ID of Automotive Data Buses */
#define USB_DEVICE_ID_LD_MCT 0x2080 /* USB Product ID of Microcontroller technique */
#define USB_DEVICE_ID_LD_HYBRID 0x2090 /* USB Product ID of Automotive Hybrid */
#define USB_DEVICE_ID_LD_HEATCONTROL 0x20A0 /* USB Product ID of Heat control */
#ifdef CONFIG_USB_DYNAMIC_MINORS
#define USB_LD_MINOR_BASE 0
#else
#define USB_LD_MINOR_BASE 176
#endif
/* table of devices that work with this driver */
static const struct usb_device_id ld_usb_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY2) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY2) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY2) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYVOLTAGE) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYCURRENT) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERANALYSERCASSY) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETESTCASSY) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIC) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIB) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_XRAY) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_XRAY2) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_VIDEOCOM) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOTOR) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_COM3LAB) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_TELEPORT) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_NETWORKANALYSER) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERCONTROL) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETEST) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOSTANALYSER) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOSTANALYSER2) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_ABSESP) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_AUTODATABUS) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MCT) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, ld_usb_table);
MODULE_AUTHOR("Michael Hund <[email protected]>");
MODULE_DESCRIPTION("LD USB Driver");
MODULE_LICENSE("GPL");
/* All interrupt in transfers are collected in a ring buffer to
 * avoid race conditions and to improve driver performance.
*/
static int ring_buffer_size = 128;
module_param(ring_buffer_size, int, 0000);
MODULE_PARM_DESC(ring_buffer_size, "Read ring buffer size in reports");
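/* Each ring buffer slot stores one report as a size_t length followed
 * by the raw endpoint data:
 *
 *	[ actual_length (size_t) | data (interrupt_in_endpoint_size bytes) ]
 *
 * see ld_usb_interrupt_in_callback() and ld_usb_read().
 */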
/* The write_buffer can contain more than one interrupt out transfer.
*/
static int write_buffer_size = 10;
module_param(write_buffer_size, int, 0000);
MODULE_PARM_DESC(write_buffer_size, "Write buffer size in reports");
/* As of kernel version 2.6.4 ehci-hcd uses an
* "only one interrupt transfer per frame" shortcut
* to simplify the scheduling of periodic transfers.
* This conflicts with our standard 1ms intervals for in and out URBs.
* We use default intervals of 2ms for in and 2ms for out transfers,
* which should be fast enough.
* Increase the interval to allow more devices that do interrupt transfers,
* or set to 1 to use the standard interval from the endpoint descriptors.
*/
static int min_interrupt_in_interval = 2;
module_param(min_interrupt_in_interval, int, 0000);
MODULE_PARM_DESC(min_interrupt_in_interval, "Minimum interrupt in interval in ms");
static int min_interrupt_out_interval = 2;
module_param(min_interrupt_out_interval, int, 0000);
MODULE_PARM_DESC(min_interrupt_out_interval, "Minimum interrupt out interval in ms");
/* Structure to hold all of our device specific stuff */
struct ld_usb {
struct mutex mutex; /* locks this structure */
struct usb_interface *intf; /* save off the usb interface pointer */
unsigned long disconnected:1;
int open_count; /* number of times this port has been opened */
char *ring_buffer;
unsigned int ring_head;
unsigned int ring_tail;
wait_queue_head_t read_wait;
wait_queue_head_t write_wait;
char *interrupt_in_buffer;
struct usb_endpoint_descriptor *interrupt_in_endpoint;
struct urb *interrupt_in_urb;
int interrupt_in_interval;
size_t interrupt_in_endpoint_size;
int interrupt_in_running;
int interrupt_in_done;
int buffer_overflow;
spinlock_t rbsl;
char *interrupt_out_buffer;
struct usb_endpoint_descriptor *interrupt_out_endpoint;
struct urb *interrupt_out_urb;
int interrupt_out_interval;
size_t interrupt_out_endpoint_size;
int interrupt_out_busy;
};
static struct usb_driver ld_usb_driver;
/*
* ld_usb_abort_transfers
* aborts transfers and frees associated data structures
*/
static void ld_usb_abort_transfers(struct ld_usb *dev)
{
/* shutdown transfer */
if (dev->interrupt_in_running) {
dev->interrupt_in_running = 0;
usb_kill_urb(dev->interrupt_in_urb);
}
if (dev->interrupt_out_busy)
usb_kill_urb(dev->interrupt_out_urb);
}
/*
* ld_usb_delete
*/
static void ld_usb_delete(struct ld_usb *dev)
{
/* free data structures */
usb_free_urb(dev->interrupt_in_urb);
usb_free_urb(dev->interrupt_out_urb);
kfree(dev->ring_buffer);
kfree(dev->interrupt_in_buffer);
kfree(dev->interrupt_out_buffer);
kfree(dev);
}
/*
* ld_usb_interrupt_in_callback
*/
static void ld_usb_interrupt_in_callback(struct urb *urb)
{
struct ld_usb *dev = urb->context;
size_t *actual_buffer;
unsigned int next_ring_head;
int status = urb->status;
unsigned long flags;
int retval;
if (status) {
if (status == -ENOENT ||
status == -ECONNRESET ||
status == -ESHUTDOWN) {
goto exit;
} else {
dev_dbg(&dev->intf->dev,
"%s: nonzero status received: %d\n", __func__,
status);
spin_lock_irqsave(&dev->rbsl, flags);
goto resubmit; /* maybe we can recover */
}
}
spin_lock_irqsave(&dev->rbsl, flags);
if (urb->actual_length > 0) {
next_ring_head = (dev->ring_head+1) % ring_buffer_size;
if (next_ring_head != dev->ring_tail) {
actual_buffer = (size_t *)(dev->ring_buffer + dev->ring_head * (sizeof(size_t)+dev->interrupt_in_endpoint_size));
/* each slot stores urb->actual_length followed by the report data */
*actual_buffer = urb->actual_length;
memcpy(actual_buffer+1, dev->interrupt_in_buffer, urb->actual_length);
dev->ring_head = next_ring_head;
dev_dbg(&dev->intf->dev, "%s: received %d bytes\n",
__func__, urb->actual_length);
} else {
dev_warn(&dev->intf->dev,
"Ring buffer overflow, %d bytes dropped\n",
urb->actual_length);
dev->buffer_overflow = 1;
}
}
resubmit:
/* resubmit if we're still running */
if (dev->interrupt_in_running && !dev->buffer_overflow) {
retval = usb_submit_urb(dev->interrupt_in_urb, GFP_ATOMIC);
if (retval) {
dev_err(&dev->intf->dev,
"usb_submit_urb failed (%d)\n", retval);
dev->buffer_overflow = 1;
}
}
spin_unlock_irqrestore(&dev->rbsl, flags);
exit:
dev->interrupt_in_done = 1;
wake_up_interruptible(&dev->read_wait);
}
/*
* ld_usb_interrupt_out_callback
*/
static void ld_usb_interrupt_out_callback(struct urb *urb)
{
struct ld_usb *dev = urb->context;
int status = urb->status;
/* sync/async unlink faults aren't errors */
if (status && !(status == -ENOENT ||
status == -ECONNRESET ||
status == -ESHUTDOWN))
dev_dbg(&dev->intf->dev,
"%s - nonzero write interrupt status received: %d\n",
__func__, status);
dev->interrupt_out_busy = 0;
wake_up_interruptible(&dev->write_wait);
}
/*
* ld_usb_open
*/
static int ld_usb_open(struct inode *inode, struct file *file)
{
struct ld_usb *dev;
int subminor;
int retval;
struct usb_interface *interface;
stream_open(inode, file);
subminor = iminor(inode);
interface = usb_find_interface(&ld_usb_driver, subminor);
if (!interface) {
printk(KERN_ERR "%s - error, can't find device for minor %d\n",
__func__, subminor);
return -ENODEV;
}
dev = usb_get_intfdata(interface);
if (!dev)
return -ENODEV;
/* lock this device */
if (mutex_lock_interruptible(&dev->mutex))
return -ERESTARTSYS;
/* allow opening only once */
if (dev->open_count) {
retval = -EBUSY;
goto unlock_exit;
}
dev->open_count = 1;
/* initialize in direction */
dev->ring_head = 0;
dev->ring_tail = 0;
dev->buffer_overflow = 0;
usb_fill_int_urb(dev->interrupt_in_urb,
interface_to_usbdev(interface),
usb_rcvintpipe(interface_to_usbdev(interface),
dev->interrupt_in_endpoint->bEndpointAddress),
dev->interrupt_in_buffer,
dev->interrupt_in_endpoint_size,
ld_usb_interrupt_in_callback,
dev,
dev->interrupt_in_interval);
dev->interrupt_in_running = 1;
dev->interrupt_in_done = 0;
retval = usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL);
if (retval) {
dev_err(&interface->dev, "Couldn't submit interrupt_in_urb %d\n", retval);
dev->interrupt_in_running = 0;
dev->open_count = 0;
goto unlock_exit;
}
/* save device in the file's private structure */
file->private_data = dev;
unlock_exit:
mutex_unlock(&dev->mutex);
return retval;
}
/*
* ld_usb_release
*/
static int ld_usb_release(struct inode *inode, struct file *file)
{
struct ld_usb *dev;
int retval = 0;
dev = file->private_data;
if (dev == NULL) {
retval = -ENODEV;
goto exit;
}
mutex_lock(&dev->mutex);
if (dev->open_count != 1) {
retval = -ENODEV;
goto unlock_exit;
}
if (dev->disconnected) {
/* the device was unplugged before the file was released */
mutex_unlock(&dev->mutex);
/* unlock here as ld_usb_delete frees dev */
ld_usb_delete(dev);
goto exit;
}
/* wait until write transfer is finished */
if (dev->interrupt_out_busy)
wait_event_interruptible_timeout(dev->write_wait, !dev->interrupt_out_busy, 2 * HZ);
ld_usb_abort_transfers(dev);
dev->open_count = 0;
unlock_exit:
mutex_unlock(&dev->mutex);
exit:
return retval;
}
/*
* ld_usb_poll
*/
static __poll_t ld_usb_poll(struct file *file, poll_table *wait)
{
struct ld_usb *dev;
__poll_t mask = 0;
dev = file->private_data;
if (dev->disconnected)
return EPOLLERR | EPOLLHUP;
poll_wait(file, &dev->read_wait, wait);
poll_wait(file, &dev->write_wait, wait);
if (dev->ring_head != dev->ring_tail)
mask |= EPOLLIN | EPOLLRDNORM;
if (!dev->interrupt_out_busy)
mask |= EPOLLOUT | EPOLLWRNORM;
return mask;
}
/*
* ld_usb_read
*/
static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
loff_t *ppos)
{
struct ld_usb *dev;
size_t *actual_buffer;
size_t bytes_to_read;
int retval = 0;
int rv;
dev = file->private_data;
/* verify that we actually have some data to read */
if (count == 0)
goto exit;
/* lock this object */
if (mutex_lock_interruptible(&dev->mutex)) {
retval = -ERESTARTSYS;
goto exit;
}
/* verify that the device wasn't unplugged */
if (dev->disconnected) {
retval = -ENODEV;
printk(KERN_ERR "ldusb: No device or device unplugged %d\n", retval);
goto unlock_exit;
}
/* wait for data */
spin_lock_irq(&dev->rbsl);
while (dev->ring_head == dev->ring_tail) {
dev->interrupt_in_done = 0;
spin_unlock_irq(&dev->rbsl);
if (file->f_flags & O_NONBLOCK) {
retval = -EAGAIN;
goto unlock_exit;
}
retval = wait_event_interruptible(dev->read_wait, dev->interrupt_in_done);
if (retval < 0)
goto unlock_exit;
spin_lock_irq(&dev->rbsl);
}
spin_unlock_irq(&dev->rbsl);
/* each slot contains actual_length followed by the report data */
actual_buffer = (size_t *)(dev->ring_buffer + dev->ring_tail * (sizeof(size_t)+dev->interrupt_in_endpoint_size));
if (*actual_buffer > dev->interrupt_in_endpoint_size) {
retval = -EIO;
goto unlock_exit;
}
bytes_to_read = min(count, *actual_buffer);
if (bytes_to_read < *actual_buffer)
dev_warn(&dev->intf->dev, "Read buffer overflow, %zu bytes dropped\n",
*actual_buffer-bytes_to_read);
/* copy one interrupt_in_buffer from ring_buffer into userspace */
if (copy_to_user(buffer, actual_buffer+1, bytes_to_read)) {
retval = -EFAULT;
goto unlock_exit;
}
retval = bytes_to_read;
spin_lock_irq(&dev->rbsl);
dev->ring_tail = (dev->ring_tail + 1) % ring_buffer_size;
if (dev->buffer_overflow) {
dev->buffer_overflow = 0;
spin_unlock_irq(&dev->rbsl);
rv = usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL);
if (rv < 0)
dev->buffer_overflow = 1;
} else {
spin_unlock_irq(&dev->rbsl);
}
unlock_exit:
/* unlock the device */
mutex_unlock(&dev->mutex);
exit:
return retval;
}
/*
* ld_usb_write
*/
static ssize_t ld_usb_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
struct ld_usb *dev;
size_t bytes_to_write;
int retval = 0;
dev = file->private_data;
/* verify that we actually have some data to write */
if (count == 0)
goto exit;
/* lock this object */
if (mutex_lock_interruptible(&dev->mutex)) {
retval = -ERESTARTSYS;
goto exit;
}
/* verify that the device wasn't unplugged */
if (dev->disconnected) {
retval = -ENODEV;
printk(KERN_ERR "ldusb: No device or device unplugged %d\n", retval);
goto unlock_exit;
}
/* wait until previous transfer is finished */
if (dev->interrupt_out_busy) {
if (file->f_flags & O_NONBLOCK) {
retval = -EAGAIN;
goto unlock_exit;
}
retval = wait_event_interruptible(dev->write_wait, !dev->interrupt_out_busy);
if (retval < 0) {
goto unlock_exit;
}
}
/* write the data into interrupt_out_buffer from userspace */
bytes_to_write = min(count, write_buffer_size*dev->interrupt_out_endpoint_size);
if (bytes_to_write < count)
dev_warn(&dev->intf->dev, "Write buffer overflow, %zu bytes dropped\n",
count - bytes_to_write);
dev_dbg(&dev->intf->dev, "%s: count = %zu, bytes_to_write = %zu\n",
__func__, count, bytes_to_write);
if (copy_from_user(dev->interrupt_out_buffer, buffer, bytes_to_write)) {
retval = -EFAULT;
goto unlock_exit;
}
if (dev->interrupt_out_endpoint == NULL) {
/* try HID_REQ_SET_REPORT=9 on control_endpoint instead of interrupt_out_endpoint */
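/* per HID 1.11 section 7.2.2, wValue is (report type << 8) | report ID;
 * the call below requests report type 1 with report ID 0 */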
retval = usb_control_msg(interface_to_usbdev(dev->intf),
usb_sndctrlpipe(interface_to_usbdev(dev->intf), 0),
9,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
1 << 8, 0,
dev->interrupt_out_buffer,
bytes_to_write,
USB_CTRL_SET_TIMEOUT);
if (retval < 0)
dev_err(&dev->intf->dev,
"Couldn't submit HID_REQ_SET_REPORT %d\n",
retval);
goto unlock_exit;
}
/* send off the urb */
usb_fill_int_urb(dev->interrupt_out_urb,
interface_to_usbdev(dev->intf),
usb_sndintpipe(interface_to_usbdev(dev->intf),
dev->interrupt_out_endpoint->bEndpointAddress),
dev->interrupt_out_buffer,
bytes_to_write,
ld_usb_interrupt_out_callback,
dev,
dev->interrupt_out_interval);
dev->interrupt_out_busy = 1;
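/* order the busy-flag store before the URB submission, so the
 * completion callback never observes a stale interrupt_out_busy */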
wmb();
retval = usb_submit_urb(dev->interrupt_out_urb, GFP_KERNEL);
if (retval) {
dev->interrupt_out_busy = 0;
dev_err(&dev->intf->dev,
"Couldn't submit interrupt_out_urb %d\n", retval);
goto unlock_exit;
}
retval = bytes_to_write;
unlock_exit:
/* unlock the device */
mutex_unlock(&dev->mutex);
exit:
return retval;
}
/* file operations needed when we register this driver */
static const struct file_operations ld_usb_fops = {
.owner = THIS_MODULE,
.read = ld_usb_read,
.write = ld_usb_write,
.open = ld_usb_open,
.release = ld_usb_release,
.poll = ld_usb_poll,
.llseek = no_llseek,
};
/*
* usb class driver info in order to get a minor number from the usb core,
* and to have the device registered with the driver core
*/
static struct usb_class_driver ld_usb_class = {
.name = "ldusb%d",
.fops = &ld_usb_fops,
.minor_base = USB_LD_MINOR_BASE,
};
/*
* ld_usb_probe
*
* Called by the usb core when a new device is connected that it thinks
* this driver might be interested in.
*/
static int ld_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(intf);
struct ld_usb *dev = NULL;
struct usb_host_interface *iface_desc;
char *buffer;
int retval = -ENOMEM;
int res;
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
goto exit;
mutex_init(&dev->mutex);
spin_lock_init(&dev->rbsl);
dev->intf = intf;
init_waitqueue_head(&dev->read_wait);
init_waitqueue_head(&dev->write_wait);
/* workaround for early firmware versions on fast computers */
if ((le16_to_cpu(udev->descriptor.idVendor) == USB_VENDOR_ID_LD) &&
((le16_to_cpu(udev->descriptor.idProduct) == USB_DEVICE_ID_LD_CASSY) ||
(le16_to_cpu(udev->descriptor.idProduct) == USB_DEVICE_ID_LD_COM3LAB)) &&
(le16_to_cpu(udev->descriptor.bcdDevice) <= 0x103)) {
buffer = kmalloc(256, GFP_KERNEL);
if (!buffer)
goto error;
/* usb_string() issues a SETUP+STALL so the firmware always leaves its ControlReadLoop */
usb_string(udev, 255, buffer, 256);
kfree(buffer);
}
iface_desc = intf->cur_altsetting;
res = usb_find_last_int_in_endpoint(iface_desc,
&dev->interrupt_in_endpoint);
if (res) {
dev_err(&intf->dev, "Interrupt in endpoint not found\n");
retval = res;
goto error;
}
res = usb_find_last_int_out_endpoint(iface_desc,
&dev->interrupt_out_endpoint);
if (res)
dev_warn(&intf->dev, "Interrupt out endpoint not found (using control endpoint instead)\n");
dev->interrupt_in_endpoint_size = usb_endpoint_maxp(dev->interrupt_in_endpoint);
dev->ring_buffer = kcalloc(ring_buffer_size,
sizeof(size_t) + dev->interrupt_in_endpoint_size,
GFP_KERNEL);
if (!dev->ring_buffer)
goto error;
dev->interrupt_in_buffer = kmalloc(dev->interrupt_in_endpoint_size, GFP_KERNEL);
if (!dev->interrupt_in_buffer)
goto error;
dev->interrupt_in_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->interrupt_in_urb)
goto error;
dev->interrupt_out_endpoint_size = dev->interrupt_out_endpoint ? usb_endpoint_maxp(dev->interrupt_out_endpoint) :
udev->descriptor.bMaxPacketSize0;
dev->interrupt_out_buffer =
kmalloc_array(write_buffer_size,
dev->interrupt_out_endpoint_size, GFP_KERNEL);
if (!dev->interrupt_out_buffer)
goto error;
dev->interrupt_out_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->interrupt_out_urb)
goto error;
dev->interrupt_in_interval = max_t(int, min_interrupt_in_interval,
dev->interrupt_in_endpoint->bInterval);
if (dev->interrupt_out_endpoint)
dev->interrupt_out_interval = max_t(int, min_interrupt_out_interval,
dev->interrupt_out_endpoint->bInterval);
/* we can register the device now, as it is ready */
usb_set_intfdata(intf, dev);
retval = usb_register_dev(intf, &ld_usb_class);
if (retval) {
/* something prevented us from registering this driver */
dev_err(&intf->dev, "Not able to get a minor for this device.\n");
usb_set_intfdata(intf, NULL);
goto error;
}
/* let the user know what node this device is now attached to */
dev_info(&intf->dev, "LD USB Device #%d now attached to major %d minor %d\n",
(intf->minor - USB_LD_MINOR_BASE), USB_MAJOR, intf->minor);
exit:
return retval;
error:
ld_usb_delete(dev);
return retval;
}
/*
* ld_usb_disconnect
*
* Called by the usb core when the device is removed from the system.
*/
static void ld_usb_disconnect(struct usb_interface *intf)
{
struct ld_usb *dev;
int minor;
dev = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
minor = intf->minor;
/* give back our minor */
usb_deregister_dev(intf, &ld_usb_class);
usb_poison_urb(dev->interrupt_in_urb);
usb_poison_urb(dev->interrupt_out_urb);
mutex_lock(&dev->mutex);
/* if the device is not opened, then we clean up right now */
if (!dev->open_count) {
mutex_unlock(&dev->mutex);
ld_usb_delete(dev);
} else {
dev->disconnected = 1;
/* wake up pollers */
wake_up_interruptible_all(&dev->read_wait);
wake_up_interruptible_all(&dev->write_wait);
mutex_unlock(&dev->mutex);
}
dev_info(&intf->dev, "LD USB Device #%d now disconnected\n",
(minor - USB_LD_MINOR_BASE));
}
/* usb specific object needed to register this driver with the usb subsystem */
static struct usb_driver ld_usb_driver = {
.name = "ldusb",
.probe = ld_usb_probe,
.disconnect = ld_usb_disconnect,
.id_table = ld_usb_table,
};
module_usb_driver(ld_usb_driver);
| linux-master | drivers/usb/misc/ldusb.c |
// SPDX-License-Identifier: GPL-2.0
/*
* USB 7 Segment Driver
*
* Copyright (C) 2008 Harrison Metzger <[email protected]>
* Based on usbled.c by Greg Kroah-Hartman ([email protected])
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/usb.h>
#define DRIVER_AUTHOR "Harrison Metzger <[email protected]>"
#define DRIVER_DESC "USB 7 Segment Driver"
#define VENDOR_ID 0x0fc5
#define PRODUCT_ID 0x1227
#define MAXLEN 8
/* table of devices that work with this driver */
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(VENDOR_ID, PRODUCT_ID) },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
/* the different text display modes the device is capable of */
static const char *display_textmodes[] = {"raw", "hex", "ascii"};
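/* All device communication below uses vendor control requests
 * (bRequest 0x12, bRequestType 0x48); the command lives in the high
 * byte of wValue with a fixed low byte of 10:
 * 80 = power, 82 = display mode, 85 = write text, 86 = set decimals.
 */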
struct usb_sevsegdev {
struct usb_device *udev;
struct usb_interface *intf;
u8 powered;
u8 mode_msb;
u8 mode_lsb;
u8 decimals[MAXLEN];
u8 textmode;
u8 text[MAXLEN];
u16 textlength;
u8 shadow_power; /* for PM */
u8 has_interface_pm;
};
/* sysfs_streq can't replace this completely:
 * if the device is in hex mode and the user wants a 0 byte,
 * string functions would treat it as the end of the string,
 * so memory-length functions are used instead.
 */
static inline size_t my_memlen(const char *buf, size_t count)
{
if (count > 0 && buf[count-1] == '\n')
return count - 1;
else
return count;
}
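/* e.g. my_memlen("123\n", 4) == 3, and my_memlen("123", 3) == 3 too */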
static void update_display_powered(struct usb_sevsegdev *mydev)
{
int rc;
if (mydev->powered && !mydev->has_interface_pm) {
rc = usb_autopm_get_interface(mydev->intf);
if (rc < 0)
return;
mydev->has_interface_pm = 1;
}
if (mydev->shadow_power != 1)
return;
rc = usb_control_msg_send(mydev->udev, 0, 0x12, 0x48,
(80 * 0x100) + 10, /* (power mode) */
(0x00 * 0x100) + (mydev->powered ? 1 : 0),
NULL, 0, 2000, GFP_KERNEL);
if (rc < 0)
dev_dbg(&mydev->udev->dev, "power retval = %d\n", rc);
if (!mydev->powered && mydev->has_interface_pm) {
usb_autopm_put_interface(mydev->intf);
mydev->has_interface_pm = 0;
}
}
static void update_display_mode(struct usb_sevsegdev *mydev)
{
int rc;
if (mydev->shadow_power != 1)
return;
rc = usb_control_msg_send(mydev->udev, 0, 0x12, 0x48,
(82 * 0x100) + 10, /* (set mode) */
(mydev->mode_msb * 0x100) + mydev->mode_lsb,
NULL, 0, 2000, GFP_NOIO);
if (rc < 0)
dev_dbg(&mydev->udev->dev, "mode retval = %d\n", rc);
}
static void update_display_visual(struct usb_sevsegdev *mydev, gfp_t mf)
{
int rc;
int i;
unsigned char buffer[MAXLEN] = {0};
u8 decimals = 0;
if (mydev->shadow_power != 1)
return;
/* The device is right to left, whereas you write left to right */
for (i = 0; i < mydev->textlength; i++)
buffer[i] = mydev->text[mydev->textlength-1-i];
rc = usb_control_msg_send(mydev->udev, 0, 0x12, 0x48,
(85 * 0x100) + 10, /* (write text) */
(0 * 0x100) + mydev->textmode, /* mode */
buffer, mydev->textlength, 2000, mf);
if (rc < 0)
dev_dbg(&mydev->udev->dev, "write retval = %d\n", rc);
/* The device is right to left, whereas you write left to right */
for (i = 0; i < sizeof(mydev->decimals); i++)
decimals |= mydev->decimals[i] << i;
rc = usb_control_msg_send(mydev->udev, 0, 0x12, 0x48,
(86 * 0x100) + 10, /* (set decimal) */
(0 * 0x100) + decimals, /* decimals */
NULL, 0, 2000, mf);
if (rc < 0)
dev_dbg(&mydev->udev->dev, "decimal retval = %d\n", rc);
}
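/* Generate paired show/store sysfs handlers for a simple unsigned
 * field, invoking the given update function after each store (used
 * below for powered, mode_msb and mode_lsb).
 */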
#define MYDEV_ATTR_SIMPLE_UNSIGNED(name, update_fcn) \
static ssize_t name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct usb_interface *intf = to_usb_interface(dev); \
struct usb_sevsegdev *mydev = usb_get_intfdata(intf); \
\
return sprintf(buf, "%u\n", mydev->name); \
} \
\
static ssize_t name##_store(struct device *dev, \
struct device_attribute *attr, const char *buf, size_t count) \
{ \
struct usb_interface *intf = to_usb_interface(dev); \
struct usb_sevsegdev *mydev = usb_get_intfdata(intf); \
\
mydev->name = simple_strtoul(buf, NULL, 10); \
update_fcn(mydev); \
\
return count; \
} \
static DEVICE_ATTR_RW(name);
static ssize_t text_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_sevsegdev *mydev = usb_get_intfdata(intf);
return sysfs_emit(buf, "%s\n", mydev->text);
}
static ssize_t text_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_sevsegdev *mydev = usb_get_intfdata(intf);
size_t end = my_memlen(buf, count);
if (end > sizeof(mydev->text))
return -EINVAL;
memset(mydev->text, 0, sizeof(mydev->text));
mydev->textlength = end;
if (end > 0)
memcpy(mydev->text, buf, end);
update_display_visual(mydev, GFP_KERNEL);
return count;
}
static DEVICE_ATTR_RW(text);
static ssize_t decimals_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_sevsegdev *mydev = usb_get_intfdata(intf);
int i;
int pos;
for (i = 0; i < sizeof(mydev->decimals); i++) {
pos = sizeof(mydev->decimals) - 1 - i;
if (mydev->decimals[i] == 0)
buf[pos] = '0';
else if (mydev->decimals[i] == 1)
buf[pos] = '1';
else
buf[pos] = 'x';
}
buf[sizeof(mydev->decimals)] = '\n';
return sizeof(mydev->decimals) + 1;
}
static ssize_t decimals_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_sevsegdev *mydev = usb_get_intfdata(intf);
size_t end = my_memlen(buf, count);
int i;
if (end > sizeof(mydev->decimals))
return -EINVAL;
for (i = 0; i < end; i++)
if (buf[i] != '0' && buf[i] != '1')
return -EINVAL;
memset(mydev->decimals, 0, sizeof(mydev->decimals));
for (i = 0; i < end; i++)
if (buf[i] == '1')
mydev->decimals[end-1-i] = 1;
update_display_visual(mydev, GFP_KERNEL);
return count;
}
static DEVICE_ATTR_RW(decimals);
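/* Usage sketch, following the parsing above: characters map onto the
 * display left to right, so e.g. writing "00000001" should light only
 * the decimal point of the rightmost digit.
 */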
static ssize_t textmode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_sevsegdev *mydev = usb_get_intfdata(intf);
int i;
buf[0] = 0;
for (i = 0; i < ARRAY_SIZE(display_textmodes); i++) {
if (mydev->textmode == i) {
strcat(buf, " [");
strcat(buf, display_textmodes[i]);
strcat(buf, "] ");
} else {
strcat(buf, " ");
strcat(buf, display_textmodes[i]);
strcat(buf, " ");
}
}
strcat(buf, "\n");
return strlen(buf);
}
static ssize_t textmode_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_sevsegdev *mydev = usb_get_intfdata(intf);
int i;
i = sysfs_match_string(display_textmodes, buf);
if (i < 0)
return i;
mydev->textmode = i;
update_display_visual(mydev, GFP_KERNEL);
return count;
}
static DEVICE_ATTR_RW(textmode);
MYDEV_ATTR_SIMPLE_UNSIGNED(powered, update_display_powered);
MYDEV_ATTR_SIMPLE_UNSIGNED(mode_msb, update_display_mode);
MYDEV_ATTR_SIMPLE_UNSIGNED(mode_lsb, update_display_mode);
static struct attribute *sevseg_attrs[] = {
&dev_attr_powered.attr,
&dev_attr_text.attr,
&dev_attr_textmode.attr,
&dev_attr_decimals.attr,
&dev_attr_mode_msb.attr,
&dev_attr_mode_lsb.attr,
NULL
};
ATTRIBUTE_GROUPS(sevseg);
static int sevseg_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(interface);
struct usb_sevsegdev *mydev;
int rc = -ENOMEM;
mydev = kzalloc(sizeof(struct usb_sevsegdev), GFP_KERNEL);
if (!mydev)
goto error_mem;
mydev->udev = usb_get_dev(udev);
mydev->intf = interface;
usb_set_intfdata(interface, mydev);
/* PM */
mydev->shadow_power = 1; /* currently active */
mydev->has_interface_pm = 0; /* have not issued autopm_get */
/* set defaults */
mydev->textmode = 0x02; /* ascii mode */
mydev->mode_msb = 0x06; /* 6 characters */
mydev->mode_lsb = 0x3f; /* scanmode for 6 chars */
dev_info(&interface->dev, "USB 7 Segment device now attached\n");
return 0;
error_mem:
return rc;
}
static void sevseg_disconnect(struct usb_interface *interface)
{
struct usb_sevsegdev *mydev;
mydev = usb_get_intfdata(interface);
usb_set_intfdata(interface, NULL);
usb_put_dev(mydev->udev);
kfree(mydev);
dev_info(&interface->dev, "USB 7 Segment now disconnected\n");
}
static int sevseg_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usb_sevsegdev *mydev;
mydev = usb_get_intfdata(intf);
mydev->shadow_power = 0;
return 0;
}
static int sevseg_resume(struct usb_interface *intf)
{
struct usb_sevsegdev *mydev;
mydev = usb_get_intfdata(intf);
mydev->shadow_power = 1;
update_display_mode(mydev);
update_display_visual(mydev, GFP_NOIO);
return 0;
}
static int sevseg_reset_resume(struct usb_interface *intf)
{
struct usb_sevsegdev *mydev;
mydev = usb_get_intfdata(intf);
mydev->shadow_power = 1;
update_display_mode(mydev);
update_display_visual(mydev, GFP_NOIO);
return 0;
}
static struct usb_driver sevseg_driver = {
.name = "usbsevseg",
.probe = sevseg_probe,
.disconnect = sevseg_disconnect,
.suspend = sevseg_suspend,
.resume = sevseg_resume,
.reset_resume = sevseg_reset_resume,
.id_table = id_table,
.dev_groups = sevseg_groups,
.supports_autosuspend = 1,
};
module_usb_driver(sevseg_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/misc/usbsevseg.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/usb.h>
#define SIMPLE_IO_TIMEOUT 10000 /* in milliseconds */
/*-------------------------------------------------------------------------*/
static int override_alt = -1;
module_param_named(alt, override_alt, int, 0644);
MODULE_PARM_DESC(alt, ">= 0 to override altsetting selection");
static void complicated_callback(struct urb *urb);
/*-------------------------------------------------------------------------*/
/* FIXME make these public somewhere; usbdevfs.h? */
/* Parameter for usbtest driver. */
struct usbtest_param_32 {
/* inputs */
__u32 test_num; /* 0..(TEST_CASES-1) */
__u32 iterations;
__u32 length;
__u32 vary;
__u32 sglen;
/* outputs */
__s32 duration_sec;
__s32 duration_usec;
};
/*
* Compat parameter to the usbtest driver.
* This supports older user space binaries compiled with 64 bit compiler.
*/
struct usbtest_param_64 {
/* inputs */
__u32 test_num; /* 0..(TEST_CASES-1) */
__u32 iterations;
__u32 length;
__u32 vary;
__u32 sglen;
/* outputs */
__s64 duration_sec;
__s64 duration_usec;
};
/* IOCTL interface to the driver. */
#define USBTEST_REQUEST_32 _IOWR('U', 100, struct usbtest_param_32)
/* COMPAT IOCTL interface to the driver. */
#define USBTEST_REQUEST_64 _IOWR('U', 100, struct usbtest_param_64)
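/*
 * Userspace sketch (illustrative only; the real harness lives in
 * tools/usb/testusb.c): fill a parameter block and issue the ioctl on
 * an already-open usbfs file descriptor for the claimed interface:
 *
 *	struct usbtest_param_32 param = {
 *		.test_num = 0, .iterations = 1000, .length = 512,
 *	};
 *	if (ioctl(usbfs_fd, USBTEST_REQUEST_32, &param) == 0)
 *		printf("took %d.%06d s\n", param.duration_sec,
 *		       param.duration_usec);
 *
 * duration_sec/duration_usec are filled in by the driver on return.
 */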
/*-------------------------------------------------------------------------*/
#define GENERIC /* let probe() bind using module params */
/* Some devices that can be used for testing will have "real" drivers.
* Entries for those need to be enabled here by hand, after disabling
* that "real" driver.
*/
//#define IBOT2 /* grab iBOT2 webcams */
//#define KEYSPAN_19Qi /* grab un-renumerated serial adapter */
/*-------------------------------------------------------------------------*/
struct usbtest_info {
const char *name;
u8 ep_in; /* bulk/intr source */
u8 ep_out; /* bulk/intr sink */
unsigned autoconf:1;
unsigned ctrl_out:1;
unsigned iso:1; /* try iso in/out */
unsigned intr:1; /* try interrupt in/out */
int alt;
};
/* this is accessed only through usbfs ioctl calls.
* one ioctl to issue a test ... one lock per device.
* tests create other threads if they need them.
* urbs and buffers are allocated dynamically,
* and data generated deterministically.
*/
struct usbtest_dev {
struct usb_interface *intf;
struct usbtest_info *info;
int in_pipe;
int out_pipe;
int in_iso_pipe;
int out_iso_pipe;
int in_int_pipe;
int out_int_pipe;
struct usb_endpoint_descriptor *iso_in, *iso_out;
struct usb_endpoint_descriptor *int_in, *int_out;
struct mutex lock;
#define TBUF_SIZE 256
u8 *buf;
};
static struct usb_device *testdev_to_usbdev(struct usbtest_dev *test)
{
return interface_to_usbdev(test->intf);
}
/* set up all urbs so they can be used with either bulk or interrupt */
#define INTERRUPT_RATE 1 /* msec/transfer */
#define ERROR(tdev, fmt, args...) \
dev_err(&(tdev)->intf->dev , fmt , ## args)
#define WARNING(tdev, fmt, args...) \
dev_warn(&(tdev)->intf->dev , fmt , ## args)
#define GUARD_BYTE 0xA5
#define MAX_SGLEN 128
/*-------------------------------------------------------------------------*/
static inline void endpoint_update(int edi,
struct usb_host_endpoint **in,
struct usb_host_endpoint **out,
struct usb_host_endpoint *e)
{
if (edi) {
if (!*in)
*in = e;
} else {
if (!*out)
*out = e;
}
}
static int
get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
{
int tmp;
struct usb_host_interface *alt;
struct usb_host_endpoint *in, *out;
struct usb_host_endpoint *iso_in, *iso_out;
struct usb_host_endpoint *int_in, *int_out;
struct usb_device *udev;
for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
unsigned ep;
in = out = NULL;
iso_in = iso_out = NULL;
int_in = int_out = NULL;
alt = intf->altsetting + tmp;
if (override_alt >= 0 &&
override_alt != alt->desc.bAlternateSetting)
continue;
/* take the first altsetting with in-bulk + out-bulk;
* ignore other endpoints and altsettings.
*/
for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
struct usb_host_endpoint *e;
int edi;
e = alt->endpoint + ep;
edi = usb_endpoint_dir_in(&e->desc);
switch (usb_endpoint_type(&e->desc)) {
case USB_ENDPOINT_XFER_BULK:
endpoint_update(edi, &in, &out, e);
continue;
case USB_ENDPOINT_XFER_INT:
if (dev->info->intr)
endpoint_update(edi, &int_in, &int_out, e);
continue;
case USB_ENDPOINT_XFER_ISOC:
if (dev->info->iso)
endpoint_update(edi, &iso_in, &iso_out, e);
fallthrough;
default:
continue;
}
}
if ((in && out) || iso_in || iso_out || int_in || int_out)
goto found;
}
return -EINVAL;
found:
udev = testdev_to_usbdev(dev);
dev->info->alt = alt->desc.bAlternateSetting;
if (alt->desc.bAlternateSetting != 0) {
tmp = usb_set_interface(udev,
alt->desc.bInterfaceNumber,
alt->desc.bAlternateSetting);
if (tmp < 0)
return tmp;
}
if (in)
dev->in_pipe = usb_rcvbulkpipe(udev,
in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
if (out)
dev->out_pipe = usb_sndbulkpipe(udev,
out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
if (iso_in) {
dev->iso_in = &iso_in->desc;
dev->in_iso_pipe = usb_rcvisocpipe(udev,
iso_in->desc.bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK);
}
if (iso_out) {
dev->iso_out = &iso_out->desc;
dev->out_iso_pipe = usb_sndisocpipe(udev,
iso_out->desc.bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK);
}
if (int_in) {
dev->int_in = &int_in->desc;
dev->in_int_pipe = usb_rcvintpipe(udev,
int_in->desc.bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK);
}
if (int_out) {
dev->int_out = &int_out->desc;
dev->out_int_pipe = usb_sndintpipe(udev,
int_out->desc.bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK);
}
return 0;
}
/*-------------------------------------------------------------------------*/
/* Support for testing basic non-queued I/O streams.
*
* These just package urbs as requests that can be easily canceled.
* Each urb's data buffer is dynamically allocated; callers can fill
* them with non-zero test data (or test for it) when appropriate.
*/
static void simple_callback(struct urb *urb)
{
complete(urb->context);
}
static struct urb *usbtest_alloc_urb(
struct usb_device *udev,
int pipe,
unsigned long bytes,
unsigned transfer_flags,
unsigned offset,
u8 bInterval,
usb_complete_t complete_fn)
{
struct urb *urb;
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb)
return urb;
if (bInterval)
usb_fill_int_urb(urb, udev, pipe, NULL, bytes, complete_fn,
NULL, bInterval);
else
usb_fill_bulk_urb(urb, udev, pipe, NULL, bytes, complete_fn,
NULL);
urb->interval = (udev->speed == USB_SPEED_HIGH)
? (INTERRUPT_RATE << 3)
: INTERRUPT_RATE;
urb->transfer_flags = transfer_flags;
if (usb_pipein(pipe))
urb->transfer_flags |= URB_SHORT_NOT_OK;
if ((bytes + offset) == 0)
return urb;
if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
GFP_KERNEL, &urb->transfer_dma);
else
urb->transfer_buffer = kmalloc(bytes + offset, GFP_KERNEL);
if (!urb->transfer_buffer) {
usb_free_urb(urb);
return NULL;
}
/* To test unaligned transfers add an offset and fill the
 * unused memory with a guard value
 */
if (offset) {
memset(urb->transfer_buffer, GUARD_BYTE, offset);
urb->transfer_buffer += offset;
if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
urb->transfer_dma += offset;
}
/* For inbound transfers use a guard byte so that the test fails
 * if the data was not correctly copied
 */
memset(urb->transfer_buffer,
usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
bytes);
return urb;
}
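/* Buffer layout produced above when 'offset' is nonzero:
 *
 *	[ GUARD_BYTE x offset | transfer data ('bytes' bytes) ]
 *	                        ^ urb->transfer_buffer
 *
 * check_guard_bytes() verifies that the prefix was left intact.
 */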
static struct urb *simple_alloc_urb(
struct usb_device *udev,
int pipe,
unsigned long bytes,
u8 bInterval)
{
return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0,
bInterval, simple_callback);
}
static struct urb *complicated_alloc_urb(
struct usb_device *udev,
int pipe,
unsigned long bytes,
u8 bInterval)
{
return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0,
bInterval, complicated_callback);
}
static unsigned pattern;
static unsigned mod_pattern;
module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(mod_pattern, "i/o pattern (0 == zeroes)");
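/* pattern 1 ("mod63") emits 0, 1, ..., 62, 0, 1, ... and restarts at
 * every maxpacket boundary; e.g. with 512 byte packets the last byte
 * of a packet is 7 (511 % 63) and the next packet begins at 0 again.
 */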
static unsigned get_maxpacket(struct usb_device *udev, int pipe)
{
struct usb_host_endpoint *ep;
ep = usb_pipe_endpoint(udev, pipe);
return le16_to_cpup(&ep->desc.wMaxPacketSize);
}
static int ss_isoc_get_packet_num(struct usb_device *udev, int pipe)
{
struct usb_host_endpoint *ep = usb_pipe_endpoint(udev, pipe);
return USB_SS_MULT(ep->ss_ep_comp.bmAttributes)
* (1 + ep->ss_ep_comp.bMaxBurst);
}
static void simple_fill_buf(struct urb *urb)
{
unsigned i;
u8 *buf = urb->transfer_buffer;
unsigned len = urb->transfer_buffer_length;
unsigned maxpacket;
switch (pattern) {
default:
fallthrough;
case 0:
memset(buf, 0, len);
break;
case 1: /* mod63 */
maxpacket = get_maxpacket(urb->dev, urb->pipe);
for (i = 0; i < len; i++)
*buf++ = (u8) ((i % maxpacket) % 63);
break;
}
}
static inline unsigned long buffer_offset(void *buf)
{
return (unsigned long)buf & (ARCH_KMALLOC_MINALIGN - 1);
}
static int check_guard_bytes(struct usbtest_dev *tdev, struct urb *urb)
{
u8 *buf = urb->transfer_buffer;
u8 *guard = buf - buffer_offset(buf);
unsigned i;
for (i = 0; guard < buf; i++, guard++) {
if (*guard != GUARD_BYTE) {
ERROR(tdev, "guard byte[%d] %d (not %d)\n",
i, *guard, GUARD_BYTE);
return -EINVAL;
}
}
return 0;
}
static int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
{
unsigned i;
u8 expected;
u8 *buf = urb->transfer_buffer;
unsigned len = urb->actual_length;
unsigned maxpacket = get_maxpacket(urb->dev, urb->pipe);
int ret = check_guard_bytes(tdev, urb);
if (ret)
return ret;
for (i = 0; i < len; i++, buf++) {
switch (pattern) {
/* all-zeroes has no synchronization issues */
case 0:
expected = 0;
break;
/* mod63 stays in sync with short-terminated transfers,
* or otherwise when host and gadget agree on how large
* each usb transfer request should be. resync is done
* with set_interface or set_config.
*/
case 1: /* mod63 */
expected = (i % maxpacket) % 63;
break;
/* always fail unsupported patterns */
default:
expected = !*buf;
break;
}
if (*buf == expected)
continue;
ERROR(tdev, "buf[%d] = %d (not %d)\n", i, *buf, expected);
return -EINVAL;
}
return 0;
}
static void simple_free_urb(struct urb *urb)
{
unsigned long offset = buffer_offset(urb->transfer_buffer);
if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
usb_free_coherent(
urb->dev,
urb->transfer_buffer_length + offset,
urb->transfer_buffer - offset,
urb->transfer_dma - offset);
else
kfree(urb->transfer_buffer - offset);
usb_free_urb(urb);
}
static int simple_io(
struct usbtest_dev *tdev,
struct urb *urb,
int iterations,
int vary,
int expected,
const char *label
)
{
struct usb_device *udev = urb->dev;
int max = urb->transfer_buffer_length;
struct completion completion;
int retval = 0;
unsigned long expire;
urb->context = &completion;
while (retval == 0 && iterations-- > 0) {
init_completion(&completion);
if (usb_pipeout(urb->pipe)) {
simple_fill_buf(urb);
urb->transfer_flags |= URB_ZERO_PACKET;
}
retval = usb_submit_urb(urb, GFP_KERNEL);
if (retval != 0)
break;
expire = msecs_to_jiffies(SIMPLE_IO_TIMEOUT);
if (!wait_for_completion_timeout(&completion, expire)) {
usb_kill_urb(urb);
retval = (urb->status == -ENOENT ?
-ETIMEDOUT : urb->status);
} else {
retval = urb->status;
}
urb->dev = udev;
if (retval == 0 && usb_pipein(urb->pipe))
retval = simple_check_buf(tdev, urb);
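/* when 'vary' is set, step the length each iteration, wrapping
 * modulo max and remapping 0 so a zero-length transfer is never
 * queued; e.g. max=512, vary=100 yields 512, 100, 200, ...
 */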
if (vary) {
int len = urb->transfer_buffer_length;
len += vary;
len %= max;
if (len == 0)
len = (vary < max) ? vary : max;
urb->transfer_buffer_length = len;
}
/* FIXME if endpoint halted, clear halt (and log) */
}
urb->transfer_buffer_length = max;
if (expected != retval)
dev_err(&udev->dev,
"%s failed, iterations left %d, status %d (not %d)\n",
label, iterations, retval, expected);
return retval;
}
/*-------------------------------------------------------------------------*/
/* We use scatterlist primitives to test queued I/O.
* Yes, this also tests the scatterlist primitives.
*/
static void free_sglist(struct scatterlist *sg, int nents)
{
unsigned i;
if (!sg)
return;
for (i = 0; i < nents; i++) {
if (!sg_page(&sg[i]))
continue;
kfree(sg_virt(&sg[i]));
}
kfree(sg);
}
static struct scatterlist *
alloc_sglist(int nents, int max, int vary, struct usbtest_dev *dev, int pipe)
{
struct scatterlist *sg;
unsigned int n_size = 0;
unsigned i;
unsigned size = max;
unsigned maxpacket =
get_maxpacket(interface_to_usbdev(dev->intf), pipe);
if (max == 0)
return NULL;
sg = kmalloc_array(nents, sizeof(*sg), GFP_KERNEL);
if (!sg)
return NULL;
sg_init_table(sg, nents);
for (i = 0; i < nents; i++) {
char *buf;
unsigned j;
buf = kzalloc(size, GFP_KERNEL);
if (!buf) {
free_sglist(sg, i);
return NULL;
}
/* kmalloc pages are always physically contiguous! */
sg_set_buf(&sg[i], buf, size);
switch (pattern) {
case 0:
/* already zeroed */
break;
case 1:
for (j = 0; j < size; j++)
*buf++ = (u8) (((j + n_size) % maxpacket) % 63);
n_size += size;
break;
}
if (vary) {
size += vary;
size %= max;
if (size == 0)
size = (vary < max) ? vary : max;
}
}
return sg;
}
struct sg_timeout {
struct timer_list timer;
struct usb_sg_request *req;
};
static void sg_timeout(struct timer_list *t)
{
struct sg_timeout *timeout = from_timer(timeout, t, timer);
usb_sg_cancel(timeout->req);
}
static int perform_sglist(
struct usbtest_dev *tdev,
unsigned iterations,
int pipe,
struct usb_sg_request *req,
struct scatterlist *sg,
int nents
)
{
struct usb_device *udev = testdev_to_usbdev(tdev);
int retval = 0;
struct sg_timeout timeout = {
.req = req,
};
timer_setup_on_stack(&timeout.timer, sg_timeout, 0);
while (retval == 0 && iterations-- > 0) {
retval = usb_sg_init(req, udev, pipe,
(udev->speed == USB_SPEED_HIGH)
? (INTERRUPT_RATE << 3)
: INTERRUPT_RATE,
sg, nents, 0, GFP_KERNEL);
if (retval)
break;
mod_timer(&timeout.timer, jiffies +
msecs_to_jiffies(SIMPLE_IO_TIMEOUT));
usb_sg_wait(req);
if (!del_timer_sync(&timeout.timer))
retval = -ETIMEDOUT;
else
retval = req->status;
destroy_timer_on_stack(&timeout.timer);
/* FIXME check resulting data pattern */
/* FIXME if endpoint halted, clear halt (and log) */
}
/* FIXME for unlink or fault handling tests, don't report
* failure if retval is as we expected ...
*/
if (retval)
ERROR(tdev, "perform_sglist failed, "
"iterations left %d, status %d\n",
iterations, retval);
return retval;
}
/*-------------------------------------------------------------------------*/
/* unqueued control message testing
*
* there's a nice set of device functional requirements in chapter 9 of the
* usb 2.0 spec, which we can apply to ANY device, even ones that don't use
* special test firmware.
*
* we know the device is configured (or suspended) by the time it's visible
* through usbfs. we can't change that, so we won't test enumeration (which
* worked 'well enough' to get here, this time), power management (ditto),
* or remote wakeup (which needs human interaction).
*/
static unsigned realworld = 1;
module_param(realworld, uint, 0);
MODULE_PARM_DESC(realworld, "clear to demand stricter spec compliance");
static int get_altsetting(struct usbtest_dev *dev)
{
struct usb_interface *iface = dev->intf;
struct usb_device *udev = interface_to_usbdev(iface);
int retval;
retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
USB_REQ_GET_INTERFACE, USB_DIR_IN|USB_RECIP_INTERFACE,
0, iface->altsetting[0].desc.bInterfaceNumber,
dev->buf, 1, USB_CTRL_GET_TIMEOUT);
switch (retval) {
case 1:
return dev->buf[0];
case 0:
retval = -ERANGE;
fallthrough;
default:
return retval;
}
}
static int set_altsetting(struct usbtest_dev *dev, int alternate)
{
struct usb_interface *iface = dev->intf;
struct usb_device *udev;
if (alternate < 0 || alternate >= 256)
return -EINVAL;
udev = interface_to_usbdev(iface);
return usb_set_interface(udev,
iface->altsetting[0].desc.bInterfaceNumber,
alternate);
}
static int is_good_config(struct usbtest_dev *tdev, int len)
{
struct usb_config_descriptor *config;
if (len < sizeof(*config))
return 0;
config = (struct usb_config_descriptor *) tdev->buf;
switch (config->bDescriptorType) {
case USB_DT_CONFIG:
case USB_DT_OTHER_SPEED_CONFIG:
if (config->bLength != 9) {
ERROR(tdev, "bogus config descriptor length\n");
return 0;
}
/* this bit 'must be 1' but often isn't */
if (!realworld && !(config->bmAttributes & 0x80)) {
ERROR(tdev, "high bit of config attributes not set\n");
return 0;
}
if (config->bmAttributes & 0x1f) { /* reserved == 0 */
ERROR(tdev, "reserved config bits set\n");
return 0;
}
break;
default:
return 0;
}
if (le16_to_cpu(config->wTotalLength) == len) /* read it all */
return 1;
if (le16_to_cpu(config->wTotalLength) >= TBUF_SIZE) /* max partial read */
return 1;
ERROR(tdev, "bogus config descriptor read size\n");
return 0;
}
static int is_good_ext(struct usbtest_dev *tdev, u8 *buf)
{
struct usb_ext_cap_descriptor *ext;
u32 attr;
ext = (struct usb_ext_cap_descriptor *) buf;
if (ext->bLength != USB_DT_USB_EXT_CAP_SIZE) {
ERROR(tdev, "bogus usb 2.0 extension descriptor length\n");
return 0;
}
attr = le32_to_cpu(ext->bmAttributes);
/* bits[1:15] is used and others are reserved */
if (attr & ~0xfffe) { /* reserved == 0 */
ERROR(tdev, "reserved bits set\n");
return 0;
}
return 1;
}
static int is_good_ss_cap(struct usbtest_dev *tdev, u8 *buf)
{
struct usb_ss_cap_descriptor *ss;
ss = (struct usb_ss_cap_descriptor *) buf;
if (ss->bLength != USB_DT_USB_SS_CAP_SIZE) {
ERROR(tdev, "bogus superspeed device capability descriptor length\n");
return 0;
}
/*
* only bit[1] of bmAttributes is used for LTM and others are
* reserved
*/
if (ss->bmAttributes & ~0x02) { /* reserved == 0 */
ERROR(tdev, "reserved bits set in bmAttributes\n");
return 0;
}
/* bits[0:3] of wSpeedSupported is used and others are reserved */
if (le16_to_cpu(ss->wSpeedSupported) & ~0x0f) { /* reserved == 0 */
ERROR(tdev, "reserved bits set in wSpeedSupported\n");
return 0;
}
return 1;
}
static int is_good_con_id(struct usbtest_dev *tdev, u8 *buf)
{
struct usb_ss_container_id_descriptor *con_id;
con_id = (struct usb_ss_container_id_descriptor *) buf;
if (con_id->bLength != USB_DT_USB_SS_CONTN_ID_SIZE) {
ERROR(tdev, "bogus container id descriptor length\n");
return 0;
}
if (con_id->bReserved) { /* reserved == 0 */
ERROR(tdev, "reserved bits set\n");
return 0;
}
return 1;
}
/* sanity test for standard requests working with usb_control_msg() and some
* of the utility functions which use it.
*
* this doesn't test how endpoint halts behave or data toggles get set, since
* we won't do I/O to bulk/interrupt endpoints here (which is how to change
* halt or toggle). toggle testing is impractical without support from hcds.
*
* this avoids failing devices linux would normally work with, by not testing
* config/altsetting operations for devices that only support their defaults.
* such devices rarely support those needless operations.
*
* NOTE that since this is a sanity test, it's not examining boundary cases
* to see if usbcore, hcd, and device all behave right. such testing would
* involve varied read sizes and other operation sequences.
*/
static int ch9_postconfig(struct usbtest_dev *dev)
{
struct usb_interface *iface = dev->intf;
struct usb_device *udev = interface_to_usbdev(iface);
int i, alt, retval;
/* [9.2.3] if there's more than one altsetting, we need to be able to
* set and get each one. mostly trusts the descriptors from usbcore.
*/
for (i = 0; i < iface->num_altsetting; i++) {
/* 9.2.3 constrains the range here */
alt = iface->altsetting[i].desc.bAlternateSetting;
if (alt < 0 || alt >= iface->num_altsetting) {
dev_err(&iface->dev,
"invalid alt [%d].bAltSetting = %d\n",
i, alt);
}
/* [real world] get/set unimplemented if there's only one */
if (realworld && iface->num_altsetting == 1)
continue;
/* [9.4.10] set_interface */
retval = set_altsetting(dev, alt);
if (retval) {
dev_err(&iface->dev, "can't set_interface = %d, %d\n",
alt, retval);
return retval;
}
/* [9.4.4] get_interface always works */
retval = get_altsetting(dev);
if (retval != alt) {
dev_err(&iface->dev, "get alt should be %d, was %d\n",
alt, retval);
return (retval < 0) ? retval : -EDOM;
}
}
/* [real world] get_config unimplemented if there's only one */
if (!realworld || udev->descriptor.bNumConfigurations != 1) {
int expected = udev->actconfig->desc.bConfigurationValue;
/* [9.4.2] get_configuration always works
* ... although some cheap devices (like one TI Hub I've got)
* won't return config descriptors except before set_config.
*/
retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
USB_REQ_GET_CONFIGURATION,
USB_DIR_IN | USB_RECIP_DEVICE,
0, 0, dev->buf, 1, USB_CTRL_GET_TIMEOUT);
if (retval != 1 || dev->buf[0] != expected) {
dev_err(&iface->dev, "get config --> %d %d (1 %d)\n",
retval, dev->buf[0], expected);
return (retval < 0) ? retval : -EDOM;
}
}
/* there's always [9.4.3] a device descriptor [9.6.1] */
retval = usb_get_descriptor(udev, USB_DT_DEVICE, 0,
dev->buf, sizeof(udev->descriptor));
if (retval != sizeof(udev->descriptor)) {
dev_err(&iface->dev, "dev descriptor --> %d\n", retval);
return (retval < 0) ? retval : -EDOM;
}
/*
* there's always [9.4.3] a bos device descriptor [9.6.2] in USB
* 3.0 spec
*/
if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0210) {
struct usb_bos_descriptor *bos = NULL;
struct usb_dev_cap_header *header = NULL;
unsigned total, num, length;
u8 *buf;
retval = usb_get_descriptor(udev, USB_DT_BOS, 0, dev->buf,
sizeof(*udev->bos->desc));
if (retval != sizeof(*udev->bos->desc)) {
dev_err(&iface->dev, "bos descriptor --> %d\n", retval);
return (retval < 0) ? retval : -EDOM;
}
bos = (struct usb_bos_descriptor *)dev->buf;
total = le16_to_cpu(bos->wTotalLength);
num = bos->bNumDeviceCaps;
if (total > TBUF_SIZE)
total = TBUF_SIZE;
/*
* get generic device-level capability descriptors [9.6.2]
* in USB 3.0 spec
*/
retval = usb_get_descriptor(udev, USB_DT_BOS, 0, dev->buf,
total);
if (retval != total) {
dev_err(&iface->dev, "bos descriptor set --> %d\n",
retval);
return (retval < 0) ? retval : -EDOM;
}
length = sizeof(*udev->bos->desc);
buf = dev->buf;
for (i = 0; i < num; i++) {
buf += length;
if (buf + sizeof(struct usb_dev_cap_header) >
dev->buf + total)
break;
header = (struct usb_dev_cap_header *)buf;
length = header->bLength;
if (header->bDescriptorType !=
USB_DT_DEVICE_CAPABILITY) {
dev_warn(&udev->dev, "not device capability descriptor, skip\n");
continue;
}
switch (header->bDevCapabilityType) {
case USB_CAP_TYPE_EXT:
if (buf + USB_DT_USB_EXT_CAP_SIZE >
dev->buf + total ||
!is_good_ext(dev, buf)) {
dev_err(&iface->dev, "bogus usb 2.0 extension descriptor\n");
return -EDOM;
}
break;
case USB_SS_CAP_TYPE:
if (buf + USB_DT_USB_SS_CAP_SIZE >
dev->buf + total ||
!is_good_ss_cap(dev, buf)) {
dev_err(&iface->dev, "bogus superspeed device capability descriptor\n");
return -EDOM;
}
break;
case CONTAINER_ID_TYPE:
if (buf + USB_DT_USB_SS_CONTN_ID_SIZE >
dev->buf + total ||
!is_good_con_id(dev, buf)) {
dev_err(&iface->dev, "bogus container id descriptor\n");
return -EDOM;
}
break;
default:
break;
}
}
}
/* there's always [9.4.3] at least one config descriptor [9.6.3] */
for (i = 0; i < udev->descriptor.bNumConfigurations; i++) {
retval = usb_get_descriptor(udev, USB_DT_CONFIG, i,
dev->buf, TBUF_SIZE);
if (!is_good_config(dev, retval)) {
dev_err(&iface->dev,
"config [%d] descriptor --> %d\n",
i, retval);
return (retval < 0) ? retval : -EDOM;
}
/* FIXME cross-checking udev->config[i] to make sure usbcore
* parsed it right (etc) would be good testing paranoia
*/
}
/* and sometimes [9.2.6.6] speed dependent descriptors */
if (le16_to_cpu(udev->descriptor.bcdUSB) == 0x0200) {
struct usb_qualifier_descriptor *d = NULL;
/* device qualifier [9.6.2] */
retval = usb_get_descriptor(udev,
USB_DT_DEVICE_QUALIFIER, 0, dev->buf,
sizeof(struct usb_qualifier_descriptor));
if (retval == -EPIPE) {
if (udev->speed == USB_SPEED_HIGH) {
dev_err(&iface->dev,
"hs dev qualifier --> %d\n",
retval);
return retval;
}
/* usb2.0 but not high-speed capable; fine */
} else if (retval != sizeof(struct usb_qualifier_descriptor)) {
dev_err(&iface->dev, "dev qualifier --> %d\n", retval);
return (retval < 0) ? retval : -EDOM;
} else
d = (struct usb_qualifier_descriptor *) dev->buf;
/* might not have [9.6.2] any other-speed configs [9.6.4] */
if (d) {
unsigned max = d->bNumConfigurations;
for (i = 0; i < max; i++) {
retval = usb_get_descriptor(udev,
USB_DT_OTHER_SPEED_CONFIG, i,
dev->buf, TBUF_SIZE);
if (!is_good_config(dev, retval)) {
dev_err(&iface->dev,
"other speed config --> %d\n",
retval);
return (retval < 0) ? retval : -EDOM;
}
}
}
}
/* FIXME fetch strings from at least the device descriptor */
/* [9.4.5] get_status always works */
retval = usb_get_std_status(udev, USB_RECIP_DEVICE, 0, dev->buf);
if (retval) {
dev_err(&iface->dev, "get dev status --> %d\n", retval);
return retval;
}
/* FIXME configuration.bmAttributes says if we could try to set/clear
* the device's remote wakeup feature ... if we can, test that here
*/
retval = usb_get_std_status(udev, USB_RECIP_INTERFACE,
iface->altsetting[0].desc.bInterfaceNumber, dev->buf);
if (retval) {
dev_err(&iface->dev, "get interface status --> %d\n", retval);
return retval;
}
/* FIXME get status for each endpoint in the interface */
return 0;
}
/*-------------------------------------------------------------------------*/
/* use ch9 requests to test whether:
* (a) queues work for control, keeping N subtests queued and
* active (auto-resubmit) for M loops through the queue.
* (b) protocol stalls (control-only) will autorecover.
* it's not like bulk/intr; no halt clearing.
* (c) short control reads are reported and handled.
* (d) queues are always processed in-order
*/
struct ctrl_ctx {
spinlock_t lock;
struct usbtest_dev *dev;
struct completion complete;
unsigned count;
unsigned pending;
int status;
struct urb **urb;
struct usbtest_param_32 *param;
int last;
};
#define NUM_SUBCASES 16 /* how many test subcases here? */
struct subcase {
struct usb_ctrlrequest setup;
int number;
int expected;
};
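/*
 * Each queued urb runs subcase (i % NUM_SUBCASES), so a queue depth
 * (sglen) below 16 never exercises the higher-numbered subcases, and
 * a depth above 16 repeats some of them within one pass.  For example,
 * sglen = 4 cycles through subcases 0..3 only.
 */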
static void ctrl_complete(struct urb *urb)
{
struct ctrl_ctx *ctx = urb->context;
struct usb_ctrlrequest *reqp;
struct subcase *subcase;
int status = urb->status;
unsigned long flags;
reqp = (struct usb_ctrlrequest *)urb->setup_packet;
subcase = container_of(reqp, struct subcase, setup);
spin_lock_irqsave(&ctx->lock, flags);
ctx->count--;
ctx->pending--;
/* queue must transfer and complete in fifo order, unless
* usb_unlink_urb() is used to unlink something not at the
* physical queue head (not tested).
*/
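	/* e.g. seeing subcase 3 complete right after subcase 1 is an error */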
if (subcase->number > 0) {
if ((subcase->number - ctx->last) != 1) {
ERROR(ctx->dev,
"subcase %d completed out of order, last %d\n",
subcase->number, ctx->last);
status = -EDOM;
ctx->last = subcase->number;
goto error;
}
}
ctx->last = subcase->number;
/* succeed or fault in only one way? */
if (status == subcase->expected)
status = 0;
/* async unlink for cleanup? */
else if (status != -ECONNRESET) {
/* some faults are allowed, not required */
		if (subcase->expected > 0 &&
				(status == -subcase->expected	/* happened */
				 || status == 0))		/* didn't */
			status = 0;
/* sometimes more than one fault is allowed */
else if (subcase->number == 12 && status == -EPIPE)
status = 0;
else
ERROR(ctx->dev, "subtest %d error, status %d\n",
subcase->number, status);
}
/* unexpected status codes mean errors; ideally, in hardware */
if (status) {
error:
if (ctx->status == 0) {
int i;
ctx->status = status;
ERROR(ctx->dev, "control queue %02x.%02x, err %d, "
"%d left, subcase %d, len %d/%d\n",
reqp->bRequestType, reqp->bRequest,
status, ctx->count, subcase->number,
urb->actual_length,
urb->transfer_buffer_length);
/* FIXME this "unlink everything" exit route should
* be a separate test case.
*/
/* unlink whatever's still pending */
for (i = 1; i < ctx->param->sglen; i++) {
struct urb *u = ctx->urb[
(i + subcase->number)
% ctx->param->sglen];
if (u == urb || !u->dev)
continue;
spin_unlock(&ctx->lock);
status = usb_unlink_urb(u);
spin_lock(&ctx->lock);
switch (status) {
case -EINPROGRESS:
case -EBUSY:
case -EIDRM:
continue;
default:
ERROR(ctx->dev, "urb unlink --> %d\n",
status);
}
}
status = ctx->status;
}
}
/* resubmit if we need to, else mark this as done */
if ((status == 0) && (ctx->pending < ctx->count)) {
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status != 0) {
ERROR(ctx->dev,
"can't resubmit ctrl %02x.%02x, err %d\n",
reqp->bRequestType, reqp->bRequest, status);
urb->dev = NULL;
} else
ctx->pending++;
} else
urb->dev = NULL;
/* signal completion when nothing's queued */
if (ctx->pending == 0)
complete(&ctx->complete);
spin_unlock_irqrestore(&ctx->lock, flags);
}
static int
test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param_32 *param)
{
struct usb_device *udev = testdev_to_usbdev(dev);
struct urb **urb;
struct ctrl_ctx context;
int i;
if (param->sglen == 0 || param->iterations > UINT_MAX / param->sglen)
return -EOPNOTSUPP;
spin_lock_init(&context.lock);
context.dev = dev;
init_completion(&context.complete);
context.count = param->sglen * param->iterations;
context.pending = 0;
context.status = -ENOMEM;
context.param = param;
context.last = -1;
/* allocate and init the urbs we'll queue.
* as with bulk/intr sglists, sglen is the queue depth; it also
* controls which subtests run (more tests than sglen) or rerun.
*/
urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL);
if (!urb)
return -ENOMEM;
for (i = 0; i < param->sglen; i++) {
int pipe = usb_rcvctrlpipe(udev, 0);
unsigned len;
struct urb *u;
struct usb_ctrlrequest req;
struct subcase *reqp;
/* sign of this variable means:
* -: tested code must return this (negative) error code
* +: tested code may return this (negative too) error code
*/
int expected = 0;
/* requests here are mostly expected to succeed on any
* device, but some are chosen to trigger protocol stalls
* or short reads.
*/
memset(&req, 0, sizeof(req));
req.bRequest = USB_REQ_GET_DESCRIPTOR;
req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
switch (i % NUM_SUBCASES) {
case 0: /* get device descriptor */
req.wValue = cpu_to_le16(USB_DT_DEVICE << 8);
len = sizeof(struct usb_device_descriptor);
break;
case 1: /* get first config descriptor (only) */
req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
len = sizeof(struct usb_config_descriptor);
break;
case 2: /* get altsetting (OFTEN STALLS) */
req.bRequest = USB_REQ_GET_INTERFACE;
req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
/* index = 0 means first interface */
len = 1;
expected = EPIPE;
break;
case 3: /* get interface status */
req.bRequest = USB_REQ_GET_STATUS;
req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
/* interface 0 */
len = 2;
break;
case 4: /* get device status */
req.bRequest = USB_REQ_GET_STATUS;
req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
len = 2;
break;
case 5: /* get device qualifier (MAY STALL) */
			req.wValue = cpu_to_le16(USB_DT_DEVICE_QUALIFIER << 8);
len = sizeof(struct usb_qualifier_descriptor);
if (udev->speed != USB_SPEED_HIGH)
expected = EPIPE;
break;
case 6: /* get first config descriptor, plus interface */
req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
len = sizeof(struct usb_config_descriptor);
len += sizeof(struct usb_interface_descriptor);
break;
case 7: /* get interface descriptor (ALWAYS STALLS) */
			req.wValue = cpu_to_le16(USB_DT_INTERFACE << 8);
/* interface == 0 */
len = sizeof(struct usb_interface_descriptor);
expected = -EPIPE;
break;
/* NOTE: two consecutive stalls in the queue here.
* that tests fault recovery a bit more aggressively. */
case 8: /* clear endpoint halt (MAY STALL) */
req.bRequest = USB_REQ_CLEAR_FEATURE;
req.bRequestType = USB_RECIP_ENDPOINT;
/* wValue 0 == ep halt */
/* wIndex 0 == ep0 (shouldn't halt!) */
len = 0;
pipe = usb_sndctrlpipe(udev, 0);
expected = EPIPE;
break;
case 9: /* get endpoint status */
req.bRequest = USB_REQ_GET_STATUS;
req.bRequestType = USB_DIR_IN|USB_RECIP_ENDPOINT;
/* endpoint 0 */
len = 2;
break;
case 10: /* trigger short read (EREMOTEIO) */
req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
len = 1024;
expected = -EREMOTEIO;
break;
/* NOTE: two consecutive _different_ faults in the queue. */
case 11: /* get endpoint descriptor (ALWAYS STALLS) */
req.wValue = cpu_to_le16(USB_DT_ENDPOINT << 8);
/* endpoint == 0 */
len = sizeof(struct usb_interface_descriptor);
expected = EPIPE;
break;
/* NOTE: sometimes even a third fault in the queue! */
case 12: /* get string 0 descriptor (MAY STALL) */
req.wValue = cpu_to_le16(USB_DT_STRING << 8);
/* string == 0, for language IDs */
len = sizeof(struct usb_interface_descriptor);
/* may succeed when > 4 languages */
expected = EREMOTEIO; /* or EPIPE, if no strings */
break;
case 13: /* short read, resembling case 10 */
req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
/* last data packet "should" be DATA1, not DATA0 */
if (udev->speed == USB_SPEED_SUPER)
len = 1024 - 512;
else
len = 1024 - udev->descriptor.bMaxPacketSize0;
expected = -EREMOTEIO;
break;
case 14: /* short read; try to fill the last packet */
req.wValue = cpu_to_le16((USB_DT_DEVICE << 8) | 0);
/* device descriptor size == 18 bytes */
len = udev->descriptor.bMaxPacketSize0;
if (udev->speed == USB_SPEED_SUPER)
len = 512;
switch (len) {
case 8:
len = 24;
break;
case 16:
len = 32;
break;
}
expected = -EREMOTEIO;
break;
case 15:
req.wValue = cpu_to_le16(USB_DT_BOS << 8);
if (udev->bos)
len = le16_to_cpu(udev->bos->desc->wTotalLength);
else
len = sizeof(struct usb_bos_descriptor);
if (le16_to_cpu(udev->descriptor.bcdUSB) < 0x0201)
expected = -EPIPE;
break;
default:
ERROR(dev, "bogus number of ctrl queue testcases!\n");
context.status = -EINVAL;
goto cleanup;
}
req.wLength = cpu_to_le16(len);
urb[i] = u = simple_alloc_urb(udev, pipe, len, 0);
if (!u)
goto cleanup;
reqp = kmalloc(sizeof(*reqp), GFP_KERNEL);
if (!reqp)
goto cleanup;
reqp->setup = req;
reqp->number = i % NUM_SUBCASES;
reqp->expected = expected;
u->setup_packet = (char *) &reqp->setup;
u->context = &context;
u->complete = ctrl_complete;
}
/* queue the urbs */
context.urb = urb;
spin_lock_irq(&context.lock);
for (i = 0; i < param->sglen; i++) {
context.status = usb_submit_urb(urb[i], GFP_ATOMIC);
if (context.status != 0) {
ERROR(dev, "can't submit urb[%d], status %d\n",
i, context.status);
context.count = context.pending;
break;
}
context.pending++;
}
spin_unlock_irq(&context.lock);
/* FIXME set timer and time out; provide a disconnect hook */
/* wait for the last one to complete */
if (context.pending > 0)
wait_for_completion(&context.complete);
cleanup:
for (i = 0; i < param->sglen; i++) {
if (!urb[i])
continue;
urb[i]->dev = udev;
kfree(urb[i]->setup_packet);
simple_free_urb(urb[i]);
}
kfree(urb);
return context.status;
}
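/*
 * Queue-depth arithmetic, for reference: with sglen = 8 and
 * iterations = 100, context.count starts at 800; each urb resubmits
 * itself from ctrl_complete() until only the final 'pending'
 * completions remain, so the queue stays 8 deep for roughly 100
 * passes over the subcases.
 */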
#undef NUM_SUBCASES
/*-------------------------------------------------------------------------*/
static void unlink1_callback(struct urb *urb)
{
int status = urb->status;
/* we "know" -EPIPE (stall) never happens */
if (!status)
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status) {
urb->status = status;
complete(urb->context);
}
}
static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async)
{
struct urb *urb;
struct completion completion;
int retval = 0;
init_completion(&completion);
urb = simple_alloc_urb(testdev_to_usbdev(dev), pipe, size, 0);
if (!urb)
return -ENOMEM;
urb->context = &completion;
urb->complete = unlink1_callback;
if (usb_pipeout(urb->pipe)) {
simple_fill_buf(urb);
urb->transfer_flags |= URB_ZERO_PACKET;
}
/* keep the endpoint busy. there are lots of hc/hcd-internal
* states, and testing should get to all of them over time.
*
* FIXME want additional tests for when endpoint is STALLing
* due to errors, or is just NAKing requests.
*/
retval = usb_submit_urb(urb, GFP_KERNEL);
	if (retval != 0) {
		dev_err(&dev->intf->dev, "submit fail %d\n", retval);
		simple_free_urb(urb);
		return retval;
	}
/* unlinking that should always work. variable delay tests more
* hcd states and code paths, even with little other system load.
*/
msleep(jiffies % (2 * INTERRUPT_RATE));
if (async) {
while (!completion_done(&completion)) {
retval = usb_unlink_urb(urb);
if (retval == 0 && usb_pipein(urb->pipe))
retval = simple_check_buf(dev, urb);
switch (retval) {
case -EBUSY:
case -EIDRM:
/* we can't unlink urbs while they're completing
* or if they've completed, and we haven't
* resubmitted. "normal" drivers would prevent
* resubmission, but since we're testing unlink
* paths, we can't.
*/
ERROR(dev, "unlink retry\n");
continue;
case 0:
case -EINPROGRESS:
break;
default:
dev_err(&dev->intf->dev,
"unlink fail %d\n", retval);
return retval;
}
break;
}
} else
usb_kill_urb(urb);
wait_for_completion(&completion);
retval = urb->status;
simple_free_urb(urb);
if (async)
return (retval == -ECONNRESET) ? 0 : retval - 1000;
else
return (retval == -ENOENT || retval == -EPERM) ?
0 : retval - 2000;
}
static int unlink_simple(struct usbtest_dev *dev, int pipe, int len)
{
int retval = 0;
/* test sync and async paths */
retval = unlink1(dev, pipe, len, 1);
if (!retval)
retval = unlink1(dev, pipe, len, 0);
return retval;
}
/*-------------------------------------------------------------------------*/
struct queued_ctx {
struct completion complete;
atomic_t pending;
unsigned num;
int status;
struct urb **urbs;
};
static void unlink_queued_callback(struct urb *urb)
{
int status = urb->status;
struct queued_ctx *ctx = urb->context;
if (ctx->status)
goto done;
if (urb == ctx->urbs[ctx->num - 4] || urb == ctx->urbs[ctx->num - 2]) {
if (status == -ECONNRESET)
goto done;
/* What error should we report if the URB completed normally? */
}
if (status != 0)
ctx->status = status;
done:
if (atomic_dec_and_test(&ctx->pending))
complete(&ctx->complete);
}
static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num,
unsigned size)
{
struct queued_ctx ctx;
struct usb_device *udev = testdev_to_usbdev(dev);
void *buf;
dma_addr_t buf_dma;
int i;
int retval = -ENOMEM;
init_completion(&ctx.complete);
atomic_set(&ctx.pending, 1); /* One more than the actual value */
ctx.num = num;
ctx.status = 0;
buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &buf_dma);
if (!buf)
return retval;
memset(buf, 0, size);
/* Allocate and init the urbs we'll queue */
ctx.urbs = kcalloc(num, sizeof(struct urb *), GFP_KERNEL);
if (!ctx.urbs)
goto free_buf;
for (i = 0; i < num; i++) {
ctx.urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
if (!ctx.urbs[i])
goto free_urbs;
usb_fill_bulk_urb(ctx.urbs[i], udev, pipe, buf, size,
unlink_queued_callback, &ctx);
ctx.urbs[i]->transfer_dma = buf_dma;
ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
if (usb_pipeout(ctx.urbs[i]->pipe)) {
simple_fill_buf(ctx.urbs[i]);
ctx.urbs[i]->transfer_flags |= URB_ZERO_PACKET;
}
}
/* Submit all the URBs and then unlink URBs num - 4 and num - 2. */
for (i = 0; i < num; i++) {
atomic_inc(&ctx.pending);
retval = usb_submit_urb(ctx.urbs[i], GFP_KERNEL);
if (retval != 0) {
dev_err(&dev->intf->dev, "submit urbs[%d] fail %d\n",
i, retval);
atomic_dec(&ctx.pending);
ctx.status = retval;
break;
}
}
if (i == num) {
usb_unlink_urb(ctx.urbs[num - 4]);
usb_unlink_urb(ctx.urbs[num - 2]);
} else {
while (--i >= 0)
usb_unlink_urb(ctx.urbs[i]);
}
if (atomic_dec_and_test(&ctx.pending)) /* The extra count */
complete(&ctx.complete);
wait_for_completion(&ctx.complete);
retval = ctx.status;
free_urbs:
for (i = 0; i < num; i++)
usb_free_urb(ctx.urbs[i]);
kfree(ctx.urbs);
free_buf:
usb_free_coherent(udev, size, buf, buf_dma);
return retval;
}
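/*
 * unlink_queued() dereferences urbs[num - 4] and urbs[num - 2], so
 * callers must queue at least four urbs; test case 24 below enforces
 * this with its "param->sglen < 4" check.
 */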
/*-------------------------------------------------------------------------*/
static int verify_not_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
{
int retval;
u16 status;
/* shouldn't look or act halted */
retval = usb_get_std_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
if (retval < 0) {
ERROR(tdev, "ep %02x couldn't get no-halt status, %d\n",
ep, retval);
return retval;
}
if (status != 0) {
ERROR(tdev, "ep %02x bogus status: %04x != 0\n", ep, status);
return -EINVAL;
}
retval = simple_io(tdev, urb, 1, 0, 0, __func__);
if (retval != 0)
return -EINVAL;
return 0;
}
static int verify_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
{
int retval;
u16 status;
/* should look and act halted */
retval = usb_get_std_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
if (retval < 0) {
ERROR(tdev, "ep %02x couldn't get halt status, %d\n",
ep, retval);
return retval;
}
if (status != 1) {
ERROR(tdev, "ep %02x bogus status: %04x != 1\n", ep, status);
return -EINVAL;
}
retval = simple_io(tdev, urb, 1, 0, -EPIPE, __func__);
if (retval != -EPIPE)
return -EINVAL;
retval = simple_io(tdev, urb, 1, 0, -EPIPE, "verify_still_halted");
if (retval != -EPIPE)
return -EINVAL;
return 0;
}
static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
{
int retval;
/* shouldn't look or act halted now */
retval = verify_not_halted(tdev, ep, urb);
if (retval < 0)
return retval;
/* set halt (protocol test only), verify it worked */
retval = usb_control_msg(urb->dev, usb_sndctrlpipe(urb->dev, 0),
USB_REQ_SET_FEATURE, USB_RECIP_ENDPOINT,
USB_ENDPOINT_HALT, ep,
NULL, 0, USB_CTRL_SET_TIMEOUT);
if (retval < 0) {
ERROR(tdev, "ep %02x couldn't set halt, %d\n", ep, retval);
return retval;
}
retval = verify_halted(tdev, ep, urb);
if (retval < 0) {
int ret;
/* clear halt anyways, else further tests will fail */
ret = usb_clear_halt(urb->dev, urb->pipe);
if (ret)
ERROR(tdev, "ep %02x couldn't clear halt, %d\n",
ep, ret);
return retval;
}
/* clear halt (tests API + protocol), verify it worked */
retval = usb_clear_halt(urb->dev, urb->pipe);
if (retval < 0) {
ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
return retval;
}
retval = verify_not_halted(tdev, ep, urb);
if (retval < 0)
return retval;
/* NOTE: could also verify SET_INTERFACE clear halts ... */
return 0;
}
static int test_toggle_sync(struct usbtest_dev *tdev, int ep, struct urb *urb)
{
int retval;
/* clear initial data toggle to DATA0 */
retval = usb_clear_halt(urb->dev, urb->pipe);
if (retval < 0) {
ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
return retval;
}
/* transfer 3 data packets, should be DATA0, DATA1, DATA0 */
retval = simple_io(tdev, urb, 1, 0, 0, __func__);
if (retval != 0)
return -EINVAL;
/* clear halt resets device side data toggle, host should react to it */
retval = usb_clear_halt(urb->dev, urb->pipe);
if (retval < 0) {
ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
return retval;
}
/* host should use DATA0 again after clear halt */
retval = simple_io(tdev, urb, 1, 0, 0, __func__);
return retval;
}
static int halt_simple(struct usbtest_dev *dev)
{
int ep;
int retval = 0;
struct urb *urb;
struct usb_device *udev = testdev_to_usbdev(dev);
if (udev->speed == USB_SPEED_SUPER)
urb = simple_alloc_urb(udev, 0, 1024, 0);
else
urb = simple_alloc_urb(udev, 0, 512, 0);
if (urb == NULL)
return -ENOMEM;
if (dev->in_pipe) {
ep = usb_pipeendpoint(dev->in_pipe) | USB_DIR_IN;
urb->pipe = dev->in_pipe;
retval = test_halt(dev, ep, urb);
if (retval < 0)
goto done;
}
if (dev->out_pipe) {
ep = usb_pipeendpoint(dev->out_pipe);
urb->pipe = dev->out_pipe;
retval = test_halt(dev, ep, urb);
}
done:
simple_free_urb(urb);
return retval;
}
static int toggle_sync_simple(struct usbtest_dev *dev)
{
int ep;
int retval = 0;
struct urb *urb;
struct usb_device *udev = testdev_to_usbdev(dev);
unsigned maxp = get_maxpacket(udev, dev->out_pipe);
	/*
	 * Create a URB that transfers an odd number of data packets, so
	 * that clearing the halt actually affects the data toggle
	 * sequence: two maxpacket-length packets plus one zero-length
	 * packet.
	 */
urb = simple_alloc_urb(udev, 0, 2 * maxp, 0);
if (urb == NULL)
return -ENOMEM;
urb->transfer_flags |= URB_ZERO_PACKET;
ep = usb_pipeendpoint(dev->out_pipe);
urb->pipe = dev->out_pipe;
retval = test_toggle_sync(dev, ep, urb);
simple_free_urb(urb);
return retval;
}
/*-------------------------------------------------------------------------*/
/* Control OUT tests use the vendor control requests from Intel's
* USB 2.0 compliance test device: write a buffer, read it back.
*
* Intel's spec only _requires_ that it work for one packet, which
* is pretty weak. Some HCDs place limits here; most devices will
* need to be able to handle more than one OUT data packet. We'll
* try whatever we're told to try.
*/
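/*
 * A worked example of the length progression below: with length = 8
 * and vary = 3, successive transfers move 8, 1, 4, 7, 1, 4, 7, ...
 * bytes, since len wraps to 1 (or 0 when !realworld) whenever it
 * would exceed 'length'.  The device thus sees a mix of short and
 * full control-OUT data stages.
 */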
static int ctrl_out(struct usbtest_dev *dev,
unsigned count, unsigned length, unsigned vary, unsigned offset)
{
unsigned i, j, len;
int retval;
u8 *buf;
char *what = "?";
struct usb_device *udev;
if (length < 1 || length > 0xffff || vary >= length)
return -EINVAL;
buf = kmalloc(length + offset, GFP_KERNEL);
if (!buf)
return -ENOMEM;
buf += offset;
udev = testdev_to_usbdev(dev);
len = length;
retval = 0;
/* NOTE: hardware might well act differently if we pushed it
* with lots back-to-back queued requests.
*/
for (i = 0; i < count; i++) {
/* write patterned data */
for (j = 0; j < len; j++)
buf[j] = (u8)(i + j);
retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x5b, USB_DIR_OUT|USB_TYPE_VENDOR,
0, 0, buf, len, USB_CTRL_SET_TIMEOUT);
if (retval != len) {
what = "write";
if (retval >= 0) {
ERROR(dev, "ctrl_out, wlen %d (expected %d)\n",
retval, len);
retval = -EBADMSG;
}
break;
}
/* read it back -- assuming nothing intervened!! */
retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
0x5c, USB_DIR_IN|USB_TYPE_VENDOR,
0, 0, buf, len, USB_CTRL_GET_TIMEOUT);
if (retval != len) {
what = "read";
if (retval >= 0) {
ERROR(dev, "ctrl_out, rlen %d (expected %d)\n",
retval, len);
retval = -EBADMSG;
}
break;
}
/* fail if we can't verify */
for (j = 0; j < len; j++) {
if (buf[j] != (u8)(i + j)) {
ERROR(dev, "ctrl_out, byte %d is %d not %d\n",
j, buf[j], (u8)(i + j));
retval = -EBADMSG;
break;
}
}
if (retval < 0) {
what = "verify";
break;
}
len += vary;
/* [real world] the "zero bytes IN" case isn't really used.
* hardware can easily trip up in this weird case, since its
* status stage is IN, not OUT like other ep0in transfers.
*/
if (len > length)
len = realworld ? 1 : 0;
}
if (retval < 0)
ERROR(dev, "ctrl_out %s failed, code %d, count %d\n",
what, retval, i);
kfree(buf - offset);
return retval;
}
/*-------------------------------------------------------------------------*/
/* ISO/BULK tests ... mimics common usage
* - buffer length is split into N packets (mostly maxpacket sized)
* - multi-buffers according to sglen
*/
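/*
 * Packet-split example for the iso path: on a high speed endpoint
 * with wMaxPacketSize 1024 and no high-bandwidth multiplier, a
 * 6000-byte buffer becomes DIV_ROUND_UP(6000, 1024) = 6 packets,
 * five full-sized plus one short 880-byte packet at the end.
 */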
struct transfer_context {
unsigned count;
unsigned pending;
spinlock_t lock;
struct completion done;
int submit_error;
unsigned long errors;
unsigned long packet_count;
struct usbtest_dev *dev;
bool is_iso;
};
static void complicated_callback(struct urb *urb)
{
struct transfer_context *ctx = urb->context;
unsigned long flags;
spin_lock_irqsave(&ctx->lock, flags);
ctx->count--;
ctx->packet_count += urb->number_of_packets;
if (urb->error_count > 0)
ctx->errors += urb->error_count;
else if (urb->status != 0)
ctx->errors += (ctx->is_iso ? urb->number_of_packets : 1);
else if (urb->actual_length != urb->transfer_buffer_length)
ctx->errors++;
else if (check_guard_bytes(ctx->dev, urb) != 0)
ctx->errors++;
if (urb->status == 0 && ctx->count > (ctx->pending - 1)
&& !ctx->submit_error) {
int status = usb_submit_urb(urb, GFP_ATOMIC);
switch (status) {
case 0:
goto done;
default:
dev_err(&ctx->dev->intf->dev,
"resubmit err %d\n",
status);
fallthrough;
case -ENODEV: /* disconnected */
case -ESHUTDOWN: /* endpoint disabled */
ctx->submit_error = 1;
break;
}
}
ctx->pending--;
if (ctx->pending == 0) {
if (ctx->errors)
dev_err(&ctx->dev->intf->dev,
"during the test, %lu errors out of %lu\n",
ctx->errors, ctx->packet_count);
complete(&ctx->done);
}
done:
spin_unlock_irqrestore(&ctx->lock, flags);
}
static struct urb *iso_alloc_urb(
struct usb_device *udev,
int pipe,
struct usb_endpoint_descriptor *desc,
long bytes,
unsigned offset
)
{
struct urb *urb;
unsigned i, maxp, packets;
if (bytes < 0 || !desc)
return NULL;
maxp = usb_endpoint_maxp(desc);
if (udev->speed >= USB_SPEED_SUPER)
maxp *= ss_isoc_get_packet_num(udev, pipe);
else
maxp *= usb_endpoint_maxp_mult(desc);
packets = DIV_ROUND_UP(bytes, maxp);
urb = usb_alloc_urb(packets, GFP_KERNEL);
if (!urb)
return urb;
urb->dev = udev;
urb->pipe = pipe;
urb->number_of_packets = packets;
urb->transfer_buffer_length = bytes;
urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
GFP_KERNEL,
&urb->transfer_dma);
if (!urb->transfer_buffer) {
usb_free_urb(urb);
return NULL;
}
if (offset) {
memset(urb->transfer_buffer, GUARD_BYTE, offset);
urb->transfer_buffer += offset;
urb->transfer_dma += offset;
}
	/*
	 * For inbound transfers use a guard byte so that the test fails
	 * if data is not correctly copied.
	 */
memset(urb->transfer_buffer,
usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
bytes);
for (i = 0; i < packets; i++) {
/* here, only the last packet will be short */
urb->iso_frame_desc[i].length = min((unsigned) bytes, maxp);
bytes -= urb->iso_frame_desc[i].length;
urb->iso_frame_desc[i].offset = maxp * i;
}
urb->complete = complicated_callback;
/* urb->context = SET BY CALLER */
urb->interval = 1 << (desc->bInterval - 1);
urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
return urb;
}
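/*
 * The interval math above follows the ch9 encoding used by high speed
 * and SuperSpeed periodic endpoints: bInterval = 4 yields
 * urb->interval = 1 << 3 = 8, i.e. one service opportunity every
 * eight (micro)frames.
 */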
static int
test_queue(struct usbtest_dev *dev, struct usbtest_param_32 *param,
int pipe, struct usb_endpoint_descriptor *desc, unsigned offset)
{
struct transfer_context context;
struct usb_device *udev;
unsigned i;
unsigned long packets = 0;
int status = 0;
struct urb **urbs;
if (!param->sglen || param->iterations > UINT_MAX / param->sglen)
return -EINVAL;
if (param->sglen > MAX_SGLEN)
return -EINVAL;
urbs = kcalloc(param->sglen, sizeof(*urbs), GFP_KERNEL);
if (!urbs)
return -ENOMEM;
memset(&context, 0, sizeof(context));
context.count = param->iterations * param->sglen;
context.dev = dev;
context.is_iso = !!desc;
init_completion(&context.done);
spin_lock_init(&context.lock);
udev = testdev_to_usbdev(dev);
for (i = 0; i < param->sglen; i++) {
if (context.is_iso)
urbs[i] = iso_alloc_urb(udev, pipe, desc,
param->length, offset);
else
urbs[i] = complicated_alloc_urb(udev, pipe,
param->length, 0);
if (!urbs[i]) {
status = -ENOMEM;
goto fail;
}
packets += urbs[i]->number_of_packets;
urbs[i]->context = &context;
}
packets *= param->iterations;
if (context.is_iso) {
int transaction_num;
if (udev->speed >= USB_SPEED_SUPER)
transaction_num = ss_isoc_get_packet_num(udev, pipe);
else
transaction_num = usb_endpoint_maxp_mult(desc);
dev_info(&dev->intf->dev,
"iso period %d %sframes, wMaxPacket %d, transactions: %d\n",
1 << (desc->bInterval - 1),
(udev->speed >= USB_SPEED_HIGH) ? "micro" : "",
usb_endpoint_maxp(desc),
transaction_num);
dev_info(&dev->intf->dev,
"total %lu msec (%lu packets)\n",
(packets * (1 << (desc->bInterval - 1)))
/ ((udev->speed >= USB_SPEED_HIGH) ? 8 : 1),
packets);
}
spin_lock_irq(&context.lock);
for (i = 0; i < param->sglen; i++) {
++context.pending;
status = usb_submit_urb(urbs[i], GFP_ATOMIC);
if (status < 0) {
ERROR(dev, "submit iso[%d], error %d\n", i, status);
if (i == 0) {
spin_unlock_irq(&context.lock);
goto fail;
}
simple_free_urb(urbs[i]);
urbs[i] = NULL;
context.pending--;
context.submit_error = 1;
break;
}
}
spin_unlock_irq(&context.lock);
wait_for_completion(&context.done);
for (i = 0; i < param->sglen; i++) {
if (urbs[i])
simple_free_urb(urbs[i]);
}
/*
* Isochronous transfers are expected to fail sometimes. As an
* arbitrary limit, we will report an error if any submissions
* fail or if the transfer failure rate is > 10%.
*/
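	/* e.g. after 2000 iso packets, up to 200 errored packets still pass */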
	if (status == 0) {
		if (context.submit_error)
			status = -EACCES;
		else if (context.errors >
				(context.is_iso ? context.packet_count / 10 : 0))
			status = -EIO;
	}
kfree(urbs);
return status;
fail:
for (i = 0; i < param->sglen; i++) {
if (urbs[i])
simple_free_urb(urbs[i]);
}
kfree(urbs);
return status;
}
static int test_unaligned_bulk(
struct usbtest_dev *tdev,
int pipe,
unsigned length,
int iterations,
unsigned transfer_flags,
const char *label)
{
int retval;
struct urb *urb = usbtest_alloc_urb(testdev_to_usbdev(tdev),
pipe, length, transfer_flags, 1, 0, simple_callback);
if (!urb)
return -ENOMEM;
retval = simple_io(tdev, urb, iterations, 0, 0, label);
simple_free_urb(urb);
return retval;
}
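/*
 * The dispatcher below is normally driven from userspace through
 * usbfs ioctls; the "testusb" tool under tools/usb/ in the kernel
 * tree is the usual front end (mentioned here as context, not as a
 * dependency of this driver).
 */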
/* Run tests. */
static int
usbtest_do_ioctl(struct usb_interface *intf, struct usbtest_param_32 *param)
{
struct usbtest_dev *dev = usb_get_intfdata(intf);
struct usb_device *udev = testdev_to_usbdev(dev);
struct urb *urb;
struct scatterlist *sg;
struct usb_sg_request req;
unsigned i;
int retval = -EOPNOTSUPP;
if (param->iterations <= 0)
return -EINVAL;
if (param->sglen > MAX_SGLEN)
return -EINVAL;
/*
* Just a bunch of test cases that every HCD is expected to handle.
*
* Some may need specific firmware, though it'd be good to have
* one firmware image to handle all the test cases.
*
* FIXME add more tests! cancel requests, verify the data, control
* queueing, concurrent read+write threads, and so on.
*/
switch (param->test_num) {
case 0:
dev_info(&intf->dev, "TEST 0: NOP\n");
retval = 0;
break;
/* Simple non-queued bulk I/O tests */
case 1:
if (dev->out_pipe == 0)
break;
dev_info(&intf->dev,
"TEST 1: write %d bytes %u times\n",
param->length, param->iterations);
urb = simple_alloc_urb(udev, dev->out_pipe, param->length, 0);
if (!urb) {
retval = -ENOMEM;
break;
}
/* FIRMWARE: bulk sink (maybe accepts short writes) */
retval = simple_io(dev, urb, param->iterations, 0, 0, "test1");
simple_free_urb(urb);
break;
case 2:
if (dev->in_pipe == 0)
break;
dev_info(&intf->dev,
"TEST 2: read %d bytes %u times\n",
param->length, param->iterations);
urb = simple_alloc_urb(udev, dev->in_pipe, param->length, 0);
if (!urb) {
retval = -ENOMEM;
break;
}
/* FIRMWARE: bulk source (maybe generates short writes) */
retval = simple_io(dev, urb, param->iterations, 0, 0, "test2");
simple_free_urb(urb);
break;
case 3:
if (dev->out_pipe == 0 || param->vary == 0)
break;
dev_info(&intf->dev,
"TEST 3: write/%d 0..%d bytes %u times\n",
param->vary, param->length, param->iterations);
urb = simple_alloc_urb(udev, dev->out_pipe, param->length, 0);
if (!urb) {
retval = -ENOMEM;
break;
}
/* FIRMWARE: bulk sink (maybe accepts short writes) */
retval = simple_io(dev, urb, param->iterations, param->vary,
0, "test3");
simple_free_urb(urb);
break;
case 4:
if (dev->in_pipe == 0 || param->vary == 0)
break;
dev_info(&intf->dev,
"TEST 4: read/%d 0..%d bytes %u times\n",
param->vary, param->length, param->iterations);
urb = simple_alloc_urb(udev, dev->in_pipe, param->length, 0);
if (!urb) {
retval = -ENOMEM;
break;
}
/* FIRMWARE: bulk source (maybe generates short writes) */
retval = simple_io(dev, urb, param->iterations, param->vary,
0, "test4");
simple_free_urb(urb);
break;
/* Queued bulk I/O tests */
case 5:
if (dev->out_pipe == 0 || param->sglen == 0)
break;
dev_info(&intf->dev,
"TEST 5: write %d sglists %d entries of %d bytes\n",
param->iterations,
param->sglen, param->length);
sg = alloc_sglist(param->sglen, param->length,
0, dev, dev->out_pipe);
if (!sg) {
retval = -ENOMEM;
break;
}
/* FIRMWARE: bulk sink (maybe accepts short writes) */
retval = perform_sglist(dev, param->iterations, dev->out_pipe,
&req, sg, param->sglen);
free_sglist(sg, param->sglen);
break;
case 6:
if (dev->in_pipe == 0 || param->sglen == 0)
break;
dev_info(&intf->dev,
"TEST 6: read %d sglists %d entries of %d bytes\n",
param->iterations,
param->sglen, param->length);
sg = alloc_sglist(param->sglen, param->length,
0, dev, dev->in_pipe);
if (!sg) {
retval = -ENOMEM;
break;
}
/* FIRMWARE: bulk source (maybe generates short writes) */
retval = perform_sglist(dev, param->iterations, dev->in_pipe,
&req, sg, param->sglen);
free_sglist(sg, param->sglen);
break;
case 7:
if (dev->out_pipe == 0 || param->sglen == 0 || param->vary == 0)
break;
dev_info(&intf->dev,
"TEST 7: write/%d %d sglists %d entries 0..%d bytes\n",
param->vary, param->iterations,
param->sglen, param->length);
sg = alloc_sglist(param->sglen, param->length,
param->vary, dev, dev->out_pipe);
if (!sg) {
retval = -ENOMEM;
break;
}
/* FIRMWARE: bulk sink (maybe accepts short writes) */
retval = perform_sglist(dev, param->iterations, dev->out_pipe,
&req, sg, param->sglen);
free_sglist(sg, param->sglen);
break;
case 8:
if (dev->in_pipe == 0 || param->sglen == 0 || param->vary == 0)
break;
dev_info(&intf->dev,
"TEST 8: read/%d %d sglists %d entries 0..%d bytes\n",
param->vary, param->iterations,
param->sglen, param->length);
sg = alloc_sglist(param->sglen, param->length,
param->vary, dev, dev->in_pipe);
if (!sg) {
retval = -ENOMEM;
break;
}
/* FIRMWARE: bulk source (maybe generates short writes) */
retval = perform_sglist(dev, param->iterations, dev->in_pipe,
&req, sg, param->sglen);
free_sglist(sg, param->sglen);
break;
/* non-queued sanity tests for control (chapter 9 subset) */
case 9:
retval = 0;
dev_info(&intf->dev,
"TEST 9: ch9 (subset) control tests, %d times\n",
param->iterations);
for (i = param->iterations; retval == 0 && i--; /* NOP */)
retval = ch9_postconfig(dev);
if (retval)
dev_err(&intf->dev, "ch9 subset failed, "
"iterations left %d\n", i);
break;
/* queued control messaging */
case 10:
retval = 0;
dev_info(&intf->dev,
"TEST 10: queue %d control calls, %d times\n",
param->sglen,
param->iterations);
retval = test_ctrl_queue(dev, param);
break;
/* simple non-queued unlinks (ring with one urb) */
case 11:
if (dev->in_pipe == 0 || !param->length)
break;
retval = 0;
dev_info(&intf->dev, "TEST 11: unlink %d reads of %d\n",
param->iterations, param->length);
for (i = param->iterations; retval == 0 && i--; /* NOP */)
retval = unlink_simple(dev, dev->in_pipe,
param->length);
if (retval)
dev_err(&intf->dev, "unlink reads failed %d, "
"iterations left %d\n", retval, i);
break;
case 12:
if (dev->out_pipe == 0 || !param->length)
break;
retval = 0;
dev_info(&intf->dev, "TEST 12: unlink %d writes of %d\n",
param->iterations, param->length);
for (i = param->iterations; retval == 0 && i--; /* NOP */)
retval = unlink_simple(dev, dev->out_pipe,
param->length);
if (retval)
dev_err(&intf->dev, "unlink writes failed %d, "
"iterations left %d\n", retval, i);
break;
/* ep halt tests */
case 13:
if (dev->out_pipe == 0 && dev->in_pipe == 0)
break;
retval = 0;
dev_info(&intf->dev, "TEST 13: set/clear %d halts\n",
param->iterations);
for (i = param->iterations; retval == 0 && i--; /* NOP */)
retval = halt_simple(dev);
if (retval)
ERROR(dev, "halts failed, iterations left %d\n", i);
break;
/* control write tests */
case 14:
if (!dev->info->ctrl_out)
break;
dev_info(&intf->dev, "TEST 14: %d ep0out, %d..%d vary %d\n",
param->iterations,
realworld ? 1 : 0, param->length,
param->vary);
retval = ctrl_out(dev, param->iterations,
param->length, param->vary, 0);
break;
/* iso write tests */
case 15:
if (dev->out_iso_pipe == 0 || param->sglen == 0)
break;
dev_info(&intf->dev,
"TEST 15: write %d iso, %d entries of %d bytes\n",
param->iterations,
param->sglen, param->length);
/* FIRMWARE: iso sink */
retval = test_queue(dev, param,
dev->out_iso_pipe, dev->iso_out, 0);
break;
/* iso read tests */
case 16:
if (dev->in_iso_pipe == 0 || param->sglen == 0)
break;
dev_info(&intf->dev,
"TEST 16: read %d iso, %d entries of %d bytes\n",
param->iterations,
param->sglen, param->length);
/* FIRMWARE: iso source */
retval = test_queue(dev, param,
dev->in_iso_pipe, dev->iso_in, 0);
break;
/* FIXME scatterlist cancel (needs helper thread) */
/* Tests for bulk I/O using DMA mapping by core and odd address */
case 17:
if (dev->out_pipe == 0)
break;
dev_info(&intf->dev,
"TEST 17: write odd addr %d bytes %u times core map\n",
param->length, param->iterations);
retval = test_unaligned_bulk(
dev, dev->out_pipe,
param->length, param->iterations,
0, "test17");
break;
case 18:
if (dev->in_pipe == 0)
break;
dev_info(&intf->dev,
"TEST 18: read odd addr %d bytes %u times core map\n",
param->length, param->iterations);
retval = test_unaligned_bulk(
dev, dev->in_pipe,
param->length, param->iterations,
0, "test18");
break;
/* Tests for bulk I/O using premapped coherent buffer and odd address */
case 19:
if (dev->out_pipe == 0)
break;
dev_info(&intf->dev,
"TEST 19: write odd addr %d bytes %u times premapped\n",
param->length, param->iterations);
retval = test_unaligned_bulk(
dev, dev->out_pipe,
param->length, param->iterations,
URB_NO_TRANSFER_DMA_MAP, "test19");
break;
case 20:
if (dev->in_pipe == 0)
break;
dev_info(&intf->dev,
"TEST 20: read odd addr %d bytes %u times premapped\n",
param->length, param->iterations);
retval = test_unaligned_bulk(
dev, dev->in_pipe,
param->length, param->iterations,
URB_NO_TRANSFER_DMA_MAP, "test20");
break;
/* control write tests with unaligned buffer */
case 21:
if (!dev->info->ctrl_out)
break;
dev_info(&intf->dev,
"TEST 21: %d ep0out odd addr, %d..%d vary %d\n",
param->iterations,
realworld ? 1 : 0, param->length,
param->vary);
retval = ctrl_out(dev, param->iterations,
param->length, param->vary, 1);
break;
/* unaligned iso tests */
case 22:
if (dev->out_iso_pipe == 0 || param->sglen == 0)
break;
dev_info(&intf->dev,
"TEST 22: write %d iso odd, %d entries of %d bytes\n",
param->iterations,
param->sglen, param->length);
retval = test_queue(dev, param,
dev->out_iso_pipe, dev->iso_out, 1);
break;
case 23:
if (dev->in_iso_pipe == 0 || param->sglen == 0)
break;
dev_info(&intf->dev,
"TEST 23: read %d iso odd, %d entries of %d bytes\n",
param->iterations,
param->sglen, param->length);
retval = test_queue(dev, param,
dev->in_iso_pipe, dev->iso_in, 1);
break;
/* unlink URBs from a bulk-OUT queue */
case 24:
if (dev->out_pipe == 0 || !param->length || param->sglen < 4)
break;
retval = 0;
dev_info(&intf->dev, "TEST 24: unlink from %d queues of "
"%d %d-byte writes\n",
param->iterations, param->sglen, param->length);
for (i = param->iterations; retval == 0 && i > 0; --i) {
retval = unlink_queued(dev, dev->out_pipe,
param->sglen, param->length);
if (retval) {
dev_err(&intf->dev,
"unlink queued writes failed %d, "
"iterations left %d\n", retval, i);
break;
}
}
break;
/* Simple non-queued interrupt I/O tests */
case 25:
if (dev->out_int_pipe == 0)
break;
dev_info(&intf->dev,
"TEST 25: write %d bytes %u times\n",
param->length, param->iterations);
urb = simple_alloc_urb(udev, dev->out_int_pipe, param->length,
dev->int_out->bInterval);
if (!urb) {
retval = -ENOMEM;
break;
}
/* FIRMWARE: interrupt sink (maybe accepts short writes) */
retval = simple_io(dev, urb, param->iterations, 0, 0, "test25");
simple_free_urb(urb);
break;
case 26:
if (dev->in_int_pipe == 0)
break;
dev_info(&intf->dev,
"TEST 26: read %d bytes %u times\n",
param->length, param->iterations);
urb = simple_alloc_urb(udev, dev->in_int_pipe, param->length,
dev->int_in->bInterval);
if (!urb) {
retval = -ENOMEM;
break;
}
/* FIRMWARE: interrupt source (maybe generates short writes) */
retval = simple_io(dev, urb, param->iterations, 0, 0, "test26");
simple_free_urb(urb);
break;
case 27:
		/* this is a performance test, so skip the data comparison */
if (dev->out_pipe == 0 || param->sglen == 0 || pattern != 0)
break;
dev_info(&intf->dev,
"TEST 27: bulk write %dMbytes\n", (param->iterations *
param->sglen * param->length) / (1024 * 1024));
retval = test_queue(dev, param,
dev->out_pipe, NULL, 0);
break;
case 28:
if (dev->in_pipe == 0 || param->sglen == 0 || pattern != 0)
break;
dev_info(&intf->dev,
"TEST 28: bulk read %dMbytes\n", (param->iterations *
param->sglen * param->length) / (1024 * 1024));
retval = test_queue(dev, param,
dev->in_pipe, NULL, 0);
break;
/* Test data Toggle/seq_nr clear between bulk out transfers */
case 29:
if (dev->out_pipe == 0)
break;
retval = 0;
dev_info(&intf->dev, "TEST 29: Clear toggle between bulk writes %d times\n",
param->iterations);
for (i = param->iterations; retval == 0 && i > 0; --i)
retval = toggle_sync_simple(dev);
if (retval)
ERROR(dev, "toggle sync failed, iterations left %d\n",
i);
break;
}
return retval;
}
/*-------------------------------------------------------------------------*/
/* We only have this one interface to user space, through usbfs.
* User mode code can scan usbfs to find N different devices (maybe on
* different busses) to use when testing, and allocate one thread per
* test. So discovery is simplified, and we have no device naming issues.
*
* Don't use these only as stress/load tests. Use them along with
* other USB bus activity: plugging, unplugging, mousing, mp3 playback,
* video capture, and so on. Run different tests at different times, in
* different sequences. Nothing here should interact with other devices,
* except indirectly by consuming USB bandwidth and CPU resources for test
* threads and request completion. But the only way to know that for sure
* is to test when HC queues are in use by many devices.
*
* WARNING: Because usbfs grabs udev->dev.sem before calling this ioctl(),
* it locks out usbcore in certain code paths. Notably, if you disconnect
 * the device-under-test, hub_wq will block forever waiting for the
* ioctl to complete ... so that usb_disconnect() can abort the pending
* urbs and then call usbtest_disconnect(). To abort a test, you're best
* off just killing the userspace task and waiting for it to exit.
*/
static int
usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
{
struct usbtest_dev *dev = usb_get_intfdata(intf);
struct usbtest_param_64 *param_64 = buf;
struct usbtest_param_32 temp;
struct usbtest_param_32 *param_32 = buf;
struct timespec64 start;
struct timespec64 end;
struct timespec64 duration;
int retval = -EOPNOTSUPP;
/* FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is. */
pattern = mod_pattern;
if (mutex_lock_interruptible(&dev->lock))
return -ERESTARTSYS;
/* FIXME: What if a system sleep starts while a test is running? */
/* some devices, like ez-usb default devices, need a non-default
* altsetting to have any active endpoints. some tests change
* altsettings; force a default so most tests don't need to check.
*/
if (dev->info->alt >= 0) {
if (intf->altsetting->desc.bInterfaceNumber) {
retval = -ENODEV;
goto free_mutex;
}
retval = set_altsetting(dev, dev->info->alt);
if (retval) {
dev_err(&intf->dev,
"set altsetting to %d failed, %d\n",
dev->info->alt, retval);
goto free_mutex;
}
}
switch (code) {
case USBTEST_REQUEST_64:
temp.test_num = param_64->test_num;
temp.iterations = param_64->iterations;
temp.length = param_64->length;
temp.sglen = param_64->sglen;
temp.vary = param_64->vary;
param_32 = &temp;
break;
case USBTEST_REQUEST_32:
break;
default:
retval = -EOPNOTSUPP;
goto free_mutex;
}
ktime_get_ts64(&start);
retval = usbtest_do_ioctl(intf, param_32);
if (retval < 0)
goto free_mutex;
ktime_get_ts64(&end);
duration = timespec64_sub(end, start);
temp.duration_sec = duration.tv_sec;
temp.duration_usec = duration.tv_nsec/NSEC_PER_USEC;
switch (code) {
case USBTEST_REQUEST_32:
param_32->duration_sec = temp.duration_sec;
param_32->duration_usec = temp.duration_usec;
break;
case USBTEST_REQUEST_64:
param_64->duration_sec = temp.duration_sec;
param_64->duration_usec = temp.duration_usec;
break;
}
free_mutex:
mutex_unlock(&dev->lock);
return retval;
}
/*-------------------------------------------------------------------------*/
static unsigned force_interrupt;
module_param(force_interrupt, uint, 0);
MODULE_PARM_DESC(force_interrupt, "0 = test default; else interrupt");
#ifdef GENERIC
static unsigned short vendor;
module_param(vendor, ushort, 0);
MODULE_PARM_DESC(vendor, "vendor code (from usb-if)");
static unsigned short product;
module_param(product, ushort, 0);
MODULE_PARM_DESC(product, "product code (from vendor)");
#endif
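/*
 * Illustrative use of the GENERIC match-by-param path (the values are
 * examples only):
 *
 *	modprobe usbtest vendor=0x0547 product=0x2235
 *
 * binds the driver to that vendor/product for control-only testing.
 */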
static int
usbtest_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct usb_device *udev;
struct usbtest_dev *dev;
struct usbtest_info *info;
char *rtest, *wtest;
char *irtest, *iwtest;
char *intrtest, *intwtest;
udev = interface_to_usbdev(intf);
#ifdef GENERIC
/* specify devices by module parameters? */
if (id->match_flags == 0) {
/* vendor match required, product match optional */
if (!vendor || le16_to_cpu(udev->descriptor.idVendor) != (u16)vendor)
return -ENODEV;
if (product && le16_to_cpu(udev->descriptor.idProduct) != (u16)product)
return -ENODEV;
dev_info(&intf->dev, "matched module params, "
"vend=0x%04x prod=0x%04x\n",
le16_to_cpu(udev->descriptor.idVendor),
le16_to_cpu(udev->descriptor.idProduct));
}
#endif
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
info = (struct usbtest_info *) id->driver_info;
dev->info = info;
mutex_init(&dev->lock);
dev->intf = intf;
/* cacheline-aligned scratch for i/o */
dev->buf = kmalloc(TBUF_SIZE, GFP_KERNEL);
if (dev->buf == NULL) {
kfree(dev);
return -ENOMEM;
}
	/* NOTE this doesn't yet test the handful of differences that are
* visible with high speed interrupts: bigger maxpacket (1K) and
* "high bandwidth" modes (up to 3 packets/uframe).
*/
rtest = wtest = "";
irtest = iwtest = "";
intrtest = intwtest = "";
if (force_interrupt || udev->speed == USB_SPEED_LOW) {
if (info->ep_in) {
dev->in_pipe = usb_rcvintpipe(udev, info->ep_in);
rtest = " intr-in";
}
if (info->ep_out) {
dev->out_pipe = usb_sndintpipe(udev, info->ep_out);
wtest = " intr-out";
}
} else {
if (override_alt >= 0 || info->autoconf) {
int status;
status = get_endpoints(dev, intf);
if (status < 0) {
WARNING(dev, "couldn't get endpoints, %d\n",
status);
kfree(dev->buf);
kfree(dev);
return status;
}
/* may find bulk or ISO pipes */
} else {
if (info->ep_in)
dev->in_pipe = usb_rcvbulkpipe(udev,
info->ep_in);
if (info->ep_out)
dev->out_pipe = usb_sndbulkpipe(udev,
info->ep_out);
}
if (dev->in_pipe)
rtest = " bulk-in";
if (dev->out_pipe)
wtest = " bulk-out";
if (dev->in_iso_pipe)
irtest = " iso-in";
if (dev->out_iso_pipe)
iwtest = " iso-out";
if (dev->in_int_pipe)
intrtest = " int-in";
if (dev->out_int_pipe)
intwtest = " int-out";
}
usb_set_intfdata(intf, dev);
dev_info(&intf->dev, "%s\n", info->name);
dev_info(&intf->dev, "%s {control%s%s%s%s%s%s%s} tests%s\n",
usb_speed_string(udev->speed),
info->ctrl_out ? " in/out" : "",
rtest, wtest,
irtest, iwtest,
intrtest, intwtest,
info->alt >= 0 ? " (+alt)" : "");
return 0;
}
static int usbtest_suspend(struct usb_interface *intf, pm_message_t message)
{
return 0;
}
static int usbtest_resume(struct usb_interface *intf)
{
return 0;
}
static void usbtest_disconnect(struct usb_interface *intf)
{
struct usbtest_dev *dev = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
dev_dbg(&intf->dev, "disconnect\n");
kfree(dev->buf);
kfree(dev);
}
/* Basic testing only needs a device that can source or sink bulk traffic.
* Any device can test control transfers (default with GENERIC binding).
*
* Several entries work with the default EP0 implementation that's built
* into EZ-USB chips. There's a default vendor ID which can be overridden
* by (very) small config EEPROMS, but otherwise all these devices act
* identically until firmware is loaded: only EP0 works. It turns out
* to be easy to make other endpoints work, without modifying that EP0
* behavior. For now, we expect that kind of firmware.
*/
/* an21xx or fx versions of ez-usb */
static struct usbtest_info ez1_info = {
.name = "EZ-USB device",
.ep_in = 2,
.ep_out = 2,
.alt = 1,
};
/* fx2 version of ez-usb */
static struct usbtest_info ez2_info = {
.name = "FX2 device",
.ep_in = 6,
.ep_out = 2,
.alt = 1,
};
/* ezusb family device with dedicated usb test firmware */
static struct usbtest_info fw_info = {
.name = "usb test device",
.ep_in = 2,
.ep_out = 2,
.alt = 1,
.autoconf = 1, /* iso and ctrl_out need autoconf */
.ctrl_out = 1,
.iso = 1, /* iso_ep's are #8 in/out */
};
/* peripheral running Linux and 'zero.c' test firmware, or
* its user-mode cousin. different versions of this use
* different hardware with the same vendor/product codes.
* host side MUST rely on the endpoint descriptors.
*/
static struct usbtest_info gz_info = {
.name = "Linux gadget zero",
.autoconf = 1,
.ctrl_out = 1,
.iso = 1,
.intr = 1,
.alt = 0,
};
static struct usbtest_info um_info = {
.name = "Linux user mode test driver",
.autoconf = 1,
.alt = -1,
};
static struct usbtest_info um2_info = {
.name = "Linux user mode ISO test driver",
.autoconf = 1,
.iso = 1,
.alt = -1,
};
#ifdef IBOT2
/* this is a nice source of high speed bulk data;
* uses an FX2, with firmware provided in the device
*/
static struct usbtest_info ibot2_info = {
.name = "iBOT2 webcam",
.ep_in = 2,
.alt = -1,
};
#endif
#ifdef GENERIC
/* we can use any device to test control traffic */
static struct usbtest_info generic_info = {
.name = "Generic USB device",
.alt = -1,
};
#endif
static const struct usb_device_id id_table[] = {
/*-------------------------------------------------------------*/
/* EZ-USB devices which download firmware to replace (or in our
* case augment) the default device implementation.
*/
/* generic EZ-USB FX controller */
{ USB_DEVICE(0x0547, 0x2235),
.driver_info = (unsigned long) &ez1_info,
},
/* CY3671 development board with EZ-USB FX */
{ USB_DEVICE(0x0547, 0x0080),
.driver_info = (unsigned long) &ez1_info,
},
/* generic EZ-USB FX2 controller (or development board) */
{ USB_DEVICE(0x04b4, 0x8613),
.driver_info = (unsigned long) &ez2_info,
},
/* re-enumerated usb test device firmware */
{ USB_DEVICE(0xfff0, 0xfff0),
.driver_info = (unsigned long) &fw_info,
},
/* "Gadget Zero" firmware runs under Linux */
{ USB_DEVICE(0x0525, 0xa4a0),
.driver_info = (unsigned long) &gz_info,
},
/* so does a user-mode variant */
{ USB_DEVICE(0x0525, 0xa4a4),
.driver_info = (unsigned long) &um_info,
},
/* ... and a user-mode variant that talks iso */
{ USB_DEVICE(0x0525, 0xa4a3),
.driver_info = (unsigned long) &um2_info,
},
#ifdef KEYSPAN_19Qi
/* Keyspan 19qi uses an21xx (original EZ-USB) */
/* this does not coexist with the real Keyspan 19qi driver! */
{ USB_DEVICE(0x06cd, 0x010b),
.driver_info = (unsigned long) &ez1_info,
},
#endif
/*-------------------------------------------------------------*/
#ifdef IBOT2
/* iBOT2 makes a nice source of high speed bulk-in data */
/* this does not coexist with a real iBOT2 driver! */
{ USB_DEVICE(0x0b62, 0x0059),
.driver_info = (unsigned long) &ibot2_info,
},
#endif
/*-------------------------------------------------------------*/
#ifdef GENERIC
/* module params can specify devices to use for control tests */
{ .driver_info = (unsigned long) &generic_info, },
#endif
/*-------------------------------------------------------------*/
{ }
};
MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_driver usbtest_driver = {
.name = "usbtest",
.id_table = id_table,
.probe = usbtest_probe,
.unlocked_ioctl = usbtest_ioctl,
.disconnect = usbtest_disconnect,
.suspend = usbtest_suspend,
.resume = usbtest_resume,
};
/*-------------------------------------------------------------------------*/
static int __init usbtest_init(void)
{
#ifdef GENERIC
if (vendor)
pr_debug("params: vend=0x%04x prod=0x%04x\n", vendor, product);
#endif
return usb_register(&usbtest_driver);
}
module_init(usbtest_init);
static void __exit usbtest_exit(void)
{
usb_deregister(&usbtest_driver);
}
module_exit(usbtest_exit);
MODULE_DESCRIPTION("USB Core/HCD Testing Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/misc/usbtest.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/usb/role.h>
#define EUD_REG_INT1_EN_MASK 0x0024
#define EUD_REG_INT_STATUS_1 0x0044
#define EUD_REG_CTL_OUT_1 0x0074
#define EUD_REG_VBUS_INT_CLR 0x0080
#define EUD_REG_CSR_EUD_EN 0x1014
#define EUD_REG_SW_ATTACH_DET 0x1018
#define EUD_REG_EUD_EN2 0x0000
#define EUD_ENABLE BIT(0)
#define EUD_INT_PET_EUD BIT(0)
#define EUD_INT_VBUS BIT(2)
#define EUD_INT_SAFE_MODE BIT(4)
#define EUD_INT_ALL (EUD_INT_VBUS | EUD_INT_SAFE_MODE)
struct eud_chip {
struct device *dev;
struct usb_role_switch *role_sw;
void __iomem *base;
void __iomem *mode_mgr;
unsigned int int_status;
int irq;
bool enabled;
bool usb_attached;
};
static int enable_eud(struct eud_chip *priv)
{
writel(EUD_ENABLE, priv->base + EUD_REG_CSR_EUD_EN);
writel(EUD_INT_VBUS | EUD_INT_SAFE_MODE,
priv->base + EUD_REG_INT1_EN_MASK);
writel(1, priv->mode_mgr + EUD_REG_EUD_EN2);
return usb_role_switch_set_role(priv->role_sw, USB_ROLE_DEVICE);
}
static void disable_eud(struct eud_chip *priv)
{
writel(0, priv->base + EUD_REG_CSR_EUD_EN);
writel(0, priv->mode_mgr + EUD_REG_EUD_EN2);
}
static ssize_t enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct eud_chip *chip = dev_get_drvdata(dev);
return sysfs_emit(buf, "%d\n", chip->enabled);
}
static ssize_t enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct eud_chip *chip = dev_get_drvdata(dev);
bool enable;
int ret;
if (kstrtobool(buf, &enable))
return -EINVAL;
if (enable) {
ret = enable_eud(chip);
if (!ret)
chip->enabled = enable;
else
disable_eud(chip);
} else {
disable_eud(chip);
}
return count;
}
static DEVICE_ATTR_RW(enable);
static struct attribute *eud_attrs[] = {
&dev_attr_enable.attr,
NULL,
};
ATTRIBUTE_GROUPS(eud);
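/*
 * The 'enable' attribute is the userspace control surface; something
 * like (the device path is illustrative):
 *
 *	echo 1 > /sys/bus/platform/devices/<eud>/enable
 *
 * calls enable_eud() and, on success, latches chip->enabled.
 */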
static void usb_attach_detach(struct eud_chip *chip)
{
u32 reg;
/* read ctl_out_1[4] to find USB attach or detach event */
reg = readl(chip->base + EUD_REG_CTL_OUT_1);
chip->usb_attached = reg & EUD_INT_SAFE_MODE;
}
static void pet_eud(struct eud_chip *chip)
{
u32 reg;
int ret;
	/*
	 * When EUD_INT_PET_EUD in SW_ATTACH_DET is set, the cable has been
	 * disconnected and we need to detach the pet to check whether EUD
	 * is in safe mode before attaching again.
	 */
reg = readl(chip->base + EUD_REG_SW_ATTACH_DET);
if (reg & EUD_INT_PET_EUD) {
/* Detach & Attach pet for EUD */
writel(0, chip->base + EUD_REG_SW_ATTACH_DET);
/* Delay to make sure detach pet is done before attach pet */
ret = readl_poll_timeout(chip->base + EUD_REG_SW_ATTACH_DET,
reg, (reg == 0), 1, 100);
if (ret) {
dev_err(chip->dev, "Detach pet failed\n");
return;
}
}
/* Attach pet for EUD */
writel(EUD_INT_PET_EUD, chip->base + EUD_REG_SW_ATTACH_DET);
}
static irqreturn_t handle_eud_irq(int irq, void *data)
{
struct eud_chip *chip = data;
u32 reg;
reg = readl(chip->base + EUD_REG_INT_STATUS_1);
switch (reg & EUD_INT_ALL) {
case EUD_INT_VBUS:
usb_attach_detach(chip);
return IRQ_WAKE_THREAD;
case EUD_INT_SAFE_MODE:
pet_eud(chip);
return IRQ_HANDLED;
default:
return IRQ_NONE;
}
}
static irqreturn_t handle_eud_irq_thread(int irq, void *data)
{
struct eud_chip *chip = data;
int ret;
if (chip->usb_attached)
ret = usb_role_switch_set_role(chip->role_sw, USB_ROLE_DEVICE);
else
ret = usb_role_switch_set_role(chip->role_sw, USB_ROLE_HOST);
if (ret)
dev_err(chip->dev, "failed to set role switch\n");
/* set and clear vbus_int_clr[0] to clear interrupt */
writel(BIT(0), chip->base + EUD_REG_VBUS_INT_CLR);
writel(0, chip->base + EUD_REG_VBUS_INT_CLR);
return IRQ_HANDLED;
}
static void eud_role_switch_release(void *data)
{
struct eud_chip *chip = data;
usb_role_switch_put(chip->role_sw);
}
static int eud_probe(struct platform_device *pdev)
{
struct eud_chip *chip;
int ret;
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
chip->dev = &pdev->dev;
chip->role_sw = usb_role_switch_get(&pdev->dev);
if (IS_ERR(chip->role_sw))
return dev_err_probe(chip->dev, PTR_ERR(chip->role_sw),
"failed to get role switch\n");
ret = devm_add_action_or_reset(chip->dev, eud_role_switch_release, chip);
if (ret)
return dev_err_probe(chip->dev, ret,
"failed to add role switch release action\n");
chip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->base))
return PTR_ERR(chip->base);
chip->mode_mgr = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(chip->mode_mgr))
return PTR_ERR(chip->mode_mgr);
chip->irq = platform_get_irq(pdev, 0);
ret = devm_request_threaded_irq(&pdev->dev, chip->irq, handle_eud_irq,
handle_eud_irq_thread, IRQF_ONESHOT, NULL, chip);
if (ret)
return dev_err_probe(chip->dev, ret, "failed to allocate irq\n");
enable_irq_wake(chip->irq);
platform_set_drvdata(pdev, chip);
return 0;
}
static void eud_remove(struct platform_device *pdev)
{
struct eud_chip *chip = platform_get_drvdata(pdev);
if (chip->enabled)
disable_eud(chip);
device_init_wakeup(&pdev->dev, false);
disable_irq_wake(chip->irq);
}
static const struct of_device_id eud_dt_match[] = {
{ .compatible = "qcom,sc7280-eud" },
{ }
};
MODULE_DEVICE_TABLE(of, eud_dt_match);
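/*
 * Sketch of the expected device-tree node (addresses and interrupt
 * number are made-up examples; the qcom,eud binding document is
 * authoritative):
 *
 *	eud@88e0000 {
 *		compatible = "qcom,sc7280-eud";
 *		reg = <0x88e0000 0x2000>, <0x88e2000 0x1000>;
 *		interrupts = <GIC_SPI 524 IRQ_TYPE_LEVEL_HIGH>;
 *	};
 */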
static struct platform_driver eud_driver = {
.probe = eud_probe,
.remove_new = eud_remove,
.driver = {
.name = "qcom_eud",
.dev_groups = eud_groups,
.of_match_table = eud_dt_match,
},
};
module_platform_driver(eud_driver);
MODULE_DESCRIPTION("QTI EUD driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/misc/qcom_eud.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for SMSC USB4604 USB HSIC 4-port 2.0 hub controller driver
* Based on usb3503 driver
*
* Copyright (c) 2012-2013 Dongjin Kim ([email protected])
* Copyright (c) 2016 Linaro Ltd.
*/
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/gpio/consumer.h>
enum usb4604_mode {
USB4604_MODE_UNKNOWN,
USB4604_MODE_HUB,
USB4604_MODE_STANDBY,
};
struct usb4604 {
enum usb4604_mode mode;
struct device *dev;
struct gpio_desc *gpio_reset;
};
static void usb4604_reset(struct usb4604 *hub, int state)
{
gpiod_set_value_cansleep(hub->gpio_reset, state);
/* Wait for i2c logic to come up */
if (state)
msleep(250);
}
static int usb4604_connect(struct usb4604 *hub)
{
struct device *dev = hub->dev;
struct i2c_client *client = to_i2c_client(dev);
int err;
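	/* vendor-defined "attach" command; the byte meanings are not documented here */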
u8 connect_cmd[] = { 0xaa, 0x55, 0x00 };
usb4604_reset(hub, 1);
err = i2c_master_send(client, connect_cmd, ARRAY_SIZE(connect_cmd));
if (err < 0) {
usb4604_reset(hub, 0);
return err;
}
hub->mode = USB4604_MODE_HUB;
dev_dbg(dev, "switched to HUB mode\n");
return 0;
}
static int usb4604_switch_mode(struct usb4604 *hub, enum usb4604_mode mode)
{
struct device *dev = hub->dev;
int err = 0;
switch (mode) {
case USB4604_MODE_HUB:
err = usb4604_connect(hub);
break;
case USB4604_MODE_STANDBY:
usb4604_reset(hub, 0);
dev_dbg(dev, "switched to STANDBY mode\n");
break;
default:
dev_err(dev, "unknown mode is requested\n");
err = -EINVAL;
break;
}
return err;
}
static int usb4604_probe(struct usb4604 *hub)
{
struct device *dev = hub->dev;
struct device_node *np = dev->of_node;
struct gpio_desc *gpio;
u32 mode = USB4604_MODE_HUB;
gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(gpio))
return PTR_ERR(gpio);
hub->gpio_reset = gpio;
	/* keep the default HUB mode if the DT property is absent */
	of_property_read_u32(np, "initial-mode", &mode);
	hub->mode = mode;
return usb4604_switch_mode(hub, hub->mode);
}
static int usb4604_i2c_probe(struct i2c_client *i2c)
{
struct usb4604 *hub;
hub = devm_kzalloc(&i2c->dev, sizeof(*hub), GFP_KERNEL);
if (!hub)
return -ENOMEM;
i2c_set_clientdata(i2c, hub);
hub->dev = &i2c->dev;
return usb4604_probe(hub);
}
static int __maybe_unused usb4604_i2c_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct usb4604 *hub = i2c_get_clientdata(client);
usb4604_switch_mode(hub, USB4604_MODE_STANDBY);
return 0;
}
static int __maybe_unused usb4604_i2c_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct usb4604 *hub = i2c_get_clientdata(client);
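	/* the hub may have lost its state in standby, so replay the last mode */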
usb4604_switch_mode(hub, hub->mode);
return 0;
}
static SIMPLE_DEV_PM_OPS(usb4604_i2c_pm_ops, usb4604_i2c_suspend,
usb4604_i2c_resume);
static const struct i2c_device_id usb4604_id[] = {
{ "usb4604", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, usb4604_id);
#ifdef CONFIG_OF
static const struct of_device_id usb4604_of_match[] = {
{ .compatible = "smsc,usb4604" },
{}
};
MODULE_DEVICE_TABLE(of, usb4604_of_match);
#endif
static struct i2c_driver usb4604_i2c_driver = {
.driver = {
.name = "usb4604",
.pm = pm_ptr(&usb4604_i2c_pm_ops),
.of_match_table = of_match_ptr(usb4604_of_match),
},
.probe = usb4604_i2c_probe,
.id_table = usb4604_id,
};
module_i2c_driver(usb4604_i2c_driver);
MODULE_DESCRIPTION("USB4604 USB HUB driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/misc/usb4604.c |
// SPDX-License-Identifier: GPL-2.0
/*
* EZ-USB specific functions used by some of the USB to Serial drivers.
*
* Copyright (C) 1999 - 2002 Greg Kroah-Hartman ([email protected])
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/firmware.h>
#include <linux/ihex.h>
#include <linux/usb/ezusb.h>
struct ezusb_fx_type {
/* EZ-USB Control and Status Register. Bit 0 controls 8051 reset */
unsigned short cpucs_reg;
	unsigned short max_internal_address;
};
static const struct ezusb_fx_type ezusb_fx1 = {
.cpucs_reg = 0x7F92,
	.max_internal_address = 0x1B3F,
};
/* Commands for writing to memory */
#define WRITE_INT_RAM 0xA0
#define WRITE_EXT_RAM 0xA3
static int ezusb_writememory(struct usb_device *dev, int address,
unsigned char *data, int length, __u8 request)
{
if (!dev)
return -ENODEV;
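	/*
	 * usb_control_msg_send() copies the buffer internally, so the ihex
	 * record data can be passed in directly without a DMA-safe copy.
	 */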
return usb_control_msg_send(dev, 0, request,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
address, 0, data, length, 3000, GFP_KERNEL);
}
static int ezusb_set_reset(struct usb_device *dev, unsigned short cpucs_reg,
unsigned char reset_bit)
{
int response = ezusb_writememory(dev, cpucs_reg, &reset_bit, 1, WRITE_INT_RAM);
if (response < 0)
dev_err(&dev->dev, "%s-%d failed: %d\n",
__func__, reset_bit, response);
return response;
}
int ezusb_fx1_set_reset(struct usb_device *dev, unsigned char reset_bit)
{
return ezusb_set_reset(dev, ezusb_fx1.cpucs_reg, reset_bit);
}
EXPORT_SYMBOL_GPL(ezusb_fx1_set_reset);
static int ezusb_ihex_firmware_download(struct usb_device *dev,
struct ezusb_fx_type fx,
const char *firmware_path)
{
int ret = -ENOENT;
const struct firmware *firmware = NULL;
const struct ihex_binrec *record;
if (request_ihex_firmware(&firmware, firmware_path,
&dev->dev)) {
dev_err(&dev->dev,
"%s - request \"%s\" failed\n",
__func__, firmware_path);
goto out;
}
ret = ezusb_set_reset(dev, fx.cpucs_reg, 0);
if (ret < 0)
goto out;
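	/*
	 * Two-pass download: records above the internal-RAM boundary are
	 * written to external RAM first, then the 8051 is held in reset
	 * (CPUCS bit 0) while internal RAM is programmed, and finally the
	 * reset is released so the new firmware starts running.
	 */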
record = (const struct ihex_binrec *)firmware->data;
for (; record; record = ihex_next_binrec(record)) {
		if (be32_to_cpu(record->addr) > fx.max_internal_address) {
ret = ezusb_writememory(dev, be32_to_cpu(record->addr),
(unsigned char *)record->data,
be16_to_cpu(record->len), WRITE_EXT_RAM);
			if (ret < 0) {
				dev_err(&dev->dev, "%s - ezusb_writememory failed writing external memory (%d %04X %p %d)\n",
					__func__, ret, be32_to_cpu(record->addr),
					record->data, be16_to_cpu(record->len));
				goto out;
			}
}
}
ret = ezusb_set_reset(dev, fx.cpucs_reg, 1);
if (ret < 0)
goto out;
record = (const struct ihex_binrec *)firmware->data;
for (; record; record = ihex_next_binrec(record)) {
		if (be32_to_cpu(record->addr) <= fx.max_internal_address) {
ret = ezusb_writememory(dev, be32_to_cpu(record->addr),
(unsigned char *)record->data,
be16_to_cpu(record->len), WRITE_INT_RAM);
			if (ret < 0) {
				dev_err(&dev->dev, "%s - ezusb_writememory failed writing internal memory (%d %04X %p %d)\n",
					__func__, ret, be32_to_cpu(record->addr),
					record->data, be16_to_cpu(record->len));
				goto out;
			}
}
}
ret = ezusb_set_reset(dev, fx.cpucs_reg, 0);
out:
release_firmware(firmware);
return ret;
}
int ezusb_fx1_ihex_firmware_download(struct usb_device *dev,
const char *firmware_path)
{
return ezusb_ihex_firmware_download(dev, ezusb_fx1, firmware_path);
}
EXPORT_SYMBOL_GPL(ezusb_fx1_ihex_firmware_download);
#if 0
/*
 * Once someone needs these fx2 functions, uncomment them
* and add them to ezusb.h and all should be good.
*/
static struct ezusb_fx_type ezusb_fx2 = {
.cpucs_reg = 0xE600,
	.max_internal_address = 0x3FFF,
};
int ezusb_fx2_set_reset(struct usb_device *dev, unsigned char reset_bit)
{
return ezusb_set_reset(dev, ezusb_fx2.cpucs_reg, reset_bit);
}
EXPORT_SYMBOL_GPL(ezusb_fx2_set_reset);
int ezusb_fx2_ihex_firmware_download(struct usb_device *dev,
const char *firmware_path)
{
return ezusb_ihex_firmware_download(dev, ezusb_fx2, firmware_path);
}
EXPORT_SYMBOL_GPL(ezusb_fx2_ihex_firmware_download);
#endif
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/misc/ezusb.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Microchip USB251xB USB 2.0 Hi-Speed Hub Controller
* Configuration via SMBus.
*
* Copyright (c) 2017 SKIDATA AG
*
* This work is based on the USB3503 driver by Dongjin Kim and
* a not-accepted patch by Fabien Lahoudere, see:
* https://patchwork.kernel.org/patch/9257715/
*/
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/nls.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
/* Internal Register Set Addresses & Default Values acc. to DS00001692C */
#define USB251XB_ADDR_VENDOR_ID_LSB 0x00
#define USB251XB_ADDR_VENDOR_ID_MSB 0x01
#define USB251XB_DEF_VENDOR_ID 0x0424
#define USB251XB_ADDR_PRODUCT_ID_LSB 0x02
#define USB251XB_ADDR_PRODUCT_ID_MSB 0x03
#define USB251XB_ADDR_DEVICE_ID_LSB 0x04
#define USB251XB_ADDR_DEVICE_ID_MSB 0x05
#define USB251XB_DEF_DEVICE_ID 0x0BB3
#define USB251XB_ADDR_CONFIG_DATA_1 0x06
#define USB251XB_DEF_CONFIG_DATA_1 0x9B
#define USB251XB_ADDR_CONFIG_DATA_2 0x07
#define USB251XB_DEF_CONFIG_DATA_2 0x20
#define USB251XB_ADDR_CONFIG_DATA_3 0x08
#define USB251XB_DEF_CONFIG_DATA_3 0x02
#define USB251XB_ADDR_NON_REMOVABLE_DEVICES 0x09
#define USB251XB_DEF_NON_REMOVABLE_DEVICES 0x00
#define USB251XB_ADDR_PORT_DISABLE_SELF 0x0A
#define USB251XB_DEF_PORT_DISABLE_SELF 0x00
#define USB251XB_ADDR_PORT_DISABLE_BUS 0x0B
#define USB251XB_DEF_PORT_DISABLE_BUS 0x00
#define USB251XB_ADDR_MAX_POWER_SELF 0x0C
#define USB251XB_DEF_MAX_POWER_SELF 0x01
#define USB251XB_ADDR_MAX_POWER_BUS 0x0D
#define USB251XB_DEF_MAX_POWER_BUS 0x32
#define USB251XB_ADDR_MAX_CURRENT_SELF 0x0E
#define USB251XB_DEF_MAX_CURRENT_SELF 0x01
#define USB251XB_ADDR_MAX_CURRENT_BUS 0x0F
#define USB251XB_DEF_MAX_CURRENT_BUS 0x32
#define USB251XB_ADDR_POWER_ON_TIME 0x10
#define USB251XB_DEF_POWER_ON_TIME 0x32
#define USB251XB_ADDR_LANGUAGE_ID_HIGH 0x11
#define USB251XB_ADDR_LANGUAGE_ID_LOW 0x12
#define USB251XB_DEF_LANGUAGE_ID 0x0000
#define USB251XB_STRING_BUFSIZE 62
#define USB251XB_ADDR_MANUFACTURER_STRING_LEN 0x13
#define USB251XB_ADDR_MANUFACTURER_STRING 0x16
#define USB251XB_DEF_MANUFACTURER_STRING "Microchip"
#define USB251XB_ADDR_PRODUCT_STRING_LEN 0x14
#define USB251XB_ADDR_PRODUCT_STRING 0x54
#define USB251XB_ADDR_SERIAL_STRING_LEN 0x15
#define USB251XB_ADDR_SERIAL_STRING 0x92
#define USB251XB_DEF_SERIAL_STRING ""
#define USB251XB_ADDR_BATTERY_CHARGING_ENABLE 0xD0
#define USB251XB_DEF_BATTERY_CHARGING_ENABLE 0x00
#define USB251XB_ADDR_BOOST_UP 0xF6
#define USB251XB_DEF_BOOST_UP 0x00
#define USB251XB_ADDR_BOOST_57 0xF7
#define USB251XB_DEF_BOOST_57 0x00
#define USB251XB_ADDR_BOOST_14 0xF8
#define USB251XB_DEF_BOOST_14 0x00
#define USB251XB_ADDR_PORT_SWAP 0xFA
#define USB251XB_DEF_PORT_SWAP 0x00
#define USB251XB_ADDR_PORT_MAP_12 0xFB
#define USB251XB_DEF_PORT_MAP_12 0x00
#define USB251XB_ADDR_PORT_MAP_34 0xFC
#define USB251XB_DEF_PORT_MAP_34 0x00 /* USB251{3B/i,4B/i,7/i} only */
#define USB251XB_ADDR_PORT_MAP_56 0xFD
#define USB251XB_DEF_PORT_MAP_56 0x00 /* USB2517/i only */
#define USB251XB_ADDR_PORT_MAP_7 0xFE
#define USB251XB_DEF_PORT_MAP_7 0x00 /* USB2517/i only */
#define USB251XB_ADDR_STATUS_COMMAND 0xFF
#define USB251XB_STATUS_COMMAND_SMBUS_DOWN 0x04
#define USB251XB_STATUS_COMMAND_RESET 0x02
#define USB251XB_STATUS_COMMAND_ATTACH 0x01
#define USB251XB_I2C_REG_SZ 0x100
#define USB251XB_I2C_WRITE_SZ 0x10
#define DRIVER_NAME "usb251xb"
#define DRIVER_DESC "Microchip USB 2.0 Hi-Speed Hub Controller"
struct usb251xb {
struct device *dev;
struct i2c_client *i2c;
struct regulator *vdd;
u8 skip_config;
struct gpio_desc *gpio_reset;
u16 vendor_id;
u16 product_id;
u16 device_id;
u8 conf_data1;
u8 conf_data2;
u8 conf_data3;
u8 non_rem_dev;
u8 port_disable_sp;
u8 port_disable_bp;
u8 max_power_sp;
u8 max_power_bp;
u8 max_current_sp;
u8 max_current_bp;
u8 power_on_time;
u16 lang_id;
u8 manufacturer_len;
u8 product_len;
u8 serial_len;
char manufacturer[USB251XB_STRING_BUFSIZE];
char product[USB251XB_STRING_BUFSIZE];
char serial[USB251XB_STRING_BUFSIZE];
u8 bat_charge_en;
u8 boost_up;
u8 boost_57;
u8 boost_14;
u8 port_swap;
u8 port_map12;
u8 port_map34;
u8 port_map56;
u8 port_map7;
u8 status;
};
struct usb251xb_data {
u16 product_id;
u8 port_cnt;
bool led_support;
bool bat_support;
char product_str[USB251XB_STRING_BUFSIZE / 2]; /* ASCII string */
};
static const struct usb251xb_data usb2422_data = {
.product_id = 0x2422,
.port_cnt = 2,
.led_support = false,
.bat_support = true,
.product_str = "USB2422",
};
static const struct usb251xb_data usb2512b_data = {
.product_id = 0x2512,
.port_cnt = 2,
.led_support = false,
.bat_support = true,
.product_str = "USB2512B",
};
static const struct usb251xb_data usb2512bi_data = {
.product_id = 0x2512,
.port_cnt = 2,
.led_support = false,
.bat_support = true,
.product_str = "USB2512Bi",
};
static const struct usb251xb_data usb2513b_data = {
.product_id = 0x2513,
.port_cnt = 3,
.led_support = false,
.bat_support = true,
.product_str = "USB2513B",
};
static const struct usb251xb_data usb2513bi_data = {
.product_id = 0x2513,
.port_cnt = 3,
.led_support = false,
.bat_support = true,
.product_str = "USB2513Bi",
};
static const struct usb251xb_data usb2514b_data = {
.product_id = 0x2514,
.port_cnt = 4,
.led_support = false,
.bat_support = true,
.product_str = "USB2514B",
};
static const struct usb251xb_data usb2514bi_data = {
.product_id = 0x2514,
.port_cnt = 4,
.led_support = false,
.bat_support = true,
.product_str = "USB2514Bi",
};
static const struct usb251xb_data usb2517_data = {
.product_id = 0x2517,
.port_cnt = 7,
.led_support = true,
.bat_support = false,
.product_str = "USB2517",
};
static const struct usb251xb_data usb2517i_data = {
.product_id = 0x2517,
.port_cnt = 7,
.led_support = true,
.bat_support = false,
.product_str = "USB2517i",
};
#ifdef CONFIG_GPIOLIB
static int usb251xb_check_dev_children(struct device *dev, void *child)
{
if (dev->type == &i2c_adapter_type) {
return device_for_each_child(dev, child,
usb251xb_check_dev_children);
}
return (dev == child);
}
static int usb251x_check_gpio_chip(struct usb251xb *hub)
{
struct gpio_chip *gc = gpiod_to_chip(hub->gpio_reset);
struct i2c_adapter *adap = hub->i2c->adapter;
int ret;
if (!hub->gpio_reset)
return 0;
if (!gc)
return -EINVAL;
ret = usb251xb_check_dev_children(&adap->dev, gc->parent);
if (ret) {
		dev_err(hub->dev, "Reset GPIO chip is on the same i2c bus\n");
return -EINVAL;
}
return 0;
}
#else
static int usb251x_check_gpio_chip(struct usb251xb *hub)
{
return 0;
}
#endif
static void usb251xb_reset(struct usb251xb *hub)
{
if (!hub->gpio_reset)
return;
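	/*
	 * Keep the bus segment locked across the reset pulse so no SMBus
	 * traffic can disturb the CFG straps sampled on reset release.
	 */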
i2c_lock_bus(hub->i2c->adapter, I2C_LOCK_SEGMENT);
gpiod_set_value_cansleep(hub->gpio_reset, 1);
usleep_range(1, 10); /* >=1us RESET_N asserted */
gpiod_set_value_cansleep(hub->gpio_reset, 0);
/* wait for hub recovery/stabilization */
usleep_range(500, 750); /* >=500us after RESET_N deasserted */
i2c_unlock_bus(hub->i2c->adapter, I2C_LOCK_SEGMENT);
}
static int usb251xb_connect(struct usb251xb *hub)
{
struct device *dev = hub->dev;
int err, i;
char i2c_wb[USB251XB_I2C_REG_SZ];
memset(i2c_wb, 0, USB251XB_I2C_REG_SZ);
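	/*
	 * Build the complete register image in memory first; it is pushed
	 * to the hub below in 16-byte SMBus block writes.
	 */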
if (hub->skip_config) {
dev_info(dev, "Skip hub configuration, only attach.\n");
i2c_wb[0] = 0x01;
i2c_wb[1] = USB251XB_STATUS_COMMAND_ATTACH;
usb251xb_reset(hub);
err = i2c_smbus_write_i2c_block_data(hub->i2c,
USB251XB_ADDR_STATUS_COMMAND, 2, i2c_wb);
if (err) {
dev_err(dev, "attaching hub failed: %d\n", err);
return err;
}
return 0;
}
i2c_wb[USB251XB_ADDR_VENDOR_ID_MSB] = (hub->vendor_id >> 8) & 0xFF;
i2c_wb[USB251XB_ADDR_VENDOR_ID_LSB] = hub->vendor_id & 0xFF;
i2c_wb[USB251XB_ADDR_PRODUCT_ID_MSB] = (hub->product_id >> 8) & 0xFF;
i2c_wb[USB251XB_ADDR_PRODUCT_ID_LSB] = hub->product_id & 0xFF;
i2c_wb[USB251XB_ADDR_DEVICE_ID_MSB] = (hub->device_id >> 8) & 0xFF;
i2c_wb[USB251XB_ADDR_DEVICE_ID_LSB] = hub->device_id & 0xFF;
i2c_wb[USB251XB_ADDR_CONFIG_DATA_1] = hub->conf_data1;
i2c_wb[USB251XB_ADDR_CONFIG_DATA_2] = hub->conf_data2;
i2c_wb[USB251XB_ADDR_CONFIG_DATA_3] = hub->conf_data3;
i2c_wb[USB251XB_ADDR_NON_REMOVABLE_DEVICES] = hub->non_rem_dev;
i2c_wb[USB251XB_ADDR_PORT_DISABLE_SELF] = hub->port_disable_sp;
i2c_wb[USB251XB_ADDR_PORT_DISABLE_BUS] = hub->port_disable_bp;
i2c_wb[USB251XB_ADDR_MAX_POWER_SELF] = hub->max_power_sp;
i2c_wb[USB251XB_ADDR_MAX_POWER_BUS] = hub->max_power_bp;
i2c_wb[USB251XB_ADDR_MAX_CURRENT_SELF] = hub->max_current_sp;
i2c_wb[USB251XB_ADDR_MAX_CURRENT_BUS] = hub->max_current_bp;
i2c_wb[USB251XB_ADDR_POWER_ON_TIME] = hub->power_on_time;
i2c_wb[USB251XB_ADDR_LANGUAGE_ID_HIGH] = (hub->lang_id >> 8) & 0xFF;
i2c_wb[USB251XB_ADDR_LANGUAGE_ID_LOW] = hub->lang_id & 0xFF;
i2c_wb[USB251XB_ADDR_MANUFACTURER_STRING_LEN] = hub->manufacturer_len;
i2c_wb[USB251XB_ADDR_PRODUCT_STRING_LEN] = hub->product_len;
i2c_wb[USB251XB_ADDR_SERIAL_STRING_LEN] = hub->serial_len;
memcpy(&i2c_wb[USB251XB_ADDR_MANUFACTURER_STRING], hub->manufacturer,
USB251XB_STRING_BUFSIZE);
memcpy(&i2c_wb[USB251XB_ADDR_SERIAL_STRING], hub->serial,
USB251XB_STRING_BUFSIZE);
memcpy(&i2c_wb[USB251XB_ADDR_PRODUCT_STRING], hub->product,
USB251XB_STRING_BUFSIZE);
i2c_wb[USB251XB_ADDR_BATTERY_CHARGING_ENABLE] = hub->bat_charge_en;
i2c_wb[USB251XB_ADDR_BOOST_UP] = hub->boost_up;
i2c_wb[USB251XB_ADDR_BOOST_57] = hub->boost_57;
i2c_wb[USB251XB_ADDR_BOOST_14] = hub->boost_14;
i2c_wb[USB251XB_ADDR_PORT_SWAP] = hub->port_swap;
i2c_wb[USB251XB_ADDR_PORT_MAP_12] = hub->port_map12;
i2c_wb[USB251XB_ADDR_PORT_MAP_34] = hub->port_map34;
i2c_wb[USB251XB_ADDR_PORT_MAP_56] = hub->port_map56;
i2c_wb[USB251XB_ADDR_PORT_MAP_7] = hub->port_map7;
i2c_wb[USB251XB_ADDR_STATUS_COMMAND] = USB251XB_STATUS_COMMAND_ATTACH;
usb251xb_reset(hub);
/* write registers */
for (i = 0; i < (USB251XB_I2C_REG_SZ / USB251XB_I2C_WRITE_SZ); i++) {
int offset = i * USB251XB_I2C_WRITE_SZ;
char wbuf[USB251XB_I2C_WRITE_SZ + 1];
/* The first data byte transferred tells the hub how many data
* bytes will follow (byte count).
*/
wbuf[0] = USB251XB_I2C_WRITE_SZ;
memcpy(&wbuf[1], &i2c_wb[offset], USB251XB_I2C_WRITE_SZ);
dev_dbg(dev, "writing %d byte block %d to 0x%02X\n",
USB251XB_I2C_WRITE_SZ, i, offset);
err = i2c_smbus_write_i2c_block_data(hub->i2c, offset,
USB251XB_I2C_WRITE_SZ + 1,
wbuf);
if (err)
goto out_err;
}
dev_info(dev, "Hub configuration was successful.\n");
return 0;
out_err:
dev_err(dev, "configuring block %d failed: %d\n", i, err);
return err;
}
static void usb251xb_get_ports_field(struct usb251xb *hub,
const char *prop_name, u8 port_cnt,
bool ds_only, u8 *fld)
{
struct device *dev = hub->dev;
struct property *prop;
const __be32 *p;
u32 port;
of_property_for_each_u32(dev->of_node, prop_name, prop, p, port) {
		if (port >= (ds_only ? 1 : 0) && port <= port_cnt)
*fld |= BIT(port);
else
dev_warn(dev, "port %u doesn't exist\n", port);
}
}
static int usb251xb_get_ofdata(struct usb251xb *hub,
const struct usb251xb_data *data)
{
struct device *dev = hub->dev;
struct device_node *np = dev->of_node;
int len;
u32 property_u32 = 0;
const char *cproperty_char;
char str[USB251XB_STRING_BUFSIZE / 2];
if (!np) {
dev_err(dev, "failed to get ofdata\n");
return -ENODEV;
}
hub->skip_config = of_property_read_bool(np, "skip-config");
hub->gpio_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(hub->gpio_reset))
return dev_err_probe(dev, PTR_ERR(hub->gpio_reset),
"unable to request GPIO reset pin\n");
if (of_property_read_u16(np, "vendor-id", &hub->vendor_id))
hub->vendor_id = USB251XB_DEF_VENDOR_ID;
if (of_property_read_u16(np, "product-id", &hub->product_id))
hub->product_id = data->product_id;
if (of_property_read_u16(np, "device-id", &hub->device_id))
hub->device_id = USB251XB_DEF_DEVICE_ID;
hub->conf_data1 = USB251XB_DEF_CONFIG_DATA_1;
if (of_property_read_bool(np, "self-powered")) {
hub->conf_data1 |= BIT(7);
/* Configure Over-Current sens when self-powered */
hub->conf_data1 &= ~BIT(2);
if (of_property_read_bool(np, "ganged-sensing"))
hub->conf_data1 &= ~BIT(1);
else if (of_property_read_bool(np, "individual-sensing"))
hub->conf_data1 |= BIT(1);
} else if (of_property_read_bool(np, "bus-powered")) {
hub->conf_data1 &= ~BIT(7);
/* Disable Over-Current sense when bus-powered */
hub->conf_data1 |= BIT(2);
}
if (of_property_read_bool(np, "disable-hi-speed"))
hub->conf_data1 |= BIT(5);
if (of_property_read_bool(np, "multi-tt"))
hub->conf_data1 |= BIT(4);
else if (of_property_read_bool(np, "single-tt"))
hub->conf_data1 &= ~BIT(4);
if (of_property_read_bool(np, "disable-eop"))
hub->conf_data1 |= BIT(3);
if (of_property_read_bool(np, "individual-port-switching"))
hub->conf_data1 |= BIT(0);
else if (of_property_read_bool(np, "ganged-port-switching"))
hub->conf_data1 &= ~BIT(0);
hub->conf_data2 = USB251XB_DEF_CONFIG_DATA_2;
if (of_property_read_bool(np, "dynamic-power-switching"))
hub->conf_data2 |= BIT(7);
if (!of_property_read_u32(np, "oc-delay-us", &property_u32)) {
if (property_u32 == 100) {
/* 100 us*/
hub->conf_data2 &= ~BIT(5);
hub->conf_data2 &= ~BIT(4);
} else if (property_u32 == 4000) {
/* 4 ms */
hub->conf_data2 &= ~BIT(5);
hub->conf_data2 |= BIT(4);
} else if (property_u32 == 16000) {
/* 16 ms */
hub->conf_data2 |= BIT(5);
hub->conf_data2 |= BIT(4);
} else {
/* 8 ms (DEFAULT) */
hub->conf_data2 |= BIT(5);
hub->conf_data2 &= ~BIT(4);
}
}
if (of_property_read_bool(np, "compound-device"))
hub->conf_data2 |= BIT(3);
hub->conf_data3 = USB251XB_DEF_CONFIG_DATA_3;
if (of_property_read_bool(np, "port-mapping-mode"))
hub->conf_data3 |= BIT(3);
if (data->led_support && of_get_property(np, "led-usb-mode", NULL))
hub->conf_data3 &= ~BIT(1);
if (of_property_read_bool(np, "string-support"))
hub->conf_data3 |= BIT(0);
hub->non_rem_dev = USB251XB_DEF_NON_REMOVABLE_DEVICES;
usb251xb_get_ports_field(hub, "non-removable-ports", data->port_cnt,
true, &hub->non_rem_dev);
hub->port_disable_sp = USB251XB_DEF_PORT_DISABLE_SELF;
usb251xb_get_ports_field(hub, "sp-disabled-ports", data->port_cnt,
true, &hub->port_disable_sp);
hub->port_disable_bp = USB251XB_DEF_PORT_DISABLE_BUS;
usb251xb_get_ports_field(hub, "bp-disabled-ports", data->port_cnt,
true, &hub->port_disable_bp);
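	/*
	 * The MAX_POWER/MAX_CURRENT registers are in units of 2 mA, hence
	 * the division by 2000 to convert from microamps.
	 */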
hub->max_power_sp = USB251XB_DEF_MAX_POWER_SELF;
if (!of_property_read_u32(np, "sp-max-total-current-microamp",
&property_u32))
hub->max_power_sp = min_t(u8, property_u32 / 2000, 50);
hub->max_power_bp = USB251XB_DEF_MAX_POWER_BUS;
if (!of_property_read_u32(np, "bp-max-total-current-microamp",
&property_u32))
hub->max_power_bp = min_t(u8, property_u32 / 2000, 255);
hub->max_current_sp = USB251XB_DEF_MAX_CURRENT_SELF;
if (!of_property_read_u32(np, "sp-max-removable-current-microamp",
&property_u32))
hub->max_current_sp = min_t(u8, property_u32 / 2000, 50);
hub->max_current_bp = USB251XB_DEF_MAX_CURRENT_BUS;
if (!of_property_read_u32(np, "bp-max-removable-current-microamp",
&property_u32))
hub->max_current_bp = min_t(u8, property_u32 / 2000, 255);
hub->power_on_time = USB251XB_DEF_POWER_ON_TIME;
if (!of_property_read_u32(np, "power-on-time-ms", &property_u32))
hub->power_on_time = min_t(u8, property_u32 / 2, 255);
if (of_property_read_u16(np, "language-id", &hub->lang_id))
hub->lang_id = USB251XB_DEF_LANGUAGE_ID;
if (of_property_read_u8(np, "boost-up", &hub->boost_up))
hub->boost_up = USB251XB_DEF_BOOST_UP;
cproperty_char = of_get_property(np, "manufacturer", NULL);
strscpy(str, cproperty_char ? : USB251XB_DEF_MANUFACTURER_STRING,
sizeof(str));
hub->manufacturer_len = strlen(str) & 0xFF;
memset(hub->manufacturer, 0, USB251XB_STRING_BUFSIZE);
len = min_t(size_t, USB251XB_STRING_BUFSIZE / 2, strlen(str));
len = utf8s_to_utf16s(str, len, UTF16_LITTLE_ENDIAN,
(wchar_t *)hub->manufacturer,
USB251XB_STRING_BUFSIZE);
cproperty_char = of_get_property(np, "product", NULL);
strscpy(str, cproperty_char ? : data->product_str, sizeof(str));
hub->product_len = strlen(str) & 0xFF;
memset(hub->product, 0, USB251XB_STRING_BUFSIZE);
len = min_t(size_t, USB251XB_STRING_BUFSIZE / 2, strlen(str));
len = utf8s_to_utf16s(str, len, UTF16_LITTLE_ENDIAN,
(wchar_t *)hub->product,
USB251XB_STRING_BUFSIZE);
cproperty_char = of_get_property(np, "serial", NULL);
strscpy(str, cproperty_char ? : USB251XB_DEF_SERIAL_STRING,
sizeof(str));
hub->serial_len = strlen(str) & 0xFF;
memset(hub->serial, 0, USB251XB_STRING_BUFSIZE);
len = min_t(size_t, USB251XB_STRING_BUFSIZE / 2, strlen(str));
len = utf8s_to_utf16s(str, len, UTF16_LITTLE_ENDIAN,
(wchar_t *)hub->serial,
USB251XB_STRING_BUFSIZE);
/*
	 * The datasheet documents the register as 'Port Swap' but in reality the
* register controls the USB DP/DM signal swapping for each port.
*/
hub->port_swap = USB251XB_DEF_PORT_SWAP;
usb251xb_get_ports_field(hub, "swap-dx-lanes", data->port_cnt,
false, &hub->port_swap);
	/*
	 * The following parameters are currently not exposed to devicetree,
	 * but may be added as soon as they are needed.
	 */
hub->bat_charge_en = USB251XB_DEF_BATTERY_CHARGING_ENABLE;
hub->boost_57 = USB251XB_DEF_BOOST_57;
hub->boost_14 = USB251XB_DEF_BOOST_14;
hub->port_map12 = USB251XB_DEF_PORT_MAP_12;
hub->port_map34 = USB251XB_DEF_PORT_MAP_34;
hub->port_map56 = USB251XB_DEF_PORT_MAP_56;
hub->port_map7 = USB251XB_DEF_PORT_MAP_7;
return 0;
}
static const struct of_device_id usb251xb_of_match[] = {
{
.compatible = "microchip,usb2422",
.data = &usb2422_data,
}, {
.compatible = "microchip,usb2512b",
.data = &usb2512b_data,
}, {
.compatible = "microchip,usb2512bi",
.data = &usb2512bi_data,
}, {
.compatible = "microchip,usb2513b",
.data = &usb2513b_data,
}, {
.compatible = "microchip,usb2513bi",
.data = &usb2513bi_data,
}, {
.compatible = "microchip,usb2514b",
.data = &usb2514b_data,
}, {
.compatible = "microchip,usb2514bi",
.data = &usb2514bi_data,
}, {
.compatible = "microchip,usb2517",
.data = &usb2517_data,
}, {
.compatible = "microchip,usb2517i",
.data = &usb2517i_data,
}, {
/* sentinel */
}
};
MODULE_DEVICE_TABLE(of, usb251xb_of_match);
static void usb251xb_regulator_disable_action(void *data)
{
struct usb251xb *hub = data;
regulator_disable(hub->vdd);
}
static int usb251xb_probe(struct usb251xb *hub)
{
struct device *dev = hub->dev;
struct device_node *np = dev->of_node;
const struct usb251xb_data *usb_data = of_device_get_match_data(dev);
int err;
if (np && usb_data) {
err = usb251xb_get_ofdata(hub, usb_data);
if (err) {
dev_err(dev, "failed to get ofdata: %d\n", err);
return err;
}
}
	/*
	 * The usb251x SMBus-slave SCL lane is muxed with the CFG_SEL0 pin, so
	 * any bus activity at the moment the hub reset is released may cause
	 * an invalid config to be latched by the usb251x. In particular, one
	 * of the config modes makes the hub load default register values
	 * without activating the SMBus-slave interface at all; if the hub
	 * accidentally latches this mode, the driver's SMBus accesses will
	 * fail. Normally we could just lock the SMBus segment the hub's i2c
	 * interface resides on for the duration of the device-specific reset
	 * timing. But the GPIO controller used to handle the hub reset might
	 * sit on the same i2c-bus segment. In that case an error must be
	 * returned, since we can neither safely use the GPIO controller to
	 * release the reset (it may affect the hub configuration) nor lock
	 * the i2c-bus segment (it would deadlock).
	 */
err = usb251x_check_gpio_chip(hub);
if (err)
return err;
hub->vdd = devm_regulator_get(dev, "vdd");
if (IS_ERR(hub->vdd))
return PTR_ERR(hub->vdd);
err = regulator_enable(hub->vdd);
if (err)
return err;
err = devm_add_action_or_reset(dev,
usb251xb_regulator_disable_action, hub);
if (err)
return err;
err = usb251xb_connect(hub);
if (err) {
dev_err(dev, "Failed to connect hub (%d)\n", err);
return err;
}
dev_info(dev, "Hub probed successfully\n");
return 0;
}
static int usb251xb_i2c_probe(struct i2c_client *i2c)
{
struct usb251xb *hub;
	hub = devm_kzalloc(&i2c->dev, sizeof(*hub), GFP_KERNEL);
if (!hub)
return -ENOMEM;
i2c_set_clientdata(i2c, hub);
hub->dev = &i2c->dev;
hub->i2c = i2c;
return usb251xb_probe(hub);
}
static int __maybe_unused usb251xb_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct usb251xb *hub = i2c_get_clientdata(client);
return regulator_disable(hub->vdd);
}
static int __maybe_unused usb251xb_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct usb251xb *hub = i2c_get_clientdata(client);
int err;
err = regulator_enable(hub->vdd);
if (err)
return err;
return usb251xb_connect(hub);
}
static SIMPLE_DEV_PM_OPS(usb251xb_pm_ops, usb251xb_suspend, usb251xb_resume);
static const struct i2c_device_id usb251xb_id[] = {
{ "usb2422", 0 },
{ "usb2512b", 0 },
{ "usb2512bi", 0 },
{ "usb2513b", 0 },
{ "usb2513bi", 0 },
{ "usb2514b", 0 },
{ "usb2514bi", 0 },
{ "usb2517", 0 },
{ "usb2517i", 0 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, usb251xb_id);
static struct i2c_driver usb251xb_i2c_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = usb251xb_of_match,
.pm = &usb251xb_pm_ops,
},
.probe = usb251xb_i2c_probe,
.id_table = usb251xb_id,
};
module_i2c_driver(usb251xb_i2c_driver);
MODULE_AUTHOR("Richard Leitner <[email protected]>");
MODULE_DESCRIPTION("USB251x/xBi USB 2.0 Hub Controller Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/misc/usb251xb.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* adutux - driver for ADU devices from Ontrak Control Systems
* This is an experimental driver. Use at your own risk.
* This driver is not supported by Ontrak Control Systems.
*
* Copyright (c) 2003 John Homppi (SCO, leave this notice here)
*
* derived from the Lego USB Tower driver 0.56:
* Copyright (c) 2003 David Glance <[email protected]>
* 2001 Juergen Stuber <[email protected]>
* that was derived from USB Skeleton driver - 0.5
* Copyright (c) 2001 Greg Kroah-Hartman ([email protected])
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#define DRIVER_AUTHOR "John Homppi"
#define DRIVER_DESC "adutux (see www.ontrak.net)"
/* Define these values to match your device */
#define ADU_VENDOR_ID 0x0a07
#define ADU_PRODUCT_ID 0x0064
/* table of devices that work with this driver */
static const struct usb_device_id device_table[] = {
{ USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID) }, /* ADU100 */
{ USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+20) }, /* ADU120 */
{ USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+30) }, /* ADU130 */
{ USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+100) }, /* ADU200 */
{ USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+108) }, /* ADU208 */
{ USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+118) }, /* ADU218 */
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, device_table);
#ifdef CONFIG_USB_DYNAMIC_MINORS
#define ADU_MINOR_BASE 0
#else
#define ADU_MINOR_BASE 67
#endif
/* we can have up to this number of devices plugged in at once */
#define MAX_DEVICES 16
#define COMMAND_TIMEOUT (2*HZ)
/*
* The locking scheme is a vanilla 3-lock:
* adu_device.buflock: A spinlock, covers what IRQs touch.
 * adutux_mutex: A static lock to cover open_count. It would also cover
* any globals, but we don't have them in 2.6.
* adu_device.mtx: A mutex to hold across sleepers like copy_from_user.
* It covers all of adu_device, except the open_count
* and what .buflock covers.
*/
/* Structure to hold all of our device specific stuff */
struct adu_device {
struct mutex mtx;
struct usb_device *udev; /* save off the usb device pointer */
struct usb_interface *interface;
unsigned int minor; /* the starting minor number for this device */
char serial_number[8];
int open_count; /* number of times this port has been opened */
unsigned long disconnected:1;
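	/*
	 * Reads are double-buffered: the interrupt-in callback fills the
	 * primary buffer while adu_read() drains the secondary one; the
	 * two are swapped under buflock when the secondary runs dry.
	 */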
char *read_buffer_primary;
int read_buffer_length;
char *read_buffer_secondary;
int secondary_head;
int secondary_tail;
spinlock_t buflock;
wait_queue_head_t read_wait;
wait_queue_head_t write_wait;
char *interrupt_in_buffer;
struct usb_endpoint_descriptor *interrupt_in_endpoint;
struct urb *interrupt_in_urb;
int read_urb_finished;
char *interrupt_out_buffer;
struct usb_endpoint_descriptor *interrupt_out_endpoint;
struct urb *interrupt_out_urb;
int out_urb_finished;
};
static DEFINE_MUTEX(adutux_mutex);
static struct usb_driver adu_driver;
static inline void adu_debug_data(struct device *dev, const char *function,
int size, const unsigned char *data)
{
dev_dbg(dev, "%s - length = %d, data = %*ph\n",
function, size, size, data);
}
/*
* adu_abort_transfers
* aborts transfers and frees associated data structures
*/
static void adu_abort_transfers(struct adu_device *dev)
{
unsigned long flags;
if (dev->disconnected)
return;
/* shutdown transfer */
/* XXX Anchor these instead */
spin_lock_irqsave(&dev->buflock, flags);
if (!dev->read_urb_finished) {
spin_unlock_irqrestore(&dev->buflock, flags);
usb_kill_urb(dev->interrupt_in_urb);
} else
spin_unlock_irqrestore(&dev->buflock, flags);
spin_lock_irqsave(&dev->buflock, flags);
if (!dev->out_urb_finished) {
spin_unlock_irqrestore(&dev->buflock, flags);
wait_event_timeout(dev->write_wait, dev->out_urb_finished,
COMMAND_TIMEOUT);
usb_kill_urb(dev->interrupt_out_urb);
} else
spin_unlock_irqrestore(&dev->buflock, flags);
}
static void adu_delete(struct adu_device *dev)
{
/* free data structures */
usb_free_urb(dev->interrupt_in_urb);
usb_free_urb(dev->interrupt_out_urb);
kfree(dev->read_buffer_primary);
kfree(dev->read_buffer_secondary);
kfree(dev->interrupt_in_buffer);
kfree(dev->interrupt_out_buffer);
usb_put_dev(dev->udev);
kfree(dev);
}
static void adu_interrupt_in_callback(struct urb *urb)
{
struct adu_device *dev = urb->context;
int status = urb->status;
unsigned long flags;
adu_debug_data(&dev->udev->dev, __func__,
urb->actual_length, urb->transfer_buffer);
spin_lock_irqsave(&dev->buflock, flags);
if (status != 0) {
if ((status != -ENOENT) && (status != -ECONNRESET) &&
(status != -ESHUTDOWN)) {
dev_dbg(&dev->udev->dev,
"%s : nonzero status received: %d\n",
__func__, status);
}
goto exit;
}
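	/*
	 * A zero first byte appears to mark an empty poll from the device,
	 * so such reports are dropped (behaviour inherited from the
	 * original driver).
	 */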
if (urb->actual_length > 0 && dev->interrupt_in_buffer[0] != 0x00) {
if (dev->read_buffer_length <
(4 * usb_endpoint_maxp(dev->interrupt_in_endpoint)) -
(urb->actual_length)) {
memcpy (dev->read_buffer_primary +
dev->read_buffer_length,
dev->interrupt_in_buffer, urb->actual_length);
dev->read_buffer_length += urb->actual_length;
dev_dbg(&dev->udev->dev, "%s reading %d\n", __func__,
urb->actual_length);
} else {
dev_dbg(&dev->udev->dev, "%s : read_buffer overflow\n",
__func__);
}
}
exit:
dev->read_urb_finished = 1;
spin_unlock_irqrestore(&dev->buflock, flags);
/* always wake up so we recover from errors */
wake_up_interruptible(&dev->read_wait);
}
static void adu_interrupt_out_callback(struct urb *urb)
{
struct adu_device *dev = urb->context;
int status = urb->status;
unsigned long flags;
adu_debug_data(&dev->udev->dev, __func__,
urb->actual_length, urb->transfer_buffer);
if (status != 0) {
if ((status != -ENOENT) &&
(status != -ESHUTDOWN) &&
(status != -ECONNRESET)) {
			dev_dbg(&dev->udev->dev,
				"%s : nonzero status received: %d\n",
				__func__, status);
}
return;
}
spin_lock_irqsave(&dev->buflock, flags);
dev->out_urb_finished = 1;
wake_up(&dev->write_wait);
spin_unlock_irqrestore(&dev->buflock, flags);
}
static int adu_open(struct inode *inode, struct file *file)
{
struct adu_device *dev = NULL;
struct usb_interface *interface;
int subminor;
int retval;
subminor = iminor(inode);
retval = mutex_lock_interruptible(&adutux_mutex);
if (retval)
goto exit_no_lock;
interface = usb_find_interface(&adu_driver, subminor);
if (!interface) {
pr_err("%s - error, can't find device for minor %d\n",
__func__, subminor);
retval = -ENODEV;
goto exit_no_device;
}
dev = usb_get_intfdata(interface);
if (!dev) {
retval = -ENODEV;
goto exit_no_device;
}
/* check that nobody else is using the device */
if (dev->open_count) {
retval = -EBUSY;
goto exit_no_device;
}
++dev->open_count;
dev_dbg(&dev->udev->dev, "%s: open count %d\n", __func__,
dev->open_count);
/* save device in the file's private structure */
file->private_data = dev;
/* initialize in direction */
dev->read_buffer_length = 0;
/* fixup first read by having urb waiting for it */
usb_fill_int_urb(dev->interrupt_in_urb, dev->udev,
usb_rcvintpipe(dev->udev,
dev->interrupt_in_endpoint->bEndpointAddress),
dev->interrupt_in_buffer,
usb_endpoint_maxp(dev->interrupt_in_endpoint),
adu_interrupt_in_callback, dev,
dev->interrupt_in_endpoint->bInterval);
dev->read_urb_finished = 0;
if (usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL))
dev->read_urb_finished = 1;
/* we ignore failure */
/* end of fixup for first read */
/* initialize out direction */
dev->out_urb_finished = 1;
retval = 0;
exit_no_device:
mutex_unlock(&adutux_mutex);
exit_no_lock:
return retval;
}
static void adu_release_internal(struct adu_device *dev)
{
/* decrement our usage count for the device */
--dev->open_count;
dev_dbg(&dev->udev->dev, "%s : open count %d\n", __func__,
dev->open_count);
if (dev->open_count <= 0) {
adu_abort_transfers(dev);
dev->open_count = 0;
}
}
static int adu_release(struct inode *inode, struct file *file)
{
struct adu_device *dev;
int retval = 0;
if (file == NULL) {
retval = -ENODEV;
goto exit;
}
dev = file->private_data;
if (dev == NULL) {
retval = -ENODEV;
goto exit;
}
mutex_lock(&adutux_mutex); /* not interruptible */
if (dev->open_count <= 0) {
dev_dbg(&dev->udev->dev, "%s : device not opened\n", __func__);
retval = -ENODEV;
goto unlock;
}
adu_release_internal(dev);
if (dev->disconnected) {
/* the device was unplugged before the file was released */
if (!dev->open_count) /* ... and we're the last user */
adu_delete(dev);
}
unlock:
mutex_unlock(&adutux_mutex);
exit:
return retval;
}
static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
loff_t *ppos)
{
struct adu_device *dev;
size_t bytes_read = 0;
size_t bytes_to_read = count;
int retval = 0;
int timeout = 0;
int should_submit = 0;
unsigned long flags;
DECLARE_WAITQUEUE(wait, current);
dev = file->private_data;
if (mutex_lock_interruptible(&dev->mtx))
return -ERESTARTSYS;
/* verify that the device wasn't unplugged */
if (dev->disconnected) {
retval = -ENODEV;
pr_err("No device or device unplugged %d\n", retval);
goto exit;
}
/* verify that some data was requested */
if (count == 0) {
dev_dbg(&dev->udev->dev, "%s : read request of 0 bytes\n",
__func__);
goto exit;
}
timeout = COMMAND_TIMEOUT;
dev_dbg(&dev->udev->dev, "%s : about to start looping\n", __func__);
while (bytes_to_read) {
size_t data_in_secondary = dev->secondary_tail - dev->secondary_head;
dev_dbg(&dev->udev->dev,
"%s : while, data_in_secondary=%zu, status=%d\n",
__func__, data_in_secondary,
dev->interrupt_in_urb->status);
if (data_in_secondary) {
/* drain secondary buffer */
size_t amount = min(bytes_to_read, data_in_secondary);
if (copy_to_user(buffer, dev->read_buffer_secondary+dev->secondary_head, amount)) {
retval = -EFAULT;
goto exit;
}
dev->secondary_head += amount;
bytes_read += amount;
bytes_to_read -= amount;
} else {
/* we check the primary buffer */
spin_lock_irqsave (&dev->buflock, flags);
if (dev->read_buffer_length) {
/* we secure access to the primary */
dev_dbg(&dev->udev->dev,
"%s : swap, read_buffer_length = %d\n",
__func__, dev->read_buffer_length);
swap(dev->read_buffer_primary, dev->read_buffer_secondary);
dev->secondary_head = 0;
dev->secondary_tail = dev->read_buffer_length;
dev->read_buffer_length = 0;
spin_unlock_irqrestore(&dev->buflock, flags);
/* we have a free buffer so use it */
should_submit = 1;
} else {
/* even the primary was empty - we may need to do IO */
if (!dev->read_urb_finished) {
/* somebody is doing IO */
spin_unlock_irqrestore(&dev->buflock, flags);
dev_dbg(&dev->udev->dev,
"%s : submitted already\n",
__func__);
} else {
/* we must initiate input */
dev_dbg(&dev->udev->dev,
"%s : initiate input\n",
__func__);
dev->read_urb_finished = 0;
spin_unlock_irqrestore(&dev->buflock, flags);
usb_fill_int_urb(dev->interrupt_in_urb, dev->udev,
usb_rcvintpipe(dev->udev,
dev->interrupt_in_endpoint->bEndpointAddress),
dev->interrupt_in_buffer,
usb_endpoint_maxp(dev->interrupt_in_endpoint),
adu_interrupt_in_callback,
dev,
dev->interrupt_in_endpoint->bInterval);
retval = usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL);
if (retval) {
dev->read_urb_finished = 1;
if (retval == -ENOMEM) {
retval = bytes_read ? bytes_read : -ENOMEM;
}
dev_dbg(&dev->udev->dev,
"%s : submit failed\n",
__func__);
goto exit;
}
}
/* we wait for I/O to complete */
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&dev->read_wait, &wait);
spin_lock_irqsave(&dev->buflock, flags);
if (!dev->read_urb_finished) {
spin_unlock_irqrestore(&dev->buflock, flags);
timeout = schedule_timeout(COMMAND_TIMEOUT);
} else {
spin_unlock_irqrestore(&dev->buflock, flags);
set_current_state(TASK_RUNNING);
}
remove_wait_queue(&dev->read_wait, &wait);
if (timeout <= 0) {
dev_dbg(&dev->udev->dev,
"%s : timeout\n", __func__);
retval = bytes_read ? bytes_read : -ETIMEDOUT;
goto exit;
}
if (signal_pending(current)) {
dev_dbg(&dev->udev->dev,
"%s : signal pending\n",
__func__);
retval = bytes_read ? bytes_read : -EINTR;
goto exit;
}
}
}
}
retval = bytes_read;
/* if the primary buffer is empty then use it */
spin_lock_irqsave(&dev->buflock, flags);
if (should_submit && dev->read_urb_finished) {
dev->read_urb_finished = 0;
spin_unlock_irqrestore(&dev->buflock, flags);
usb_fill_int_urb(dev->interrupt_in_urb, dev->udev,
usb_rcvintpipe(dev->udev,
dev->interrupt_in_endpoint->bEndpointAddress),
dev->interrupt_in_buffer,
usb_endpoint_maxp(dev->interrupt_in_endpoint),
adu_interrupt_in_callback,
dev,
dev->interrupt_in_endpoint->bInterval);
if (usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL) != 0)
dev->read_urb_finished = 1;
/* we ignore failure */
} else {
spin_unlock_irqrestore(&dev->buflock, flags);
}
exit:
/* unlock the device */
mutex_unlock(&dev->mtx);
return retval;
}
static ssize_t adu_write(struct file *file, const __user char *buffer,
size_t count, loff_t *ppos)
{
DECLARE_WAITQUEUE(waita, current);
struct adu_device *dev;
size_t bytes_written = 0;
size_t bytes_to_write;
size_t buffer_size;
unsigned long flags;
int retval;
dev = file->private_data;
retval = mutex_lock_interruptible(&dev->mtx);
if (retval)
goto exit_nolock;
/* verify that the device wasn't unplugged */
if (dev->disconnected) {
retval = -ENODEV;
pr_err("No device or device unplugged %d\n", retval);
goto exit;
}
/* verify that we actually have some data to write */
if (count == 0) {
dev_dbg(&dev->udev->dev, "%s : write request of 0 bytes\n",
__func__);
goto exit;
}
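	/*
	 * Chunk the user buffer into endpoint-sized interrupt transfers,
	 * sleeping until the previous out-URB has completed.
	 */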
while (count > 0) {
add_wait_queue(&dev->write_wait, &waita);
set_current_state(TASK_INTERRUPTIBLE);
spin_lock_irqsave(&dev->buflock, flags);
if (!dev->out_urb_finished) {
spin_unlock_irqrestore(&dev->buflock, flags);
mutex_unlock(&dev->mtx);
if (signal_pending(current)) {
dev_dbg(&dev->udev->dev, "%s : interrupted\n",
__func__);
set_current_state(TASK_RUNNING);
retval = -EINTR;
goto exit_onqueue;
}
if (schedule_timeout(COMMAND_TIMEOUT) == 0) {
dev_dbg(&dev->udev->dev,
"%s - command timed out.\n", __func__);
retval = -ETIMEDOUT;
goto exit_onqueue;
}
remove_wait_queue(&dev->write_wait, &waita);
retval = mutex_lock_interruptible(&dev->mtx);
if (retval) {
retval = bytes_written ? bytes_written : retval;
goto exit_nolock;
}
dev_dbg(&dev->udev->dev,
"%s : in progress, count = %zd\n",
__func__, count);
} else {
spin_unlock_irqrestore(&dev->buflock, flags);
set_current_state(TASK_RUNNING);
remove_wait_queue(&dev->write_wait, &waita);
dev_dbg(&dev->udev->dev, "%s : sending, count = %zd\n",
__func__, count);
/* write the data into interrupt_out_buffer from userspace */
buffer_size = usb_endpoint_maxp(dev->interrupt_out_endpoint);
			bytes_to_write = min(count, buffer_size);
dev_dbg(&dev->udev->dev,
"%s : buffer_size = %zd, count = %zd, bytes_to_write = %zd\n",
__func__, buffer_size, count, bytes_to_write);
if (copy_from_user(dev->interrupt_out_buffer, buffer, bytes_to_write) != 0) {
retval = -EFAULT;
goto exit;
}
/* send off the urb */
usb_fill_int_urb(
dev->interrupt_out_urb,
dev->udev,
usb_sndintpipe(dev->udev, dev->interrupt_out_endpoint->bEndpointAddress),
dev->interrupt_out_buffer,
bytes_to_write,
adu_interrupt_out_callback,
dev,
dev->interrupt_out_endpoint->bInterval);
dev->interrupt_out_urb->actual_length = bytes_to_write;
dev->out_urb_finished = 0;
retval = usb_submit_urb(dev->interrupt_out_urb, GFP_KERNEL);
if (retval < 0) {
dev->out_urb_finished = 1;
				dev_err(&dev->udev->dev,
					"Couldn't submit interrupt_out_urb %d\n",
					retval);
goto exit;
}
buffer += bytes_to_write;
count -= bytes_to_write;
bytes_written += bytes_to_write;
}
}
mutex_unlock(&dev->mtx);
return bytes_written;
exit:
mutex_unlock(&dev->mtx);
exit_nolock:
return retval;
exit_onqueue:
remove_wait_queue(&dev->write_wait, &waita);
return retval;
}
/* file operations needed when we register this driver */
static const struct file_operations adu_fops = {
.owner = THIS_MODULE,
.read = adu_read,
.write = adu_write,
.open = adu_open,
.release = adu_release,
.llseek = noop_llseek,
};
/*
* usb class driver info in order to get a minor number from the usb core,
* and to have the device registered with devfs and the driver core
*/
static struct usb_class_driver adu_class = {
.name = "usb/adutux%d",
.fops = &adu_fops,
.minor_base = ADU_MINOR_BASE,
};
/*
* adu_probe
*
* Called by the usb core when a new device is connected that it thinks
* this driver might be interested in.
*/
static int adu_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(interface);
struct adu_device *dev = NULL;
int retval = -ENOMEM;
int in_end_size;
int out_end_size;
int res;
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(struct adu_device), GFP_KERNEL);
if (!dev)
return -ENOMEM;
mutex_init(&dev->mtx);
spin_lock_init(&dev->buflock);
dev->udev = usb_get_dev(udev);
init_waitqueue_head(&dev->read_wait);
init_waitqueue_head(&dev->write_wait);
res = usb_find_common_endpoints_reverse(interface->cur_altsetting,
NULL, NULL,
&dev->interrupt_in_endpoint,
&dev->interrupt_out_endpoint);
if (res) {
dev_err(&interface->dev, "interrupt endpoints not found\n");
retval = res;
goto error;
}
in_end_size = usb_endpoint_maxp(dev->interrupt_in_endpoint);
out_end_size = usb_endpoint_maxp(dev->interrupt_out_endpoint);
dev->read_buffer_primary = kmalloc((4 * in_end_size), GFP_KERNEL);
if (!dev->read_buffer_primary)
goto error;
/* debug code prime the buffer */
memset(dev->read_buffer_primary, 'a', in_end_size);
memset(dev->read_buffer_primary + in_end_size, 'b', in_end_size);
memset(dev->read_buffer_primary + (2 * in_end_size), 'c', in_end_size);
memset(dev->read_buffer_primary + (3 * in_end_size), 'd', in_end_size);
dev->read_buffer_secondary = kmalloc((4 * in_end_size), GFP_KERNEL);
if (!dev->read_buffer_secondary)
goto error;
/* debug code prime the buffer */
memset(dev->read_buffer_secondary, 'e', in_end_size);
memset(dev->read_buffer_secondary + in_end_size, 'f', in_end_size);
memset(dev->read_buffer_secondary + (2 * in_end_size), 'g', in_end_size);
memset(dev->read_buffer_secondary + (3 * in_end_size), 'h', in_end_size);
dev->interrupt_in_buffer = kmalloc(in_end_size, GFP_KERNEL);
if (!dev->interrupt_in_buffer)
goto error;
/* debug code prime the buffer */
memset(dev->interrupt_in_buffer, 'i', in_end_size);
dev->interrupt_in_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->interrupt_in_urb)
goto error;
dev->interrupt_out_buffer = kmalloc(out_end_size, GFP_KERNEL);
if (!dev->interrupt_out_buffer)
goto error;
dev->interrupt_out_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->interrupt_out_urb)
goto error;
	if (usb_string(udev, udev->descriptor.iSerialNumber, dev->serial_number,
		       sizeof(dev->serial_number)) <= 0) {
dev_err(&interface->dev, "Could not retrieve serial number\n");
retval = -EIO;
goto error;
}
dev_dbg(&interface->dev, "serial_number=%s", dev->serial_number);
/* we can register the device now, as it is ready */
usb_set_intfdata(interface, dev);
retval = usb_register_dev(interface, &adu_class);
if (retval) {
/* something prevented us from registering this driver */
dev_err(&interface->dev, "Not able to get a minor for this device.\n");
usb_set_intfdata(interface, NULL);
goto error;
}
dev->minor = interface->minor;
/* let the user know what node this device is now attached to */
dev_info(&interface->dev, "ADU%d %s now attached to /dev/usb/adutux%d\n",
le16_to_cpu(udev->descriptor.idProduct), dev->serial_number,
(dev->minor - ADU_MINOR_BASE));
return 0;
error:
adu_delete(dev);
return retval;
}
/*
* adu_disconnect
*
* Called by the usb core when the device is removed from the system.
*/
static void adu_disconnect(struct usb_interface *interface)
{
struct adu_device *dev;
dev = usb_get_intfdata(interface);
usb_deregister_dev(interface, &adu_class);
usb_poison_urb(dev->interrupt_in_urb);
usb_poison_urb(dev->interrupt_out_urb);
mutex_lock(&adutux_mutex);
usb_set_intfdata(interface, NULL);
mutex_lock(&dev->mtx); /* not interruptible */
dev->disconnected = 1;
mutex_unlock(&dev->mtx);
/* if the device is not opened, then we clean up right now */
if (!dev->open_count)
adu_delete(dev);
mutex_unlock(&adutux_mutex);
}
/* usb specific object needed to register this driver with the usb subsystem */
static struct usb_driver adu_driver = {
.name = "adutux",
.probe = adu_probe,
.disconnect = adu_disconnect,
.id_table = device_table,
};
module_usb_driver(adu_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/misc/adutux.c |
// SPDX-License-Identifier: GPL-2.0
/*
* cypress_cy7c63.c
*
* Copyright (c) 2006-2007 Oliver Bock ([email protected])
*
* This driver is based on the Cypress USB Driver by Marcus Maul
* (cyport) and the 2.0 version of Greg Kroah-Hartman's
* USB Skeleton driver.
*
* This is a generic driver for the Cypress CY7C63xxx family.
* For the time being it enables you to read from and write to
* the single I/O ports of the device.
*
* Supported vendors: AK Modul-Bus Computer GmbH
* (Firmware "Port-Chip")
*
* Supported devices: CY7C63001A-PC
* CY7C63001C-PXC
* CY7C63001C-SXC
*
* Supported functions: Read/Write Ports
*
*
* For up-to-date information please visit:
* http://www.obock.de/kernel/cypress
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/usb.h>
#define DRIVER_AUTHOR "Oliver Bock ([email protected])"
#define DRIVER_DESC "Cypress CY7C63xxx USB driver"
#define CYPRESS_VENDOR_ID 0xa2c
#define CYPRESS_PRODUCT_ID 0x8
#define CYPRESS_READ_PORT 0x4
#define CYPRESS_WRITE_PORT 0x5
#define CYPRESS_READ_RAM 0x2
#define CYPRESS_WRITE_RAM 0x3
#define CYPRESS_READ_ROM 0x1
#define CYPRESS_READ_PORT_ID0 0
#define CYPRESS_WRITE_PORT_ID0 0
#define CYPRESS_READ_PORT_ID1 0x2
#define CYPRESS_WRITE_PORT_ID1 1
#define CYPRESS_MAX_REQSIZE 8
/* table of devices that work with this driver */
static const struct usb_device_id cypress_table[] = {
{ USB_DEVICE(CYPRESS_VENDOR_ID, CYPRESS_PRODUCT_ID) },
{ }
};
MODULE_DEVICE_TABLE(usb, cypress_table);
/* structure to hold all of our device specific stuff */
struct cypress {
struct usb_device * udev;
unsigned char port[2];
};
/* used to send usb control messages to device */
static int vendor_command(struct cypress *dev, unsigned char request,
unsigned char address, unsigned char data)
{
int retval = 0;
unsigned int pipe;
unsigned char *iobuf;
	/* allocate some memory for the i/o buffer */
iobuf = kzalloc(CYPRESS_MAX_REQSIZE, GFP_KERNEL);
if (!iobuf) {
retval = -ENOMEM;
goto error;
}
dev_dbg(&dev->udev->dev, "Sending usb_control_msg (data: %d)\n", data);
/* prepare usb control message and send it upstream */
pipe = usb_rcvctrlpipe(dev->udev, 0);
retval = usb_control_msg(dev->udev, pipe, request,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER,
address, data, iobuf, CYPRESS_MAX_REQSIZE,
USB_CTRL_GET_TIMEOUT);
/* store returned data (more READs to be added) */
switch (request) {
case CYPRESS_READ_PORT:
if (address == CYPRESS_READ_PORT_ID0) {
dev->port[0] = iobuf[1];
dev_dbg(&dev->udev->dev,
"READ_PORT0 returned: %d\n",
dev->port[0]);
}
else if (address == CYPRESS_READ_PORT_ID1) {
dev->port[1] = iobuf[1];
dev_dbg(&dev->udev->dev,
"READ_PORT1 returned: %d\n",
dev->port[1]);
}
break;
}
kfree(iobuf);
error:
return retval;
}
/* write port value */
static ssize_t write_port(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count,
int port_num, int write_id)
{
int value = -1;
int result = 0;
struct usb_interface *intf = to_usb_interface(dev);
struct cypress *cyp = usb_get_intfdata(intf);
dev_dbg(&cyp->udev->dev, "WRITE_PORT%d called\n", port_num);
/* validate input data */
	if (sscanf(buf, "%d", &value) < 1 || value < 0 || value > 255) {
		result = -EINVAL;
		goto error;
	}
result = vendor_command(cyp, CYPRESS_WRITE_PORT, write_id,
(unsigned char)value);
dev_dbg(&cyp->udev->dev, "Result of vendor_command: %d\n\n", result);
error:
return result < 0 ? result : count;
}
/* attribute callback handler (write) */
static ssize_t port0_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
return write_port(dev, attr, buf, count, 0, CYPRESS_WRITE_PORT_ID0);
}
/* attribute callback handler (write) */
static ssize_t port1_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
return write_port(dev, attr, buf, count, 1, CYPRESS_WRITE_PORT_ID1);
}
/* read port value */
static ssize_t read_port(struct device *dev, struct device_attribute *attr,
char *buf, int port_num, int read_id)
{
int result = 0;
struct usb_interface *intf = to_usb_interface(dev);
struct cypress *cyp = usb_get_intfdata(intf);
dev_dbg(&cyp->udev->dev, "READ_PORT%d called\n", port_num);
result = vendor_command(cyp, CYPRESS_READ_PORT, read_id, 0);
dev_dbg(&cyp->udev->dev, "Result of vendor_command: %d\n\n", result);
	return sysfs_emit(buf, "%d", cyp->port[port_num]);
}
/* attribute callback handler (read) */
static ssize_t port0_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return read_port(dev, attr, buf, 0, CYPRESS_READ_PORT_ID0);
}
static DEVICE_ATTR_RW(port0);
/* attribute callback handler (read) */
static ssize_t port1_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return read_port(dev, attr, buf, 1, CYPRESS_READ_PORT_ID1);
}
static DEVICE_ATTR_RW(port1);
static struct attribute *cypress_attrs[] = {
&dev_attr_port0.attr,
&dev_attr_port1.attr,
NULL,
};
ATTRIBUTE_GROUPS(cypress);
static int cypress_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct cypress *dev;
int retval = -ENOMEM;
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
goto error_mem;
dev->udev = usb_get_dev(interface_to_usbdev(interface));
/* save our data pointer in this interface device */
usb_set_intfdata(interface, dev);
/* let the user know that the device is now attached */
dev_info(&interface->dev,
"Cypress CY7C63xxx device now attached\n");
return 0;
error_mem:
return retval;
}
static void cypress_disconnect(struct usb_interface *interface)
{
struct cypress *dev;
dev = usb_get_intfdata(interface);
/* the intfdata can be set to NULL only after the
* device files have been removed */
usb_set_intfdata(interface, NULL);
usb_put_dev(dev->udev);
dev_info(&interface->dev,
"Cypress CY7C63xxx device now disconnected\n");
kfree(dev);
}
static struct usb_driver cypress_driver = {
.name = "cypress_cy7c63",
.probe = cypress_probe,
.disconnect = cypress_disconnect,
.id_table = cypress_table,
.dev_groups = cypress_groups,
};
module_usb_driver(cypress_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/misc/cypress_cy7c63.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/ch11.h>
#define TEST_SE0_NAK_PID 0x0101
#define TEST_J_PID 0x0102
#define TEST_K_PID 0x0103
#define TEST_PACKET_PID 0x0104
#define TEST_HS_HOST_PORT_SUSPEND_RESUME 0x0106
#define TEST_SINGLE_STEP_GET_DEV_DESC 0x0107
#define TEST_SINGLE_STEP_SET_FEATURE 0x0108
extern const struct usb_device_id *usb_device_match_id(struct usb_device *udev,
const struct usb_device_id *id);
/*
 * A list of USB hubs which require the port power to be
 * disabled before starting the testing procedures.
*/
static const struct usb_device_id ehset_hub_list[] = {
{ USB_DEVICE(0x0424, 0x4502) },
{ USB_DEVICE(0x0424, 0x4913) },
{ USB_DEVICE(0x0451, 0x8027) },
{ }
};
static int ehset_prepare_port_for_testing(struct usb_device *hub_udev, u16 portnum)
{
int ret = 0;
/*
* The USB2.0 spec chapter 11.24.2.13 says that the USB port which is
* going under test needs to be put in suspend before sending the
* test command. Most hubs don't enforce this precondition, but there
	 * are some hubs which need the power to the port disabled before
* starting the test.
*/
if (usb_device_match_id(hub_udev, ehset_hub_list)) {
ret = usb_control_msg_send(hub_udev, 0, USB_REQ_CLEAR_FEATURE,
USB_RT_PORT, USB_PORT_FEAT_ENABLE,
portnum, NULL, 0, 1000, GFP_KERNEL);
/*
* Wait for the port to be disabled. It's an arbitrary value
* which worked every time.
*/
msleep(100);
} else {
/*
* For the hubs which are compliant with the spec,
* put the port in SUSPEND.
*/
ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
USB_RT_PORT, USB_PORT_FEAT_SUSPEND,
portnum, NULL, 0, 1000, GFP_KERNEL);
}
return ret;
}
static int ehset_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
int ret = -EINVAL;
struct usb_device *dev = interface_to_usbdev(intf);
struct usb_device *hub_udev = dev->parent;
struct usb_device_descriptor buf;
u8 portnum = dev->portnum;
u16 test_pid = le16_to_cpu(dev->descriptor.idProduct);
switch (test_pid) {
case TEST_SE0_NAK_PID:
ret = ehset_prepare_port_for_testing(hub_udev, portnum);
if (ret < 0)
break;
ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
USB_RT_PORT, USB_PORT_FEAT_TEST,
(USB_TEST_SE0_NAK << 8) | portnum,
NULL, 0, 1000, GFP_KERNEL);
break;
case TEST_J_PID:
ret = ehset_prepare_port_for_testing(hub_udev, portnum);
if (ret < 0)
break;
ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
USB_RT_PORT, USB_PORT_FEAT_TEST,
(USB_TEST_J << 8) | portnum, NULL, 0,
1000, GFP_KERNEL);
break;
case TEST_K_PID:
ret = ehset_prepare_port_for_testing(hub_udev, portnum);
if (ret < 0)
break;
ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
USB_RT_PORT, USB_PORT_FEAT_TEST,
(USB_TEST_K << 8) | portnum, NULL, 0,
1000, GFP_KERNEL);
break;
case TEST_PACKET_PID:
ret = ehset_prepare_port_for_testing(hub_udev, portnum);
if (ret < 0)
break;
ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
USB_RT_PORT, USB_PORT_FEAT_TEST,
(USB_TEST_PACKET << 8) | portnum,
NULL, 0, 1000, GFP_KERNEL);
break;
case TEST_HS_HOST_PORT_SUSPEND_RESUME:
/* Test: wait for 15secs -> suspend -> 15secs delay -> resume */
msleep(15 * 1000);
ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
USB_RT_PORT, USB_PORT_FEAT_SUSPEND,
portnum, NULL, 0, 1000, GFP_KERNEL);
if (ret < 0)
break;
msleep(15 * 1000);
ret = usb_control_msg_send(hub_udev, 0, USB_REQ_CLEAR_FEATURE,
USB_RT_PORT, USB_PORT_FEAT_SUSPEND,
portnum, NULL, 0, 1000, GFP_KERNEL);
break;
case TEST_SINGLE_STEP_GET_DEV_DESC:
/* Test: wait for 15secs -> GetDescriptor request */
msleep(15 * 1000);
ret = usb_control_msg_recv(dev, 0, USB_REQ_GET_DESCRIPTOR,
USB_DIR_IN, USB_DT_DEVICE << 8, 0,
&buf, USB_DT_DEVICE_SIZE,
USB_CTRL_GET_TIMEOUT, GFP_KERNEL);
break;
case TEST_SINGLE_STEP_SET_FEATURE:
/*
* GetDescriptor SETUP request -> 15secs delay -> IN & STATUS
*
* Note, this test is only supported on root hubs since the
* SetPortFeature handling can only be done inside the HCD's
* hub_control callback function.
*/
if (hub_udev != dev->bus->root_hub) {
dev_err(&intf->dev, "SINGLE_STEP_SET_FEATURE test only supported on root hub\n");
break;
}
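/*
 * The bare selector 6 below is assumed (there is no USB_TEST_* constant
 * for it in ch11.h) to be the EHSET SINGLE_STEP_SET_FEATURE test-mode
 * value that the HCD's hub_control callback recognizes.
 */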
ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
USB_RT_PORT, USB_PORT_FEAT_TEST,
(6 << 8) | portnum, NULL, 0,
60 * 1000, GFP_KERNEL);
break;
default:
dev_err(&intf->dev, "%s: unsupported PID: 0x%x\n",
__func__, test_pid);
}
return ret;
}
static void ehset_disconnect(struct usb_interface *intf)
{
}
static const struct usb_device_id ehset_id_table[] = {
{ USB_DEVICE(0x1a0a, TEST_SE0_NAK_PID) },
{ USB_DEVICE(0x1a0a, TEST_J_PID) },
{ USB_DEVICE(0x1a0a, TEST_K_PID) },
{ USB_DEVICE(0x1a0a, TEST_PACKET_PID) },
{ USB_DEVICE(0x1a0a, TEST_HS_HOST_PORT_SUSPEND_RESUME) },
{ USB_DEVICE(0x1a0a, TEST_SINGLE_STEP_GET_DEV_DESC) },
{ USB_DEVICE(0x1a0a, TEST_SINGLE_STEP_SET_FEATURE) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, ehset_id_table);
static struct usb_driver ehset_driver = {
.name = "usb_ehset_test",
.probe = ehset_probe,
.disconnect = ehset_disconnect,
.id_table = ehset_id_table,
};
module_usb_driver(ehset_driver);
MODULE_DESCRIPTION("USB Driver for EHSET Test Fixture");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/misc/ehset.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Native support for the I/O-Warrior USB devices
*
* Copyright (c) 2003-2005, 2020 Code Mercenaries GmbH
* written by Christian Lucht <[email protected]> and
* Christoph Jung <[email protected]>
*
* based on
* usb-skeleton.c by Greg Kroah-Hartman <[email protected]>
* brlvger.c by Stephane Dalton <[email protected]>
* and Stephane Doyon <[email protected]>
*
* Released under the GPLv2.
*/
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/usb/iowarrior.h>
#define DRIVER_AUTHOR "Christian Lucht <[email protected]>"
#define DRIVER_DESC "USB IO-Warrior driver"
#define USB_VENDOR_ID_CODEMERCS 1984
/* low speed iowarrior */
#define USB_DEVICE_ID_CODEMERCS_IOW40 0x1500
#define USB_DEVICE_ID_CODEMERCS_IOW24 0x1501
#define USB_DEVICE_ID_CODEMERCS_IOWPV1 0x1511
#define USB_DEVICE_ID_CODEMERCS_IOWPV2 0x1512
/* full speed iowarrior */
#define USB_DEVICE_ID_CODEMERCS_IOW56 0x1503
/* fuller speed iowarrior */
#define USB_DEVICE_ID_CODEMERCS_IOW28 0x1504
#define USB_DEVICE_ID_CODEMERCS_IOW28L 0x1505
#define USB_DEVICE_ID_CODEMERCS_IOW100 0x1506
/* OEMed devices */
#define USB_DEVICE_ID_CODEMERCS_IOW24SAG 0x158a
#define USB_DEVICE_ID_CODEMERCS_IOW56AM 0x158b
/* Get a minor range for your devices from the usb maintainer */
#ifdef CONFIG_USB_DYNAMIC_MINORS
#define IOWARRIOR_MINOR_BASE 0
#else
#define IOWARRIOR_MINOR_BASE 208 // SKELETON_MINOR_BASE 192 + 16, not official yet
#endif
/* interrupt input queue size */
#define MAX_INTERRUPT_BUFFER 16
/*
maximum number of urbs that are submitted for writes at the same time,
this applies to the IOWarrior56 only!
IOWarrior24 and IOWarrior40 use synchronous usb_control_msg calls.
*/
#define MAX_WRITES_IN_FLIGHT 4
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
static struct usb_driver iowarrior_driver;
/*--------------*/
/* data */
/*--------------*/
/* Structure to hold all of our device specific stuff */
struct iowarrior {
struct mutex mutex; /* locks this structure */
struct usb_device *udev; /* save off the usb device pointer */
struct usb_interface *interface; /* the interface for this device */
unsigned char minor; /* the starting minor number for this device */
struct usb_endpoint_descriptor *int_out_endpoint; /* endpoint for reading (needed for IOW56 only) */
struct usb_endpoint_descriptor *int_in_endpoint; /* endpoint for reading */
struct urb *int_in_urb; /* the urb for reading data */
unsigned char *int_in_buffer; /* buffer for data to be read */
unsigned char serial_number; /* to detect lost packets */
unsigned char *read_queue; /* size is MAX_INTERRUPT_BUFFER * packet size */
wait_queue_head_t read_wait;
wait_queue_head_t write_wait; /* wait-queue for writing to the device */
atomic_t write_busy; /* number of write-urbs submitted */
atomic_t read_idx;
atomic_t intr_idx;
atomic_t overflow_flag; /* signals an index 'rollover' */
int present; /* this is 1 as long as the device is connected */
int opened; /* this is 1 if the device is currently open */
char chip_serial[9]; /* the serial number string of the chip connected */
int report_size; /* number of bytes in a report */
u16 product_id;
struct usb_anchor submitted;
};
/*--------------*/
/* globals */
/*--------------*/
#define USB_REQ_GET_REPORT 0x01
static int usb_get_report(struct usb_device *dev,
struct usb_host_interface *inter, unsigned char type,
unsigned char id, void *buf, int size)
{
return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
USB_REQ_GET_REPORT,
USB_DIR_IN | USB_TYPE_CLASS |
USB_RECIP_INTERFACE, (type << 8) + id,
inter->desc.bInterfaceNumber, buf, size,
USB_CTRL_GET_TIMEOUT);
}
#define USB_REQ_SET_REPORT 0x09
static int usb_set_report(struct usb_interface *intf, unsigned char type,
unsigned char id, void *buf, int size)
{
return usb_control_msg(interface_to_usbdev(intf),
usb_sndctrlpipe(interface_to_usbdev(intf), 0),
USB_REQ_SET_REPORT,
USB_TYPE_CLASS | USB_RECIP_INTERFACE,
(type << 8) + id,
intf->cur_altsetting->desc.bInterfaceNumber, buf,
size, 1000);
}
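/*
 * Both helpers follow the HID class convention of packing the report type
 * into the high byte of wValue and the report ID into the low byte
 * (1 == input, 2 == output, 3 == feature). A minimal usage sketch, kept
 * disabled, assuming an already-probed struct iowarrior *dev and a
 * one-byte report:
 */
#if 0
u8 report = 0xff;
/* write one output report (type 2, id 0) */
int err = usb_set_report(dev->interface, 2, 0, &report, sizeof(report));
#endif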
/*---------------------*/
/* driver registration */
/*---------------------*/
/* table of devices that work with this driver */
static const struct usb_device_id iowarrior_ids[] = {
{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW40)},
{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW24)},
{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV1)},
{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV2)},
{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW56)},
{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW24SAG)},
{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW56AM)},
{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28)},
{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28L)},
{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW100)},
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, iowarrior_ids);
/*
* USB callback handler for reading data
*/
static void iowarrior_callback(struct urb *urb)
{
struct iowarrior *dev = urb->context;
int intr_idx;
int read_idx;
int aux_idx;
int offset;
int status = urb->status;
int retval;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
return;
default:
goto exit;
}
intr_idx = atomic_read(&dev->intr_idx);
/* aux_idx becomes the previous intr_idx */
aux_idx = (intr_idx == 0) ? (MAX_INTERRUPT_BUFFER - 1) : (intr_idx - 1);
read_idx = atomic_read(&dev->read_idx);
/* queue is not empty and it's interface 0 */
if ((intr_idx != read_idx)
&& (dev->interface->cur_altsetting->desc.bInterfaceNumber == 0)) {
/* + 1 for serial number */
offset = aux_idx * (dev->report_size + 1);
if (!memcmp
(dev->read_queue + offset, urb->transfer_buffer,
dev->report_size)) {
/* equal values on interface 0 will be ignored */
goto exit;
}
}
/* aux_idx becomes the next intr_idx */
aux_idx = (intr_idx == (MAX_INTERRUPT_BUFFER - 1)) ? 0 : (intr_idx + 1);
if (read_idx == aux_idx) {
/* queue full, dropping oldest input */
read_idx = (read_idx + 1 == MAX_INTERRUPT_BUFFER) ? 0 : read_idx + 1;
atomic_set(&dev->read_idx, read_idx);
atomic_set(&dev->overflow_flag, 1);
}
/* +1 for serial number */
offset = intr_idx * (dev->report_size + 1);
memcpy(dev->read_queue + offset, urb->transfer_buffer,
dev->report_size);
*(dev->read_queue + offset + (dev->report_size)) = dev->serial_number++;
atomic_set(&dev->intr_idx, aux_idx);
/* tell the blocking read about the new data */
wake_up_interruptible(&dev->read_wait);
exit:
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
dev_err(&dev->interface->dev, "%s - usb_submit_urb failed with result %d\n",
__func__, retval);
}
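/*
 * Illustration of the ring buffer used above: the queue holds
 * MAX_INTERRUPT_BUFFER slots of (report_size + 1) bytes; it is empty when
 * read_idx == intr_idx and full when advancing intr_idx would land on
 * read_idx, in which case the oldest slot is dropped. The wrap-around
 * could equivalently be written as the following disabled sketch:
 */
#if 0
static inline int iow_next_idx(int idx)
{
return (idx + 1) % MAX_INTERRUPT_BUFFER; /* e.g. 15 -> 0 with 16 slots */
}
#endif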
/*
* USB Callback handler for write-ops
*/
static void iowarrior_write_callback(struct urb *urb)
{
struct iowarrior *dev;
int status = urb->status;
dev = urb->context;
/* sync/async unlink faults aren't errors */
if (status &&
!(status == -ENOENT ||
status == -ECONNRESET || status == -ESHUTDOWN)) {
dev_dbg(&dev->interface->dev,
"nonzero write bulk status received: %d\n", status);
}
/* free up our allocated buffer */
usb_free_coherent(urb->dev, urb->transfer_buffer_length,
urb->transfer_buffer, urb->transfer_dma);
/* tell a waiting writer the interrupt-out-pipe is available again */
atomic_dec(&dev->write_busy);
wake_up_interruptible(&dev->write_wait);
}
/*
* iowarrior_delete
*/
static inline void iowarrior_delete(struct iowarrior *dev)
{
dev_dbg(&dev->interface->dev, "minor %d\n", dev->minor);
kfree(dev->int_in_buffer);
usb_free_urb(dev->int_in_urb);
kfree(dev->read_queue);
usb_put_intf(dev->interface);
kfree(dev);
}
/*---------------------*/
/* fops implementation */
/*---------------------*/
static int read_index(struct iowarrior *dev)
{
int intr_idx, read_idx;
read_idx = atomic_read(&dev->read_idx);
intr_idx = atomic_read(&dev->intr_idx);
return (read_idx == intr_idx ? -1 : read_idx);
}
/*
* iowarrior_read
*/
static ssize_t iowarrior_read(struct file *file, char __user *buffer,
size_t count, loff_t *ppos)
{
struct iowarrior *dev;
int read_idx;
int offset;
dev = file->private_data;
/* verify that the device wasn't unplugged */
if (!dev || !dev->present)
return -ENODEV;
dev_dbg(&dev->interface->dev, "minor %d, count = %zd\n",
dev->minor, count);
/* read count must be packet size (+ serial number byte) */
if ((count != dev->report_size)
&& (count != (dev->report_size + 1)))
return -EINVAL;
/* repeat until no buffer overrun in the callback handler occurs */
do {
atomic_set(&dev->overflow_flag, 0);
if ((read_idx = read_index(dev)) == -1) {
/* queue empty */
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
else {
/* returns when there is either new data or the device is unplugged */
int r = wait_event_interruptible(dev->read_wait,
(!dev->present ||
(read_idx = read_index(dev)) != -1));
if (r) {
//we were interrupted by a signal
return -ERESTART;
}
if (!dev->present) {
//The device was unplugged
return -ENODEV;
}
if (read_idx == -1) {
// Can this happen ???
return 0;
}
}
}
offset = read_idx * (dev->report_size + 1);
if (copy_to_user(buffer, dev->read_queue + offset, count)) {
return -EFAULT;
}
} while (atomic_read(&dev->overflow_flag));
read_idx = (read_idx + 1 == MAX_INTERRUPT_BUFFER) ? 0 : read_idx + 1;
atomic_set(&dev->read_idx, read_idx);
return count;
}
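/*
 * Note: the do/while above restarts the copy whenever the interrupt
 * callback flagged an overflow while the copy was in progress, so
 * userspace never sees a slot that was being overwritten; read_idx is
 * only advanced after a clean pass.
 */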
/*
* iowarrior_write
*/
static ssize_t iowarrior_write(struct file *file,
const char __user *user_buffer,
size_t count, loff_t *ppos)
{
struct iowarrior *dev;
int retval = 0;
char *buf = NULL; /* for IOW24 and IOW56 we need a buffer */
struct urb *int_out_urb = NULL;
dev = file->private_data;
mutex_lock(&dev->mutex);
/* verify that the device wasn't unplugged */
if (!dev->present) {
retval = -ENODEV;
goto exit;
}
dev_dbg(&dev->interface->dev, "minor %d, count = %zd\n",
dev->minor, count);
/* if count is 0 we're already done */
if (count == 0) {
retval = 0;
goto exit;
}
/* We only accept full reports */
if (count != dev->report_size) {
retval = -EINVAL;
goto exit;
}
switch (dev->product_id) {
case USB_DEVICE_ID_CODEMERCS_IOW24:
case USB_DEVICE_ID_CODEMERCS_IOW24SAG:
case USB_DEVICE_ID_CODEMERCS_IOWPV1:
case USB_DEVICE_ID_CODEMERCS_IOWPV2:
case USB_DEVICE_ID_CODEMERCS_IOW40:
/* IOW24 and IOW40 use a synchronous call */
buf = memdup_user(user_buffer, count);
if (IS_ERR(buf)) {
retval = PTR_ERR(buf);
goto exit;
}
retval = usb_set_report(dev->interface, 2, 0, buf, count);
kfree(buf);
goto exit;
case USB_DEVICE_ID_CODEMERCS_IOW56:
case USB_DEVICE_ID_CODEMERCS_IOW56AM:
case USB_DEVICE_ID_CODEMERCS_IOW28:
case USB_DEVICE_ID_CODEMERCS_IOW28L:
case USB_DEVICE_ID_CODEMERCS_IOW100:
/* The IOW56 uses asynchronous IO and more urbs */
if (atomic_read(&dev->write_busy) == MAX_WRITES_IN_FLIGHT) {
/* Wait until we are below the limit for submitted urbs */
if (file->f_flags & O_NONBLOCK) {
retval = -EAGAIN;
goto exit;
} else {
retval = wait_event_interruptible(dev->write_wait,
(!dev->present || (atomic_read(&dev->write_busy) < MAX_WRITES_IN_FLIGHT)));
if (retval) {
/* we were interrupted by a signal */
retval = -ERESTART;
goto exit;
}
if (!dev->present) {
/* The device was unplugged */
retval = -ENODEV;
goto exit;
}
if (!dev->opened) {
/* We were closed while waiting for an URB */
retval = -ENODEV;
goto exit;
}
}
}
atomic_inc(&dev->write_busy);
int_out_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!int_out_urb) {
retval = -ENOMEM;
goto error_no_urb;
}
buf = usb_alloc_coherent(dev->udev, dev->report_size,
GFP_KERNEL, &int_out_urb->transfer_dma);
if (!buf) {
retval = -ENOMEM;
dev_dbg(&dev->interface->dev,
"Unable to allocate buffer\n");
goto error_no_buffer;
}
usb_fill_int_urb(int_out_urb, dev->udev,
usb_sndintpipe(dev->udev,
dev->int_out_endpoint->bEndpointAddress),
buf, dev->report_size,
iowarrior_write_callback, dev,
dev->int_out_endpoint->bInterval);
int_out_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
if (copy_from_user(buf, user_buffer, count)) {
retval = -EFAULT;
goto error;
}
usb_anchor_urb(int_out_urb, &dev->submitted);
retval = usb_submit_urb(int_out_urb, GFP_KERNEL);
if (retval) {
dev_dbg(&dev->interface->dev,
"submit error %d for urb nr.%d\n",
retval, atomic_read(&dev->write_busy));
usb_unanchor_urb(int_out_urb);
goto error;
}
/* submit was ok */
retval = count;
usb_free_urb(int_out_urb);
goto exit;
default:
/* what do we have here? An unsupported product ID? */
dev_err(&dev->interface->dev, "%s - not supported for product=0x%x\n",
__func__, dev->product_id);
retval = -EFAULT;
goto exit;
}
error:
usb_free_coherent(dev->udev, dev->report_size, buf,
int_out_urb->transfer_dma);
error_no_buffer:
usb_free_urb(int_out_urb);
error_no_urb:
atomic_dec(&dev->write_busy);
wake_up_interruptible(&dev->write_wait);
exit:
mutex_unlock(&dev->mutex);
return retval;
}
/*
* iowarrior_ioctl
*/
static long iowarrior_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct iowarrior *dev = NULL;
__u8 *buffer;
__u8 __user *user_buffer;
int retval;
int io_res; /* checks for bytes read/written and copy_to/from_user results */
dev = file->private_data;
if (!dev)
return -ENODEV;
buffer = kzalloc(dev->report_size, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
mutex_lock(&dev->mutex);
/* verify that the device wasn't unplugged */
if (!dev->present) {
retval = -ENODEV;
goto error_out;
}
dev_dbg(&dev->interface->dev, "minor %d, cmd 0x%.4x, arg %ld\n",
dev->minor, cmd, arg);
retval = 0;
io_res = 0;
switch (cmd) {
case IOW_WRITE:
if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW24 ||
dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW24SAG ||
dev->product_id == USB_DEVICE_ID_CODEMERCS_IOWPV1 ||
dev->product_id == USB_DEVICE_ID_CODEMERCS_IOWPV2 ||
dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW40) {
user_buffer = (__u8 __user *)arg;
io_res = copy_from_user(buffer, user_buffer,
dev->report_size);
if (io_res) {
retval = -EFAULT;
} else {
io_res = usb_set_report(dev->interface, 2, 0,
buffer,
dev->report_size);
if (io_res < 0)
retval = io_res;
}
} else {
retval = -EINVAL;
dev_err(&dev->interface->dev,
"ioctl 'IOW_WRITE' is not supported for product=0x%x.\n",
dev->product_id);
}
break;
case IOW_READ:
user_buffer = (__u8 __user *)arg;
io_res = usb_get_report(dev->udev,
dev->interface->cur_altsetting, 1, 0,
buffer, dev->report_size);
if (io_res < 0)
retval = io_res;
else {
io_res = copy_to_user(user_buffer, buffer, dev->report_size);
if (io_res)
retval = -EFAULT;
}
break;
case IOW_GETINFO:
{
/* Report available information for the device */
struct iowarrior_info info;
/* needed for power consumption */
struct usb_config_descriptor *cfg_descriptor = &dev->udev->actconfig->desc;
memset(&info, 0, sizeof(info));
/* directly from the descriptor */
info.vendor = le16_to_cpu(dev->udev->descriptor.idVendor);
info.product = dev->product_id;
info.revision = le16_to_cpu(dev->udev->descriptor.bcdDevice);
/* 0 == UNKNOWN, 1 == LOW (usb 1.1), 2 == FULL (usb 1.1), 3 == HIGH (usb 2.0) */
info.speed = dev->udev->speed;
info.if_num = dev->interface->cur_altsetting->desc.bInterfaceNumber;
info.report_size = dev->report_size;
/* serial number string was read earlier: 8 chars or an empty string */
memcpy(info.serial, dev->chip_serial,
sizeof(dev->chip_serial));
if (cfg_descriptor == NULL) {
info.power = -1; /* no information available */
} else {
/* the MaxPower is stored in units of 2mA to make it fit into a byte-value */
info.power = cfg_descriptor->bMaxPower * 2;
}
io_res = copy_to_user((struct iowarrior_info __user *)arg, &info,
sizeof(struct iowarrior_info));
if (io_res)
retval = -EFAULT;
break;
}
default:
/* return that we did not understand this ioctl call */
retval = -ENOTTY;
break;
}
error_out:
/* unlock the device */
mutex_unlock(&dev->mutex);
kfree(buffer);
return retval;
}
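/*
 * A hedged userspace sketch (disabled; assumes the IOW_* ioctl numbers
 * from <linux/usb/iowarrior.h>, the /dev/usb/iowarrior%d node created by
 * iowarrior_devnode() below, and the usual unistd.h/stdio.h includes):
 */
#if 0
struct iowarrior_info info;
int fd = open("/dev/usb/iowarrior0", O_RDWR);
if (fd >= 0 && ioctl(fd, IOW_GETINFO, &info) == 0)
printf("product=0x%x serial=%s\n", info.product, info.serial);
#endif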
/*
* iowarrior_open
*/
static int iowarrior_open(struct inode *inode, struct file *file)
{
struct iowarrior *dev = NULL;
struct usb_interface *interface;
int subminor;
int retval = 0;
subminor = iminor(inode);
interface = usb_find_interface(&iowarrior_driver, subminor);
if (!interface) {
pr_err("%s - error, can't find device for minor %d\n",
__func__, subminor);
return -ENODEV;
}
dev = usb_get_intfdata(interface);
if (!dev)
return -ENODEV;
mutex_lock(&dev->mutex);
/* Only one process can open each device, no sharing. */
if (dev->opened) {
retval = -EBUSY;
goto out;
}
/* setup interrupt handler for receiving values */
if ((retval = usb_submit_urb(dev->int_in_urb, GFP_KERNEL)) < 0) {
dev_err(&interface->dev, "Error %d while submitting URB\n", retval);
retval = -EFAULT;
goto out;
}
/* increment our usage count for the driver */
++dev->opened;
/* save our object in the file's private structure */
file->private_data = dev;
retval = 0;
out:
mutex_unlock(&dev->mutex);
return retval;
}
/*
* iowarrior_release
*/
static int iowarrior_release(struct inode *inode, struct file *file)
{
struct iowarrior *dev;
int retval = 0;
dev = file->private_data;
if (!dev)
return -ENODEV;
dev_dbg(&dev->interface->dev, "minor %d\n", dev->minor);
/* lock our device */
mutex_lock(&dev->mutex);
if (dev->opened <= 0) {
retval = -ENODEV; /* close called more than once */
mutex_unlock(&dev->mutex);
} else {
dev->opened = 0; /* we're closing now */
retval = 0;
if (dev->present) {
/*
* The device is still connected, so we only shut down
* pending read/write operations.
*/
usb_kill_urb(dev->int_in_urb);
wake_up_interruptible(&dev->read_wait);
wake_up_interruptible(&dev->write_wait);
mutex_unlock(&dev->mutex);
} else {
/* The device was unplugged, cleanup resources */
mutex_unlock(&dev->mutex);
iowarrior_delete(dev);
}
}
return retval;
}
static __poll_t iowarrior_poll(struct file *file, poll_table * wait)
{
struct iowarrior *dev = file->private_data;
__poll_t mask = 0;
if (!dev->present)
return EPOLLERR | EPOLLHUP;
poll_wait(file, &dev->read_wait, wait);
poll_wait(file, &dev->write_wait, wait);
if (!dev->present)
return EPOLLERR | EPOLLHUP;
if (read_index(dev) != -1)
mask |= EPOLLIN | EPOLLRDNORM;
if (atomic_read(&dev->write_busy) < MAX_WRITES_IN_FLIGHT)
mask |= EPOLLOUT | EPOLLWRNORM;
return mask;
}
/*
* File operations needed when we register this driver.
* This assumes that this driver NEEDS file operations,
* of course, which means that the driver is expected
* to have a node in the /dev directory. If the USB
* device were for a network interface then the driver
* would use "struct net_driver" instead, and a serial
* device would use "struct tty_driver".
*/
static const struct file_operations iowarrior_fops = {
.owner = THIS_MODULE,
.write = iowarrior_write,
.read = iowarrior_read,
.unlocked_ioctl = iowarrior_ioctl,
.open = iowarrior_open,
.release = iowarrior_release,
.poll = iowarrior_poll,
.llseek = noop_llseek,
};
static char *iowarrior_devnode(const struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
}
/*
* usb class driver info in order to get a minor number from the usb core,
* and to have the device registered with devfs and the driver core
*/
static struct usb_class_driver iowarrior_class = {
.name = "iowarrior%d",
.devnode = iowarrior_devnode,
.fops = &iowarrior_fops,
.minor_base = IOWARRIOR_MINOR_BASE,
};
/*---------------------------------*/
/* probe and disconnect functions */
/*---------------------------------*/
/*
* iowarrior_probe
*
* Called by the usb core when a new device is connected that it thinks
* this driver might be interested in.
*/
static int iowarrior_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(interface);
struct iowarrior *dev = NULL;
struct usb_host_interface *iface_desc;
int retval = -ENOMEM;
int res;
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(struct iowarrior), GFP_KERNEL);
if (!dev)
return retval;
mutex_init(&dev->mutex);
atomic_set(&dev->intr_idx, 0);
atomic_set(&dev->read_idx, 0);
atomic_set(&dev->overflow_flag, 0);
init_waitqueue_head(&dev->read_wait);
atomic_set(&dev->write_busy, 0);
init_waitqueue_head(&dev->write_wait);
dev->udev = udev;
dev->interface = usb_get_intf(interface);
iface_desc = interface->cur_altsetting;
dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
init_usb_anchor(&dev->submitted);
res = usb_find_last_int_in_endpoint(iface_desc, &dev->int_in_endpoint);
if (res) {
dev_err(&interface->dev, "no interrupt-in endpoint found\n");
retval = res;
goto error;
}
if ((dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) ||
(dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56AM) ||
(dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28) ||
(dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28L) ||
(dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW100)) {
res = usb_find_last_int_out_endpoint(iface_desc,
&dev->int_out_endpoint);
if (res) {
dev_err(&interface->dev, "no interrupt-out endpoint found\n");
retval = res;
goto error;
}
}
/* we have to check the report_size often, so remember it in the endianness suitable for our machine */
dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint);
/*
* Some devices need the report size to be different than the
* endpoint size.
*/
if (dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) {
switch (dev->product_id) {
case USB_DEVICE_ID_CODEMERCS_IOW56:
case USB_DEVICE_ID_CODEMERCS_IOW56AM:
dev->report_size = 7;
break;
case USB_DEVICE_ID_CODEMERCS_IOW28:
case USB_DEVICE_ID_CODEMERCS_IOW28L:
dev->report_size = 4;
break;
case USB_DEVICE_ID_CODEMERCS_IOW100:
dev->report_size = 12;
break;
}
}
/* create the urb and buffer for reading */
dev->int_in_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->int_in_urb)
goto error;
dev->int_in_buffer = kmalloc(dev->report_size, GFP_KERNEL);
if (!dev->int_in_buffer)
goto error;
usb_fill_int_urb(dev->int_in_urb, dev->udev,
usb_rcvintpipe(dev->udev,
dev->int_in_endpoint->bEndpointAddress),
dev->int_in_buffer, dev->report_size,
iowarrior_callback, dev,
dev->int_in_endpoint->bInterval);
/* create an internal buffer for interrupt data from the device */
dev->read_queue =
kmalloc_array(dev->report_size + 1, MAX_INTERRUPT_BUFFER,
GFP_KERNEL);
if (!dev->read_queue)
goto error;
/* Get the serial-number of the chip */
memset(dev->chip_serial, 0x00, sizeof(dev->chip_serial));
usb_string(udev, udev->descriptor.iSerialNumber, dev->chip_serial,
sizeof(dev->chip_serial));
if (strlen(dev->chip_serial) != 8)
memset(dev->chip_serial, 0x00, sizeof(dev->chip_serial));
/* Set the idle timeout to 0, if this is interface 0 */
if (dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) {
usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x0A,
USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0,
0, NULL, 0, USB_CTRL_SET_TIMEOUT);
}
/* allow device read and ioctl */
dev->present = 1;
/* we can register the device now, as it is ready */
usb_set_intfdata(interface, dev);
retval = usb_register_dev(interface, &iowarrior_class);
if (retval) {
/* something prevented us from registering this driver */
dev_err(&interface->dev, "Not able to get a minor for this device.\n");
goto error;
}
dev->minor = interface->minor;
/* let the user know what node this device is now attached to */
dev_info(&interface->dev, "IOWarrior product=0x%x, serial=%s interface=%d "
"now attached to iowarrior%d\n", dev->product_id, dev->chip_serial,
iface_desc->desc.bInterfaceNumber, dev->minor - IOWARRIOR_MINOR_BASE);
return retval;
error:
iowarrior_delete(dev);
return retval;
}
/*
* iowarrior_disconnect
*
* Called by the usb core when the device is removed from the system.
*/
static void iowarrior_disconnect(struct usb_interface *interface)
{
struct iowarrior *dev = usb_get_intfdata(interface);
int minor = dev->minor;
usb_deregister_dev(interface, &iowarrior_class);
mutex_lock(&dev->mutex);
/* prevent device read, write and ioctl */
dev->present = 0;
if (dev->opened) {
/*
* A process still holds a file descriptor to the device,
* so we only shut down the read/write operations in flight.
* Deleting the device is postponed until close() is called.
*/
usb_kill_urb(dev->int_in_urb);
usb_kill_anchored_urbs(&dev->submitted);
wake_up_interruptible(&dev->read_wait);
wake_up_interruptible(&dev->write_wait);
mutex_unlock(&dev->mutex);
} else {
/* no process is using the device, cleanup now */
mutex_unlock(&dev->mutex);
iowarrior_delete(dev);
}
dev_info(&interface->dev, "I/O-Warror #%d now disconnected\n",
minor - IOWARRIOR_MINOR_BASE);
}
/* usb specific object needed to register this driver with the usb subsystem */
static struct usb_driver iowarrior_driver = {
.name = "iowarrior",
.probe = iowarrior_probe,
.disconnect = iowarrior_disconnect,
.id_table = iowarrior_ids,
};
module_usb_driver(iowarrior_driver);
| linux-master | drivers/usb/misc/iowarrior.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for SMSC USB3503 USB 2.0 hub controller driver
*
* Copyright (c) 2012-2013 Dongjin Kim ([email protected])
*/
#include <linux/clk.h>
#include <linux/i2c.h>
#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/platform_data/usb3503.h>
#include <linux/regmap.h>
#define USB3503_VIDL 0x00
#define USB3503_VIDM 0x01
#define USB3503_PIDL 0x02
#define USB3503_PIDM 0x03
#define USB3503_DIDL 0x04
#define USB3503_DIDM 0x05
#define USB3503_CFG1 0x06
#define USB3503_SELF_BUS_PWR (1 << 7)
#define USB3503_CFG2 0x07
#define USB3503_CFG3 0x08
#define USB3503_NRD 0x09
#define USB3503_PDS 0x0a
#define USB3503_SP_ILOCK 0xe7
#define USB3503_SPILOCK_CONNECT (1 << 1)
#define USB3503_SPILOCK_CONFIG (1 << 0)
#define USB3503_CFGP 0xee
#define USB3503_CLKSUSP (1 << 7)
#define USB3503_RESET 0xff
struct usb3503 {
enum usb3503_mode mode;
struct regmap *regmap;
struct device *dev;
struct clk *clk;
u8 port_off_mask;
struct gpio_desc *bypass;
struct gpio_desc *intn;
struct gpio_desc *reset;
struct gpio_desc *connect;
bool secondary_ref_clk;
};
static int usb3503_connect(struct usb3503 *hub)
{
struct device *dev = hub->dev;
int err;
if (hub->regmap) {
/* SP_ILOCK: set connect_n, config_n for config */
err = regmap_write(hub->regmap, USB3503_SP_ILOCK,
(USB3503_SPILOCK_CONNECT
| USB3503_SPILOCK_CONFIG));
if (err < 0) {
dev_err(dev, "SP_ILOCK failed (%d)\n", err);
return err;
}
/* PDS : Set the ports which are disabled in self-powered mode. */
if (hub->port_off_mask) {
err = regmap_update_bits(hub->regmap, USB3503_PDS,
hub->port_off_mask,
hub->port_off_mask);
if (err < 0) {
dev_err(dev, "PDS failed (%d)\n", err);
return err;
}
}
/* CFG1 : Set SELF_BUS_PWR, this enables self-powered operation. */
err = regmap_update_bits(hub->regmap, USB3503_CFG1,
USB3503_SELF_BUS_PWR,
USB3503_SELF_BUS_PWR);
if (err < 0) {
dev_err(dev, "CFG1 failed (%d)\n", err);
return err;
}
/* SP_LOCK: clear connect_n, config_n for hub connect */
err = regmap_update_bits(hub->regmap, USB3503_SP_ILOCK,
(USB3503_SPILOCK_CONNECT
| USB3503_SPILOCK_CONFIG), 0);
if (err < 0) {
dev_err(dev, "SP_ILOCK failed (%d)\n", err);
return err;
}
}
if (hub->connect)
gpiod_set_value_cansleep(hub->connect, 1);
hub->mode = USB3503_MODE_HUB;
dev_info(dev, "switched to HUB mode\n");
return 0;
}
static int usb3503_switch_mode(struct usb3503 *hub, enum usb3503_mode mode)
{
struct device *dev = hub->dev;
int rst, bypass, conn;
switch (mode) {
case USB3503_MODE_HUB:
conn = 1;
rst = 0;
bypass = 0;
break;
case USB3503_MODE_STANDBY:
conn = 0;
rst = 1;
bypass = 1;
dev_info(dev, "switched to STANDBY mode\n");
break;
case USB3503_MODE_BYPASS:
conn = 0;
rst = 0;
bypass = 1;
break;
default:
dev_err(dev, "unknown mode is requested\n");
return -EINVAL;
}
if (!conn && hub->connect)
gpiod_set_value_cansleep(hub->connect, 0);
if (hub->reset)
gpiod_set_value_cansleep(hub->reset, rst);
if (hub->bypass)
gpiod_set_value_cansleep(hub->bypass, bypass);
if (conn) {
/* Wait T_HUBINIT == 4ms for hub logic to stabilize */
usleep_range(4000, 10000);
return usb3503_connect(hub);
}
return 0;
}
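/*
 * Summary of the pin states driven above (derived from the switch
 * statement itself, not from the datasheet):
 *
 * mode     connect  reset  bypass
 * HUB      1        0      0
 * STANDBY  0        1      1
 * BYPASS   0        0      1
 */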
static const struct regmap_config usb3503_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = USB3503_RESET,
};
static int usb3503_probe(struct usb3503 *hub)
{
struct device *dev = hub->dev;
struct usb3503_platform_data *pdata = dev_get_platdata(dev);
struct device_node *np = dev->of_node;
int err;
bool is_clk_enabled = false;
u32 mode = USB3503_MODE_HUB;
const u32 *property;
enum gpiod_flags flags;
int len;
if (pdata) {
hub->port_off_mask = pdata->port_off_mask;
hub->mode = pdata->initial_mode;
} else if (np) {
u32 rate = 0;
hub->port_off_mask = 0;
if (!of_property_read_u32(np, "refclk-frequency", &rate)) {
switch (rate) {
case 38400000:
case 26000000:
case 19200000:
case 12000000:
hub->secondary_ref_clk = 0;
break;
case 24000000:
case 27000000:
case 25000000:
case 50000000:
hub->secondary_ref_clk = 1;
break;
default:
dev_err(dev,
"unsupported reference clock rate (%d)\n",
(int) rate);
return -EINVAL;
}
}
hub->clk = devm_clk_get_optional(dev, "refclk");
if (IS_ERR(hub->clk)) {
dev_err(dev, "unable to request refclk (%ld)\n",
PTR_ERR(hub->clk));
return PTR_ERR(hub->clk);
}
if (rate != 0) {
err = clk_set_rate(hub->clk, rate);
if (err) {
dev_err(dev,
"unable to set reference clock rate to %d\n",
(int)rate);
return err;
}
}
err = clk_prepare_enable(hub->clk);
if (err) {
dev_err(dev, "unable to enable reference clock\n");
return err;
}
is_clk_enabled = true;
property = of_get_property(np, "disabled-ports", &len);
if (property && (len / sizeof(u32)) > 0) {
int i;
for (i = 0; i < len / sizeof(u32); i++) {
u32 port = be32_to_cpu(property[i]);
if ((1 <= port) && (port <= 3))
hub->port_off_mask |= (1 << port);
}
}
of_property_read_u32(np, "initial-mode", &mode);
hub->mode = mode;
}
if (hub->secondary_ref_clk)
flags = GPIOD_OUT_LOW;
else
flags = GPIOD_OUT_HIGH;
hub->intn = devm_gpiod_get_optional(dev, "intn", flags);
if (IS_ERR(hub->intn)) {
err = PTR_ERR(hub->intn);
goto err_clk;
}
if (hub->intn)
gpiod_set_consumer_name(hub->intn, "usb3503 intn");
hub->connect = devm_gpiod_get_optional(dev, "connect", GPIOD_OUT_LOW);
if (IS_ERR(hub->connect)) {
err = PTR_ERR(hub->connect);
goto err_clk;
}
if (hub->connect)
gpiod_set_consumer_name(hub->connect, "usb3503 connect");
hub->bypass = devm_gpiod_get_optional(dev, "bypass", GPIOD_OUT_HIGH);
if (IS_ERR(hub->bypass)) {
err = PTR_ERR(hub->bypass);
goto err_clk;
}
if (hub->bypass)
gpiod_set_consumer_name(hub->bypass, "usb3503 bypass");
hub->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(hub->reset)) {
err = PTR_ERR(hub->reset);
goto err_clk;
}
if (hub->reset) {
/* Datasheet defines a hardware reset to be at least 100us */
usleep_range(100, 10000);
gpiod_set_consumer_name(hub->reset, "usb3503 reset");
}
if (hub->port_off_mask && !hub->regmap)
dev_err(dev, "Ports disabled with no control interface\n");
usb3503_switch_mode(hub, hub->mode);
dev_info(dev, "%s: probed in %s mode\n", __func__,
(hub->mode == USB3503_MODE_HUB) ? "hub" : "standby");
return 0;
err_clk:
if (is_clk_enabled)
clk_disable_unprepare(hub->clk);
return err;
}
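/*
 * A hedged device-tree example matching the properties parsed above; the
 * node name, unit address and GPIO phandles are illustrative only, and
 * initial-mode = <1> assumes USB3503_MODE_HUB == 1:
 *
 * usb3503@8 {
 * compatible = "smsc,usb3503";
 * reg = <0x08>;
 * connect-gpios = <&gpio 1 GPIO_ACTIVE_HIGH>;
 * intn-gpios = <&gpio 2 GPIO_ACTIVE_HIGH>;
 * reset-gpios = <&gpio 3 GPIO_ACTIVE_HIGH>;
 * initial-mode = <1>;
 * disabled-ports = <2 3>;
 * refclk-frequency = <19200000>;
 * };
 */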
static int usb3503_i2c_probe(struct i2c_client *i2c)
{
struct usb3503 *hub;
int err;
hub = devm_kzalloc(&i2c->dev, sizeof(struct usb3503), GFP_KERNEL);
if (!hub)
return -ENOMEM;
i2c_set_clientdata(i2c, hub);
hub->regmap = devm_regmap_init_i2c(i2c, &usb3503_regmap_config);
if (IS_ERR(hub->regmap)) {
err = PTR_ERR(hub->regmap);
dev_err(&i2c->dev, "Failed to initialise regmap: %d\n", err);
return err;
}
hub->dev = &i2c->dev;
return usb3503_probe(hub);
}
static void usb3503_i2c_remove(struct i2c_client *i2c)
{
struct usb3503 *hub;
hub = i2c_get_clientdata(i2c);
clk_disable_unprepare(hub->clk);
}
static int usb3503_platform_probe(struct platform_device *pdev)
{
struct usb3503 *hub;
hub = devm_kzalloc(&pdev->dev, sizeof(struct usb3503), GFP_KERNEL);
if (!hub)
return -ENOMEM;
hub->dev = &pdev->dev;
platform_set_drvdata(pdev, hub);
return usb3503_probe(hub);
}
static void usb3503_platform_remove(struct platform_device *pdev)
{
struct usb3503 *hub;
hub = platform_get_drvdata(pdev);
clk_disable_unprepare(hub->clk);
}
static int __maybe_unused usb3503_suspend(struct usb3503 *hub)
{
usb3503_switch_mode(hub, USB3503_MODE_STANDBY);
clk_disable_unprepare(hub->clk);
return 0;
}
static int __maybe_unused usb3503_resume(struct usb3503 *hub)
{
clk_prepare_enable(hub->clk);
usb3503_switch_mode(hub, hub->mode);
return 0;
}
static int __maybe_unused usb3503_i2c_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
return usb3503_suspend(i2c_get_clientdata(client));
}
static int __maybe_unused usb3503_i2c_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
return usb3503_resume(i2c_get_clientdata(client));
}
static int __maybe_unused usb3503_platform_suspend(struct device *dev)
{
return usb3503_suspend(dev_get_drvdata(dev));
}
static int __maybe_unused usb3503_platform_resume(struct device *dev)
{
return usb3503_resume(dev_get_drvdata(dev));
}
static SIMPLE_DEV_PM_OPS(usb3503_i2c_pm_ops, usb3503_i2c_suspend,
usb3503_i2c_resume);
static SIMPLE_DEV_PM_OPS(usb3503_platform_pm_ops, usb3503_platform_suspend,
usb3503_platform_resume);
static const struct i2c_device_id usb3503_id[] = {
{ USB3503_I2C_NAME, 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, usb3503_id);
#ifdef CONFIG_OF
static const struct of_device_id usb3503_of_match[] = {
{ .compatible = "smsc,usb3503", },
{ .compatible = "smsc,usb3503a", },
{ .compatible = "smsc,usb3803", },
{},
};
MODULE_DEVICE_TABLE(of, usb3503_of_match);
#endif
static struct i2c_driver usb3503_i2c_driver = {
.driver = {
.name = USB3503_I2C_NAME,
.pm = pm_ptr(&usb3503_i2c_pm_ops),
.of_match_table = of_match_ptr(usb3503_of_match),
},
.probe = usb3503_i2c_probe,
.remove = usb3503_i2c_remove,
.id_table = usb3503_id,
};
static struct platform_driver usb3503_platform_driver = {
.driver = {
.name = USB3503_I2C_NAME,
.of_match_table = of_match_ptr(usb3503_of_match),
.pm = pm_ptr(&usb3503_platform_pm_ops),
},
.probe = usb3503_platform_probe,
.remove_new = usb3503_platform_remove,
};
static int __init usb3503_init(void)
{
int err;
err = i2c_add_driver(&usb3503_i2c_driver);
if (err) {
pr_err("usb3503: Failed to register I2C driver: %d\n", err);
return err;
}
err = platform_driver_register(&usb3503_platform_driver);
if (err) {
pr_err("usb3503: Failed to register platform driver: %d\n",
err);
i2c_del_driver(&usb3503_i2c_driver);
return err;
}
return 0;
}
module_init(usb3503_init);
static void __exit usb3503_exit(void)
{
platform_driver_unregister(&usb3503_platform_driver);
i2c_del_driver(&usb3503_i2c_driver);
}
module_exit(usb3503_exit);
MODULE_AUTHOR("Dongjin Kim <[email protected]>");
MODULE_DESCRIPTION("USB3503 USB HUB driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/misc/usb3503.c |
// SPDX-License-Identifier: GPL-2.0+
/*****************************************************************************/
/*
* uss720.c -- USS720 USB Parport Cable.
*
* Copyright (C) 1999, 2005, 2010
* Thomas Sailer ([email protected])
*
* Based on parport_pc.c
*
* History:
* 0.1 04.08.1999 Created
* 0.2 07.08.1999 Some fixes mainly suggested by Tim Waugh
* Interrupt handling currently disabled because
* usb_request_irq crashes somewhere within ohci.c
* for no apparent reason (that is for me, anyway)
* ECP currently untested
* 0.3 10.08.1999 fixing merge errors
* 0.4 13.08.1999 Added Vendor/Product ID of Brad Hard's cable
* 0.5 20.09.1999 usb_control_msg wrapper used
* Nov01.2000 usb_device_table support by Adam J. Richter
* 08.04.2001 Identify version on module load. gb
* 0.6 02.09.2005 Fix "scheduling in interrupt" problem by making save/restore
* context asynchronous
*
*/
/*****************************************************************************/
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/parport.h>
#include <linux/init.h>
#include <linux/usb.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#define DRIVER_AUTHOR "Thomas M. Sailer, [email protected]"
#define DRIVER_DESC "USB Parport Cable driver for Cables using the Lucent Technologies USS720 Chip"
/* --------------------------------------------------------------------- */
struct parport_uss720_private {
struct usb_device *usbdev;
struct parport *pp;
struct kref ref_count;
__u8 reg[7]; /* USB registers */
struct list_head asynclist;
spinlock_t asynclock;
};
struct uss720_async_request {
struct parport_uss720_private *priv;
struct kref ref_count;
struct list_head asynclist;
struct completion compl;
struct urb *urb;
struct usb_ctrlrequest *dr;
__u8 reg[7];
};
/* --------------------------------------------------------------------- */
static void destroy_priv(struct kref *kref)
{
struct parport_uss720_private *priv = container_of(kref, struct parport_uss720_private, ref_count);
dev_dbg(&priv->usbdev->dev, "destroying priv datastructure\n");
usb_put_dev(priv->usbdev);
priv->usbdev = NULL;
kfree(priv);
}
static void destroy_async(struct kref *kref)
{
struct uss720_async_request *rq = container_of(kref, struct uss720_async_request, ref_count);
struct parport_uss720_private *priv = rq->priv;
unsigned long flags;
if (likely(rq->urb))
usb_free_urb(rq->urb);
kfree(rq->dr);
spin_lock_irqsave(&priv->asynclock, flags);
list_del_init(&rq->asynclist);
spin_unlock_irqrestore(&priv->asynclock, flags);
kfree(rq);
kref_put(&priv->ref_count, destroy_priv);
}
/* --------------------------------------------------------------------- */
static void async_complete(struct urb *urb)
{
struct uss720_async_request *rq;
struct parport *pp;
struct parport_uss720_private *priv;
int status = urb->status;
rq = urb->context;
priv = rq->priv;
pp = priv->pp;
if (status) {
dev_err(&urb->dev->dev, "async_complete: urb error %d\n",
status);
} else if (rq->dr->bRequest == 3) {
memcpy(priv->reg, rq->reg, sizeof(priv->reg));
#if 0
dev_dbg(&priv->usbdev->dev, "async_complete regs %7ph\n",
priv->reg);
#endif
/* if nAck interrupts are enabled and we have an interrupt, call the interrupt procedure */
if (rq->reg[2] & rq->reg[1] & 0x10 && pp)
parport_generic_irq(pp);
}
complete(&rq->compl);
kref_put(&rq->ref_count, destroy_async);
}
static struct uss720_async_request *submit_async_request(struct parport_uss720_private *priv,
__u8 request, __u8 requesttype, __u16 value, __u16 index,
gfp_t mem_flags)
{
struct usb_device *usbdev;
struct uss720_async_request *rq;
unsigned long flags;
int ret;
if (!priv)
return NULL;
usbdev = priv->usbdev;
if (!usbdev)
return NULL;
rq = kzalloc(sizeof(struct uss720_async_request), mem_flags);
if (!rq)
return NULL;
kref_init(&rq->ref_count);
INIT_LIST_HEAD(&rq->asynclist);
init_completion(&rq->compl);
kref_get(&priv->ref_count);
rq->priv = priv;
rq->urb = usb_alloc_urb(0, mem_flags);
if (!rq->urb) {
kref_put(&rq->ref_count, destroy_async);
return NULL;
}
rq->dr = kmalloc(sizeof(*rq->dr), mem_flags);
if (!rq->dr) {
kref_put(&rq->ref_count, destroy_async);
return NULL;
}
rq->dr->bRequestType = requesttype;
rq->dr->bRequest = request;
rq->dr->wValue = cpu_to_le16(value);
rq->dr->wIndex = cpu_to_le16(index);
rq->dr->wLength = cpu_to_le16((request == 3) ? sizeof(rq->reg) : 0);
usb_fill_control_urb(rq->urb, usbdev, (requesttype & 0x80) ? usb_rcvctrlpipe(usbdev, 0) : usb_sndctrlpipe(usbdev, 0),
(unsigned char *)rq->dr,
(request == 3) ? rq->reg : NULL, (request == 3) ? sizeof(rq->reg) : 0, async_complete, rq);
/* rq->urb->transfer_flags |= URB_ASYNC_UNLINK; */
spin_lock_irqsave(&priv->asynclock, flags);
list_add_tail(&rq->asynclist, &priv->asynclist);
spin_unlock_irqrestore(&priv->asynclock, flags);
kref_get(&rq->ref_count);
ret = usb_submit_urb(rq->urb, mem_flags);
if (!ret)
return rq;
destroy_async(&rq->ref_count);
dev_err(&usbdev->dev, "submit_async_request submit_urb failed with %d\n", ret);
return NULL;
}
static unsigned int kill_all_async_requests_priv(struct parport_uss720_private *priv)
{
struct uss720_async_request *rq;
unsigned long flags;
unsigned int ret = 0;
spin_lock_irqsave(&priv->asynclock, flags);
list_for_each_entry(rq, &priv->asynclist, asynclist) {
usb_unlink_urb(rq->urb);
ret++;
}
spin_unlock_irqrestore(&priv->asynclock, flags);
return ret;
}
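/*
 * Note on the kref choreography above: submit_async_request() holds one
 * reference for the caller and takes a second for the URB completion;
 * async_complete() drops the completion's reference, and the caller drops
 * its own once it has consumed rq->reg (see get_1284_register() below),
 * at which point destroy_async() runs.
 */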
/* --------------------------------------------------------------------- */
static int get_1284_register(struct parport *pp, unsigned char reg, unsigned char *val, gfp_t mem_flags)
{
struct parport_uss720_private *priv;
struct uss720_async_request *rq;
static const unsigned char regindex[9] = {
4, 0, 1, 5, 5, 0, 2, 3, 6
};
int ret;
if (!pp)
return -EIO;
priv = pp->private_data;
rq = submit_async_request(priv, 3, 0xc0, ((unsigned int)reg) << 8, 0, mem_flags);
if (!rq) {
dev_err(&priv->usbdev->dev, "get_1284_register(%u) failed",
(unsigned int)reg);
return -EIO;
}
if (!val) {
kref_put(&rq->ref_count, destroy_async);
return 0;
}
if (wait_for_completion_timeout(&rq->compl, HZ)) {
ret = rq->urb->status;
*val = priv->reg[(reg >= 9) ? 0 : regindex[reg]];
if (ret)
printk(KERN_WARNING "get_1284_register: "
"usb error %d\n", ret);
kref_put(&rq->ref_count, destroy_async);
return ret;
}
printk(KERN_WARNING "get_1284_register timeout\n");
kill_all_async_requests_priv(priv);
return -EIO;
}
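/*
 * Note: request 3 returns the USS720's 7-byte status block, cached in
 * priv->reg[]; regindex[] maps an IEEE 1284 register number to its slot
 * in that cache, e.g. 1284 reg 1 (status) -> priv->reg[0], reg 2
 * (control) -> priv->reg[1], reg 6 (ECR) -> priv->reg[2], which is why
 * change_mode() reads the mode bits from priv->reg[2].
 */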
static int set_1284_register(struct parport *pp, unsigned char reg, unsigned char val, gfp_t mem_flags)
{
struct parport_uss720_private *priv;
struct uss720_async_request *rq;
if (!pp)
return -EIO;
priv = pp->private_data;
rq = submit_async_request(priv, 4, 0x40, (((unsigned int)reg) << 8) | val, 0, mem_flags);
if (!rq) {
dev_err(&priv->usbdev->dev, "set_1284_register(%u,%u) failed",
(unsigned int)reg, (unsigned int)val);
return -EIO;
}
kref_put(&rq->ref_count, destroy_async);
return 0;
}
/* --------------------------------------------------------------------- */
/* ECR modes */
#define ECR_SPP 00
#define ECR_PS2 01
#define ECR_PPF 02
#define ECR_ECP 03
#define ECR_EPP 04
/* Safely change the mode bits in the ECR */
static int change_mode(struct parport *pp, int m)
{
struct parport_uss720_private *priv = pp->private_data;
int mode;
__u8 reg;
if (get_1284_register(pp, 6, &reg, GFP_KERNEL))
return -EIO;
/* Bits <7:5> contain the mode. */
mode = (priv->reg[2] >> 5) & 0x7;
if (mode == m)
return 0;
/* We have to go through mode 000 or 001 */
if (mode > ECR_PS2 && m > ECR_PS2)
if (change_mode(pp, ECR_PS2))
return -EIO;
if (m <= ECR_PS2 && !(priv->reg[1] & 0x20)) {
/* This mode resets the FIFO, so we may
* have to wait for it to drain first. */
unsigned long expire = jiffies + pp->physport->cad->timeout;
switch (mode) {
case ECR_PPF: /* Parallel Port FIFO mode */
case ECR_ECP: /* ECP Parallel Port mode */
/* Poll slowly. */
for (;;) {
if (get_1284_register(pp, 6, &reg, GFP_KERNEL))
return -EIO;
if (priv->reg[2] & 0x01)
break;
if (time_after_eq (jiffies, expire))
/* The FIFO is stuck. */
return -EBUSY;
msleep_interruptible(10);
if (signal_pending (current))
break;
}
}
}
/* Set the mode. */
if (set_1284_register(pp, 6, m << 5, GFP_KERNEL))
return -EIO;
if (get_1284_register(pp, 6, &reg, GFP_KERNEL))
return -EIO;
return 0;
}
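/*
 * Illustration: the ECR mode field occupies bits <7:5>, so e.g. ECR_EPP
 * (4) is programmed as 4 << 5 == 0x80 and read back as in this disabled
 * sketch:
 */
#if 0
static inline int ecr_to_mode(u8 ecr)
{
return (ecr >> 5) & 0x7; /* e.g. 0x80 -> ECR_EPP */
}
#endif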
/*
* Clear TIMEOUT BIT in EPP MODE
*/
static int clear_epp_timeout(struct parport *pp)
{
unsigned char stat;
if (get_1284_register(pp, 1, &stat, GFP_KERNEL))
return 1;
return stat & 1;
}
/*
* Access functions.
*/
#if 0
static int uss720_irq(int usbstatus, void *buffer, int len, void *dev_id)
{
struct parport *pp = (struct parport *)dev_id;
struct parport_uss720_private *priv = pp->private_data;
if (usbstatus != 0 || len < 4 || !buffer)
return 1;
memcpy(priv->reg, buffer, 4);
/* if nAck interrupts are enabled and we have an interrupt, call the interrupt procedure */
if (priv->reg[2] & priv->reg[1] & 0x10)
parport_generic_irq(pp);
return 1;
}
#endif
static void parport_uss720_write_data(struct parport *pp, unsigned char d)
{
set_1284_register(pp, 0, d, GFP_KERNEL);
}
static unsigned char parport_uss720_read_data(struct parport *pp)
{
unsigned char ret;
if (get_1284_register(pp, 0, &ret, GFP_KERNEL))
return 0;
return ret;
}
static void parport_uss720_write_control(struct parport *pp, unsigned char d)
{
struct parport_uss720_private *priv = pp->private_data;
d = (d & 0xf) | (priv->reg[1] & 0xf0);
if (set_1284_register(pp, 2, d, GFP_KERNEL))
return;
priv->reg[1] = d;
}
static unsigned char parport_uss720_read_control(struct parport *pp)
{
struct parport_uss720_private *priv = pp->private_data;
return priv->reg[1] & 0xf; /* Use soft copy */
}
static unsigned char parport_uss720_frob_control(struct parport *pp, unsigned char mask, unsigned char val)
{
struct parport_uss720_private *priv = pp->private_data;
unsigned char d;
mask &= 0x0f;
val &= 0x0f;
d = (priv->reg[1] & (~mask)) ^ val;
if (set_1284_register(pp, 2, d, GFP_ATOMIC))
return 0;
priv->reg[1] = d;
return d & 0xf;
}
static unsigned char parport_uss720_read_status(struct parport *pp)
{
unsigned char ret;
if (get_1284_register(pp, 1, &ret, GFP_ATOMIC))
return 0;
return ret & 0xf8;
}
static void parport_uss720_disable_irq(struct parport *pp)
{
struct parport_uss720_private *priv = pp->private_data;
unsigned char d;
d = priv->reg[1] & ~0x10;
if (set_1284_register(pp, 2, d, GFP_KERNEL))
return;
priv->reg[1] = d;
}
static void parport_uss720_enable_irq(struct parport *pp)
{
struct parport_uss720_private *priv = pp->private_data;
unsigned char d;
d = priv->reg[1] | 0x10;
if (set_1284_register(pp, 2, d, GFP_KERNEL))
return;
priv->reg[1] = d;
}
static void parport_uss720_data_forward (struct parport *pp)
{
struct parport_uss720_private *priv = pp->private_data;
unsigned char d;
d = priv->reg[1] & ~0x20;
if (set_1284_register(pp, 2, d, GFP_KERNEL))
return;
priv->reg[1] = d;
}
static void parport_uss720_data_reverse (struct parport *pp)
{
struct parport_uss720_private *priv = pp->private_data;
unsigned char d;
d = priv->reg[1] | 0x20;
if (set_1284_register(pp, 2, d, GFP_KERNEL))
return;
priv->reg[1] = d;
}
static void parport_uss720_init_state(struct pardevice *dev, struct parport_state *s)
{
s->u.pc.ctr = 0xc | (dev->irq_func ? 0x10 : 0x0);
s->u.pc.ecr = 0x24;
}
static void parport_uss720_save_state(struct parport *pp, struct parport_state *s)
{
struct parport_uss720_private *priv = pp->private_data;
#if 0
if (get_1284_register(pp, 2, NULL, GFP_ATOMIC))
return;
#endif
s->u.pc.ctr = priv->reg[1];
s->u.pc.ecr = priv->reg[2];
}
static void parport_uss720_restore_state(struct parport *pp, struct parport_state *s)
{
struct parport_uss720_private *priv = pp->private_data;
set_1284_register(pp, 2, s->u.pc.ctr, GFP_ATOMIC);
set_1284_register(pp, 6, s->u.pc.ecr, GFP_ATOMIC);
get_1284_register(pp, 2, NULL, GFP_ATOMIC);
priv->reg[1] = s->u.pc.ctr;
priv->reg[2] = s->u.pc.ecr;
}
static size_t parport_uss720_epp_read_data(struct parport *pp, void *buf, size_t length, int flags)
{
struct parport_uss720_private *priv = pp->private_data;
size_t got = 0;
if (change_mode(pp, ECR_EPP))
return 0;
for (; got < length; got++) {
if (get_1284_register(pp, 4, (char *)buf, GFP_KERNEL))
break;
buf++;
if (priv->reg[0] & 0x01) {
clear_epp_timeout(pp);
break;
}
}
change_mode(pp, ECR_PS2);
return got;
}
static size_t parport_uss720_epp_write_data(struct parport *pp, const void *buf, size_t length, int flags)
{
#if 0
struct parport_uss720_private *priv = pp->private_data;
size_t written = 0;
if (change_mode(pp, ECR_EPP))
return 0;
for (; written < length; written++) {
if (set_1284_register(pp, 4, (char *)buf, GFP_KERNEL))
break;
((char*)buf)++;
if (get_1284_register(pp, 1, NULL, GFP_KERNEL))
break;
if (priv->reg[0] & 0x01) {
clear_epp_timeout(pp);
break;
}
}
change_mode(pp, ECR_PS2);
return written;
#else
struct parport_uss720_private *priv = pp->private_data;
struct usb_device *usbdev = priv->usbdev;
int rlen = 0;
int i;
if (!usbdev)
return 0;
if (change_mode(pp, ECR_EPP))
return 0;
i = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 1), (void *)buf, length, &rlen, 20000);
if (i)
printk(KERN_ERR "uss720: sendbulk ep 1 buf %p len %zu rlen %u\n", buf, length, rlen);
change_mode(pp, ECR_PS2);
return rlen;
#endif
}
static size_t parport_uss720_epp_read_addr(struct parport *pp, void *buf, size_t length, int flags)
{
struct parport_uss720_private *priv = pp->private_data;
size_t got = 0;
if (change_mode(pp, ECR_EPP))
return 0;
for (; got < length; got++) {
if (get_1284_register(pp, 3, (char *)buf, GFP_KERNEL))
break;
buf++;
if (priv->reg[0] & 0x01) {
clear_epp_timeout(pp);
break;
}
}
change_mode(pp, ECR_PS2);
return got;
}
static size_t parport_uss720_epp_write_addr(struct parport *pp, const void *buf, size_t length, int flags)
{
struct parport_uss720_private *priv = pp->private_data;
size_t written = 0;
if (change_mode(pp, ECR_EPP))
return 0;
for (; written < length; written++) {
if (set_1284_register(pp, 3, *(char *)buf, GFP_KERNEL))
break;
buf++;
if (get_1284_register(pp, 1, NULL, GFP_KERNEL))
break;
if (priv->reg[0] & 0x01) {
clear_epp_timeout(pp);
break;
}
}
change_mode(pp, ECR_PS2);
return written;
}
static size_t parport_uss720_ecp_write_data(struct parport *pp, const void *buffer, size_t len, int flags)
{
struct parport_uss720_private *priv = pp->private_data;
struct usb_device *usbdev = priv->usbdev;
int rlen = 0;
int i;
if (!usbdev)
return 0;
if (change_mode(pp, ECR_ECP))
return 0;
i = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 1), (void *)buffer, len, &rlen, 20000);
if (i)
printk(KERN_ERR "uss720: sendbulk ep 1 buf %p len %zu rlen %u\n", buffer, len, rlen);
change_mode(pp, ECR_PS2);
return rlen;
}
static size_t parport_uss720_ecp_read_data(struct parport *pp, void *buffer, size_t len, int flags)
{
struct parport_uss720_private *priv = pp->private_data;
struct usb_device *usbdev = priv->usbdev;
int rlen = 0;
int i;
if (!usbdev)
return 0;
if (change_mode(pp, ECR_ECP))
return 0;
i = usb_bulk_msg(usbdev, usb_rcvbulkpipe(usbdev, 2), buffer, len, &rlen, 20000);
if (i)
printk(KERN_ERR "uss720: recvbulk ep 2 buf %p len %zu rlen %u\n", buffer, len, rlen);
change_mode(pp, ECR_PS2);
return rlen;
}
static size_t parport_uss720_ecp_write_addr(struct parport *pp, const void *buffer, size_t len, int flags)
{
size_t written = 0;
if (change_mode(pp, ECR_ECP))
return 0;
for (; written < len; written++) {
if (set_1284_register(pp, 5, *(char *)buffer, GFP_KERNEL))
break;
buffer++;
}
change_mode(pp, ECR_PS2);
return written;
}
static size_t parport_uss720_write_compat(struct parport *pp, const void *buffer, size_t len, int flags)
{
struct parport_uss720_private *priv = pp->private_data;
struct usb_device *usbdev = priv->usbdev;
int rlen = 0;
int i;
if (!usbdev)
return 0;
if (change_mode(pp, ECR_PPF))
return 0;
i = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 1), (void *)buffer, len, &rlen, 20000);
if (i)
printk(KERN_ERR "uss720: sendbulk ep 1 buf %p len %zu rlen %u\n", buffer, len, rlen);
change_mode(pp, ECR_PS2);
return rlen;
}
/* --------------------------------------------------------------------- */
static struct parport_operations parport_uss720_ops =
{
.owner = THIS_MODULE,
.write_data = parport_uss720_write_data,
.read_data = parport_uss720_read_data,
.write_control = parport_uss720_write_control,
.read_control = parport_uss720_read_control,
.frob_control = parport_uss720_frob_control,
.read_status = parport_uss720_read_status,
.enable_irq = parport_uss720_enable_irq,
.disable_irq = parport_uss720_disable_irq,
.data_forward = parport_uss720_data_forward,
.data_reverse = parport_uss720_data_reverse,
.init_state = parport_uss720_init_state,
.save_state = parport_uss720_save_state,
.restore_state = parport_uss720_restore_state,
.epp_write_data = parport_uss720_epp_write_data,
.epp_read_data = parport_uss720_epp_read_data,
.epp_write_addr = parport_uss720_epp_write_addr,
.epp_read_addr = parport_uss720_epp_read_addr,
.ecp_write_data = parport_uss720_ecp_write_data,
.ecp_read_data = parport_uss720_ecp_read_data,
.ecp_write_addr = parport_uss720_ecp_write_addr,
.compat_write_data = parport_uss720_write_compat,
.nibble_read_data = parport_ieee1284_read_nibble,
.byte_read_data = parport_ieee1284_read_byte,
};
/* --------------------------------------------------------------------- */
static int uss720_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *usbdev = usb_get_dev(interface_to_usbdev(intf));
struct usb_host_interface *interface;
struct usb_endpoint_descriptor *epd;
struct parport_uss720_private *priv;
struct parport *pp;
unsigned char reg;
int i;
dev_dbg(&intf->dev, "probe: vendor id 0x%x, device id 0x%x\n",
le16_to_cpu(usbdev->descriptor.idVendor),
le16_to_cpu(usbdev->descriptor.idProduct));
/* our known interfaces have 3 alternate settings */
if (intf->num_altsetting != 3) {
usb_put_dev(usbdev);
return -ENODEV;
}
i = usb_set_interface(usbdev, intf->altsetting->desc.bInterfaceNumber, 2);
dev_dbg(&intf->dev, "set interface result %d\n", i);
interface = intf->cur_altsetting;
if (interface->desc.bNumEndpoints < 3) {
usb_put_dev(usbdev);
return -ENODEV;
}
/*
* Allocate parport interface
*/
priv = kzalloc(sizeof(struct parport_uss720_private), GFP_KERNEL);
if (!priv) {
usb_put_dev(usbdev);
return -ENOMEM;
}
priv->pp = NULL;
priv->usbdev = usbdev;
kref_init(&priv->ref_count);
spin_lock_init(&priv->asynclock);
INIT_LIST_HEAD(&priv->asynclist);
pp = parport_register_port(0, PARPORT_IRQ_NONE, PARPORT_DMA_NONE, &parport_uss720_ops);
if (!pp) {
printk(KERN_WARNING "uss720: could not register parport\n");
goto probe_abort;
}
priv->pp = pp;
pp->private_data = priv;
pp->modes = PARPORT_MODE_PCSPP | PARPORT_MODE_TRISTATE | PARPORT_MODE_EPP | PARPORT_MODE_ECP | PARPORT_MODE_COMPAT;
/* set the USS720 control register to manual mode, no ECP compression, enable all ints */
set_1284_register(pp, 7, 0x00, GFP_KERNEL);
set_1284_register(pp, 6, 0x30, GFP_KERNEL); /* PS/2 mode */
set_1284_register(pp, 2, 0x0c, GFP_KERNEL);
/* debugging */
get_1284_register(pp, 0, &reg, GFP_KERNEL);
dev_dbg(&intf->dev, "reg: %7ph\n", priv->reg);
i = usb_find_last_int_in_endpoint(interface, &epd);
if (!i) {
dev_dbg(&intf->dev, "epaddr %d interval %d\n",
epd->bEndpointAddress, epd->bInterval);
}
parport_announce_port(pp);
usb_set_intfdata(intf, pp);
return 0;
probe_abort:
kill_all_async_requests_priv(priv);
kref_put(&priv->ref_count, destroy_priv);
return -ENODEV;
}
static void uss720_disconnect(struct usb_interface *intf)
{
struct parport *pp = usb_get_intfdata(intf);
struct parport_uss720_private *priv;
dev_dbg(&intf->dev, "disconnect\n");
usb_set_intfdata(intf, NULL);
if (pp) {
priv = pp->private_data;
priv->pp = NULL;
dev_dbg(&intf->dev, "parport_remove_port\n");
parport_remove_port(pp);
parport_put_port(pp);
kill_all_async_requests_priv(priv);
kref_put(&priv->ref_count, destroy_priv);
}
dev_dbg(&intf->dev, "disconnect done\n");
}
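/*
* Lifetime note: the private data is reference counted, so destroy_priv()
* frees it only after both this disconnect path and any outstanding async
* requests have dropped their kref. The sequence on unplug is roughly:
* detach the parport, kill pending async URBs, then put the final
* reference.
*/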
/* table of cables that work through this driver */
static const struct usb_device_id uss720_table[] = {
{ USB_DEVICE(0x047e, 0x1001) },
{ USB_DEVICE(0x04b8, 0x0002) },
{ USB_DEVICE(0x04b8, 0x0003) },
{ USB_DEVICE(0x050d, 0x0002) },
{ USB_DEVICE(0x050d, 0x1202) },
{ USB_DEVICE(0x0557, 0x2001) },
{ USB_DEVICE(0x05ab, 0x0002) },
{ USB_DEVICE(0x06c6, 0x0100) },
{ USB_DEVICE(0x0729, 0x1284) },
{ USB_DEVICE(0x1293, 0x0002) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, uss720_table);
static struct usb_driver uss720_driver = {
.name = "uss720",
.probe = uss720_probe,
.disconnect = uss720_disconnect,
.id_table = uss720_table,
};
/* --------------------------------------------------------------------- */
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
static int __init uss720_init(void)
{
int retval;
retval = usb_register(&uss720_driver);
if (retval)
goto out;
printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_DESC "\n");
printk(KERN_INFO KBUILD_MODNAME ": NOTE: this is a special purpose "
"driver to allow nonstandard\n");
printk(KERN_INFO KBUILD_MODNAME ": protocols (eg. bitbang) over "
"USS720 usb to parallel cables\n");
printk(KERN_INFO KBUILD_MODNAME ": If you just want to connect to a "
"printer, use usblp instead\n");
out:
return retval;
}
static void __exit uss720_cleanup(void)
{
usb_deregister(&uss720_driver);
}
module_init(uss720_init);
module_exit(uss720_cleanup);
/* --------------------------------------------------------------------- */
| linux-master | drivers/usb/misc/uss720.c |
// SPDX-License-Identifier: GPL-2.0
/*
* drivers/usb/misc/lvstest.c
*
* Test pattern generation for Link Layer Validation System Tests
*
* Copyright (C) 2014 ST Microelectronics
* Pratyush Anand <[email protected]>
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/ch11.h>
#include <linux/usb/hcd.h>
#include <linux/usb/phy.h>
struct lvs_rh {
/* root hub interface */
struct usb_interface *intf;
/* if lvs device connected */
bool present;
/* port no at which lvs device is present */
int portnum;
/* urb buffer */
u8 buffer[8];
/* class descriptor */
struct usb_hub_descriptor descriptor;
/* urb for polling interrupt pipe */
struct urb *urb;
/* LVS RH work */
struct work_struct rh_work;
/* RH port status */
struct usb_port_status port_status;
};
static struct usb_device *create_lvs_device(struct usb_interface *intf)
{
struct usb_device *udev, *hdev;
struct usb_hcd *hcd;
struct lvs_rh *lvs = usb_get_intfdata(intf);
if (!lvs->present) {
dev_err(&intf->dev, "No LVS device is present\n");
return NULL;
}
hdev = interface_to_usbdev(intf);
hcd = bus_to_hcd(hdev->bus);
udev = usb_alloc_dev(hdev, hdev->bus, lvs->portnum);
if (!udev) {
dev_err(&intf->dev, "Could not allocate lvs udev\n");
return NULL;
}
udev->speed = USB_SPEED_SUPER;
udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512);
usb_set_device_state(udev, USB_STATE_DEFAULT);
if (hcd->driver->enable_device) {
if (hcd->driver->enable_device(hcd, udev) < 0) {
dev_err(&intf->dev, "Failed to enable\n");
usb_put_dev(udev);
return NULL;
}
}
return udev;
}
static void destroy_lvs_device(struct usb_device *udev)
{
struct usb_device *hdev = udev->parent;
struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
if (hcd->driver->free_dev)
hcd->driver->free_dev(hcd, udev);
usb_put_dev(udev);
}
static int lvs_rh_clear_port_feature(struct usb_device *hdev,
int port1, int feature)
{
return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
USB_REQ_CLEAR_FEATURE, USB_RT_PORT, feature, port1,
NULL, 0, 1000);
}
static int lvs_rh_set_port_feature(struct usb_device *hdev,
int port1, int feature)
{
return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
USB_REQ_SET_FEATURE, USB_RT_PORT, feature, port1,
NULL, 0, 1000);
}
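/*
* Both helpers wrap the standard hub class request addressed to a port
* (bmRequestType USB_RT_PORT, i.e. a class request to recipient "other"):
*
*	wValue = feature selector, e.g. USB_PORT_FEAT_SUSPEND
*	wIndex = port number (for some selectors the high byte carries
*		 extra data, see the u1/u2 timeout attributes below)
*
* There is no data stage; a one second timeout is used for the request.
*/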
static ssize_t u3_entry_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_device *hdev = interface_to_usbdev(intf);
struct lvs_rh *lvs = usb_get_intfdata(intf);
struct usb_device *udev;
int ret;
udev = create_lvs_device(intf);
if (!udev) {
dev_err(dev, "failed to create lvs device\n");
return -ENOMEM;
}
ret = lvs_rh_set_port_feature(hdev, lvs->portnum,
USB_PORT_FEAT_SUSPEND);
if (ret < 0)
dev_err(dev, "can't issue U3 entry %d\n", ret);
destroy_lvs_device(udev);
if (ret < 0)
return ret;
return count;
}
static DEVICE_ATTR_WO(u3_entry);
static ssize_t u3_exit_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_device *hdev = interface_to_usbdev(intf);
struct lvs_rh *lvs = usb_get_intfdata(intf);
struct usb_device *udev;
int ret;
udev = create_lvs_device(intf);
if (!udev) {
dev_err(dev, "failed to create lvs device\n");
return -ENOMEM;
}
ret = lvs_rh_clear_port_feature(hdev, lvs->portnum,
USB_PORT_FEAT_SUSPEND);
if (ret < 0)
dev_err(dev, "can't issue U3 exit %d\n", ret);
destroy_lvs_device(udev);
if (ret < 0)
return ret;
return count;
}
static DEVICE_ATTR_WO(u3_exit);
static ssize_t hot_reset_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_device *hdev = interface_to_usbdev(intf);
struct lvs_rh *lvs = usb_get_intfdata(intf);
int ret;
ret = lvs_rh_set_port_feature(hdev, lvs->portnum,
USB_PORT_FEAT_RESET);
if (ret < 0) {
dev_err(dev, "can't issue hot reset %d\n", ret);
return ret;
}
return count;
}
static DEVICE_ATTR_WO(hot_reset);
static ssize_t warm_reset_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_device *hdev = interface_to_usbdev(intf);
struct lvs_rh *lvs = usb_get_intfdata(intf);
int ret;
ret = lvs_rh_set_port_feature(hdev, lvs->portnum,
USB_PORT_FEAT_BH_PORT_RESET);
if (ret < 0) {
dev_err(dev, "can't issue warm reset %d\n", ret);
return ret;
}
return count;
}
static DEVICE_ATTR_WO(warm_reset);
static ssize_t u2_timeout_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_device *hdev = interface_to_usbdev(intf);
struct lvs_rh *lvs = usb_get_intfdata(intf);
unsigned long val;
int ret;
ret = kstrtoul(buf, 10, &val);
if (ret < 0) {
dev_err(dev, "couldn't parse string %d\n", ret);
return ret;
}
if (val > 127)
return -EINVAL;
ret = lvs_rh_set_port_feature(hdev, lvs->portnum | (val << 8),
USB_PORT_FEAT_U2_TIMEOUT);
if (ret < 0) {
dev_err(dev, "Error %d while setting U2 timeout %ld\n", ret, val);
return ret;
}
return count;
}
static DEVICE_ATTR_WO(u2_timeout);
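/*
* Worked example of the wIndex encoding used above, assuming port 2 and a
* timeout value of 10: wIndex = 2 | (10 << 8) = 0x0a02, i.e. the low byte
* selects the port and the high byte carries the U2 inactivity timeout,
* as required for USB_PORT_FEAT_U2_TIMEOUT.
*/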
static ssize_t u1_timeout_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_device *hdev = interface_to_usbdev(intf);
struct lvs_rh *lvs = usb_get_intfdata(intf);
unsigned long val;
int ret;
ret = kstrtoul(buf, 10, &val);
if (ret < 0) {
dev_err(dev, "couldn't parse string %d\n", ret);
return ret;
}
if (val > 127)
return -EINVAL;
ret = lvs_rh_set_port_feature(hdev, lvs->portnum | (val << 8),
USB_PORT_FEAT_U1_TIMEOUT);
if (ret < 0) {
dev_err(dev, "Error %d while setting U1 timeout %ld\n", ret, val);
return ret;
}
return count;
}
static DEVICE_ATTR_WO(u1_timeout);
static ssize_t get_dev_desc_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_device *udev;
struct usb_device_descriptor *descriptor;
int ret;
descriptor = kmalloc(sizeof(*descriptor), GFP_KERNEL);
if (!descriptor)
return -ENOMEM;
udev = create_lvs_device(intf);
if (!udev) {
dev_err(dev, "failed to create lvs device\n");
ret = -ENOMEM;
goto free_desc;
}
ret = usb_control_msg(udev, (PIPE_CONTROL << 30) | USB_DIR_IN,
USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, USB_DT_DEVICE << 8,
0, descriptor, sizeof(*descriptor),
USB_CTRL_GET_TIMEOUT);
if (ret < 0)
dev_err(dev, "can't read device descriptor %d\n", ret);
destroy_lvs_device(udev);
free_desc:
kfree(descriptor);
if (ret < 0)
return ret;
return count;
}
static DEVICE_ATTR_WO(get_dev_desc);
static ssize_t enable_compliance_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_device *hdev = interface_to_usbdev(intf);
struct lvs_rh *lvs = usb_get_intfdata(intf);
int ret;
ret = lvs_rh_set_port_feature(hdev,
lvs->portnum | USB_SS_PORT_LS_COMP_MOD << 3,
USB_PORT_FEAT_LINK_STATE);
if (ret < 0) {
dev_err(dev, "can't enable compliance mode %d\n", ret);
return ret;
}
return count;
}
static DEVICE_ATTR_WO(enable_compliance);
static struct attribute *lvs_attrs[] = {
&dev_attr_get_dev_desc.attr,
&dev_attr_u1_timeout.attr,
&dev_attr_u2_timeout.attr,
&dev_attr_hot_reset.attr,
&dev_attr_warm_reset.attr,
&dev_attr_u3_entry.attr,
&dev_attr_u3_exit.attr,
&dev_attr_enable_compliance.attr,
NULL
};
ATTRIBUTE_GROUPS(lvs);
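/*
* Once the driver binds to a SuperSpeed root hub, these attributes appear
* on the root-hub interface and drive the tests from userspace, e.g.
* (paths are illustrative, they depend on the bus topology):
*
*	echo 120 > /sys/bus/usb/devices/usb2/2-0:1.0/u1_timeout
*	echo 1   > /sys/bus/usb/devices/usb2/2-0:1.0/u3_entry
*/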
static void lvs_rh_work(struct work_struct *work)
{
struct lvs_rh *lvs = container_of(work, struct lvs_rh, rh_work);
struct usb_interface *intf = lvs->intf;
struct usb_device *hdev = interface_to_usbdev(intf);
struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
struct usb_hub_descriptor *descriptor = &lvs->descriptor;
struct usb_port_status *port_status = &lvs->port_status;
int i, ret = 0;
u16 portchange;
/* Examine each root port */
for (i = 1; i <= descriptor->bNbrPorts; i++) {
ret = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_PORT, 0, i,
port_status, sizeof(*port_status), 1000);
if (ret < 4)
continue;
portchange = le16_to_cpu(port_status->wPortChange);
if (portchange & USB_PORT_STAT_C_LINK_STATE)
lvs_rh_clear_port_feature(hdev, i,
USB_PORT_FEAT_C_PORT_LINK_STATE);
if (portchange & USB_PORT_STAT_C_ENABLE)
lvs_rh_clear_port_feature(hdev, i,
USB_PORT_FEAT_C_ENABLE);
if (portchange & USB_PORT_STAT_C_RESET)
lvs_rh_clear_port_feature(hdev, i,
USB_PORT_FEAT_C_RESET);
if (portchange & USB_PORT_STAT_C_BH_RESET)
lvs_rh_clear_port_feature(hdev, i,
USB_PORT_FEAT_C_BH_PORT_RESET);
if (portchange & USB_PORT_STAT_C_CONNECTION) {
lvs_rh_clear_port_feature(hdev, i,
USB_PORT_FEAT_C_CONNECTION);
if (le16_to_cpu(port_status->wPortStatus) &
USB_PORT_STAT_CONNECTION) {
lvs->present = true;
lvs->portnum = i;
if (hcd->usb_phy)
usb_phy_notify_connect(hcd->usb_phy,
USB_SPEED_SUPER);
} else {
lvs->present = false;
if (hcd->usb_phy)
usb_phy_notify_disconnect(hcd->usb_phy,
USB_SPEED_SUPER);
}
break;
}
}
ret = usb_submit_urb(lvs->urb, GFP_KERNEL);
if (ret != 0 && ret != -ENODEV && ret != -EPERM)
dev_err(&intf->dev, "urb resubmit error %d\n", ret);
}
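/*
* Note the split between contexts: the interrupt URB completes in atomic
* context and only schedules this work item; the status reads, feature
* clears and the URB resubmission above all run from process context,
* where the sleeping control transfers are allowed.
*/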
static void lvs_rh_irq(struct urb *urb)
{
struct lvs_rh *lvs = urb->context;
schedule_work(&lvs->rh_work);
}
static int lvs_rh_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *hdev;
struct usb_host_interface *desc;
struct usb_endpoint_descriptor *endpoint;
struct lvs_rh *lvs;
unsigned int pipe;
int ret, maxp;
hdev = interface_to_usbdev(intf);
desc = intf->cur_altsetting;
ret = usb_find_int_in_endpoint(desc, &endpoint);
if (ret)
return ret;
/* valid only for SS root hub */
if (hdev->descriptor.bDeviceProtocol != USB_HUB_PR_SS || hdev->parent) {
dev_err(&intf->dev, "Bind LVS driver with SS root Hub only\n");
return -EINVAL;
}
lvs = devm_kzalloc(&intf->dev, sizeof(*lvs), GFP_KERNEL);
if (!lvs)
return -ENOMEM;
lvs->intf = intf;
usb_set_intfdata(intf, lvs);
/* find out how many ports this root hub has */
ret = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
USB_REQ_GET_DESCRIPTOR, USB_DIR_IN | USB_RT_HUB,
USB_DT_SS_HUB << 8, 0, &lvs->descriptor,
USB_DT_SS_HUB_SIZE, USB_CTRL_GET_TIMEOUT);
if (ret < (USB_DT_HUB_NONVAR_SIZE + 2)) {
dev_err(&hdev->dev, "wrong root hub descriptor read %d\n", ret);
return ret < 0 ? ret : -EINVAL;
}
/* submit urb to poll interrupt endpoint */
lvs->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!lvs->urb)
return -ENOMEM;
INIT_WORK(&lvs->rh_work, lvs_rh_work);
pipe = usb_rcvintpipe(hdev, endpoint->bEndpointAddress);
maxp = usb_maxpacket(hdev, pipe);
usb_fill_int_urb(lvs->urb, hdev, pipe, &lvs->buffer[0], maxp,
lvs_rh_irq, lvs, endpoint->bInterval);
ret = usb_submit_urb(lvs->urb, GFP_KERNEL);
if (ret < 0) {
dev_err(&intf->dev, "couldn't submit lvs urb %d\n", ret);
goto free_urb;
}
return ret;
free_urb:
usb_free_urb(lvs->urb);
return ret;
}
static void lvs_rh_disconnect(struct usb_interface *intf)
{
struct lvs_rh *lvs = usb_get_intfdata(intf);
usb_poison_urb(lvs->urb); /* used in scheduled work */
flush_work(&lvs->rh_work);
usb_free_urb(lvs->urb);
}
static struct usb_driver lvs_driver = {
.name = "lvs",
.probe = lvs_rh_probe,
.disconnect = lvs_rh_disconnect,
.dev_groups = lvs_groups,
};
module_usb_driver(lvs_driver);
MODULE_DESCRIPTION("Link Layer Validation System Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/misc/lvstest.c |
// SPDX-License-Identifier: GPL-2.0
/* -*- linux-c -*-
* Cypress USB Thermometer driver
*
* Copyright (c) 2004 Erik Rigtorp <[email protected]> <[email protected]>
*
* This driver works with Elektor magazine USB Interface as published in
* issue #291. It should also work with the original starter kit/demo board
* from Cypress.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb.h>
#define DRIVER_AUTHOR "Erik Rigtorp"
#define DRIVER_DESC "Cypress USB Thermometer driver"
#define USB_SKEL_VENDOR_ID 0x04b4
#define USB_SKEL_PRODUCT_ID 0x0002
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) },
{ }
};
MODULE_DEVICE_TABLE(usb, id_table);
/* Structure to hold all of our device specific stuff */
struct usb_cytherm {
struct usb_device *udev; /* save off the usb device pointer */
struct usb_interface *interface; /* the interface for this device */
int brightness;
};
/* Vendor requests */
/* They all operate on one byte at a time */
#define PING 0x00
#define READ_ROM 0x01 /* Reads from ROM, value = address */
#define READ_RAM 0x02 /* Reads from RAM, value = address */
#define WRITE_RAM 0x03 /* Write to RAM, value = address, index = data */
#define READ_PORT 0x04 /* Reads from port, value = address */
#define WRITE_PORT 0x05 /* Write to port, value = address, index = data */
/* Send a vendor command to device */
static int vendor_command(struct usb_device *dev, unsigned char request,
unsigned char value, unsigned char index,
void *buf, int size)
{
return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
request,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER,
value,
index, buf, size,
USB_CTRL_GET_TIMEOUT);
}
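/*
* Illustrative use of the helper above (hypothetical caller, not part of
* the original driver): reading one byte of RAM at address 0x33 (the TEMP
* location used below) with an 8-byte scratch buffer. The device returns
* the requested value in byte 1 of the buffer, as the sysfs handlers
* below rely on:
*
*	ret = vendor_command(cytherm->udev, READ_RAM, 0x33, 0, buffer, 8);
*	if (ret >= 0)
*		value = buffer[1];
*/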
#define BRIGHTNESS 0x2c /* RAM location for brightness value */
#define BRIGHTNESS_SEM 0x2b /* RAM location for brightness semaphore */
static ssize_t brightness_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_cytherm *cytherm = usb_get_intfdata(intf);
return sprintf(buf, "%i", cytherm->brightness);
}
static ssize_t brightness_store(struct device *dev, struct device_attribute *attr, const char *buf,
size_t count)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_cytherm *cytherm = usb_get_intfdata(intf);
unsigned char *buffer;
int retval;
buffer = kmalloc(8, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
cytherm->brightness = simple_strtoul(buf, NULL, 10);
if (cytherm->brightness > 0xFF)
cytherm->brightness = 0xFF;
else if (cytherm->brightness < 0)
cytherm->brightness = 0;
/* Set brightness */
retval = vendor_command(cytherm->udev, WRITE_RAM, BRIGHTNESS,
cytherm->brightness, buffer, 8);
if (retval)
dev_dbg(&cytherm->udev->dev, "retval = %d\n", retval);
/* Inform µC that we have changed the brightness setting */
retval = vendor_command(cytherm->udev, WRITE_RAM, BRIGHTNESS_SEM,
0x01, buffer, 8);
if (retval)
dev_dbg(&cytherm->udev->dev, "retval = %d\n", retval);
kfree(buffer);
return count;
}
static DEVICE_ATTR_RW(brightness);
#define TEMP 0x33 /* RAM location for temperature */
#define SIGN 0x34 /* RAM location for temperature sign */
static ssize_t temp_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_cytherm *cytherm = usb_get_intfdata(intf);
int retval;
unsigned char *buffer;
int temp, sign;
buffer = kmalloc(8, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
/* read temperature */
retval = vendor_command(cytherm->udev, READ_RAM, TEMP, 0, buffer, 8);
if (retval)
dev_dbg(&cytherm->udev->dev, "retval = %d\n", retval);
temp = buffer[1];
/* read sign */
retval = vendor_command(cytherm->udev, READ_RAM, SIGN, 0, buffer, 8);
if (retval)
dev_dbg(&cytherm->udev->dev, "retval = %d\n", retval);
sign = buffer[1];
kfree(buffer);
return sprintf(buf, "%c%i.%i", sign ? '-' : '+', temp >> 1,
5*(temp - ((temp >> 1) << 1)));
}
static DEVICE_ATTR_RO(temp);
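/*
* The raw TEMP byte is in half-degree steps: bit 0 is the 0.5 flag and
* the upper bits are whole degrees, hence the "temp >> 1" and the
* "5 * (temp - ((temp >> 1) << 1))" (i.e. 5 * (temp & 1)) above. For
* example, temp = 59 with sign = 0 prints "+29.5".
*/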
#define BUTTON 0x7a
static ssize_t button_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_cytherm *cytherm = usb_get_intfdata(intf);
int retval;
unsigned char *buffer;
buffer = kmalloc(8, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
/* check button */
retval = vendor_command(cytherm->udev, READ_RAM, BUTTON, 0, buffer, 8);
if (retval)
dev_dbg(&cytherm->udev->dev, "retval = %d\n", retval);
retval = buffer[1];
kfree(buffer);
if (retval)
return sprintf(buf, "1");
else
return sprintf(buf, "0");
}
static DEVICE_ATTR_RO(button);
static ssize_t port0_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_cytherm *cytherm = usb_get_intfdata(intf);
int retval;
unsigned char *buffer;
buffer = kmalloc(8, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
retval = vendor_command(cytherm->udev, READ_PORT, 0, 0, buffer, 8);
if (retval)
dev_dbg(&cytherm->udev->dev, "retval = %d\n", retval);
retval = buffer[1];
kfree(buffer);
return sprintf(buf, "%d", retval);
}
static ssize_t port0_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_cytherm *cytherm = usb_get_intfdata(intf);
unsigned char *buffer;
int retval;
int tmp;
buffer = kmalloc(8, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
tmp = simple_strtoul(buf, NULL, 10);
if (tmp > 0xFF)
tmp = 0xFF;
else if (tmp < 0)
tmp = 0;
retval = vendor_command(cytherm->udev, WRITE_PORT, 0,
tmp, buffer, 8);
if (retval)
dev_dbg(&cytherm->udev->dev, "retval = %d\n", retval);
kfree(buffer);
return count;
}
static DEVICE_ATTR_RW(port0);
static ssize_t port1_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_cytherm *cytherm = usb_get_intfdata(intf);
int retval;
unsigned char *buffer;
buffer = kmalloc(8, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
retval = vendor_command(cytherm->udev, READ_PORT, 1, 0, buffer, 8);
if (retval)
dev_dbg(&cytherm->udev->dev, "retval = %d\n", retval);
retval = buffer[1];
kfree(buffer);
return sprintf(buf, "%d", retval);
}
static ssize_t port1_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_cytherm *cytherm = usb_get_intfdata(intf);
unsigned char *buffer;
int retval;
int tmp;
buffer = kmalloc(8, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
tmp = simple_strtoul(buf, NULL, 10);
if (tmp > 0xFF)
tmp = 0xFF;
else if (tmp < 0)
tmp = 0;
retval = vendor_command(cytherm->udev, WRITE_PORT, 1,
tmp, buffer, 8);
if (retval)
dev_dbg(&cytherm->udev->dev, "retval = %d\n", retval);
kfree(buffer);
return count;
}
static DEVICE_ATTR_RW(port1);
static struct attribute *cytherm_attrs[] = {
&dev_attr_brightness.attr,
&dev_attr_temp.attr,
&dev_attr_button.attr,
&dev_attr_port0.attr,
&dev_attr_port1.attr,
NULL,
};
ATTRIBUTE_GROUPS(cytherm);
static int cytherm_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(interface);
struct usb_cytherm *dev;
int retval = -ENOMEM;
dev = kzalloc(sizeof(struct usb_cytherm), GFP_KERNEL);
if (!dev)
goto error_mem;
dev->udev = usb_get_dev(udev);
usb_set_intfdata(interface, dev);
dev->brightness = 0xFF;
dev_info(&interface->dev,
"Cypress thermometer device now attached\n");
return 0;
error_mem:
return retval;
}
static void cytherm_disconnect(struct usb_interface *interface)
{
struct usb_cytherm *dev;
dev = usb_get_intfdata(interface);
/* first remove the files, then NULL the pointer */
usb_set_intfdata(interface, NULL);
usb_put_dev(dev->udev);
kfree(dev);
dev_info(&interface->dev, "Cypress thermometer now disconnected\n");
}
/* usb specific object needed to register this driver with the usb subsystem */
static struct usb_driver cytherm_driver = {
.name = "cytherm",
.probe = cytherm_probe,
.disconnect = cytherm_disconnect,
.id_table = id_table,
.dev_groups = cytherm_groups,
};
module_usb_driver(cytherm_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/misc/cytherm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Meywa-Denki & KAYAC YUREX
*
* Copyright (C) 2010 Tomoki Sekiyama ([email protected])
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/hid.h>
#define DRIVER_AUTHOR "Tomoki Sekiyama"
#define DRIVER_DESC "Driver for Meywa-Denki & KAYAC YUREX"
#define YUREX_VENDOR_ID 0x0c45
#define YUREX_PRODUCT_ID 0x1010
#define CMD_ACK '!'
#define CMD_ANIMATE 'A'
#define CMD_COUNT 'C'
#define CMD_LED 'L'
#define CMD_READ 'R'
#define CMD_SET 'S'
#define CMD_VERSION 'V'
#define CMD_EOF 0x0d
#define CMD_PADDING 0xff
#define YUREX_BUF_SIZE 8
#define YUREX_WRITE_TIMEOUT (HZ*2)
/* table of devices that work with this driver */
static struct usb_device_id yurex_table[] = {
{ USB_DEVICE(YUREX_VENDOR_ID, YUREX_PRODUCT_ID) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, yurex_table);
#ifdef CONFIG_USB_DYNAMIC_MINORS
#define YUREX_MINOR_BASE 0
#else
#define YUREX_MINOR_BASE 192
#endif
/* Structure to hold all of our device specific stuff */
struct usb_yurex {
struct usb_device *udev;
struct usb_interface *interface;
__u8 int_in_endpointAddr;
struct urb *urb; /* URB for interrupt in */
unsigned char *int_buffer; /* buffer for interrupt in */
struct urb *cntl_urb; /* URB for control msg */
struct usb_ctrlrequest *cntl_req; /* req for control msg */
unsigned char *cntl_buffer; /* buffer for control msg */
struct kref kref;
struct mutex io_mutex;
unsigned long disconnected:1;
struct fasync_struct *async_queue;
wait_queue_head_t waitq;
spinlock_t lock;
__s64 bbu; /* BBU from device */
};
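/*
* Locking model (as used below): io_mutex serializes open/read/write
* against disconnect and protects the "disconnected" flag, while the
* spinlock guards the bbu counter, which is also updated from the
* interrupt URB completion handler in atomic context.
*/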
#define to_yurex_dev(d) container_of(d, struct usb_yurex, kref)
static struct usb_driver yurex_driver;
static const struct file_operations yurex_fops;
static void yurex_control_callback(struct urb *urb)
{
struct usb_yurex *dev = urb->context;
int status = urb->status;
if (status) {
dev_err(&urb->dev->dev, "%s - control failed: %d\n",
__func__, status);
wake_up_interruptible(&dev->waitq);
return;
}
/* on success, sender woken up by CMD_ACK int in, or timeout */
}
static void yurex_delete(struct kref *kref)
{
struct usb_yurex *dev = to_yurex_dev(kref);
dev_dbg(&dev->interface->dev, "%s\n", __func__);
if (dev->cntl_urb) {
usb_kill_urb(dev->cntl_urb);
kfree(dev->cntl_req);
usb_free_coherent(dev->udev, YUREX_BUF_SIZE,
dev->cntl_buffer, dev->cntl_urb->transfer_dma);
usb_free_urb(dev->cntl_urb);
}
if (dev->urb) {
usb_kill_urb(dev->urb);
usb_free_coherent(dev->udev, YUREX_BUF_SIZE,
dev->int_buffer, dev->urb->transfer_dma);
usb_free_urb(dev->urb);
}
usb_put_intf(dev->interface);
usb_put_dev(dev->udev);
kfree(dev);
}
/*
* usb class driver info in order to get a minor number from the usb core,
* and to have the device registered with the driver core
*/
static struct usb_class_driver yurex_class = {
.name = "yurex%d",
.fops = &yurex_fops,
.minor_base = YUREX_MINOR_BASE,
};
static void yurex_interrupt(struct urb *urb)
{
struct usb_yurex *dev = urb->context;
unsigned char *buf = dev->int_buffer;
int status = urb->status;
unsigned long flags;
int retval, i;
switch (status) {
case 0: /*success*/
break;
/* The device is terminated or messed up, give up */
case -EOVERFLOW:
dev_err(&dev->interface->dev,
"%s - overflow with length %d, actual length is %d\n",
__func__, YUREX_BUF_SIZE, dev->urb->actual_length);
return;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
case -EILSEQ:
case -EPROTO:
case -ETIME:
return;
default:
dev_err(&dev->interface->dev,
"%s - unknown status received: %d\n", __func__, status);
return;
}
/* handle received message */
switch (buf[0]) {
case CMD_COUNT:
case CMD_READ:
if (buf[6] == CMD_EOF) {
spin_lock_irqsave(&dev->lock, flags);
dev->bbu = 0;
for (i = 1; i < 6; i++) {
dev->bbu += buf[i];
if (i != 5)
dev->bbu <<= 8;
}
dev_dbg(&dev->interface->dev, "%s count: %lld\n",
__func__, dev->bbu);
spin_unlock_irqrestore(&dev->lock, flags);
kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
}
else
dev_dbg(&dev->interface->dev,
"data format error - no EOF\n");
break;
case CMD_ACK:
dev_dbg(&dev->interface->dev, "%s ack: %c\n",
__func__, buf[1]);
wake_up_interruptible(&dev->waitq);
break;
}
retval = usb_submit_urb(dev->urb, GFP_ATOMIC);
if (retval) {
dev_err(&dev->interface->dev, "%s - usb_submit_urb failed: %d\n",
__func__, retval);
}
}
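/*
* The counter arrives as five big-endian payload bytes in buf[1]..buf[5],
* terminated by CMD_EOF in buf[6]. For example, a CMD_COUNT report of
* 'C' 00 00 00 01 2c 0d decodes to bbu = 0x12c = 300 in the loop above.
*/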
static int yurex_probe(struct usb_interface *interface, const struct usb_device_id *id)
{
struct usb_yurex *dev;
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
int retval = -ENOMEM;
DEFINE_WAIT(wait);
int res;
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
goto error;
kref_init(&dev->kref);
mutex_init(&dev->io_mutex);
spin_lock_init(&dev->lock);
init_waitqueue_head(&dev->waitq);
dev->udev = usb_get_dev(interface_to_usbdev(interface));
dev->interface = usb_get_intf(interface);
/* set up the endpoint information */
iface_desc = interface->cur_altsetting;
res = usb_find_int_in_endpoint(iface_desc, &endpoint);
if (res) {
dev_err(&interface->dev, "Could not find endpoints\n");
retval = res;
goto error;
}
dev->int_in_endpointAddr = endpoint->bEndpointAddress;
/* allocate control URB */
dev->cntl_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->cntl_urb)
goto error;
/* allocate buffer for control req */
dev->cntl_req = kmalloc(YUREX_BUF_SIZE, GFP_KERNEL);
if (!dev->cntl_req)
goto error;
/* allocate buffer for control msg */
dev->cntl_buffer = usb_alloc_coherent(dev->udev, YUREX_BUF_SIZE,
GFP_KERNEL,
&dev->cntl_urb->transfer_dma);
if (!dev->cntl_buffer) {
dev_err(&interface->dev, "Could not allocate cntl_buffer\n");
goto error;
}
/* configure control URB */
dev->cntl_req->bRequestType = USB_DIR_OUT | USB_TYPE_CLASS |
USB_RECIP_INTERFACE;
dev->cntl_req->bRequest = HID_REQ_SET_REPORT;
dev->cntl_req->wValue = cpu_to_le16((HID_OUTPUT_REPORT + 1) << 8);
dev->cntl_req->wIndex = cpu_to_le16(iface_desc->desc.bInterfaceNumber);
dev->cntl_req->wLength = cpu_to_le16(YUREX_BUF_SIZE);
usb_fill_control_urb(dev->cntl_urb, dev->udev,
usb_sndctrlpipe(dev->udev, 0),
(void *)dev->cntl_req, dev->cntl_buffer,
YUREX_BUF_SIZE, yurex_control_callback, dev);
dev->cntl_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
/* allocate interrupt URB */
dev->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->urb)
goto error;
/* allocate buffer for interrupt in */
dev->int_buffer = usb_alloc_coherent(dev->udev, YUREX_BUF_SIZE,
GFP_KERNEL, &dev->urb->transfer_dma);
if (!dev->int_buffer) {
dev_err(&interface->dev, "Could not allocate int_buffer\n");
goto error;
}
/* configure interrupt URB */
usb_fill_int_urb(dev->urb, dev->udev,
usb_rcvintpipe(dev->udev, dev->int_in_endpointAddr),
dev->int_buffer, YUREX_BUF_SIZE, yurex_interrupt,
dev, 1);
dev->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
if (usb_submit_urb(dev->urb, GFP_KERNEL)) {
retval = -EIO;
dev_err(&interface->dev, "Could not submit URB\n");
goto error;
}
/* save our data pointer in this interface device */
usb_set_intfdata(interface, dev);
dev->bbu = -1;
/* we can register the device now, as it is ready */
retval = usb_register_dev(interface, &yurex_class);
if (retval) {
dev_err(&interface->dev,
"Not able to get a minor for this device.\n");
usb_set_intfdata(interface, NULL);
goto error;
}
dev_info(&interface->dev,
"USB YUREX device now attached to Yurex #%d\n",
interface->minor);
return 0;
error:
if (dev)
/* this frees allocated memory */
kref_put(&dev->kref, yurex_delete);
return retval;
}
static void yurex_disconnect(struct usb_interface *interface)
{
struct usb_yurex *dev;
int minor = interface->minor;
dev = usb_get_intfdata(interface);
usb_set_intfdata(interface, NULL);
/* give back our minor */
usb_deregister_dev(interface, &yurex_class);
/* prevent more I/O from starting */
usb_poison_urb(dev->urb);
usb_poison_urb(dev->cntl_urb);
mutex_lock(&dev->io_mutex);
dev->disconnected = 1;
mutex_unlock(&dev->io_mutex);
/* wakeup waiters */
kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
wake_up_interruptible(&dev->waitq);
/* decrement our usage count */
kref_put(&dev->kref, yurex_delete);
dev_info(&interface->dev, "USB YUREX #%d now disconnected\n", minor);
}
static struct usb_driver yurex_driver = {
.name = "yurex",
.probe = yurex_probe,
.disconnect = yurex_disconnect,
.id_table = yurex_table,
};
static int yurex_fasync(int fd, struct file *file, int on)
{
struct usb_yurex *dev;
dev = file->private_data;
return fasync_helper(fd, file, on, &dev->async_queue);
}
static int yurex_open(struct inode *inode, struct file *file)
{
struct usb_yurex *dev;
struct usb_interface *interface;
int subminor;
int retval = 0;
subminor = iminor(inode);
interface = usb_find_interface(&yurex_driver, subminor);
if (!interface) {
printk(KERN_ERR "%s - error, can't find device for minor %d",
__func__, subminor);
retval = -ENODEV;
goto exit;
}
dev = usb_get_intfdata(interface);
if (!dev) {
retval = -ENODEV;
goto exit;
}
/* increment our usage count for the device */
kref_get(&dev->kref);
/* save our object in the file's private structure */
mutex_lock(&dev->io_mutex);
file->private_data = dev;
mutex_unlock(&dev->io_mutex);
exit:
return retval;
}
static int yurex_release(struct inode *inode, struct file *file)
{
struct usb_yurex *dev;
dev = file->private_data;
if (dev == NULL)
return -ENODEV;
/* decrement the count on our device */
kref_put(&dev->kref, yurex_delete);
return 0;
}
static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
loff_t *ppos)
{
struct usb_yurex *dev;
int len = 0;
char in_buffer[20];
unsigned long flags;
dev = file->private_data;
mutex_lock(&dev->io_mutex);
if (dev->disconnected) { /* already disconnected */
mutex_unlock(&dev->io_mutex);
return -ENODEV;
}
spin_lock_irqsave(&dev->lock, flags);
len = snprintf(in_buffer, 20, "%lld\n", dev->bbu);
spin_unlock_irqrestore(&dev->lock, flags);
mutex_unlock(&dev->io_mutex);
if (WARN_ON_ONCE(len >= sizeof(in_buffer)))
return -EIO;
return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
}
static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
size_t count, loff_t *ppos)
{
struct usb_yurex *dev;
int i, set = 0, retval = 0;
char buffer[16 + 1];
char *data = buffer;
unsigned long long c, c2 = 0;
signed long timeout = 0;
DEFINE_WAIT(wait);
count = min(sizeof(buffer) - 1, count);
dev = file->private_data;
/* verify that we actually have some data to write */
if (count == 0)
goto error;
mutex_lock(&dev->io_mutex);
if (dev->disconnected) { /* already disconnected */
mutex_unlock(&dev->io_mutex);
retval = -ENODEV;
goto error;
}
if (copy_from_user(buffer, user_buffer, count)) {
mutex_unlock(&dev->io_mutex);
retval = -EFAULT;
goto error;
}
buffer[count] = 0;
memset(dev->cntl_buffer, CMD_PADDING, YUREX_BUF_SIZE);
switch (buffer[0]) {
case CMD_ANIMATE:
case CMD_LED:
dev->cntl_buffer[0] = buffer[0];
dev->cntl_buffer[1] = buffer[1];
dev->cntl_buffer[2] = CMD_EOF;
break;
case CMD_READ:
case CMD_VERSION:
dev->cntl_buffer[0] = buffer[0];
dev->cntl_buffer[1] = 0x00;
dev->cntl_buffer[2] = CMD_EOF;
break;
case CMD_SET:
data++;
fallthrough;
case '0' ... '9':
set = 1;
c = c2 = simple_strtoull(data, NULL, 0);
dev->cntl_buffer[0] = CMD_SET;
for (i = 1; i < 6; i++) {
dev->cntl_buffer[i] = (c>>32) & 0xff;
c <<= 8;
}
dev->cntl_buffer[6] = CMD_EOF;
break;
default:
mutex_unlock(&dev->io_mutex);
return -EINVAL;
}
/* send the data as the control msg */
prepare_to_wait(&dev->waitq, &wait, TASK_INTERRUPTIBLE);
dev_dbg(&dev->interface->dev, "%s - submit %c\n", __func__,
dev->cntl_buffer[0]);
retval = usb_submit_urb(dev->cntl_urb, GFP_ATOMIC);
if (retval >= 0)
timeout = schedule_timeout(YUREX_WRITE_TIMEOUT);
finish_wait(&dev->waitq, &wait);
/* make sure URB is idle after timeout or (spurious) CMD_ACK */
usb_kill_urb(dev->cntl_urb);
mutex_unlock(&dev->io_mutex);
if (retval < 0) {
dev_err(&dev->interface->dev,
"%s - failed to send control msg, error %d\n",
__func__, retval);
goto error;
}
if (set && timeout)
dev->bbu = c2;
return timeout ? count : -EIO;
error:
return retval;
}
static const struct file_operations yurex_fops = {
.owner = THIS_MODULE,
.read = yurex_read,
.write = yurex_write,
.open = yurex_open,
.release = yurex_release,
.fasync = yurex_fasync,
.llseek = default_llseek,
};
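/*
* Userspace view (illustrative): reading /dev/yurex0 returns the current
* count as decimal text, and writing a decimal number (or "S<number>")
* sends CMD_SET to reprogram it, e.g.:
*
*	cat /dev/yurex0      -> "300\n"
*	echo 0 > /dev/yurex0 -> resets the counter
*/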
module_usb_driver(yurex_driver);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/misc/yurex.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Apple Cinema Display driver
*
* Copyright (C) 2006 Michael Hanselmann ([email protected])
*
* Thanks to Caskey L. Dickson for his work with acdctl.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/backlight.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>
#define APPLE_VENDOR_ID 0x05AC
#define USB_REQ_GET_REPORT 0x01
#define USB_REQ_SET_REPORT 0x09
#define ACD_USB_TIMEOUT 250
#define ACD_USB_EDID 0x0302
#define ACD_USB_BRIGHTNESS 0x0310
#define ACD_BTN_NONE 0
#define ACD_BTN_BRIGHT_UP 3
#define ACD_BTN_BRIGHT_DOWN 4
#define ACD_URB_BUFFER_LEN 2
#define ACD_MSG_BUFFER_LEN 2
#define APPLEDISPLAY_DEVICE(prod) \
.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
USB_DEVICE_ID_MATCH_INT_CLASS | \
USB_DEVICE_ID_MATCH_INT_PROTOCOL, \
.idVendor = APPLE_VENDOR_ID, \
.idProduct = (prod), \
.bInterfaceClass = USB_CLASS_HID, \
.bInterfaceProtocol = 0x00
/* table of devices that work with this driver */
static const struct usb_device_id appledisplay_table[] = {
{ APPLEDISPLAY_DEVICE(0x9218) },
{ APPLEDISPLAY_DEVICE(0x9219) },
{ APPLEDISPLAY_DEVICE(0x921c) },
{ APPLEDISPLAY_DEVICE(0x921d) },
{ APPLEDISPLAY_DEVICE(0x9222) },
{ APPLEDISPLAY_DEVICE(0x9226) },
{ APPLEDISPLAY_DEVICE(0x9236) },
/* Terminating entry */
{ }
};
MODULE_DEVICE_TABLE(usb, appledisplay_table);
/* Structure to hold all of our device specific stuff */
struct appledisplay {
struct usb_device *udev; /* usb device */
struct urb *urb; /* usb request block */
struct backlight_device *bd; /* backlight device */
u8 *urbdata; /* interrupt URB data buffer */
u8 *msgdata; /* control message data buffer */
struct delayed_work work;
int button_pressed;
struct mutex sysfslock; /* concurrent read and write */
};
static atomic_t count_displays = ATOMIC_INIT(0);
static void appledisplay_complete(struct urb *urb)
{
struct appledisplay *pdata = urb->context;
struct device *dev = &pdata->udev->dev;
int status = urb->status;
int retval;
switch (status) {
case 0:
/* success */
break;
case -EOVERFLOW:
dev_err(dev,
"OVERFLOW with data length %d, actual length is %d\n",
ACD_URB_BUFFER_LEN, pdata->urb->actual_length);
fallthrough;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* This urb is terminated, clean up */
dev_dbg(dev, "%s - urb shutting down with status: %d\n",
__func__, status);
return;
default:
dev_dbg(dev, "%s - nonzero urb status received: %d\n",
__func__, status);
goto exit;
}
switch(pdata->urbdata[1]) {
case ACD_BTN_BRIGHT_UP:
case ACD_BTN_BRIGHT_DOWN:
pdata->button_pressed = 1;
schedule_delayed_work(&pdata->work, 0);
break;
case ACD_BTN_NONE:
default:
pdata->button_pressed = 0;
break;
}
exit:
retval = usb_submit_urb(pdata->urb, GFP_ATOMIC);
if (retval) {
dev_err(dev, "%s - usb_submit_urb failed with result %d\n",
__func__, retval);
}
}
static int appledisplay_bl_update_status(struct backlight_device *bd)
{
struct appledisplay *pdata = bl_get_data(bd);
int retval;
mutex_lock(&pdata->sysfslock);
pdata->msgdata[0] = 0x10;
pdata->msgdata[1] = bd->props.brightness;
retval = usb_control_msg(
pdata->udev,
usb_sndctrlpipe(pdata->udev, 0),
USB_REQ_SET_REPORT,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
ACD_USB_BRIGHTNESS,
0,
pdata->msgdata, 2,
ACD_USB_TIMEOUT);
mutex_unlock(&pdata->sysfslock);
if (retval < 0)
return retval;
else
return 0;
}
static int appledisplay_bl_get_brightness(struct backlight_device *bd)
{
struct appledisplay *pdata = bl_get_data(bd);
int retval, brightness;
mutex_lock(&pdata->sysfslock);
retval = usb_control_msg(
pdata->udev,
usb_rcvctrlpipe(pdata->udev, 0),
USB_REQ_GET_REPORT,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
ACD_USB_BRIGHTNESS,
0,
pdata->msgdata, 2,
ACD_USB_TIMEOUT);
if (retval < 2) {
if (retval >= 0)
retval = -EMSGSIZE;
} else {
brightness = pdata->msgdata[1];
}
mutex_unlock(&pdata->sysfslock);
if (retval < 0)
return retval;
else
return brightness;
}
static const struct backlight_ops appledisplay_bl_data = {
.get_brightness = appledisplay_bl_get_brightness,
.update_status = appledisplay_bl_update_status,
};
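/*
* The backlight core invokes these callbacks on behalf of userspace: a
* write to the (illustrative) path
* /sys/class/backlight/appledisplay0/brightness updates props.brightness
* and then calls appledisplay_bl_update_status(), which forwards the
* value to the display via the HID SET_REPORT control request above.
*/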
static void appledisplay_work(struct work_struct *work)
{
struct appledisplay *pdata =
container_of(work, struct appledisplay, work.work);
int retval;
retval = appledisplay_bl_get_brightness(pdata->bd);
if (retval >= 0)
pdata->bd->props.brightness = retval;
/* Poll again in about 125ms if there's still a button pressed */
if (pdata->button_pressed)
schedule_delayed_work(&pdata->work, HZ / 8);
}
static int appledisplay_probe(struct usb_interface *iface,
const struct usb_device_id *id)
{
struct backlight_properties props;
struct appledisplay *pdata;
struct usb_device *udev = interface_to_usbdev(iface);
struct usb_endpoint_descriptor *endpoint;
int int_in_endpointAddr = 0;
int retval, brightness;
char bl_name[20];
/* set up the endpoint information */
/* use only the first interrupt-in endpoint */
retval = usb_find_int_in_endpoint(iface->cur_altsetting, &endpoint);
if (retval) {
dev_err(&iface->dev, "Could not find int-in endpoint\n");
return retval;
}
int_in_endpointAddr = endpoint->bEndpointAddress;
/* allocate memory for our device state and initialize it */
pdata = kzalloc(sizeof(struct appledisplay), GFP_KERNEL);
if (!pdata) {
retval = -ENOMEM;
goto error;
}
pdata->udev = udev;
INIT_DELAYED_WORK(&pdata->work, appledisplay_work);
mutex_init(&pdata->sysfslock);
/* Allocate buffer for control messages */
pdata->msgdata = kmalloc(ACD_MSG_BUFFER_LEN, GFP_KERNEL);
if (!pdata->msgdata) {
retval = -ENOMEM;
goto error;
}
/* Allocate interrupt URB */
pdata->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!pdata->urb) {
retval = -ENOMEM;
goto error;
}
/* Allocate buffer for interrupt data */
pdata->urbdata = usb_alloc_coherent(pdata->udev, ACD_URB_BUFFER_LEN,
GFP_KERNEL, &pdata->urb->transfer_dma);
if (!pdata->urbdata) {
retval = -ENOMEM;
dev_err(&iface->dev, "Allocating URB buffer failed\n");
goto error;
}
/* Configure interrupt URB */
usb_fill_int_urb(pdata->urb, udev,
usb_rcvintpipe(udev, int_in_endpointAddr),
pdata->urbdata, ACD_URB_BUFFER_LEN, appledisplay_complete,
pdata, 1);
pdata->urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
if (usb_submit_urb(pdata->urb, GFP_KERNEL)) {
retval = -EIO;
dev_err(&iface->dev, "Submitting URB failed\n");
goto error;
}
/* Register backlight device */
snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
atomic_inc_return(&count_displays) - 1);
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = 0xff;
pdata->bd = backlight_device_register(bl_name, NULL, pdata,
&appledisplay_bl_data, &props);
if (IS_ERR(pdata->bd)) {
dev_err(&iface->dev, "Backlight registration failed\n");
retval = PTR_ERR(pdata->bd);
goto error;
}
/* Try to get brightness */
brightness = appledisplay_bl_get_brightness(pdata->bd);
if (brightness < 0) {
retval = brightness;
dev_err(&iface->dev,
"Error while getting initial brightness: %d\n", retval);
goto error;
}
/* Set brightness in backlight device */
pdata->bd->props.brightness = brightness;
/* save our data pointer in the interface device */
usb_set_intfdata(iface, pdata);
printk(KERN_INFO "appledisplay: Apple Cinema Display connected\n");
return 0;
error:
if (pdata) {
if (pdata->urb) {
usb_kill_urb(pdata->urb);
cancel_delayed_work_sync(&pdata->work);
usb_free_coherent(pdata->udev, ACD_URB_BUFFER_LEN,
pdata->urbdata, pdata->urb->transfer_dma);
usb_free_urb(pdata->urb);
}
if (!IS_ERR(pdata->bd))
backlight_device_unregister(pdata->bd);
kfree(pdata->msgdata);
}
usb_set_intfdata(iface, NULL);
kfree(pdata);
return retval;
}
static void appledisplay_disconnect(struct usb_interface *iface)
{
struct appledisplay *pdata = usb_get_intfdata(iface);
if (pdata) {
usb_kill_urb(pdata->urb);
cancel_delayed_work_sync(&pdata->work);
backlight_device_unregister(pdata->bd);
usb_free_coherent(pdata->udev, ACD_URB_BUFFER_LEN,
pdata->urbdata, pdata->urb->transfer_dma);
usb_free_urb(pdata->urb);
kfree(pdata->msgdata);
kfree(pdata);
}
printk(KERN_INFO "appledisplay: Apple Cinema Display disconnected\n");
}
static struct usb_driver appledisplay_driver = {
.name = "appledisplay",
.probe = appledisplay_probe,
.disconnect = appledisplay_disconnect,
.id_table = appledisplay_table,
};
module_usb_driver(appledisplay_driver);
MODULE_AUTHOR("Michael Hanselmann");
MODULE_DESCRIPTION("Apple Cinema Display driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/misc/appledisplay.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* LEGO USB Tower driver
*
* Copyright (C) 2003 David Glance <[email protected]>
* 2001-2004 Juergen Stuber <[email protected]>
*
* derived from USB Skeleton driver - 0.5
* Copyright (C) 2001 Greg Kroah-Hartman ([email protected])
*
* History:
*
* 2001-10-13 - 0.1 js
* - first version
* 2001-11-03 - 0.2 js
* - simplified buffering, one-shot URBs for writing
* 2001-11-10 - 0.3 js
* - removed IOCTL (setting power/mode is more complicated, postponed)
* 2001-11-28 - 0.4 js
* - added vendor commands for mode of operation and power level in open
* 2001-12-04 - 0.5 js
* - set IR mode by default (by oversight 0.4 set VLL mode)
* 2002-01-11 - 0.5? pcchan
* - make read buffer reusable and work around bytes_to_write issue between
* uhci and legusbtower
* 2002-09-23 - 0.52 david ([email protected])
* - imported into lejos project
* - changed wake_up to wake_up_interruptible
* - changed to use lego0 rather than tower0
* - changed dbg() to use __func__ rather than deprecated __func__
* 2003-01-12 - 0.53 david ([email protected])
* - changed read and write to write everything or
* timeout (from a patch by Chris Riesen and Brett Thaeler driver)
* - added ioctl functionality to set timeouts
* 2003-07-18 - 0.54 davidgsf ([email protected])
* - initial import into LegoUSB project
* - merge of existing LegoUSB.c driver
* 2003-07-18 - 0.56 davidgsf ([email protected])
* - port to 2.6 style driver
* 2004-02-29 - 0.6 Juergen Stuber <[email protected]>
* - fix locking
* - unlink read URBs which are no longer needed
* - allow increased buffer size, eliminates need for timeout on write
* - have read URB running continuously
* - added poll
* - forbid seeking
* - added nonblocking I/O
* - changed back __func__ to __func__
* - read and log tower firmware version
* - reset tower on probe, avoids failure of first write
* 2004-03-09 - 0.7 Juergen Stuber <[email protected]>
* - timeout read now only after inactivity, shorten default accordingly
* 2004-03-11 - 0.8 Juergen Stuber <[email protected]>
* - log major, minor instead of possibly confusing device filename
* - whitespace cleanup
* 2004-03-12 - 0.9 Juergen Stuber <[email protected]>
* - normalize whitespace in debug messages
* - take care about endianness in control message responses
* 2004-03-13 - 0.91 Juergen Stuber <[email protected]>
* - make default intervals longer to accommodate current EHCI driver
* 2004-03-19 - 0.92 Juergen Stuber <[email protected]>
* - replaced atomic_t by memory barriers
* 2004-04-21 - 0.93 Juergen Stuber <[email protected]>
* - wait for completion of write urb in release (needed for remotecontrol)
* - corrected poll for write direction (missing negation)
* 2004-04-22 - 0.94 Juergen Stuber <[email protected]>
* - make device locking interruptible
* 2004-04-30 - 0.95 Juergen Stuber <[email protected]>
* - check for valid udev on resubmitting and unlinking urbs
* 2004-08-03 - 0.96 Juergen Stuber <[email protected]>
* - move reset into open to clean out spurious data
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/poll.h>
#define DRIVER_AUTHOR "Juergen Stuber <[email protected]>"
#define DRIVER_DESC "LEGO USB Tower Driver"
/* The defaults are chosen to work with the latest versions of leJOS and NQC.
*/
/* Some legacy software likes to receive packets in one piece.
* In this case read_buffer_size should exceed the maximal packet length
* (417 for datalog uploads), and packet_timeout should be set.
*/
static int read_buffer_size = 480;
module_param(read_buffer_size, int, 0);
MODULE_PARM_DESC(read_buffer_size, "Read buffer size");
/* Some legacy software likes to send packets in one piece.
* In this case write_buffer_size should exceed the maximal packet length
* (417 for firmware and program downloads).
* A problem with long writes is that the following read may time out
* if the software is not prepared to wait long enough.
*/
static int write_buffer_size = 480;
module_param(write_buffer_size, int, 0);
MODULE_PARM_DESC(write_buffer_size, "Write buffer size");
/* Some legacy software expects reads to contain whole LASM packets.
* To achieve this, characters which arrive before a packet timeout
* occurs will be returned in a single read operation.
* A problem with long reads is that the software may time out
* if it is not prepared to wait long enough.
* The packet timeout should be greater than the time between the
* reception of subsequent characters, which should arrive about
* every 5ms for the standard 2400 baud.
* Set it to 0 to disable.
*/
static int packet_timeout = 50;
module_param(packet_timeout, int, 0);
MODULE_PARM_DESC(packet_timeout, "Packet timeout in ms");
/* Some legacy software expects blocking reads to time out.
* Timeout occurs after the specified time of read and write inactivity.
* Set it to 0 to disable.
*/
static int read_timeout = 200;
module_param(read_timeout, int, 0);
MODULE_PARM_DESC(read_timeout, "Read timeout in ms");
/* As of kernel version 2.6.4 ehci-hcd uses an
* "only one interrupt transfer per frame" shortcut
* to simplify the scheduling of periodic transfers.
* This conflicts with our standard 1ms intervals for in and out URBs.
* We use default intervals of 2ms for in and 8ms for out transfers,
* which is fast enough for 2400 baud and allows a small additional load.
* Increase the interval to allow more devices that do interrupt transfers,
* or set to 0 to use the standard interval from the endpoint descriptors.
*/
static int interrupt_in_interval = 2;
module_param(interrupt_in_interval, int, 0);
MODULE_PARM_DESC(interrupt_in_interval, "Interrupt in interval in ms");
static int interrupt_out_interval = 8;
module_param(interrupt_out_interval, int, 0);
MODULE_PARM_DESC(interrupt_out_interval, "Interrupt out interval in ms");
/* Define these values to match your device */
#define LEGO_USB_TOWER_VENDOR_ID 0x0694
#define LEGO_USB_TOWER_PRODUCT_ID 0x0001
/* Vendor requests */
#define LEGO_USB_TOWER_REQUEST_RESET 0x04
#define LEGO_USB_TOWER_REQUEST_GET_VERSION 0xFD
struct tower_reset_reply {
__le16 size;
__u8 err_code;
__u8 spare;
};
struct tower_get_version_reply {
__le16 size;
__u8 err_code;
__u8 spare;
__u8 major;
__u8 minor;
__le16 build_no;
};
/* table of devices that work with this driver */
static const struct usb_device_id tower_table[] = {
{ USB_DEVICE(LEGO_USB_TOWER_VENDOR_ID, LEGO_USB_TOWER_PRODUCT_ID) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, tower_table);
#define LEGO_USB_TOWER_MINOR_BASE 160
/* Structure to hold all of our device specific stuff */
struct lego_usb_tower {
struct mutex lock; /* locks this structure */
struct usb_device *udev; /* save off the usb device pointer */
unsigned char minor; /* the starting minor number for this device */
int open_count; /* number of times this port has been opened */
unsigned long disconnected:1;
char *read_buffer;
size_t read_buffer_length; /* this much came in */
size_t read_packet_length; /* this much will be returned on read */
spinlock_t read_buffer_lock;
int packet_timeout_jiffies;
unsigned long read_last_arrival;
wait_queue_head_t read_wait;
wait_queue_head_t write_wait;
char *interrupt_in_buffer;
struct usb_endpoint_descriptor *interrupt_in_endpoint;
struct urb *interrupt_in_urb;
int interrupt_in_interval;
int interrupt_in_done;
char *interrupt_out_buffer;
struct usb_endpoint_descriptor *interrupt_out_endpoint;
struct urb *interrupt_out_urb;
int interrupt_out_interval;
int interrupt_out_busy;
};
/* local function prototypes */
static ssize_t tower_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos);
static ssize_t tower_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos);
static inline void tower_delete(struct lego_usb_tower *dev);
static int tower_open(struct inode *inode, struct file *file);
static int tower_release(struct inode *inode, struct file *file);
static __poll_t tower_poll(struct file *file, poll_table *wait);
static loff_t tower_llseek(struct file *file, loff_t off, int whence);
static void tower_check_for_read_packet(struct lego_usb_tower *dev);
static void tower_interrupt_in_callback(struct urb *urb);
static void tower_interrupt_out_callback(struct urb *urb);
static int tower_probe(struct usb_interface *interface, const struct usb_device_id *id);
static void tower_disconnect(struct usb_interface *interface);
/* file operations needed when we register this driver */
static const struct file_operations tower_fops = {
.owner = THIS_MODULE,
.read = tower_read,
.write = tower_write,
.open = tower_open,
.release = tower_release,
.poll = tower_poll,
.llseek = tower_llseek,
};
static char *legousbtower_devnode(const struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
}
/*
* usb class driver info in order to get a minor number from the usb core,
* and to have the device registered with the driver core
*/
static struct usb_class_driver tower_class = {
.name = "legousbtower%d",
.devnode = legousbtower_devnode,
.fops = &tower_fops,
.minor_base = LEGO_USB_TOWER_MINOR_BASE,
};
/* usb specific object needed to register this driver with the usb subsystem */
static struct usb_driver tower_driver = {
.name = "legousbtower",
.probe = tower_probe,
.disconnect = tower_disconnect,
.id_table = tower_table,
};
/*
* lego_usb_tower_debug_data
*/
static inline void lego_usb_tower_debug_data(struct device *dev,
const char *function, int size,
const unsigned char *data)
{
dev_dbg(dev, "%s - length = %d, data = %*ph\n",
function, size, size, data);
}
/*
* tower_delete
*/
static inline void tower_delete(struct lego_usb_tower *dev)
{
/* free data structures */
usb_free_urb(dev->interrupt_in_urb);
usb_free_urb(dev->interrupt_out_urb);
kfree(dev->read_buffer);
kfree(dev->interrupt_in_buffer);
kfree(dev->interrupt_out_buffer);
usb_put_dev(dev->udev);
kfree(dev);
}
/*
* tower_open
*/
static int tower_open(struct inode *inode, struct file *file)
{
struct lego_usb_tower *dev = NULL;
int subminor;
int retval = 0;
struct usb_interface *interface;
struct tower_reset_reply reset_reply;
int result;
nonseekable_open(inode, file);
subminor = iminor(inode);
interface = usb_find_interface(&tower_driver, subminor);
if (!interface) {
pr_err("error, can't find device for minor %d\n", subminor);
retval = -ENODEV;
goto exit;
}
dev = usb_get_intfdata(interface);
if (!dev) {
retval = -ENODEV;
goto exit;
}
/* lock this device */
if (mutex_lock_interruptible(&dev->lock)) {
retval = -ERESTARTSYS;
goto exit;
}
/* allow opening only once */
if (dev->open_count) {
retval = -EBUSY;
goto unlock_exit;
}
/* reset the tower */
result = usb_control_msg_recv(dev->udev, 0,
LEGO_USB_TOWER_REQUEST_RESET,
USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
0, 0,
&reset_reply, sizeof(reset_reply), 1000,
GFP_KERNEL);
if (result < 0) {
dev_err(&dev->udev->dev,
"LEGO USB Tower reset control request failed\n");
retval = result;
goto unlock_exit;
}
/* initialize in direction */
dev->read_buffer_length = 0;
dev->read_packet_length = 0;
usb_fill_int_urb(dev->interrupt_in_urb,
dev->udev,
usb_rcvintpipe(dev->udev, dev->interrupt_in_endpoint->bEndpointAddress),
dev->interrupt_in_buffer,
usb_endpoint_maxp(dev->interrupt_in_endpoint),
tower_interrupt_in_callback,
dev,
dev->interrupt_in_interval);
dev->interrupt_in_done = 0;
mb();
retval = usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL);
if (retval) {
dev_err(&dev->udev->dev,
"Couldn't submit interrupt_in_urb %d\n", retval);
goto unlock_exit;
}
/* save device in the file's private structure */
file->private_data = dev;
dev->open_count = 1;
unlock_exit:
mutex_unlock(&dev->lock);
exit:
return retval;
}
/*
* tower_release
*/
static int tower_release(struct inode *inode, struct file *file)
{
struct lego_usb_tower *dev;
int retval = 0;
dev = file->private_data;
if (dev == NULL) {
retval = -ENODEV;
goto exit;
}
mutex_lock(&dev->lock);
if (dev->disconnected) {
/* the device was unplugged before the file was released */
/* unlock here as tower_delete frees dev */
mutex_unlock(&dev->lock);
tower_delete(dev);
goto exit;
}
/* wait until write transfer is finished */
if (dev->interrupt_out_busy) {
wait_event_interruptible_timeout(dev->write_wait, !dev->interrupt_out_busy,
2 * HZ);
}
/* shutdown transfers */
usb_kill_urb(dev->interrupt_in_urb);
usb_kill_urb(dev->interrupt_out_urb);
dev->open_count = 0;
mutex_unlock(&dev->lock);
exit:
return retval;
}
/*
* tower_check_for_read_packet
*
* To get correct semantics for signals and non-blocking I/O
* with packetizing we pretend not to see any data in the read buffer
* until it has been there unchanged for at least
* dev->packet_timeout_jiffies, or until the buffer is full.
*/
static void tower_check_for_read_packet(struct lego_usb_tower *dev)
{
spin_lock_irq(&dev->read_buffer_lock);
if (!packet_timeout
|| time_after(jiffies, dev->read_last_arrival + dev->packet_timeout_jiffies)
|| dev->read_buffer_length == read_buffer_size) {
dev->read_packet_length = dev->read_buffer_length;
}
dev->interrupt_in_done = 0;
spin_unlock_irq(&dev->read_buffer_lock);
}
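/*
* Example of the packetizing effect, assuming the default
* packet_timeout = 50 ms: bytes of a LASM reply arriving every ~5 ms keep
* refreshing read_last_arrival and stay hidden from readers; only once
* 50 ms pass with no new byte (or the buffer fills) does
* read_packet_length advance, so a blocked read() returns the reply in
* one piece.
*/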
/*
* tower_poll
*/
static __poll_t tower_poll(struct file *file, poll_table *wait)
{
struct lego_usb_tower *dev;
__poll_t mask = 0;
dev = file->private_data;
if (dev->disconnected)
return EPOLLERR | EPOLLHUP;
poll_wait(file, &dev->read_wait, wait);
poll_wait(file, &dev->write_wait, wait);
tower_check_for_read_packet(dev);
if (dev->read_packet_length > 0)
mask |= EPOLLIN | EPOLLRDNORM;
if (!dev->interrupt_out_busy)
mask |= EPOLLOUT | EPOLLWRNORM;
return mask;
}
/*
* tower_llseek
*/
static loff_t tower_llseek(struct file *file, loff_t off, int whence)
{
return -ESPIPE; /* unseekable */
}
/*
* tower_read
*/
static ssize_t tower_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
struct lego_usb_tower *dev;
size_t bytes_to_read;
int i;
int retval = 0;
unsigned long timeout = 0;
dev = file->private_data;
/* lock this object */
if (mutex_lock_interruptible(&dev->lock)) {
retval = -ERESTARTSYS;
goto exit;
}
/* verify that the device wasn't unplugged */
if (dev->disconnected) {
retval = -ENODEV;
goto unlock_exit;
}
/* verify that we actually have some data to read */
if (count == 0) {
dev_dbg(&dev->udev->dev, "read request of 0 bytes\n");
goto unlock_exit;
}
if (read_timeout)
timeout = jiffies + msecs_to_jiffies(read_timeout);
/* wait for data */
tower_check_for_read_packet(dev);
while (dev->read_packet_length == 0) {
if (file->f_flags & O_NONBLOCK) {
retval = -EAGAIN;
goto unlock_exit;
}
		retval = wait_event_interruptible_timeout(dev->read_wait,
				dev->interrupt_in_done, dev->packet_timeout_jiffies);
if (retval < 0)
goto unlock_exit;
/* reset read timeout during read or write activity */
if (read_timeout
&& (dev->read_buffer_length || dev->interrupt_out_busy)) {
timeout = jiffies + msecs_to_jiffies(read_timeout);
}
/* check for read timeout */
if (read_timeout && time_after(jiffies, timeout)) {
retval = -ETIMEDOUT;
goto unlock_exit;
}
tower_check_for_read_packet(dev);
}
/* copy the data from read_buffer into userspace */
bytes_to_read = min(count, dev->read_packet_length);
if (copy_to_user(buffer, dev->read_buffer, bytes_to_read)) {
retval = -EFAULT;
goto unlock_exit;
}
spin_lock_irq(&dev->read_buffer_lock);
dev->read_buffer_length -= bytes_to_read;
dev->read_packet_length -= bytes_to_read;
for (i = 0; i < dev->read_buffer_length; i++)
dev->read_buffer[i] = dev->read_buffer[i+bytes_to_read];
spin_unlock_irq(&dev->read_buffer_lock);
retval = bytes_to_read;
unlock_exit:
/* unlock the device */
mutex_unlock(&dev->lock);
exit:
return retval;
}
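/*
 * Example of the partial-read handling above: with an 8-byte packet
 * buffered, read(fd, buf, 3) copies three bytes out and shifts the
 * remaining five to the front of read_buffer, so the next read()
 * continues with the same packet.
 */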
/*
* tower_write
*/
static ssize_t tower_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
struct lego_usb_tower *dev;
size_t bytes_to_write;
int retval = 0;
dev = file->private_data;
/* lock this object */
if (mutex_lock_interruptible(&dev->lock)) {
retval = -ERESTARTSYS;
goto exit;
}
/* verify that the device wasn't unplugged */
if (dev->disconnected) {
retval = -ENODEV;
goto unlock_exit;
}
/* verify that we actually have some data to write */
if (count == 0) {
dev_dbg(&dev->udev->dev, "write request of 0 bytes\n");
goto unlock_exit;
}
/* wait until previous transfer is finished */
while (dev->interrupt_out_busy) {
if (file->f_flags & O_NONBLOCK) {
retval = -EAGAIN;
goto unlock_exit;
}
retval = wait_event_interruptible(dev->write_wait,
!dev->interrupt_out_busy);
if (retval)
goto unlock_exit;
}
/* write the data into interrupt_out_buffer from userspace */
bytes_to_write = min_t(int, count, write_buffer_size);
dev_dbg(&dev->udev->dev, "%s: count = %zd, bytes_to_write = %zd\n",
__func__, count, bytes_to_write);
if (copy_from_user(dev->interrupt_out_buffer, buffer, bytes_to_write)) {
retval = -EFAULT;
goto unlock_exit;
}
/* send off the urb */
usb_fill_int_urb(dev->interrupt_out_urb,
dev->udev,
usb_sndintpipe(dev->udev, dev->interrupt_out_endpoint->bEndpointAddress),
dev->interrupt_out_buffer,
bytes_to_write,
tower_interrupt_out_callback,
dev,
dev->interrupt_out_interval);
dev->interrupt_out_busy = 1;
wmb();
retval = usb_submit_urb(dev->interrupt_out_urb, GFP_KERNEL);
if (retval) {
dev->interrupt_out_busy = 0;
dev_err(&dev->udev->dev,
"Couldn't submit interrupt_out_urb %d\n", retval);
goto unlock_exit;
}
retval = bytes_to_write;
unlock_exit:
/* unlock the device */
mutex_unlock(&dev->lock);
exit:
return retval;
}
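/*
 * Example of the write-size clamping above: a write() larger than
 * write_buffer_size submits only write_buffer_size bytes and returns
 * that count, so the caller is expected to retry with the remainder
 * (standard short-write semantics).
 */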
/*
* tower_interrupt_in_callback
*/
static void tower_interrupt_in_callback(struct urb *urb)
{
struct lego_usb_tower *dev = urb->context;
int status = urb->status;
int retval;
unsigned long flags;
lego_usb_tower_debug_data(&dev->udev->dev, __func__,
urb->actual_length, urb->transfer_buffer);
if (status) {
if (status == -ENOENT ||
status == -ECONNRESET ||
status == -ESHUTDOWN) {
goto exit;
} else {
dev_dbg(&dev->udev->dev,
"%s: nonzero status received: %d\n", __func__,
status);
goto resubmit; /* maybe we can recover */
}
}
if (urb->actual_length > 0) {
spin_lock_irqsave(&dev->read_buffer_lock, flags);
if (dev->read_buffer_length + urb->actual_length < read_buffer_size) {
memcpy(dev->read_buffer + dev->read_buffer_length,
dev->interrupt_in_buffer,
urb->actual_length);
dev->read_buffer_length += urb->actual_length;
dev->read_last_arrival = jiffies;
dev_dbg(&dev->udev->dev, "%s: received %d bytes\n",
__func__, urb->actual_length);
} else {
pr_warn("read_buffer overflow, %d bytes dropped\n",
urb->actual_length);
}
spin_unlock_irqrestore(&dev->read_buffer_lock, flags);
}
resubmit:
retval = usb_submit_urb(dev->interrupt_in_urb, GFP_ATOMIC);
if (retval) {
dev_err(&dev->udev->dev, "%s: usb_submit_urb failed (%d)\n",
__func__, retval);
}
exit:
dev->interrupt_in_done = 1;
wake_up_interruptible(&dev->read_wait);
}
/*
* tower_interrupt_out_callback
*/
static void tower_interrupt_out_callback(struct urb *urb)
{
struct lego_usb_tower *dev = urb->context;
int status = urb->status;
lego_usb_tower_debug_data(&dev->udev->dev, __func__,
urb->actual_length, urb->transfer_buffer);
/* sync/async unlink faults aren't errors */
if (status && !(status == -ENOENT ||
status == -ECONNRESET ||
status == -ESHUTDOWN)) {
dev_dbg(&dev->udev->dev,
"%s: nonzero write bulk status received: %d\n", __func__,
status);
}
dev->interrupt_out_busy = 0;
wake_up_interruptible(&dev->write_wait);
}
/*
* tower_probe
*
* Called by the usb core when a new device is connected that it thinks
* this driver might be interested in.
*/
static int tower_probe(struct usb_interface *interface, const struct usb_device_id *id)
{
struct device *idev = &interface->dev;
struct usb_device *udev = interface_to_usbdev(interface);
struct lego_usb_tower *dev;
struct tower_get_version_reply get_version_reply;
int retval = -ENOMEM;
int result;
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
goto exit;
mutex_init(&dev->lock);
dev->udev = usb_get_dev(udev);
spin_lock_init(&dev->read_buffer_lock);
dev->packet_timeout_jiffies = msecs_to_jiffies(packet_timeout);
dev->read_last_arrival = jiffies;
init_waitqueue_head(&dev->read_wait);
init_waitqueue_head(&dev->write_wait);
result = usb_find_common_endpoints_reverse(interface->cur_altsetting,
NULL, NULL,
&dev->interrupt_in_endpoint,
&dev->interrupt_out_endpoint);
if (result) {
dev_err(idev, "interrupt endpoints not found\n");
retval = result;
goto error;
}
dev->read_buffer = kmalloc(read_buffer_size, GFP_KERNEL);
if (!dev->read_buffer)
goto error;
dev->interrupt_in_buffer = kmalloc(usb_endpoint_maxp(dev->interrupt_in_endpoint), GFP_KERNEL);
if (!dev->interrupt_in_buffer)
goto error;
dev->interrupt_in_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->interrupt_in_urb)
goto error;
dev->interrupt_out_buffer = kmalloc(write_buffer_size, GFP_KERNEL);
if (!dev->interrupt_out_buffer)
goto error;
dev->interrupt_out_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->interrupt_out_urb)
goto error;
dev->interrupt_in_interval = interrupt_in_interval ? interrupt_in_interval : dev->interrupt_in_endpoint->bInterval;
dev->interrupt_out_interval = interrupt_out_interval ? interrupt_out_interval : dev->interrupt_out_endpoint->bInterval;
/* get the firmware version and log it */
result = usb_control_msg_recv(udev, 0,
LEGO_USB_TOWER_REQUEST_GET_VERSION,
USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
0,
0,
&get_version_reply,
sizeof(get_version_reply),
1000, GFP_KERNEL);
if (result) {
dev_err(idev, "get version request failed: %d\n", result);
retval = result;
goto error;
}
dev_info(&interface->dev,
"LEGO USB Tower firmware version is %d.%d build %d\n",
get_version_reply.major,
get_version_reply.minor,
le16_to_cpu(get_version_reply.build_no));
/* we can register the device now, as it is ready */
usb_set_intfdata(interface, dev);
retval = usb_register_dev(interface, &tower_class);
if (retval) {
/* something prevented us from registering this driver */
dev_err(idev, "Not able to get a minor for this device.\n");
goto error;
}
dev->minor = interface->minor;
/* let the user know what node this device is now attached to */
dev_info(&interface->dev, "LEGO USB Tower #%d now attached to major "
"%d minor %d\n", (dev->minor - LEGO_USB_TOWER_MINOR_BASE),
USB_MAJOR, dev->minor);
exit:
return retval;
error:
tower_delete(dev);
return retval;
}
/*
* tower_disconnect
*
* Called by the usb core when the device is removed from the system.
*/
static void tower_disconnect(struct usb_interface *interface)
{
struct lego_usb_tower *dev;
int minor;
dev = usb_get_intfdata(interface);
minor = dev->minor;
/* give back our minor and prevent further open() */
usb_deregister_dev(interface, &tower_class);
/* stop I/O */
usb_poison_urb(dev->interrupt_in_urb);
usb_poison_urb(dev->interrupt_out_urb);
mutex_lock(&dev->lock);
/* if the device is not opened, then we clean up right now */
if (!dev->open_count) {
mutex_unlock(&dev->lock);
tower_delete(dev);
} else {
dev->disconnected = 1;
/* wake up pollers */
wake_up_interruptible_all(&dev->read_wait);
wake_up_interruptible_all(&dev->write_wait);
mutex_unlock(&dev->lock);
}
dev_info(&interface->dev, "LEGO USB Tower #%d now disconnected\n",
(minor - LEGO_USB_TOWER_MINOR_BASE));
}
module_usb_driver(tower_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/misc/legousbtower.c |
// SPDX-License-Identifier: GPL-2.0+
/* Siemens ID Mouse driver v0.6
Copyright (C) 2004-5 by Florian 'Floe' Echtler <[email protected]>
and Andreas 'ad' Deresch <[email protected]>
Derived from the USB Skeleton driver 1.1,
Copyright (C) 2003 Greg Kroah-Hartman ([email protected])
Additional information provided by Martin Reising
<[email protected]>
*/
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
/* image constants */
#define WIDTH 225
#define HEIGHT 289
#define HEADER "P5 225 289 255 "
#define IMGSIZE ((WIDTH * HEIGHT) + sizeof(HEADER)-1)
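/*
 * Worked example of the buffer layout: the image is a raw PGM file, so
 * the 15-byte "P5 225 289 255 " header is followed by WIDTH * HEIGHT
 * greyscale bytes, giving IMGSIZE = 15 + 225 * 289 = 65040 bytes in
 * total (matching the debug message in idmouse_create_image() below).
 */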
#define DRIVER_SHORT "idmouse"
#define DRIVER_AUTHOR "Florian 'Floe' Echtler <[email protected]>"
#define DRIVER_DESC "Siemens ID Mouse FingerTIP Sensor Driver"
/* minor number for misc USB devices */
#define USB_IDMOUSE_MINOR_BASE 132
/* vendor and device IDs */
#define ID_SIEMENS 0x0681
#define ID_IDMOUSE 0x0005
#define ID_CHERRY 0x0010
/* device ID table */
static const struct usb_device_id idmouse_table[] = {
{USB_DEVICE(ID_SIEMENS, ID_IDMOUSE)}, /* Siemens ID Mouse (Professional) */
	{USB_DEVICE(ID_SIEMENS, ID_CHERRY)}, /* Cherry FingerTIP ID Board */
{} /* terminating null entry */
};
/* sensor commands */
#define FTIP_RESET 0x20
#define FTIP_ACQUIRE 0x21
#define FTIP_RELEASE 0x22
#define FTIP_BLINK 0x23 /* LSB of value = blink pulse width */
#define FTIP_SCROLL 0x24
#define ftip_command(dev, command, value, index) \
usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), command, \
USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT, value, index, NULL, 0, 1000)
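/*
 * Example of how the macro expands (illustrative only):
 * ftip_command(dev, FTIP_BLINK, 1, 0) issues a vendor control request
 * with bRequest = 0x23, wValue = 1 (the blink pulse width), wIndex = 0
 * and no data stage, as used in idmouse_create_image() below.
 */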
MODULE_DEVICE_TABLE(usb, idmouse_table);
/* structure to hold all of our device specific stuff */
struct usb_idmouse {
struct usb_device *udev; /* save off the usb device pointer */
struct usb_interface *interface; /* the interface for this device */
unsigned char *bulk_in_buffer; /* the buffer to receive data */
size_t bulk_in_size; /* the maximum bulk packet size */
size_t orig_bi_size; /* same as above, but reported by the device */
__u8 bulk_in_endpointAddr; /* the address of the bulk in endpoint */
int open; /* if the port is open or not */
int present; /* if the device is not disconnected */
struct mutex lock; /* locks this structure */
};
/* local function prototypes */
static ssize_t idmouse_read(struct file *file, char __user *buffer,
size_t count, loff_t * ppos);
static int idmouse_open(struct inode *inode, struct file *file);
static int idmouse_release(struct inode *inode, struct file *file);
static int idmouse_probe(struct usb_interface *interface,
const struct usb_device_id *id);
static void idmouse_disconnect(struct usb_interface *interface);
static int idmouse_suspend(struct usb_interface *intf, pm_message_t message);
static int idmouse_resume(struct usb_interface *intf);
/* file operation pointers */
static const struct file_operations idmouse_fops = {
.owner = THIS_MODULE,
.read = idmouse_read,
.open = idmouse_open,
.release = idmouse_release,
.llseek = default_llseek,
};
/* class driver information */
static struct usb_class_driver idmouse_class = {
.name = "idmouse%d",
.fops = &idmouse_fops,
.minor_base = USB_IDMOUSE_MINOR_BASE,
};
/* usb specific object needed to register this driver with the usb subsystem */
static struct usb_driver idmouse_driver = {
.name = DRIVER_SHORT,
.probe = idmouse_probe,
.disconnect = idmouse_disconnect,
.suspend = idmouse_suspend,
.resume = idmouse_resume,
.reset_resume = idmouse_resume,
.id_table = idmouse_table,
.supports_autosuspend = 1,
};
static int idmouse_create_image(struct usb_idmouse *dev)
{
int bytes_read;
int bulk_read;
int result;
memcpy(dev->bulk_in_buffer, HEADER, sizeof(HEADER)-1);
bytes_read = sizeof(HEADER)-1;
/* reset the device and set a fast blink rate */
result = ftip_command(dev, FTIP_RELEASE, 0, 0);
if (result < 0)
goto reset;
result = ftip_command(dev, FTIP_BLINK, 1, 0);
if (result < 0)
goto reset;
/* initialize the sensor - sending this command twice */
/* significantly reduces the rate of failed reads */
result = ftip_command(dev, FTIP_ACQUIRE, 0, 0);
if (result < 0)
goto reset;
result = ftip_command(dev, FTIP_ACQUIRE, 0, 0);
if (result < 0)
goto reset;
/* start the readout - sending this command twice */
/* presumably enables the high dynamic range mode */
result = ftip_command(dev, FTIP_RESET, 0, 0);
if (result < 0)
goto reset;
result = ftip_command(dev, FTIP_RESET, 0, 0);
if (result < 0)
goto reset;
/* loop over a blocking bulk read to get data from the device */
while (bytes_read < IMGSIZE) {
result = usb_bulk_msg(dev->udev,
usb_rcvbulkpipe(dev->udev, dev->bulk_in_endpointAddr),
dev->bulk_in_buffer + bytes_read,
dev->bulk_in_size, &bulk_read, 5000);
if (result < 0) {
/* Maybe this error was caused by the increased packet size? */
/* Reset to the original value and tell userspace to retry. */
if (dev->bulk_in_size != dev->orig_bi_size) {
dev->bulk_in_size = dev->orig_bi_size;
result = -EAGAIN;
}
break;
}
if (signal_pending(current)) {
result = -EINTR;
break;
}
bytes_read += bulk_read;
}
/* check for valid image */
/* right border should be black (0x00) */
for (bytes_read = sizeof(HEADER)-1 + WIDTH-1; bytes_read < IMGSIZE; bytes_read += WIDTH)
if (dev->bulk_in_buffer[bytes_read] != 0x00)
return -EAGAIN;
/* lower border should be white (0xFF) */
for (bytes_read = IMGSIZE-WIDTH; bytes_read < IMGSIZE-1; bytes_read++)
if (dev->bulk_in_buffer[bytes_read] != 0xFF)
return -EAGAIN;
/* reset the device */
reset:
ftip_command(dev, FTIP_RELEASE, 0, 0);
/* should be IMGSIZE == 65040 */
dev_dbg(&dev->interface->dev, "read %d bytes fingerprint data\n",
bytes_read);
return result;
}
/* PM operations are nops as this driver does IO only during open() */
static int idmouse_suspend(struct usb_interface *intf, pm_message_t message)
{
return 0;
}
static int idmouse_resume(struct usb_interface *intf)
{
return 0;
}
static inline void idmouse_delete(struct usb_idmouse *dev)
{
kfree(dev->bulk_in_buffer);
kfree(dev);
}
static int idmouse_open(struct inode *inode, struct file *file)
{
struct usb_idmouse *dev;
struct usb_interface *interface;
int result;
/* get the interface from minor number and driver information */
interface = usb_find_interface(&idmouse_driver, iminor(inode));
if (!interface)
return -ENODEV;
/* get the device information block from the interface */
dev = usb_get_intfdata(interface);
if (!dev)
return -ENODEV;
/* lock this device */
mutex_lock(&dev->lock);
/* check if already open */
if (dev->open) {
/* already open, so fail */
result = -EBUSY;
} else {
/* create a new image and check for success */
result = usb_autopm_get_interface(interface);
if (result)
goto error;
result = idmouse_create_image(dev);
usb_autopm_put_interface(interface);
if (result)
goto error;
/* increment our usage count for the driver */
++dev->open;
/* save our object in the file's private structure */
file->private_data = dev;
}
error:
/* unlock this device */
mutex_unlock(&dev->lock);
return result;
}
static int idmouse_release(struct inode *inode, struct file *file)
{
struct usb_idmouse *dev;
dev = file->private_data;
if (dev == NULL)
return -ENODEV;
/* lock our device */
mutex_lock(&dev->lock);
--dev->open;
if (!dev->present) {
/* the device was unplugged before the file was released */
mutex_unlock(&dev->lock);
idmouse_delete(dev);
} else {
mutex_unlock(&dev->lock);
}
return 0;
}
static ssize_t idmouse_read(struct file *file, char __user *buffer, size_t count,
loff_t * ppos)
{
struct usb_idmouse *dev = file->private_data;
int result;
/* lock this object */
mutex_lock(&dev->lock);
/* verify that the device wasn't unplugged */
if (!dev->present) {
mutex_unlock(&dev->lock);
return -ENODEV;
}
result = simple_read_from_buffer(buffer, count, ppos,
dev->bulk_in_buffer, IMGSIZE);
/* unlock the device */
mutex_unlock(&dev->lock);
return result;
}
static int idmouse_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(interface);
struct usb_idmouse *dev;
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
int result;
	/* check whether we got the data interface or the HID interface */
iface_desc = interface->cur_altsetting;
if (iface_desc->desc.bInterfaceClass != 0x0A)
return -ENODEV;
if (iface_desc->desc.bNumEndpoints < 1)
return -ENODEV;
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev == NULL)
return -ENOMEM;
mutex_init(&dev->lock);
dev->udev = udev;
dev->interface = interface;
/* set up the endpoint information - use only the first bulk-in endpoint */
result = usb_find_bulk_in_endpoint(iface_desc, &endpoint);
if (result) {
dev_err(&interface->dev, "Unable to find bulk-in endpoint.\n");
idmouse_delete(dev);
return result;
}
dev->orig_bi_size = usb_endpoint_maxp(endpoint);
dev->bulk_in_size = 0x200; /* works _much_ faster */
dev->bulk_in_endpointAddr = endpoint->bEndpointAddress;
dev->bulk_in_buffer = kmalloc(IMGSIZE + dev->bulk_in_size, GFP_KERNEL);
if (!dev->bulk_in_buffer) {
idmouse_delete(dev);
return -ENOMEM;
}
/* allow device read, write and ioctl */
dev->present = 1;
/* we can register the device now, as it is ready */
usb_set_intfdata(interface, dev);
result = usb_register_dev(interface, &idmouse_class);
if (result) {
/* something prevented us from registering this device */
dev_err(&interface->dev, "Unable to allocate minor number.\n");
idmouse_delete(dev);
return result;
}
/* be noisy */
	dev_info(&interface->dev, "%s now attached\n", DRIVER_DESC);
return 0;
}
static void idmouse_disconnect(struct usb_interface *interface)
{
struct usb_idmouse *dev = usb_get_intfdata(interface);
/* give back our minor */
usb_deregister_dev(interface, &idmouse_class);
/* lock the device */
mutex_lock(&dev->lock);
/* prevent device read, write and ioctl */
dev->present = 0;
/* if the device is opened, idmouse_release will clean this up */
if (!dev->open) {
mutex_unlock(&dev->lock);
idmouse_delete(dev);
} else {
/* unlock */
mutex_unlock(&dev->lock);
}
dev_info(&interface->dev, "disconnected\n");
}
module_usb_driver(idmouse_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/misc/idmouse.c |
// SPDX-License-Identifier: GPL-2.0
/*
 * Emagic EMI 6|2m USB audio interface firmware loader.
* Copyright (C) 2002
* Tapio Laxström ([email protected])
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/ihex.h>
/* include firmware (variables) */
/* FIXME: This is a quick and dirty solution! */
#define SPDIF /* for the S/PDIF firmware, keep the next line commented out */
//#undef SPDIF /* for the MIDI firmware, uncomment this line */
#ifdef SPDIF
#define FIRMWARE_FW "emi62/spdif.fw"
#else
#define FIRMWARE_FW "emi62/midi.fw"
#endif
#define EMI62_VENDOR_ID 0x086a /* Emagic Soft- und Hardware GmbH */
#define EMI62_PRODUCT_ID 0x0110 /* EMI 6|2m without firmware */
#define ANCHOR_LOAD_INTERNAL 0xA0 /* Vendor specific request code for Anchor Upload/Download (This one is implemented in the core) */
#define ANCHOR_LOAD_EXTERNAL 0xA3 /* This command is not implemented in the core. Requires firmware */
#define ANCHOR_LOAD_FPGA 0xA5 /* This command is not implemented in the core. Requires firmware. Emagic extension */
#define MAX_INTERNAL_ADDRESS 0x1B3F /* This is the highest internal RAM address for the AN2131Q */
#define CPUCS_REG 0x7F92 /* EZ-USB Control and Status Register. Bit 0 controls 8051 reset */
#define INTERNAL_RAM(address) (address <= MAX_INTERNAL_ADDRESS)
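/*
 * Example: INTERNAL_RAM(0x1000) is true (0x1000 <= 0x1B3F), so that
 * address lies in the AN2131Q's internal RAM, which the EZ-USB core
 * can be asked to write directly; INTERNAL_RAM(0x2000) is false, so
 * that address is in external RAM, reachable only through a request
 * type implemented by firmware (see the ANCHOR_LOAD_* comments above).
 */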
static int emi62_writememory(struct usb_device *dev, int address,
const unsigned char *data, int length,
__u8 bRequest);
static int emi62_set_reset(struct usb_device *dev, unsigned char reset_bit);
static int emi62_load_firmware (struct usb_device *dev);
static int emi62_probe(struct usb_interface *intf, const struct usb_device_id *id);
static void emi62_disconnect(struct usb_interface *intf);
/* thanks to drivers/usb/serial/keyspan_pda.c code */
static int emi62_writememory(struct usb_device *dev, int address,
const unsigned char *data, int length,
__u8 request)
{
int result;
unsigned char *buffer = kmemdup(data, length, GFP_KERNEL);
if (!buffer) {
dev_err(&dev->dev, "kmalloc(%d) failed.\n", length);
return -ENOMEM;
}
	/* Note: usb_control_msg returns a negative value on error, or the
	 * length of the data that was written! */
result = usb_control_msg (dev, usb_sndctrlpipe(dev, 0), request, 0x40, address, 0, buffer, length, 300);
kfree (buffer);
return result;
}
/* thanks to drivers/usb/serial/keyspan_pda.c code */
static int emi62_set_reset (struct usb_device *dev, unsigned char reset_bit)
{
int response;
dev_info(&dev->dev, "%s - %d\n", __func__, reset_bit);
response = emi62_writememory (dev, CPUCS_REG, &reset_bit, 1, 0xa0);
if (response < 0)
dev_err(&dev->dev, "set_reset (%d) failed\n", reset_bit);
return response;
}
#define FW_LOAD_SIZE 1023
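/*
 * Overview of the load sequence below (a summary of the numbered
 * comments in the function body): with the 8051 held in reset, the
 * FPGA loader is copied into internal RAM; the CPU is then released so
 * the loader can stream the FPGA bitstream in chunks of up to
 * FW_LOAD_SIZE bytes; the loader is put in place once more, and the
 * application firmware is finally written, external-RAM records first,
 * then internal-RAM records.
 */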
static int emi62_load_firmware (struct usb_device *dev)
{
const struct firmware *loader_fw = NULL;
const struct firmware *bitstream_fw = NULL;
const struct firmware *firmware_fw = NULL;
const struct ihex_binrec *rec;
int err = -ENOMEM;
int i;
__u32 addr; /* Address to write */
__u8 *buf;
dev_dbg(&dev->dev, "load_firmware\n");
buf = kmalloc(FW_LOAD_SIZE, GFP_KERNEL);
if (!buf)
goto wraperr;
err = request_ihex_firmware(&loader_fw, "emi62/loader.fw", &dev->dev);
if (err)
goto nofw;
err = request_ihex_firmware(&bitstream_fw, "emi62/bitstream.fw",
&dev->dev);
if (err)
goto nofw;
err = request_ihex_firmware(&firmware_fw, FIRMWARE_FW, &dev->dev);
if (err) {
nofw:
goto wraperr;
}
/* Assert reset (stop the CPU in the EMI) */
err = emi62_set_reset(dev,1);
if (err < 0)
goto wraperr;
rec = (const struct ihex_binrec *)loader_fw->data;
/* 1. We need to put the loader for the FPGA into the EZ-USB */
while (rec) {
err = emi62_writememory(dev, be32_to_cpu(rec->addr),
rec->data, be16_to_cpu(rec->len),
ANCHOR_LOAD_INTERNAL);
if (err < 0)
goto wraperr;
rec = ihex_next_binrec(rec);
}
/* De-assert reset (let the CPU run) */
err = emi62_set_reset(dev,0);
if (err < 0)
goto wraperr;
msleep(250); /* let device settle */
/* 2. We upload the FPGA firmware into the EMI
* Note: collect up to 1023 (yes!) bytes and send them with
* a single request. This is _much_ faster! */
rec = (const struct ihex_binrec *)bitstream_fw->data;
do {
i = 0;
addr = be32_to_cpu(rec->addr);
		/* Intel hex records are terminated with a type 0 element */
while (rec && (i + be16_to_cpu(rec->len) < FW_LOAD_SIZE)) {
memcpy(buf + i, rec->data, be16_to_cpu(rec->len));
i += be16_to_cpu(rec->len);
rec = ihex_next_binrec(rec);
}
err = emi62_writememory(dev, addr, buf, i, ANCHOR_LOAD_FPGA);
if (err < 0)
goto wraperr;
} while (rec);
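	/*
	 * Chunking example (hypothetical record sizes): consecutive
	 * records of 512, 400 and 300 bytes are coalesced into one
	 * 912-byte ANCHOR_LOAD_FPGA transfer, since adding the 300-byte
	 * record would push the chunk past FW_LOAD_SIZE; that record
	 * then starts the next chunk at its own address.
	 */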
/* Assert reset (stop the CPU in the EMI) */
err = emi62_set_reset(dev,1);
if (err < 0)
goto wraperr;
/* 3. We need to put the loader for the firmware into the EZ-USB (again...) */
for (rec = (const struct ihex_binrec *)loader_fw->data;
rec; rec = ihex_next_binrec(rec)) {
err = emi62_writememory(dev, be32_to_cpu(rec->addr),
rec->data, be16_to_cpu(rec->len),
ANCHOR_LOAD_INTERNAL);
if (err < 0)
goto wraperr;
}
/* De-assert reset (let the CPU run) */
err = emi62_set_reset(dev,0);
if (err < 0)
goto wraperr;
msleep(250); /* let device settle */
/* 4. We put the part of the firmware that lies in the external RAM into the EZ-USB */
for (rec = (const struct ihex_binrec *)firmware_fw->data;
rec; rec = ihex_next_binrec(rec)) {
if (!INTERNAL_RAM(be32_to_cpu(rec->addr))) {
err = emi62_writememory(dev, be32_to_cpu(rec->addr),
rec->data, be16_to_cpu(rec->len),
ANCHOR_LOAD_EXTERNAL);
if (err < 0)
goto wraperr;
}
}
/* Assert reset (stop the CPU in the EMI) */
err = emi62_set_reset(dev,1);
if (err < 0)
goto wraperr;
for (rec = (const struct ihex_binrec *)firmware_fw->data;
rec; rec = ihex_next_binrec(rec)) {
if (INTERNAL_RAM(be32_to_cpu(rec->addr))) {
err = emi62_writememory(dev, be32_to_cpu(rec->addr),
rec->data, be16_to_cpu(rec->len),
ANCHOR_LOAD_EXTERNAL);
if (err < 0)
goto wraperr;
}
}
/* De-assert reset (let the CPU run) */
err = emi62_set_reset(dev,0);
if (err < 0)
goto wraperr;
msleep(250); /* let device settle */
release_firmware(loader_fw);
release_firmware(bitstream_fw);
release_firmware(firmware_fw);
kfree(buf);
	/* return 1 to fail the driver initialization
	 * and give the real driver a chance to load */
return 1;
wraperr:
if (err < 0)
dev_err(&dev->dev,"%s - error loading firmware: error = %d\n",
__func__, err);
release_firmware(loader_fw);
release_firmware(bitstream_fw);
release_firmware(firmware_fw);
kfree(buf);
dev_err(&dev->dev, "Error\n");
return err;
}
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(EMI62_VENDOR_ID, EMI62_PRODUCT_ID) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE (usb, id_table);
static int emi62_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct usb_device *dev = interface_to_usbdev(intf);
dev_dbg(&intf->dev, "emi62_probe\n");
dev_info(&intf->dev, "%s start\n", __func__);
emi62_load_firmware(dev);
	/* do not return the driver context, let the real audio driver do that */
return -EIO;
}
static void emi62_disconnect(struct usb_interface *intf)
{
}
static struct usb_driver emi62_driver = {
.name = "emi62 - firmware loader",
.probe = emi62_probe,
.disconnect = emi62_disconnect,
.id_table = id_table,
};
module_usb_driver(emi62_driver);
MODULE_AUTHOR("Tapio Laxström");
MODULE_DESCRIPTION("Emagic EMI 6|2m firmware loader.");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("emi62/loader.fw");
MODULE_FIRMWARE("emi62/bitstream.fw");
MODULE_FIRMWARE(FIRMWARE_FW);
/* vi:ai:syntax=c:sw=8:ts=8:tw=80
*/
| linux-master | drivers/usb/misc/emi62.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Emagic EMI 2|6 usb audio interface firmware loader.
* Copyright (C) 2002
* Tapio Laxström ([email protected])
*
* emi26.c,v 1.13 2002/03/08 13:10:26 tapio Exp
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/ihex.h>
#define EMI26_VENDOR_ID 0x086a /* Emagic Soft- und Hardware GmbH */
#define EMI26_PRODUCT_ID 0x0100 /* EMI 2|6 without firmware */
#define EMI26B_PRODUCT_ID 0x0102 /* EMI 2|6 without firmware */
#define ANCHOR_LOAD_INTERNAL 0xA0 /* Vendor specific request code for Anchor Upload/Download (This one is implemented in the core) */
#define ANCHOR_LOAD_EXTERNAL 0xA3 /* This command is not implemented in the core. Requires firmware */
#define ANCHOR_LOAD_FPGA 0xA5 /* This command is not implemented in the core. Requires firmware. Emagic extension */
#define MAX_INTERNAL_ADDRESS 0x1B3F /* This is the highest internal RAM address for the AN2131Q */
#define CPUCS_REG 0x7F92 /* EZ-USB Control and Status Register. Bit 0 controls 8051 reset */
#define INTERNAL_RAM(address) (address <= MAX_INTERNAL_ADDRESS)
static int emi26_writememory( struct usb_device *dev, int address,
const unsigned char *data, int length,
__u8 bRequest);
static int emi26_set_reset(struct usb_device *dev, unsigned char reset_bit);
static int emi26_load_firmware (struct usb_device *dev);
static int emi26_probe(struct usb_interface *intf, const struct usb_device_id *id);
static void emi26_disconnect(struct usb_interface *intf);
/* thanks to drivers/usb/serial/keyspan_pda.c code */
static int emi26_writememory (struct usb_device *dev, int address,
const unsigned char *data, int length,
__u8 request)
{
int result;
unsigned char *buffer = kmemdup(data, length, GFP_KERNEL);
if (!buffer) {
dev_err(&dev->dev, "kmalloc(%d) failed.\n", length);
return -ENOMEM;
}
	/* Note: usb_control_msg returns a negative value on error, or the
	 * length of the data that was written! */
result = usb_control_msg (dev, usb_sndctrlpipe(dev, 0), request, 0x40, address, 0, buffer, length, 300);
kfree (buffer);
return result;
}
/* thanks to drivers/usb/serial/keyspan_pda.c code */
static int emi26_set_reset (struct usb_device *dev, unsigned char reset_bit)
{
int response;
dev_info(&dev->dev, "%s - %d\n", __func__, reset_bit);
/* printk(KERN_DEBUG "%s - %d", __func__, reset_bit); */
response = emi26_writememory (dev, CPUCS_REG, &reset_bit, 1, 0xa0);
if (response < 0) {
dev_err(&dev->dev, "set_reset (%d) failed\n", reset_bit);
}
return response;
}
#define FW_LOAD_SIZE 1023
static int emi26_load_firmware (struct usb_device *dev)
{
const struct firmware *loader_fw = NULL;
const struct firmware *bitstream_fw = NULL;
const struct firmware *firmware_fw = NULL;
const struct ihex_binrec *rec;
int err = -ENOMEM;
int i;
__u32 addr; /* Address to write */
__u8 *buf;
buf = kmalloc(FW_LOAD_SIZE, GFP_KERNEL);
if (!buf)
goto wraperr;
err = request_ihex_firmware(&loader_fw, "emi26/loader.fw", &dev->dev);
if (err)
goto nofw;
err = request_ihex_firmware(&bitstream_fw, "emi26/bitstream.fw",
&dev->dev);
if (err)
goto nofw;
err = request_ihex_firmware(&firmware_fw, "emi26/firmware.fw",
&dev->dev);
if (err) {
nofw:
dev_err(&dev->dev, "%s - request_firmware() failed\n",
__func__);
goto wraperr;
}
/* Assert reset (stop the CPU in the EMI) */
err = emi26_set_reset(dev,1);
if (err < 0)
goto wraperr;
rec = (const struct ihex_binrec *)loader_fw->data;
/* 1. We need to put the loader for the FPGA into the EZ-USB */
while (rec) {
err = emi26_writememory(dev, be32_to_cpu(rec->addr),
rec->data, be16_to_cpu(rec->len),
ANCHOR_LOAD_INTERNAL);
if (err < 0)
goto wraperr;
rec = ihex_next_binrec(rec);
}
/* De-assert reset (let the CPU run) */
err = emi26_set_reset(dev,0);
if (err < 0)
goto wraperr;
msleep(250); /* let device settle */
/* 2. We upload the FPGA firmware into the EMI
* Note: collect up to 1023 (yes!) bytes and send them with
* a single request. This is _much_ faster! */
rec = (const struct ihex_binrec *)bitstream_fw->data;
do {
i = 0;
addr = be32_to_cpu(rec->addr);
		/* Intel hex records are terminated with a type 0 element */
while (rec && (i + be16_to_cpu(rec->len) < FW_LOAD_SIZE)) {
memcpy(buf + i, rec->data, be16_to_cpu(rec->len));
i += be16_to_cpu(rec->len);
rec = ihex_next_binrec(rec);
}
err = emi26_writememory(dev, addr, buf, i, ANCHOR_LOAD_FPGA);
if (err < 0)
goto wraperr;
} while (rec);
/* Assert reset (stop the CPU in the EMI) */
err = emi26_set_reset(dev,1);
if (err < 0)
goto wraperr;
/* 3. We need to put the loader for the firmware into the EZ-USB (again...) */
for (rec = (const struct ihex_binrec *)loader_fw->data;
rec; rec = ihex_next_binrec(rec)) {
err = emi26_writememory(dev, be32_to_cpu(rec->addr),
rec->data, be16_to_cpu(rec->len),
ANCHOR_LOAD_INTERNAL);
if (err < 0)
goto wraperr;
}
msleep(250); /* let device settle */
/* De-assert reset (let the CPU run) */
err = emi26_set_reset(dev,0);
if (err < 0)
goto wraperr;
/* 4. We put the part of the firmware that lies in the external RAM into the EZ-USB */
for (rec = (const struct ihex_binrec *)firmware_fw->data;
rec; rec = ihex_next_binrec(rec)) {
if (!INTERNAL_RAM(be32_to_cpu(rec->addr))) {
err = emi26_writememory(dev, be32_to_cpu(rec->addr),
rec->data, be16_to_cpu(rec->len),
ANCHOR_LOAD_EXTERNAL);
if (err < 0)
goto wraperr;
}
}
/* Assert reset (stop the CPU in the EMI) */
err = emi26_set_reset(dev,1);
if (err < 0)
goto wraperr;
for (rec = (const struct ihex_binrec *)firmware_fw->data;
rec; rec = ihex_next_binrec(rec)) {
if (INTERNAL_RAM(be32_to_cpu(rec->addr))) {
err = emi26_writememory(dev, be32_to_cpu(rec->addr),
rec->data, be16_to_cpu(rec->len),
ANCHOR_LOAD_INTERNAL);
if (err < 0)
goto wraperr;
}
}
/* De-assert reset (let the CPU run) */
err = emi26_set_reset(dev,0);
if (err < 0)
goto wraperr;
msleep(250); /* let device settle */
	/* return 1 to fail the driver initialization
	 * and give the real driver a chance to load */
err = 1;
wraperr:
if (err < 0)
dev_err(&dev->dev,"%s - error loading firmware: error = %d\n",
__func__, err);
release_firmware(loader_fw);
release_firmware(bitstream_fw);
release_firmware(firmware_fw);
kfree(buf);
return err;
}
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(EMI26_VENDOR_ID, EMI26_PRODUCT_ID) },
{ USB_DEVICE(EMI26_VENDOR_ID, EMI26B_PRODUCT_ID) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE (usb, id_table);
static int emi26_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct usb_device *dev = interface_to_usbdev(intf);
dev_info(&intf->dev, "%s start\n", __func__);
emi26_load_firmware(dev);
	/* do not return the driver context, let the real audio driver do that */
return -EIO;
}
static void emi26_disconnect(struct usb_interface *intf)
{
}
static struct usb_driver emi26_driver = {
.name = "emi26 - firmware loader",
.probe = emi26_probe,
.disconnect = emi26_disconnect,
.id_table = id_table,
};
module_usb_driver(emi26_driver);
MODULE_AUTHOR("Tapio Laxström");
MODULE_DESCRIPTION("Emagic EMI 2|6 firmware loader.");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("emi26/loader.fw");
MODULE_FIRMWARE("emi26/bitstream.fw");
MODULE_FIRMWARE("emi26/firmware.fw");
/* vi:ai:syntax=c:sw=8:ts=8:tw=80
*/
| linux-master | drivers/usb/misc/emi26.c |
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* sisusb - usb kernel driver for SiS315(E) based USB2VGA dongles
*
* Main part
*
* Copyright (C) 2005 by Thomas Winischhofer, Vienna, Austria
*
* If distributed as part of the Linux kernel, this code is licensed under the
* terms of the GPL v2.
*
* Otherwise, the following license terms apply:
*
* * Redistribution and use in source and binary forms, with or without
* * modification, are permitted provided that the following conditions
* * are met:
* * 1) Redistributions of source code must retain the above copyright
* * notice, this list of conditions and the following disclaimer.
* * 2) Redistributions in binary form must reproduce the above copyright
* * notice, this list of conditions and the following disclaimer in the
* * documentation and/or other materials provided with the distribution.
* * 3) The name of the author may not be used to endorse or promote products
 * * derived from this software without specific prior written permission.
* *
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESSED OR
* * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Author: Thomas Winischhofer <[email protected]>
*
*/
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kref.h>
#include <linux/usb.h>
#include <linux/vmalloc.h>
#include "sisusb.h"
#define SISUSB_DONTSYNC
/* Forward declarations / clean-up routines */
static struct usb_driver sisusb_driver;
static void sisusb_free_buffers(struct sisusb_usb_data *sisusb)
{
int i;
for (i = 0; i < NUMOBUFS; i++) {
kfree(sisusb->obuf[i]);
sisusb->obuf[i] = NULL;
}
kfree(sisusb->ibuf);
sisusb->ibuf = NULL;
}
static void sisusb_free_urbs(struct sisusb_usb_data *sisusb)
{
int i;
for (i = 0; i < NUMOBUFS; i++) {
usb_free_urb(sisusb->sisurbout[i]);
sisusb->sisurbout[i] = NULL;
}
usb_free_urb(sisusb->sisurbin);
sisusb->sisurbin = NULL;
}
/* Level 0: USB transport layer */
/* 1. out-bulks */
/* out-urb management */
/* Return 1 if all free, 0 otherwise */
static int sisusb_all_free(struct sisusb_usb_data *sisusb)
{
int i;
for (i = 0; i < sisusb->numobufs; i++) {
if (sisusb->urbstatus[i] & SU_URB_BUSY)
return 0;
}
return 1;
}
/* Kill all busy URBs */
static void sisusb_kill_all_busy(struct sisusb_usb_data *sisusb)
{
int i;
if (sisusb_all_free(sisusb))
return;
for (i = 0; i < sisusb->numobufs; i++) {
if (sisusb->urbstatus[i] & SU_URB_BUSY)
usb_kill_urb(sisusb->sisurbout[i]);
}
}
/* Return 1 if ok, 0 if error (not all complete within timeout) */
static int sisusb_wait_all_out_complete(struct sisusb_usb_data *sisusb)
{
int timeout = 5 * HZ, i = 1;
wait_event_timeout(sisusb->wait_q, (i = sisusb_all_free(sisusb)),
timeout);
return i;
}
static int sisusb_outurb_available(struct sisusb_usb_data *sisusb)
{
int i;
for (i = 0; i < sisusb->numobufs; i++) {
if ((sisusb->urbstatus[i] & (SU_URB_BUSY|SU_URB_ALLOC)) == 0)
return i;
}
return -1;
}
static int sisusb_get_free_outbuf(struct sisusb_usb_data *sisusb)
{
int i, timeout = 5 * HZ;
wait_event_timeout(sisusb->wait_q,
((i = sisusb_outurb_available(sisusb)) >= 0), timeout);
return i;
}
static int sisusb_alloc_outbuf(struct sisusb_usb_data *sisusb)
{
int i;
i = sisusb_outurb_available(sisusb);
if (i >= 0)
sisusb->urbstatus[i] |= SU_URB_ALLOC;
return i;
}
static void sisusb_free_outbuf(struct sisusb_usb_data *sisusb, int index)
{
if ((index >= 0) && (index < sisusb->numobufs))
sisusb->urbstatus[index] &= ~SU_URB_ALLOC;
}
/* completion callback */
static void sisusb_bulk_completeout(struct urb *urb)
{
struct sisusb_urb_context *context = urb->context;
struct sisusb_usb_data *sisusb;
if (!context)
return;
sisusb = context->sisusb;
if (!sisusb || !sisusb->sisusb_dev || !sisusb->present)
return;
#ifndef SISUSB_DONTSYNC
if (context->actual_length)
*(context->actual_length) += urb->actual_length;
#endif
sisusb->urbstatus[context->urbindex] &= ~SU_URB_BUSY;
wake_up(&sisusb->wait_q);
}
static int sisusb_bulkout_msg(struct sisusb_usb_data *sisusb, int index,
unsigned int pipe, void *data, int len, int *actual_length,
int timeout, unsigned int tflags)
{
struct urb *urb = sisusb->sisurbout[index];
int retval, byteswritten = 0;
/* Set up URB */
urb->transfer_flags = 0;
usb_fill_bulk_urb(urb, sisusb->sisusb_dev, pipe, data, len,
sisusb_bulk_completeout,
&sisusb->urbout_context[index]);
urb->transfer_flags |= tflags;
urb->actual_length = 0;
/* Set up context */
sisusb->urbout_context[index].actual_length = (timeout) ?
NULL : actual_length;
/* Declare this urb/buffer in use */
sisusb->urbstatus[index] |= SU_URB_BUSY;
/* Submit URB */
retval = usb_submit_urb(urb, GFP_KERNEL);
/* If OK, and if timeout > 0, wait for completion */
if ((retval == 0) && timeout) {
wait_event_timeout(sisusb->wait_q,
(!(sisusb->urbstatus[index] & SU_URB_BUSY)),
timeout);
if (sisusb->urbstatus[index] & SU_URB_BUSY) {
/* URB timed out... kill it and report error */
usb_kill_urb(urb);
retval = -ETIMEDOUT;
} else {
/* Otherwise, report urb status */
retval = urb->status;
byteswritten = urb->actual_length;
}
}
if (actual_length)
*actual_length = byteswritten;
return retval;
}
/* 2. in-bulks */
/* completion callback */
static void sisusb_bulk_completein(struct urb *urb)
{
struct sisusb_usb_data *sisusb = urb->context;
if (!sisusb || !sisusb->sisusb_dev || !sisusb->present)
return;
sisusb->completein = 1;
wake_up(&sisusb->wait_q);
}
static int sisusb_bulkin_msg(struct sisusb_usb_data *sisusb,
unsigned int pipe, void *data, int len,
int *actual_length, int timeout, unsigned int tflags)
{
struct urb *urb = sisusb->sisurbin;
int retval, readbytes = 0;
urb->transfer_flags = 0;
usb_fill_bulk_urb(urb, sisusb->sisusb_dev, pipe, data, len,
sisusb_bulk_completein, sisusb);
urb->transfer_flags |= tflags;
urb->actual_length = 0;
sisusb->completein = 0;
retval = usb_submit_urb(urb, GFP_KERNEL);
if (retval == 0) {
wait_event_timeout(sisusb->wait_q, sisusb->completein, timeout);
if (!sisusb->completein) {
/* URB timed out... kill it and report error */
usb_kill_urb(urb);
retval = -ETIMEDOUT;
} else {
/* URB completed within timeout */
retval = urb->status;
readbytes = urb->actual_length;
}
}
if (actual_length)
*actual_length = readbytes;
return retval;
}
/* Level 1: */
/* Send a bulk message of variable size
*
* To copy the data from userspace, give pointer to "userbuffer",
* to copy from (non-DMA) kernel memory, give "kernbuffer". If
* both of these are NULL, it is assumed, that the transfer
* buffer "sisusb->obuf[index]" is set up with the data to send.
* Index is ignored if either kernbuffer or userbuffer is set.
* If async is nonzero, URBs will be sent without waiting for
* completion of the previous URB.
*
* (return 0 on success)
*/
static int sisusb_send_bulk_msg(struct sisusb_usb_data *sisusb, int ep, int len,
char *kernbuffer, const char __user *userbuffer, int index,
ssize_t *bytes_written, unsigned int tflags, int async)
{
int result = 0, retry, count = len;
int passsize, thispass, transferred_len = 0;
int fromuser = (userbuffer != NULL) ? 1 : 0;
int fromkern = (kernbuffer != NULL) ? 1 : 0;
unsigned int pipe;
char *buffer;
(*bytes_written) = 0;
/* Sanity check */
if (!sisusb || !sisusb->present || !sisusb->sisusb_dev)
return -ENODEV;
/* If we copy data from kernel or userspace, force the
* allocation of a buffer/urb. If we have the data in
* the transfer buffer[index] already, reuse the buffer/URB
* if the length is > buffer size. (So, transmitting
* large data amounts directly from the transfer buffer
* treats the buffer as a ring buffer. However, we need
* to sync in this case.)
*/
if (fromuser || fromkern)
index = -1;
else if (len > sisusb->obufsize)
async = 0;
pipe = usb_sndbulkpipe(sisusb->sisusb_dev, ep);
do {
passsize = thispass = (sisusb->obufsize < count) ?
sisusb->obufsize : count;
if (index < 0)
index = sisusb_get_free_outbuf(sisusb);
if (index < 0)
return -EIO;
buffer = sisusb->obuf[index];
if (fromuser) {
if (copy_from_user(buffer, userbuffer, passsize))
return -EFAULT;
userbuffer += passsize;
} else if (fromkern) {
memcpy(buffer, kernbuffer, passsize);
kernbuffer += passsize;
}
retry = 5;
while (thispass) {
if (!sisusb->sisusb_dev)
return -ENODEV;
result = sisusb_bulkout_msg(sisusb, index, pipe,
buffer, thispass, &transferred_len,
async ? 0 : 5 * HZ, tflags);
if (result == -ETIMEDOUT) {
/* Will not happen if async */
if (!retry--)
return -ETIME;
continue;
}
if ((result == 0) && !async && transferred_len) {
thispass -= transferred_len;
buffer += transferred_len;
} else
break;
}
if (result)
return result;
(*bytes_written) += passsize;
count -= passsize;
/* Force new allocation in next iteration */
if (fromuser || fromkern)
index = -1;
} while (count > 0);
if (async) {
#ifdef SISUSB_DONTSYNC
(*bytes_written) = len;
/* Some URBs/buffers might be busy */
#else
sisusb_wait_all_out_complete(sisusb);
(*bytes_written) = transferred_len;
/* All URBs and all buffers are available */
#endif
}
return ((*bytes_written) == len) ? 0 : -EIO;
}
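/*
 * Minimal usage sketch (illustrative only, not driver code; "buf" and
 * "len" are hypothetical):
 *
 *	ssize_t written;
 *	int ret = sisusb_send_bulk_msg(sisusb, SISUSB_EP_GFX_BULK_OUT,
 *			len, buf, NULL, 0, &written, 0, 0);
 *
 * sends "len" bytes from kernel memory synchronously; on success ret
 * is 0 and written == len.
 */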
/* Receive a bulk message of variable size
*
* To copy the data to userspace, give pointer to "userbuffer",
* to copy to kernel memory, give "kernbuffer". One of them
* MUST be set. (There is no technique for letting the caller
* read directly from the ibuf.)
*
*/
static int sisusb_recv_bulk_msg(struct sisusb_usb_data *sisusb, int ep, int len,
void *kernbuffer, char __user *userbuffer, ssize_t *bytes_read,
unsigned int tflags)
{
int result = 0, retry, count = len;
int bufsize, thispass, transferred_len;
unsigned int pipe;
char *buffer;
(*bytes_read) = 0;
/* Sanity check */
if (!sisusb || !sisusb->present || !sisusb->sisusb_dev)
return -ENODEV;
pipe = usb_rcvbulkpipe(sisusb->sisusb_dev, ep);
buffer = sisusb->ibuf;
bufsize = sisusb->ibufsize;
retry = 5;
#ifdef SISUSB_DONTSYNC
if (!(sisusb_wait_all_out_complete(sisusb)))
return -EIO;
#endif
while (count > 0) {
if (!sisusb->sisusb_dev)
return -ENODEV;
thispass = (bufsize < count) ? bufsize : count;
result = sisusb_bulkin_msg(sisusb, pipe, buffer, thispass,
&transferred_len, 5 * HZ, tflags);
if (transferred_len)
thispass = transferred_len;
else if (result == -ETIMEDOUT) {
if (!retry--)
return -ETIME;
continue;
} else
return -EIO;
if (thispass) {
(*bytes_read) += thispass;
count -= thispass;
if (userbuffer) {
if (copy_to_user(userbuffer, buffer, thispass))
return -EFAULT;
userbuffer += thispass;
} else {
memcpy(kernbuffer, buffer, thispass);
kernbuffer += thispass;
}
}
}
return ((*bytes_read) == len) ? 0 : -EIO;
}
static int sisusb_send_packet(struct sisusb_usb_data *sisusb, int len,
struct sisusb_packet *packet)
{
int ret;
ssize_t bytes_transferred = 0;
__le32 tmp;
if (len == 6)
packet->data = 0;
#ifdef SISUSB_DONTSYNC
if (!(sisusb_wait_all_out_complete(sisusb)))
return 1;
#endif
	/* Correct endianness if necessary */
SISUSB_CORRECT_ENDIANNESS_PACKET(packet);
/* 1. send the packet */
ret = sisusb_send_bulk_msg(sisusb, SISUSB_EP_GFX_OUT, len,
(char *)packet, NULL, 0, &bytes_transferred, 0, 0);
if ((ret == 0) && (len == 6)) {
/* 2. if packet len == 6, it means we read, so wait for 32bit
* return value and write it to packet->data
*/
ret = sisusb_recv_bulk_msg(sisusb, SISUSB_EP_GFX_IN, 4,
(char *)&tmp, NULL, &bytes_transferred, 0);
packet->data = le32_to_cpu(tmp);
}
return ret;
}
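/*
 * Packet length convention, by example: a register write sends a
 * 10-byte packet (header, address and 32 data bits), whereas a read
 * sends only the 6-byte header/address part; sisusb_send_packet()
 * then collects the 32 data bits returned by the device into
 * packet->data.
 */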
static int sisusb_send_bridge_packet(struct sisusb_usb_data *sisusb, int len,
struct sisusb_packet *packet, unsigned int tflags)
{
int ret;
ssize_t bytes_transferred = 0;
__le32 tmp;
if (len == 6)
packet->data = 0;
#ifdef SISUSB_DONTSYNC
if (!(sisusb_wait_all_out_complete(sisusb)))
return 1;
#endif
	/* Correct endianness if necessary */
SISUSB_CORRECT_ENDIANNESS_PACKET(packet);
/* 1. send the packet */
ret = sisusb_send_bulk_msg(sisusb, SISUSB_EP_BRIDGE_OUT, len,
(char *)packet, NULL, 0, &bytes_transferred, tflags, 0);
if ((ret == 0) && (len == 6)) {
/* 2. if packet len == 6, it means we read, so wait for 32bit
* return value and write it to packet->data
*/
ret = sisusb_recv_bulk_msg(sisusb, SISUSB_EP_BRIDGE_IN, 4,
(char *)&tmp, NULL, &bytes_transferred, 0);
packet->data = le32_to_cpu(tmp);
}
return ret;
}
/* access video memory and mmio (return 0 on success) */
/* Low level */
/* The following routines assume they are used to transfer byte, word,
 * long etc.
 * This means that
 * - the write routines expect "data" in machine endianness format.
 *   The data will be converted to leXX in sisusb_xxx_packet.
 * - the read routines return the read data in machine endianness.
*/
static int sisusb_write_memio_byte(struct sisusb_usb_data *sisusb, int type,
u32 addr, u8 data)
{
struct sisusb_packet packet;
packet.header = (1 << (addr & 3)) | (type << 6);
packet.address = addr & ~3;
packet.data = data << ((addr & 3) << 3);
return sisusb_send_packet(sisusb, 10, &packet);
}
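/*
 * Worked example (hypothetical values): writing 0xAB to address
 * 0x12345 gives addr & 3 == 1, so header = (1 << 1) | (type << 6),
 * address = 0x12344 and data = 0xAB << 8, i.e. the byte-enable bit
 * selects lane 1 of the dword and the payload is shifted onto that
 * lane.
 */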
static int sisusb_write_memio_word(struct sisusb_usb_data *sisusb, int type,
u32 addr, u16 data)
{
struct sisusb_packet packet;
int ret = 0;
packet.address = addr & ~3;
switch (addr & 3) {
case 0:
packet.header = (type << 6) | 0x0003;
packet.data = (u32)data;
ret = sisusb_send_packet(sisusb, 10, &packet);
break;
case 1:
packet.header = (type << 6) | 0x0006;
packet.data = (u32)data << 8;
ret = sisusb_send_packet(sisusb, 10, &packet);
break;
case 2:
packet.header = (type << 6) | 0x000c;
packet.data = (u32)data << 16;
ret = sisusb_send_packet(sisusb, 10, &packet);
break;
case 3:
packet.header = (type << 6) | 0x0008;
packet.data = (u32)data << 24;
ret = sisusb_send_packet(sisusb, 10, &packet);
packet.header = (type << 6) | 0x0001;
packet.address = (addr & ~3) + 4;
packet.data = (u32)data >> 8;
ret |= sisusb_send_packet(sisusb, 10, &packet);
}
return ret;
}
static int sisusb_write_memio_24bit(struct sisusb_usb_data *sisusb, int type,
u32 addr, u32 data)
{
struct sisusb_packet packet;
int ret = 0;
packet.address = addr & ~3;
switch (addr & 3) {
case 0:
packet.header = (type << 6) | 0x0007;
packet.data = data & 0x00ffffff;
ret = sisusb_send_packet(sisusb, 10, &packet);
break;
case 1:
packet.header = (type << 6) | 0x000e;
packet.data = data << 8;
ret = sisusb_send_packet(sisusb, 10, &packet);
break;
case 2:
packet.header = (type << 6) | 0x000c;
packet.data = data << 16;
ret = sisusb_send_packet(sisusb, 10, &packet);
packet.header = (type << 6) | 0x0001;
packet.address = (addr & ~3) + 4;
packet.data = (data >> 16) & 0x00ff;
ret |= sisusb_send_packet(sisusb, 10, &packet);
break;
case 3:
packet.header = (type << 6) | 0x0008;
packet.data = data << 24;
ret = sisusb_send_packet(sisusb, 10, &packet);
packet.header = (type << 6) | 0x0003;
packet.address = (addr & ~3) + 4;
packet.data = (data >> 8) & 0xffff;
ret |= sisusb_send_packet(sisusb, 10, &packet);
}
return ret;
}
static int sisusb_write_memio_long(struct sisusb_usb_data *sisusb, int type,
u32 addr, u32 data)
{
struct sisusb_packet packet;
int ret = 0;
packet.address = addr & ~3;
switch (addr & 3) {
case 0:
packet.header = (type << 6) | 0x000f;
packet.data = data;
ret = sisusb_send_packet(sisusb, 10, &packet);
break;
case 1:
packet.header = (type << 6) | 0x000e;
packet.data = data << 8;
ret = sisusb_send_packet(sisusb, 10, &packet);
packet.header = (type << 6) | 0x0001;
packet.address = (addr & ~3) + 4;
packet.data = data >> 24;
ret |= sisusb_send_packet(sisusb, 10, &packet);
break;
case 2:
packet.header = (type << 6) | 0x000c;
packet.data = data << 16;
ret = sisusb_send_packet(sisusb, 10, &packet);
packet.header = (type << 6) | 0x0003;
packet.address = (addr & ~3) + 4;
packet.data = data >> 16;
ret |= sisusb_send_packet(sisusb, 10, &packet);
break;
case 3:
packet.header = (type << 6) | 0x0008;
packet.data = data << 24;
ret = sisusb_send_packet(sisusb, 10, &packet);
packet.header = (type << 6) | 0x0007;
packet.address = (addr & ~3) + 4;
packet.data = data >> 8;
ret |= sisusb_send_packet(sisusb, 10, &packet);
}
return ret;
}
/* The xxx_bulk routines copy a buffer of variable size. They treat the
* buffer as chars, therefore lsb/msb has to be corrected if using the
* byte/word/long/etc routines for speed-up
*
* If data is from userland, set "userbuffer" (and clear "kernbuffer"),
* if data is in kernel space, set "kernbuffer" (and clear "userbuffer");
* if neither "kernbuffer" nor "userbuffer" are given, it is assumed
* that the data already is in the transfer buffer "sisusb->obuf[index]".
*/
static int sisusb_write_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
char *kernbuffer, int length, const char __user *userbuffer,
int index, ssize_t *bytes_written)
{
struct sisusb_packet packet;
int ret = 0;
static int msgcount;
u8 swap8, fromkern = kernbuffer ? 1 : 0;
u16 swap16;
u32 swap32, flag = (length >> 28) & 1;
u8 buf[4];
	/* if neither kernbuffer nor userbuffer is given, assume the
	 * data is already in obuf
*/
if (!fromkern && !userbuffer)
kernbuffer = sisusb->obuf[index];
	*bytes_written = 0;
length &= 0x00ffffff;
while (length) {
switch (length) {
case 1:
if (userbuffer) {
if (get_user(swap8, (u8 __user *)userbuffer))
return -EFAULT;
} else
swap8 = kernbuffer[0];
ret = sisusb_write_memio_byte(sisusb, SISUSB_TYPE_MEM,
addr, swap8);
if (!ret)
(*bytes_written)++;
return ret;
case 2:
if (userbuffer) {
if (get_user(swap16, (u16 __user *)userbuffer))
return -EFAULT;
} else
swap16 = *((u16 *)kernbuffer);
ret = sisusb_write_memio_word(sisusb, SISUSB_TYPE_MEM,
addr, swap16);
if (!ret)
(*bytes_written) += 2;
return ret;
case 3:
if (userbuffer) {
if (copy_from_user(&buf, userbuffer, 3))
return -EFAULT;
#ifdef __BIG_ENDIAN
swap32 = (buf[0] << 16) |
(buf[1] << 8) |
buf[2];
#else
swap32 = (buf[2] << 16) |
(buf[1] << 8) |
buf[0];
#endif
} else
#ifdef __BIG_ENDIAN
swap32 = (kernbuffer[0] << 16) |
(kernbuffer[1] << 8) |
kernbuffer[2];
#else
swap32 = (kernbuffer[2] << 16) |
(kernbuffer[1] << 8) |
kernbuffer[0];
#endif
ret = sisusb_write_memio_24bit(sisusb, SISUSB_TYPE_MEM,
addr, swap32);
if (!ret)
(*bytes_written) += 3;
return ret;
case 4:
if (userbuffer) {
if (get_user(swap32, (u32 __user *)userbuffer))
return -EFAULT;
} else
swap32 = *((u32 *)kernbuffer);
ret = sisusb_write_memio_long(sisusb, SISUSB_TYPE_MEM,
addr, swap32);
if (!ret)
(*bytes_written) += 4;
return ret;
default:
if ((length & ~3) > 0x10000) {
packet.header = 0x001f;
packet.address = 0x000001d4;
packet.data = addr;
ret = sisusb_send_bridge_packet(sisusb, 10,
&packet, 0);
packet.header = 0x001f;
packet.address = 0x000001d0;
packet.data = (length & ~3);
ret |= sisusb_send_bridge_packet(sisusb, 10,
&packet, 0);
packet.header = 0x001f;
packet.address = 0x000001c0;
packet.data = flag | 0x16;
ret |= sisusb_send_bridge_packet(sisusb, 10,
&packet, 0);
if (userbuffer) {
ret |= sisusb_send_bulk_msg(sisusb,
SISUSB_EP_GFX_LBULK_OUT,
(length & ~3),
NULL, userbuffer, 0,
bytes_written, 0, 1);
userbuffer += (*bytes_written);
} else if (fromkern) {
ret |= sisusb_send_bulk_msg(sisusb,
SISUSB_EP_GFX_LBULK_OUT,
(length & ~3),
kernbuffer, NULL, 0,
bytes_written, 0, 1);
kernbuffer += (*bytes_written);
} else {
ret |= sisusb_send_bulk_msg(sisusb,
SISUSB_EP_GFX_LBULK_OUT,
(length & ~3),
NULL, NULL, index,
bytes_written, 0, 1);
kernbuffer += ((*bytes_written) &
(sisusb->obufsize-1));
}
} else {
packet.header = 0x001f;
packet.address = 0x00000194;
packet.data = addr;
ret = sisusb_send_bridge_packet(sisusb, 10,
&packet, 0);
packet.header = 0x001f;
packet.address = 0x00000190;
packet.data = (length & ~3);
ret |= sisusb_send_bridge_packet(sisusb, 10,
&packet, 0);
if (sisusb->flagb0 != 0x16) {
packet.header = 0x001f;
packet.address = 0x00000180;
packet.data = flag | 0x16;
ret |= sisusb_send_bridge_packet(sisusb,
10, &packet, 0);
sisusb->flagb0 = 0x16;
}
if (userbuffer) {
ret |= sisusb_send_bulk_msg(sisusb,
SISUSB_EP_GFX_BULK_OUT,
(length & ~3),
NULL, userbuffer, 0,
bytes_written, 0, 1);
userbuffer += (*bytes_written);
} else if (fromkern) {
ret |= sisusb_send_bulk_msg(sisusb,
SISUSB_EP_GFX_BULK_OUT,
(length & ~3),
kernbuffer, NULL, 0,
bytes_written, 0, 1);
kernbuffer += (*bytes_written);
} else {
ret |= sisusb_send_bulk_msg(sisusb,
SISUSB_EP_GFX_BULK_OUT,
(length & ~3),
NULL, NULL, index,
bytes_written, 0, 1);
kernbuffer += ((*bytes_written) &
(sisusb->obufsize-1));
}
}
if (ret) {
msgcount++;
if (msgcount < 500)
dev_err(&sisusb->sisusb_dev->dev,
"Wrote %zd of %d bytes, error %d\n",
*bytes_written, length,
ret);
else if (msgcount == 500)
dev_err(&sisusb->sisusb_dev->dev,
"Too many errors, logging stopped\n");
}
addr += (*bytes_written);
length -= (*bytes_written);
}
if (ret)
break;
}
return ret ? -EIO : 0;
}
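/*
 * Note on the length encoding used above, by example: a caller passing
 * length = 0x10000100 requests a 0x100-byte transfer (the low 24 bits);
 * bit 28 ends up in "flag", which is OR'd into the 0x16 bridge command.
 */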
/* Remember: read data in the packet is in machine endianness! So for
 * byte, word, 24-bit and long reads no endian correction is necessary.
*/
static int sisusb_read_memio_byte(struct sisusb_usb_data *sisusb, int type,
u32 addr, u8 *data)
{
struct sisusb_packet packet;
int ret;
CLEARPACKET(&packet);
packet.header = (1 << (addr & 3)) | (type << 6);
packet.address = addr & ~3;
ret = sisusb_send_packet(sisusb, 6, &packet);
*data = (u8)(packet.data >> ((addr & 3) << 3));
return ret;
}
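/*
 * Worked example, mirroring the write case: reading the byte at
 * address 0x12345 sends the same header/address pair as a 6-byte
 * packet; the returned 32-bit word is shifted right by 8 and truncated
 * to a u8 to extract lane 1.
 */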
static int sisusb_read_memio_word(struct sisusb_usb_data *sisusb, int type,
u32 addr, u16 *data)
{
struct sisusb_packet packet;
int ret = 0;
CLEARPACKET(&packet);
packet.address = addr & ~3;
switch (addr & 3) {
case 0:
packet.header = (type << 6) | 0x0003;
ret = sisusb_send_packet(sisusb, 6, &packet);
*data = (u16)(packet.data);
break;
case 1:
packet.header = (type << 6) | 0x0006;
ret = sisusb_send_packet(sisusb, 6, &packet);
*data = (u16)(packet.data >> 8);
break;
case 2:
packet.header = (type << 6) | 0x000c;
ret = sisusb_send_packet(sisusb, 6, &packet);
*data = (u16)(packet.data >> 16);
break;
case 3:
packet.header = (type << 6) | 0x0008;
ret = sisusb_send_packet(sisusb, 6, &packet);
*data = (u16)(packet.data >> 24);
packet.header = (type << 6) | 0x0001;
packet.address = (addr & ~3) + 4;
ret |= sisusb_send_packet(sisusb, 6, &packet);
*data |= (u16)(packet.data << 8);
}
return ret;
}
static int sisusb_read_memio_24bit(struct sisusb_usb_data *sisusb, int type,
u32 addr, u32 *data)
{
struct sisusb_packet packet;
int ret = 0;
packet.address = addr & ~3;
switch (addr & 3) {
case 0:
packet.header = (type << 6) | 0x0007;
ret = sisusb_send_packet(sisusb, 6, &packet);
*data = packet.data & 0x00ffffff;
break;
case 1:
packet.header = (type << 6) | 0x000e;
ret = sisusb_send_packet(sisusb, 6, &packet);
*data = packet.data >> 8;
break;
case 2:
packet.header = (type << 6) | 0x000c;
ret = sisusb_send_packet(sisusb, 6, &packet);
*data = packet.data >> 16;
packet.header = (type << 6) | 0x0001;
packet.address = (addr & ~3) + 4;
ret |= sisusb_send_packet(sisusb, 6, &packet);
*data |= ((packet.data & 0xff) << 16);
break;
case 3:
packet.header = (type << 6) | 0x0008;
ret = sisusb_send_packet(sisusb, 6, &packet);
*data = packet.data >> 24;
packet.header = (type << 6) | 0x0003;
packet.address = (addr & ~3) + 4;
ret |= sisusb_send_packet(sisusb, 6, &packet);
*data |= ((packet.data & 0xffff) << 8);
}
return ret;
}
static int sisusb_read_memio_long(struct sisusb_usb_data *sisusb, int type,
u32 addr, u32 *data)
{
struct sisusb_packet packet;
int ret = 0;
packet.address = addr & ~3;
switch (addr & 3) {
case 0:
packet.header = (type << 6) | 0x000f;
ret = sisusb_send_packet(sisusb, 6, &packet);
*data = packet.data;
break;
case 1:
packet.header = (type << 6) | 0x000e;
ret = sisusb_send_packet(sisusb, 6, &packet);
*data = packet.data >> 8;
packet.header = (type << 6) | 0x0001;
packet.address = (addr & ~3) + 4;
ret |= sisusb_send_packet(sisusb, 6, &packet);
*data |= (packet.data << 24);
break;
case 2:
packet.header = (type << 6) | 0x000c;
ret = sisusb_send_packet(sisusb, 6, &packet);
*data = packet.data >> 16;
packet.header = (type << 6) | 0x0003;
packet.address = (addr & ~3) + 4;
ret |= sisusb_send_packet(sisusb, 6, &packet);
*data |= (packet.data << 16);
break;
case 3:
packet.header = (type << 6) | 0x0008;
ret = sisusb_send_packet(sisusb, 6, &packet);
*data = packet.data >> 24;
packet.header = (type << 6) | 0x0007;
packet.address = (addr & ~3) + 4;
ret |= sisusb_send_packet(sisusb, 6, &packet);
*data |= (packet.data << 8);
}
return ret;
}
static int sisusb_read_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
char *kernbuffer, int length, char __user *userbuffer,
ssize_t *bytes_read)
{
int ret = 0;
char buf[4];
u16 swap16;
u32 swap32;
	*bytes_read = 0;
length &= 0x00ffffff;
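	/*
	 * Remainders of 1-3 bytes are served with a single partial read
	 * and return immediately; longer requests proceed dword-wise
	 * until fewer than 4 bytes remain.
	 */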
while (length) {
switch (length) {
case 1:
ret |= sisusb_read_memio_byte(sisusb, SISUSB_TYPE_MEM,
addr, &buf[0]);
if (!ret) {
(*bytes_read)++;
if (userbuffer) {
if (put_user(buf[0], (u8 __user *)userbuffer))
return -EFAULT;
} else
kernbuffer[0] = buf[0];
}
return ret;
case 2:
ret |= sisusb_read_memio_word(sisusb, SISUSB_TYPE_MEM,
addr, &swap16);
if (!ret) {
(*bytes_read) += 2;
if (userbuffer) {
if (put_user(swap16, (u16 __user *)userbuffer))
return -EFAULT;
} else {
*((u16 *)kernbuffer) = swap16;
}
}
return ret;
case 3:
ret |= sisusb_read_memio_24bit(sisusb, SISUSB_TYPE_MEM,
addr, &swap32);
if (!ret) {
(*bytes_read) += 3;
#ifdef __BIG_ENDIAN
buf[0] = (swap32 >> 16) & 0xff;
buf[1] = (swap32 >> 8) & 0xff;
buf[2] = swap32 & 0xff;
#else
buf[2] = (swap32 >> 16) & 0xff;
buf[1] = (swap32 >> 8) & 0xff;
buf[0] = swap32 & 0xff;
#endif
if (userbuffer) {
if (copy_to_user(userbuffer,
&buf[0], 3))
return -EFAULT;
} else {
kernbuffer[0] = buf[0];
kernbuffer[1] = buf[1];
kernbuffer[2] = buf[2];
}
}
return ret;
default:
ret |= sisusb_read_memio_long(sisusb, SISUSB_TYPE_MEM,
addr, &swap32);
if (!ret) {
(*bytes_read) += 4;
if (userbuffer) {
if (put_user(swap32, (u32 __user *)userbuffer))
return -EFAULT;
userbuffer += 4;
} else {
*((u32 *)kernbuffer) = swap32;
kernbuffer += 4;
}
addr += 4;
length -= 4;
}
}
if (ret)
break;
}
return ret;
}
/* High level: Gfx (indexed) register access */
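/* Indexed access works VGA-style: write the register index to the base
 * port, then read or write the data at port + 1.
 */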
static int sisusb_setidxreg(struct sisusb_usb_data *sisusb, u32 port,
u8 index, u8 data)
{
int ret;
ret = sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, port, index);
ret |= sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, port + 1, data);
return ret;
}
static int sisusb_getidxreg(struct sisusb_usb_data *sisusb, u32 port,
u8 index, u8 *data)
{
int ret;
ret = sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, port, index);
ret |= sisusb_read_memio_byte(sisusb, SISUSB_TYPE_IO, port + 1, data);
return ret;
}
static int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, u32 port, u8 idx,
u8 myand, u8 myor)
{
int ret;
u8 tmp;
ret = sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, port, idx);
ret |= sisusb_read_memio_byte(sisusb, SISUSB_TYPE_IO, port + 1, &tmp);
tmp &= myand;
tmp |= myor;
ret |= sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, port + 1, tmp);
return ret;
}
static int sisusb_setidxregmask(struct sisusb_usb_data *sisusb,
u32 port, u8 idx, u8 data, u8 mask)
{
int ret;
u8 tmp;
ret = sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, port, idx);
ret |= sisusb_read_memio_byte(sisusb, SISUSB_TYPE_IO, port + 1, &tmp);
tmp &= ~(mask);
tmp |= (data & mask);
ret |= sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, port + 1, tmp);
return ret;
}
static int sisusb_setidxregor(struct sisusb_usb_data *sisusb, u32 port,
u8 index, u8 myor)
{
return sisusb_setidxregandor(sisusb, port, index, 0xff, myor);
}
static int sisusb_setidxregand(struct sisusb_usb_data *sisusb, u32 port,
u8 idx, u8 myand)
{
return sisusb_setidxregandor(sisusb, port, idx, myand, 0x00);
}
/* Write/read video ram */
#ifdef SISUSBENDIANTEST
static void sisusb_testreadwrite(struct sisusb_usb_data *sisusb)
{
static u8 srcbuffer[] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };
char destbuffer[10];
int i, j;
sisusb_copy_memory(sisusb, srcbuffer, sisusb->vrambase, 7);
for (i = 1; i <= 7; i++) {
dev_dbg(&sisusb->sisusb_dev->dev,
"sisusb: rwtest %d bytes\n", i);
sisusb_read_memory(sisusb, destbuffer, sisusb->vrambase, i);
for (j = 0; j < i; j++) {
dev_dbg(&sisusb->sisusb_dev->dev,
"rwtest read[%d] = %x\n",
j, destbuffer[j]);
}
}
}
#endif
/* access pci config registers (reg numbers 0, 4, 8, etc) */
static int sisusb_write_pci_config(struct sisusb_usb_data *sisusb,
int regnum, u32 data)
{
struct sisusb_packet packet;
packet.header = 0x008f;
packet.address = regnum | 0x10000;
packet.data = data;
return sisusb_send_packet(sisusb, 10, &packet);
}
static int sisusb_read_pci_config(struct sisusb_usb_data *sisusb,
int regnum, u32 *data)
{
struct sisusb_packet packet;
int ret;
packet.header = 0x008f;
packet.address = (u32)regnum | 0x10000;
ret = sisusb_send_packet(sisusb, 6, &packet);
*data = packet.data;
return ret;
}
/* Clear video RAM */
static int sisusb_clear_vram(struct sisusb_usb_data *sisusb,
u32 address, int length)
{
int ret, i;
ssize_t j;
if (address < sisusb->vrambase)
return 1;
if (address >= sisusb->vrambase + sisusb->vramsize)
return 1;
if (address + length > sisusb->vrambase + sisusb->vramsize)
length = sisusb->vrambase + sisusb->vramsize - address;
if (length <= 0)
return 0;
/* allocate free buffer/urb and clear the buffer */
i = sisusb_alloc_outbuf(sisusb);
if (i < 0)
return -EBUSY;
memset(sisusb->obuf[i], 0, sisusb->obufsize);
/* We can write a length > buffer size here. The buffer
* data will simply be re-used (like a ring-buffer).
*/
ret = sisusb_write_mem_bulk(sisusb, address, NULL, length, NULL, i, &j);
/* Free the buffer/urb */
sisusb_free_outbuf(sisusb, i);
return ret;
}
/* Initialize the graphics core (return 0 on success)
* This resets the graphics hardware and puts it into
* a defined mode (640x480@60Hz)
*/
#define GETREG(r, d) sisusb_read_memio_byte(sisusb, SISUSB_TYPE_IO, r, d)
#define SETREG(r, d) sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, r, d)
#define SETIREG(r, i, d) sisusb_setidxreg(sisusb, r, i, d)
#define GETIREG(r, i, d) sisusb_getidxreg(sisusb, r, i, d)
#define SETIREGOR(r, i, o) sisusb_setidxregor(sisusb, r, i, o)
#define SETIREGAND(r, i, a) sisusb_setidxregand(sisusb, r, i, a)
#define SETIREGANDOR(r, i, a, o) sisusb_setidxregandor(sisusb, r, i, a, o)
#define READL(a, d) sisusb_read_memio_long(sisusb, SISUSB_TYPE_MEM, a, d)
#define WRITEL(a, d) sisusb_write_memio_long(sisusb, SISUSB_TYPE_MEM, a, d)
#define READB(a, d) sisusb_read_memio_byte(sisusb, SISUSB_TYPE_MEM, a, d)
#define WRITEB(a, d) sisusb_write_memio_byte(sisusb, SISUSB_TYPE_MEM, a, d)
static int sisusb_triggersr16(struct sisusb_usb_data *sisusb, u8 ramtype)
{
int ret;
u8 tmp8;
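	/*
	 * Pulse the upper bits of SR 0x16 so the memory controller
	 * latches the new DRAM settings; the sequence differs by RAM
	 * type (presumably SDR vs. DDR).
	 */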
ret = GETIREG(SISSR, 0x16, &tmp8);
if (ramtype <= 1) {
tmp8 &= 0x3f;
ret |= SETIREG(SISSR, 0x16, tmp8);
tmp8 |= 0x80;
ret |= SETIREG(SISSR, 0x16, tmp8);
} else {
tmp8 |= 0xc0;
ret |= SETIREG(SISSR, 0x16, tmp8);
tmp8 &= 0x0f;
ret |= SETIREG(SISSR, 0x16, tmp8);
tmp8 |= 0x80;
ret |= SETIREG(SISSR, 0x16, tmp8);
tmp8 &= 0x0f;
ret |= SETIREG(SISSR, 0x16, tmp8);
tmp8 |= 0xd0;
ret |= SETIREG(SISSR, 0x16, tmp8);
tmp8 &= 0x0f;
ret |= SETIREG(SISSR, 0x16, tmp8);
tmp8 |= 0xa0;
ret |= SETIREG(SISSR, 0x16, tmp8);
}
return ret;
}
static int sisusb_getbuswidth(struct sisusb_usb_data *sisusb,
int *bw, int *chab)
{
int ret;
u8 ramtype, done = 0;
u32 t0, t1, t2, t3;
u32 ramptr = SISUSB_PCI_MEMBASE;
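	/*
	 * Detect memory channel and bus width by writing test patterns
	 * to the start of VRAM and checking which ones read back intact
	 * under different SR 0x14 settings.
	 */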
ret = GETIREG(SISSR, 0x3a, &ramtype);
ramtype &= 3;
ret |= SETIREG(SISSR, 0x13, 0x00);
if (ramtype <= 1) {
ret |= SETIREG(SISSR, 0x14, 0x12);
ret |= SETIREGAND(SISSR, 0x15, 0xef);
} else {
ret |= SETIREG(SISSR, 0x14, 0x02);
}
ret |= sisusb_triggersr16(sisusb, ramtype);
ret |= WRITEL(ramptr + 0, 0x01234567);
ret |= WRITEL(ramptr + 4, 0x456789ab);
ret |= WRITEL(ramptr + 8, 0x89abcdef);
ret |= WRITEL(ramptr + 12, 0xcdef0123);
ret |= WRITEL(ramptr + 16, 0x55555555);
ret |= WRITEL(ramptr + 20, 0x55555555);
ret |= WRITEL(ramptr + 24, 0xffffffff);
ret |= WRITEL(ramptr + 28, 0xffffffff);
ret |= READL(ramptr + 0, &t0);
ret |= READL(ramptr + 4, &t1);
ret |= READL(ramptr + 8, &t2);
ret |= READL(ramptr + 12, &t3);
if (ramtype <= 1) {
*chab = 0; *bw = 64;
if ((t3 != 0xcdef0123) || (t2 != 0x89abcdef)) {
if ((t1 == 0x456789ab) && (t0 == 0x01234567)) {
*chab = 0; *bw = 64;
ret |= SETIREGAND(SISSR, 0x14, 0xfd);
}
}
if ((t1 != 0x456789ab) || (t0 != 0x01234567)) {
*chab = 1; *bw = 64;
ret |= SETIREGANDOR(SISSR, 0x14, 0xfc, 0x01);
ret |= sisusb_triggersr16(sisusb, ramtype);
ret |= WRITEL(ramptr + 0, 0x89abcdef);
ret |= WRITEL(ramptr + 4, 0xcdef0123);
ret |= WRITEL(ramptr + 8, 0x55555555);
ret |= WRITEL(ramptr + 12, 0x55555555);
ret |= WRITEL(ramptr + 16, 0xaaaaaaaa);
ret |= WRITEL(ramptr + 20, 0xaaaaaaaa);
ret |= READL(ramptr + 4, &t1);
if (t1 != 0xcdef0123) {
*bw = 32;
ret |= SETIREGOR(SISSR, 0x15, 0x10);
}
}
} else {
		*chab = 0; *bw = 64; /* default: channel A, bw = 64 */
done = 0;
if (t1 == 0x456789ab) {
if (t0 == 0x01234567) {
*chab = 0; *bw = 64;
done = 1;
}
} else {
if (t0 == 0x01234567) {
*chab = 0; *bw = 32;
ret |= SETIREG(SISSR, 0x14, 0x00);
done = 1;
}
}
if (!done) {
ret |= SETIREG(SISSR, 0x14, 0x03);
ret |= sisusb_triggersr16(sisusb, ramtype);
ret |= WRITEL(ramptr + 0, 0x01234567);
ret |= WRITEL(ramptr + 4, 0x456789ab);
ret |= WRITEL(ramptr + 8, 0x89abcdef);
ret |= WRITEL(ramptr + 12, 0xcdef0123);
ret |= WRITEL(ramptr + 16, 0x55555555);
ret |= WRITEL(ramptr + 20, 0x55555555);
ret |= WRITEL(ramptr + 24, 0xffffffff);
ret |= WRITEL(ramptr + 28, 0xffffffff);
ret |= READL(ramptr + 0, &t0);
ret |= READL(ramptr + 4, &t1);
if (t1 == 0x456789ab) {
if (t0 == 0x01234567) {
*chab = 1; *bw = 64;
return ret;
} /* else error */
} else {
if (t0 == 0x01234567) {
*chab = 1; *bw = 32;
ret |= SETIREG(SISSR, 0x14, 0x01);
} /* else error */
}
}
}
return ret;
}
static int sisusb_verify_mclk(struct sisusb_usb_data *sisusb)
{
int ret = 0;
u32 ramptr = SISUSB_PCI_MEMBASE;
u8 tmp1, tmp2, i, j;
ret |= WRITEB(ramptr, 0xaa);
ret |= WRITEB(ramptr + 16, 0x55);
ret |= READB(ramptr, &tmp1);
ret |= READB(ramptr + 16, &tmp2);
if ((tmp1 != 0xaa) || (tmp2 != 0x55)) {
for (i = 0, j = 16; i < 2; i++, j += 16) {
ret |= GETIREG(SISSR, 0x21, &tmp1);
ret |= SETIREGAND(SISSR, 0x21, (tmp1 & 0xfb));
ret |= SETIREGOR(SISSR, 0x3c, 0x01); /* not on 330 */
ret |= SETIREGAND(SISSR, 0x3c, 0xfe); /* not on 330 */
ret |= SETIREG(SISSR, 0x21, tmp1);
ret |= WRITEB(ramptr + 16 + j, j);
ret |= READB(ramptr + 16 + j, &tmp1);
if (tmp1 == j) {
ret |= WRITEB(ramptr + j, j);
break;
}
}
}
return ret;
}
static int sisusb_set_rank(struct sisusb_usb_data *sisusb, int *iret,
int index, u8 rankno, u8 chab, const u8 dramtype[][5], int bw)
{
int ret = 0, ranksize;
u8 tmp;
*iret = 0;
if ((rankno == 2) && (dramtype[index][0] == 2))
return ret;
ranksize = dramtype[index][3] / 2 * bw / 32;
if ((ranksize * rankno) > 128)
return ret;
tmp = 0;
while ((ranksize >>= 1) > 0)
tmp += 0x10;
tmp |= ((rankno - 1) << 2);
tmp |= ((bw / 64) & 0x02);
tmp |= (chab & 0x01);
ret = SETIREG(SISSR, 0x14, tmp);
ret |= sisusb_triggersr16(sisusb, 0); /* sic! */
*iret = 1;
return ret;
}
static int sisusb_check_rbc(struct sisusb_usb_data *sisusb, int *iret,
u32 inc, int testn)
{
int ret = 0, i;
u32 j, tmp;
*iret = 0;
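	/*
	 * Write a marker at every multiple of 'inc' and verify each one
	 * reads back; a mismatch means addressing wrapped, i.e. the
	 * assumed row/bank/column layout is too large.
	 */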
for (i = 0, j = 0; i < testn; i++) {
ret |= WRITEL(sisusb->vrambase + j, j);
j += inc;
}
for (i = 0, j = 0; i < testn; i++) {
ret |= READL(sisusb->vrambase + j, &tmp);
if (tmp != j)
return ret;
j += inc;
}
*iret = 1;
return ret;
}
static int sisusb_check_ranks(struct sisusb_usb_data *sisusb,
int *iret, int rankno, int idx, int bw, const u8 rtype[][5])
{
int ret = 0, i, i2ret;
u32 inc;
*iret = 0;
for (i = rankno; i >= 1; i--) {
inc = 1 << (rtype[idx][2] + rtype[idx][1] + rtype[idx][0] +
bw / 64 + i);
ret |= sisusb_check_rbc(sisusb, &i2ret, inc, 2);
if (!i2ret)
return ret;
}
inc = 1 << (rtype[idx][2] + bw / 64 + 2);
ret |= sisusb_check_rbc(sisusb, &i2ret, inc, 4);
if (!i2ret)
return ret;
inc = 1 << (10 + bw / 64);
ret |= sisusb_check_rbc(sisusb, &i2ret, inc, 2);
if (!i2ret)
return ret;
*iret = 1;
return ret;
}
static int sisusb_get_sdram_size(struct sisusb_usb_data *sisusb, int *iret,
int bw, int chab)
{
int ret = 0, i2ret = 0, i, j;
static const u8 sdramtype[13][5] = {
{ 2, 12, 9, 64, 0x35 },
{ 1, 13, 9, 64, 0x44 },
{ 2, 12, 8, 32, 0x31 },
{ 2, 11, 9, 32, 0x25 },
{ 1, 12, 9, 32, 0x34 },
{ 1, 13, 8, 32, 0x40 },
{ 2, 11, 8, 16, 0x21 },
{ 1, 12, 8, 16, 0x30 },
{ 1, 11, 9, 16, 0x24 },
{ 1, 11, 8, 8, 0x20 },
{ 2, 9, 8, 4, 0x01 },
{ 1, 10, 8, 4, 0x10 },
{ 1, 9, 8, 2, 0x00 }
};
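	/*
	 * Each row presumably encodes { bank bits, row bits, column
	 * bits, rank size in MB on a 64-bit bus, SR 0x13 value } for one
	 * SDRAM geometry, ordered from largest to smallest.
	 */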
*iret = 1; /* error */
for (i = 0; i < 13; i++) {
ret |= SETIREGANDOR(SISSR, 0x13, 0x80, sdramtype[i][4]);
for (j = 2; j > 0; j--) {
ret |= sisusb_set_rank(sisusb, &i2ret, i, j, chab,
sdramtype, bw);
if (!i2ret)
continue;
ret |= sisusb_check_ranks(sisusb, &i2ret, j, i, bw,
sdramtype);
if (i2ret) {
*iret = 0; /* ram size found */
return ret;
}
}
}
return ret;
}
static int sisusb_setup_screen(struct sisusb_usb_data *sisusb,
int clrall, int drwfr)
{
int ret = 0;
u32 address;
int i, length, modex, modey, bpp;
modex = 640; modey = 480; bpp = 2;
address = sisusb->vrambase; /* Clear video ram */
if (clrall)
length = sisusb->vramsize;
else
length = modex * bpp * modey;
ret = sisusb_clear_vram(sisusb, address, length);
if (!ret && drwfr) {
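		/*
		 * Draw a one-pixel border (pixel value 0xf100) along the
		 * top/bottom rows and left/right columns of the 640x480
		 * screen.
		 */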
for (i = 0; i < modex; i++) {
address = sisusb->vrambase + (i * bpp);
ret |= sisusb_write_memio_word(sisusb, SISUSB_TYPE_MEM,
address, 0xf100);
address += (modex * (modey-1) * bpp);
ret |= sisusb_write_memio_word(sisusb, SISUSB_TYPE_MEM,
address, 0xf100);
}
for (i = 0; i < modey; i++) {
address = sisusb->vrambase + ((i * modex) * bpp);
ret |= sisusb_write_memio_word(sisusb, SISUSB_TYPE_MEM,
address, 0xf100);
address += ((modex - 1) * bpp);
ret |= sisusb_write_memio_word(sisusb, SISUSB_TYPE_MEM,
address, 0xf100);
}
}
return ret;
}
static void sisusb_set_default_mode(struct sisusb_usb_data *sisusb,
int touchengines)
{
int i, j, modex, bpp, du;
u8 sr31, cr63, tmp8;
static const char attrdata[] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x01, 0x00, 0x00, 0x00
};
static const char crtcrdata[] = {
0x5f, 0x4f, 0x50, 0x82, 0x54, 0x80, 0x0b, 0x3e,
0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xea, 0x8c, 0xdf, 0x28, 0x40, 0xe7, 0x04, 0xa3,
0xff
};
static const char grcdata[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0f,
0xff
};
static const char crtcdata[] = {
0x5f, 0x4f, 0x4f, 0x83, 0x55, 0x81, 0x0b, 0x3e,
0xe9, 0x8b, 0xdf, 0xe8, 0x0c, 0x00, 0x00, 0x05,
0x00
};
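	/*
	 * Program the standard VGA registers (sequencer, misc, CRTC,
	 * attribute, graphics) and the SiS extended registers for a
	 * 640x480 16bpp mode.
	 */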
modex = 640; bpp = 2;
GETIREG(SISSR, 0x31, &sr31);
GETIREG(SISCR, 0x63, &cr63);
SETIREGOR(SISSR, 0x01, 0x20);
SETIREG(SISCR, 0x63, cr63 & 0xbf);
SETIREGOR(SISCR, 0x17, 0x80);
SETIREGOR(SISSR, 0x1f, 0x04);
SETIREGAND(SISSR, 0x07, 0xfb);
SETIREG(SISSR, 0x00, 0x03); /* seq */
SETIREG(SISSR, 0x01, 0x21);
SETIREG(SISSR, 0x02, 0x0f);
SETIREG(SISSR, 0x03, 0x00);
SETIREG(SISSR, 0x04, 0x0e);
SETREG(SISMISCW, 0x23); /* misc */
for (i = 0; i <= 0x18; i++) { /* crtc */
SETIREG(SISCR, i, crtcrdata[i]);
}
for (i = 0; i <= 0x13; i++) { /* att */
GETREG(SISINPSTAT, &tmp8);
SETREG(SISAR, i);
SETREG(SISAR, attrdata[i]);
}
GETREG(SISINPSTAT, &tmp8);
SETREG(SISAR, 0x14);
SETREG(SISAR, 0x00);
GETREG(SISINPSTAT, &tmp8);
SETREG(SISAR, 0x20);
GETREG(SISINPSTAT, &tmp8);
for (i = 0; i <= 0x08; i++) { /* grc */
SETIREG(SISGR, i, grcdata[i]);
}
SETIREGAND(SISGR, 0x05, 0xbf);
for (i = 0x0A; i <= 0x0E; i++) { /* clr ext */
SETIREG(SISSR, i, 0x00);
}
SETIREGAND(SISSR, 0x37, 0xfe);
SETREG(SISMISCW, 0xef); /* sync */
SETIREG(SISCR, 0x11, 0x00); /* crtc */
for (j = 0x00, i = 0; i <= 7; i++, j++)
SETIREG(SISCR, j, crtcdata[i]);
for (j = 0x10; i <= 10; i++, j++)
SETIREG(SISCR, j, crtcdata[i]);
for (j = 0x15; i <= 12; i++, j++)
SETIREG(SISCR, j, crtcdata[i]);
for (j = 0x0A; i <= 15; i++, j++)
SETIREG(SISSR, j, crtcdata[i]);
SETIREG(SISSR, 0x0E, (crtcdata[16] & 0xE0));
SETIREGANDOR(SISCR, 0x09, 0x5f, ((crtcdata[16] & 0x01) << 5));
SETIREG(SISCR, 0x14, 0x4f);
du = (modex / 16) * (bpp * 2); /* offset/pitch */
SETIREGANDOR(SISSR, 0x0e, 0xf0, ((du >> 8) & 0x0f));
SETIREG(SISCR, 0x13, (du & 0xff));
du <<= 5;
tmp8 = du >> 8;
SETIREG(SISSR, 0x10, tmp8);
SETIREG(SISSR, 0x31, 0x00); /* VCLK */
SETIREG(SISSR, 0x2b, 0x1b);
SETIREG(SISSR, 0x2c, 0xe1);
SETIREG(SISSR, 0x2d, 0x01);
SETIREGAND(SISSR, 0x3d, 0xfe); /* FIFO */
SETIREG(SISSR, 0x08, 0xae);
SETIREGAND(SISSR, 0x09, 0xf0);
SETIREG(SISSR, 0x08, 0x34);
SETIREGOR(SISSR, 0x3d, 0x01);
SETIREGAND(SISSR, 0x1f, 0x3f); /* mode regs */
SETIREGANDOR(SISSR, 0x06, 0xc0, 0x0a);
SETIREG(SISCR, 0x19, 0x00);
SETIREGAND(SISCR, 0x1a, 0xfc);
SETIREGAND(SISSR, 0x0f, 0xb7);
SETIREGAND(SISSR, 0x31, 0xfb);
SETIREGANDOR(SISSR, 0x21, 0x1f, 0xa0);
SETIREGAND(SISSR, 0x32, 0xf3);
SETIREGANDOR(SISSR, 0x07, 0xf8, 0x03);
SETIREG(SISCR, 0x52, 0x6c);
SETIREG(SISCR, 0x0d, 0x00); /* adjust frame */
SETIREG(SISCR, 0x0c, 0x00);
SETIREG(SISSR, 0x0d, 0x00);
SETIREGAND(SISSR, 0x37, 0xfe);
SETIREG(SISCR, 0x32, 0x20);
SETIREGAND(SISSR, 0x01, 0xdf); /* enable display */
SETIREG(SISCR, 0x63, (cr63 & 0xbf));
SETIREG(SISSR, 0x31, (sr31 & 0xfb));
if (touchengines) {
SETIREG(SISSR, 0x20, 0xa1); /* enable engines */
SETIREGOR(SISSR, 0x1e, 0x5a);
SETIREG(SISSR, 0x26, 0x01); /* disable cmdqueue */
SETIREG(SISSR, 0x27, 0x1f);
SETIREG(SISSR, 0x26, 0x00);
}
SETIREG(SISCR, 0x34, 0x44); /* we just set std mode #44 */
}
static int sisusb_init_gfxcore(struct sisusb_usb_data *sisusb)
{
int ret = 0, i, j, bw, chab, iret, retry = 3;
u8 tmp8, ramtype;
u32 tmp32;
static const char mclktable[] = {
0x3b, 0x22, 0x01, 143,
0x3b, 0x22, 0x01, 143,
0x3b, 0x22, 0x01, 143,
0x3b, 0x22, 0x01, 143
};
static const char eclktable[] = {
0x3b, 0x22, 0x01, 143,
0x3b, 0x22, 0x01, 143,
0x3b, 0x22, 0x01, 143,
0x3b, 0x22, 0x01, 143
};
static const char ramtypetable1[] = {
0x00, 0x04, 0x60, 0x60,
0x0f, 0x0f, 0x1f, 0x1f,
0xba, 0xba, 0xba, 0xba,
0xa9, 0xa9, 0xac, 0xac,
0xa0, 0xa0, 0xa0, 0xa8,
0x00, 0x00, 0x02, 0x02,
0x30, 0x30, 0x40, 0x40
};
static const char ramtypetable2[] = {
0x77, 0x77, 0x44, 0x44,
0x77, 0x77, 0x44, 0x44,
0x00, 0x00, 0x00, 0x00,
0x5b, 0x5b, 0xab, 0xab,
0x00, 0x00, 0xf0, 0xf8
};
while (retry--) {
/* Enable VGA */
ret = GETREG(SISVGAEN, &tmp8);
ret |= SETREG(SISVGAEN, (tmp8 | 0x01));
/* Enable GPU access to VRAM */
ret |= GETREG(SISMISCR, &tmp8);
ret |= SETREG(SISMISCW, (tmp8 | 0x01));
if (ret)
continue;
/* Reset registers */
ret |= SETIREGAND(SISCR, 0x5b, 0xdf);
ret |= SETIREG(SISSR, 0x05, 0x86);
ret |= SETIREGOR(SISSR, 0x20, 0x01);
ret |= SETREG(SISMISCW, 0x67);
for (i = 0x06; i <= 0x1f; i++)
ret |= SETIREG(SISSR, i, 0x00);
for (i = 0x21; i <= 0x27; i++)
ret |= SETIREG(SISSR, i, 0x00);
for (i = 0x31; i <= 0x3d; i++)
ret |= SETIREG(SISSR, i, 0x00);
for (i = 0x12; i <= 0x1b; i++)
ret |= SETIREG(SISSR, i, 0x00);
for (i = 0x79; i <= 0x7c; i++)
ret |= SETIREG(SISCR, i, 0x00);
if (ret)
continue;
ret |= SETIREG(SISCR, 0x63, 0x80);
ret |= GETIREG(SISSR, 0x3a, &ramtype);
ramtype &= 0x03;
ret |= SETIREG(SISSR, 0x28, mclktable[ramtype * 4]);
ret |= SETIREG(SISSR, 0x29, mclktable[(ramtype * 4) + 1]);
ret |= SETIREG(SISSR, 0x2a, mclktable[(ramtype * 4) + 2]);
ret |= SETIREG(SISSR, 0x2e, eclktable[ramtype * 4]);
ret |= SETIREG(SISSR, 0x2f, eclktable[(ramtype * 4) + 1]);
ret |= SETIREG(SISSR, 0x30, eclktable[(ramtype * 4) + 2]);
ret |= SETIREG(SISSR, 0x07, 0x18);
ret |= SETIREG(SISSR, 0x11, 0x0f);
if (ret)
continue;
for (i = 0x15, j = 0; i <= 0x1b; i++, j++) {
ret |= SETIREG(SISSR, i,
ramtypetable1[(j*4) + ramtype]);
}
for (i = 0x40, j = 0; i <= 0x44; i++, j++) {
ret |= SETIREG(SISCR, i,
ramtypetable2[(j*4) + ramtype]);
}
ret |= SETIREG(SISCR, 0x49, 0xaa);
ret |= SETIREG(SISSR, 0x1f, 0x00);
ret |= SETIREG(SISSR, 0x20, 0xa0);
ret |= SETIREG(SISSR, 0x23, 0xf6);
ret |= SETIREG(SISSR, 0x24, 0x0d);
ret |= SETIREG(SISSR, 0x25, 0x33);
ret |= SETIREG(SISSR, 0x11, 0x0f);
ret |= SETIREGOR(SISPART1, 0x2f, 0x01);
ret |= SETIREGAND(SISCAP, 0x3f, 0xef);
if (ret)
continue;
ret |= SETIREG(SISPART1, 0x00, 0x00);
ret |= GETIREG(SISSR, 0x13, &tmp8);
tmp8 >>= 4;
ret |= SETIREG(SISPART1, 0x02, 0x00);
ret |= SETIREG(SISPART1, 0x2e, 0x08);
ret |= sisusb_read_pci_config(sisusb, 0x50, &tmp32);
tmp32 &= 0x00f00000;
tmp8 = (tmp32 == 0x100000) ? 0x33 : 0x03;
ret |= SETIREG(SISSR, 0x25, tmp8);
tmp8 = (tmp32 == 0x100000) ? 0xaa : 0x88;
ret |= SETIREG(SISCR, 0x49, tmp8);
ret |= SETIREG(SISSR, 0x27, 0x1f);
ret |= SETIREG(SISSR, 0x31, 0x00);
ret |= SETIREG(SISSR, 0x32, 0x11);
ret |= SETIREG(SISSR, 0x33, 0x00);
if (ret)
continue;
ret |= SETIREG(SISCR, 0x83, 0x00);
sisusb_set_default_mode(sisusb, 0);
ret |= SETIREGAND(SISSR, 0x21, 0xdf);
ret |= SETIREGOR(SISSR, 0x01, 0x20);
ret |= SETIREGOR(SISSR, 0x16, 0x0f);
ret |= sisusb_triggersr16(sisusb, ramtype);
/* Disable refresh */
ret |= SETIREGAND(SISSR, 0x17, 0xf8);
ret |= SETIREGOR(SISSR, 0x19, 0x03);
ret |= sisusb_getbuswidth(sisusb, &bw, &chab);
ret |= sisusb_verify_mclk(sisusb);
if (ramtype <= 1) {
ret |= sisusb_get_sdram_size(sisusb, &iret, bw, chab);
if (iret) {
dev_err(&sisusb->sisusb_dev->dev,
"RAM size detection failed, assuming 8MB video RAM\n");
ret |= SETIREG(SISSR, 0x14, 0x31);
/* TODO */
}
} else {
dev_err(&sisusb->sisusb_dev->dev,
"DDR RAM device found, assuming 8MB video RAM\n");
ret |= SETIREG(SISSR, 0x14, 0x31);
/* *** TODO *** */
}
/* Enable refresh */
ret |= SETIREG(SISSR, 0x16, ramtypetable1[4 + ramtype]);
ret |= SETIREG(SISSR, 0x17, ramtypetable1[8 + ramtype]);
ret |= SETIREG(SISSR, 0x19, ramtypetable1[16 + ramtype]);
ret |= SETIREGOR(SISSR, 0x21, 0x20);
ret |= SETIREG(SISSR, 0x22, 0xfb);
ret |= SETIREG(SISSR, 0x21, 0xa5);
if (ret == 0)
break;
}
return ret;
}
#undef SETREG
#undef GETREG
#undef SETIREG
#undef GETIREG
#undef SETIREGOR
#undef SETIREGAND
#undef SETIREGANDOR
#undef READL
#undef WRITEL
static void sisusb_get_ramconfig(struct sisusb_usb_data *sisusb)
{
u8 tmp8, tmp82, ramtype;
int bw = 0;
char *ramtypetext1 = NULL;
static const char ram_datarate[4] = {'S', 'S', 'D', 'D'};
static const char ram_dynamictype[4] = {'D', 'G', 'D', 'G'};
static const int busSDR[4] = {64, 64, 128, 128};
static const int busDDR[4] = {32, 32, 64, 64};
static const int busDDRA[4] = {64+32, 64+32, (64+32)*2, (64+32)*2};
sisusb_getidxreg(sisusb, SISSR, 0x14, &tmp8);
sisusb_getidxreg(sisusb, SISSR, 0x15, &tmp82);
sisusb_getidxreg(sisusb, SISSR, 0x3a, &ramtype);
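	/*
	 * SR 0x14 bits 7:4 hold log2 of the RAM size in MB, bits 3:2 the
	 * channel/rank configuration decoded below; SR 0x15 and the RAM
	 * type select the bus width.
	 */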
sisusb->vramsize = (1 << ((tmp8 & 0xf0) >> 4)) * 1024 * 1024;
ramtype &= 0x03;
switch ((tmp8 >> 2) & 0x03) {
case 0:
ramtypetext1 = "1 ch/1 r";
if (tmp82 & 0x10)
bw = 32;
else
bw = busSDR[(tmp8 & 0x03)];
break;
case 1:
ramtypetext1 = "1 ch/2 r";
sisusb->vramsize <<= 1;
bw = busSDR[(tmp8 & 0x03)];
break;
case 2:
ramtypetext1 = "asymmetric";
sisusb->vramsize += sisusb->vramsize/2;
bw = busDDRA[(tmp8 & 0x03)];
break;
case 3:
ramtypetext1 = "2 channel";
sisusb->vramsize <<= 1;
bw = busDDR[(tmp8 & 0x03)];
break;
}
dev_info(&sisusb->sisusb_dev->dev,
"%dMB %s %cDR S%cRAM, bus width %d\n",
sisusb->vramsize >> 20, ramtypetext1,
ram_datarate[ramtype], ram_dynamictype[ramtype], bw);
}
static int sisusb_do_init_gfxdevice(struct sisusb_usb_data *sisusb)
{
struct sisusb_packet packet;
int ret;
u32 tmp32;
/* Do some magic */
packet.header = 0x001f;
packet.address = 0x00000324;
packet.data = 0x00000004;
ret = sisusb_send_bridge_packet(sisusb, 10, &packet, 0);
packet.header = 0x001f;
packet.address = 0x00000364;
packet.data = 0x00000004;
ret |= sisusb_send_bridge_packet(sisusb, 10, &packet, 0);
packet.header = 0x001f;
packet.address = 0x00000384;
packet.data = 0x00000004;
ret |= sisusb_send_bridge_packet(sisusb, 10, &packet, 0);
packet.header = 0x001f;
packet.address = 0x00000100;
packet.data = 0x00000700;
ret |= sisusb_send_bridge_packet(sisusb, 10, &packet, 0);
packet.header = 0x000f;
packet.address = 0x00000004;
ret |= sisusb_send_bridge_packet(sisusb, 6, &packet, 0);
packet.data |= 0x17;
ret |= sisusb_send_bridge_packet(sisusb, 10, &packet, 0);
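	/*
	 * Program the BARs the classic PCI way: write all-ones to probe
	 * each region, then set the desired base while preserving the
	 * low flag bits.
	 */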
/* Init BAR 0 (VRAM) */
ret |= sisusb_read_pci_config(sisusb, 0x10, &tmp32);
ret |= sisusb_write_pci_config(sisusb, 0x10, 0xfffffff0);
ret |= sisusb_read_pci_config(sisusb, 0x10, &tmp32);
tmp32 &= 0x0f;
tmp32 |= SISUSB_PCI_MEMBASE;
ret |= sisusb_write_pci_config(sisusb, 0x10, tmp32);
/* Init BAR 1 (MMIO) */
ret |= sisusb_read_pci_config(sisusb, 0x14, &tmp32);
ret |= sisusb_write_pci_config(sisusb, 0x14, 0xfffffff0);
ret |= sisusb_read_pci_config(sisusb, 0x14, &tmp32);
tmp32 &= 0x0f;
tmp32 |= SISUSB_PCI_MMIOBASE;
ret |= sisusb_write_pci_config(sisusb, 0x14, tmp32);
/* Init BAR 2 (i/o ports) */
ret |= sisusb_read_pci_config(sisusb, 0x18, &tmp32);
ret |= sisusb_write_pci_config(sisusb, 0x18, 0xfffffff0);
ret |= sisusb_read_pci_config(sisusb, 0x18, &tmp32);
tmp32 &= 0x0f;
tmp32 |= SISUSB_PCI_IOPORTBASE;
ret |= sisusb_write_pci_config(sisusb, 0x18, tmp32);
/* Enable memory and i/o access */
ret |= sisusb_read_pci_config(sisusb, 0x04, &tmp32);
tmp32 |= 0x3;
ret |= sisusb_write_pci_config(sisusb, 0x04, tmp32);
if (ret == 0) {
/* Some further magic */
packet.header = 0x001f;
packet.address = 0x00000050;
packet.data = 0x000000ff;
ret |= sisusb_send_bridge_packet(sisusb, 10, &packet, 0);
}
return ret;
}
/* Initialize the graphics device (return 0 on success)
* This initializes the net2280 as well as the PCI registers
* of the graphics board.
*/
static int sisusb_init_gfxdevice(struct sisusb_usb_data *sisusb, int initscreen)
{
int ret = 0, test = 0;
u32 tmp32;
if (sisusb->devinit == 1) {
/* Read PCI BARs and see if they have been set up */
ret |= sisusb_read_pci_config(sisusb, 0x10, &tmp32);
if (ret)
return ret;
if ((tmp32 & 0xfffffff0) == SISUSB_PCI_MEMBASE)
test++;
ret |= sisusb_read_pci_config(sisusb, 0x14, &tmp32);
if (ret)
return ret;
if ((tmp32 & 0xfffffff0) == SISUSB_PCI_MMIOBASE)
test++;
ret |= sisusb_read_pci_config(sisusb, 0x18, &tmp32);
if (ret)
return ret;
if ((tmp32 & 0xfffffff0) == SISUSB_PCI_IOPORTBASE)
test++;
}
/* No? So reset the device */
if ((sisusb->devinit == 0) || (test != 3)) {
ret |= sisusb_do_init_gfxdevice(sisusb);
if (ret == 0)
sisusb->devinit = 1;
}
if (sisusb->devinit) {
/* Initialize the graphics core */
if (sisusb_init_gfxcore(sisusb) == 0) {
sisusb->gfxinit = 1;
sisusb_get_ramconfig(sisusb);
sisusb_set_default_mode(sisusb, 1);
ret |= sisusb_setup_screen(sisusb, 1, initscreen);
}
}
return ret;
}
/* fops */
static int sisusb_open(struct inode *inode, struct file *file)
{
struct sisusb_usb_data *sisusb;
struct usb_interface *interface;
int subminor = iminor(inode);
interface = usb_find_interface(&sisusb_driver, subminor);
if (!interface)
return -ENODEV;
sisusb = usb_get_intfdata(interface);
if (!sisusb)
return -ENODEV;
mutex_lock(&sisusb->lock);
if (!sisusb->present || !sisusb->ready) {
mutex_unlock(&sisusb->lock);
return -ENODEV;
}
if (sisusb->isopen) {
mutex_unlock(&sisusb->lock);
return -EBUSY;
}
if (!sisusb->devinit) {
if (sisusb->sisusb_dev->speed == USB_SPEED_HIGH ||
sisusb->sisusb_dev->speed >= USB_SPEED_SUPER) {
if (sisusb_init_gfxdevice(sisusb, 0)) {
mutex_unlock(&sisusb->lock);
dev_err(&sisusb->sisusb_dev->dev,
"Failed to initialize device\n");
return -EIO;
}
} else {
mutex_unlock(&sisusb->lock);
dev_err(&sisusb->sisusb_dev->dev,
"Device not attached to USB 2.0 hub\n");
return -EIO;
}
}
/* Increment usage count for our sisusb */
kref_get(&sisusb->kref);
sisusb->isopen = 1;
file->private_data = sisusb;
mutex_unlock(&sisusb->lock);
return 0;
}
static void sisusb_delete(struct kref *kref)
{
struct sisusb_usb_data *sisusb = to_sisusb_dev(kref);
if (!sisusb)
return;
usb_put_dev(sisusb->sisusb_dev);
sisusb->sisusb_dev = NULL;
sisusb_free_buffers(sisusb);
sisusb_free_urbs(sisusb);
kfree(sisusb);
}
static int sisusb_release(struct inode *inode, struct file *file)
{
struct sisusb_usb_data *sisusb;
sisusb = file->private_data;
if (!sisusb)
return -ENODEV;
mutex_lock(&sisusb->lock);
if (sisusb->present) {
/* Wait for all URBs to finish if device still present */
if (!sisusb_wait_all_out_complete(sisusb))
sisusb_kill_all_busy(sisusb);
}
sisusb->isopen = 0;
file->private_data = NULL;
mutex_unlock(&sisusb->lock);
/* decrement the usage count on our device */
kref_put(&sisusb->kref, sisusb_delete);
return 0;
}
static ssize_t sisusb_read(struct file *file, char __user *buffer,
size_t count, loff_t *ppos)
{
struct sisusb_usb_data *sisusb;
ssize_t bytes_read = 0;
int errno = 0;
u8 buf8;
u16 buf16;
u32 buf32, address;
sisusb = file->private_data;
if (!sisusb)
return -ENODEV;
mutex_lock(&sisusb->lock);
/* Sanity check */
if (!sisusb->present || !sisusb->ready || !sisusb->sisusb_dev) {
mutex_unlock(&sisusb->lock);
return -ENODEV;
}
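	/*
	 * The file offset selects the target: the pseudo base addresses
	 * map the position onto i/o ports, video RAM, MMIO or PCI config
	 * space respectively.
	 */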
if ((*ppos) >= SISUSB_PCI_PSEUDO_IOPORTBASE &&
(*ppos) < SISUSB_PCI_PSEUDO_IOPORTBASE + 128) {
address = (*ppos) - SISUSB_PCI_PSEUDO_IOPORTBASE +
SISUSB_PCI_IOPORTBASE;
/* Read i/o ports
* Byte, word and long(32) can be read. As this
* emulates inX instructions, the data returned is
* in machine-endianness.
*/
switch (count) {
case 1:
if (sisusb_read_memio_byte(sisusb, SISUSB_TYPE_IO,
address, &buf8))
errno = -EIO;
else if (put_user(buf8, (u8 __user *)buffer))
errno = -EFAULT;
else
bytes_read = 1;
break;
case 2:
if (sisusb_read_memio_word(sisusb, SISUSB_TYPE_IO,
address, &buf16))
errno = -EIO;
else if (put_user(buf16, (u16 __user *)buffer))
errno = -EFAULT;
else
bytes_read = 2;
break;
case 4:
if (sisusb_read_memio_long(sisusb, SISUSB_TYPE_IO,
address, &buf32))
errno = -EIO;
else if (put_user(buf32, (u32 __user *)buffer))
errno = -EFAULT;
else
bytes_read = 4;
break;
default:
errno = -EIO;
}
} else if ((*ppos) >= SISUSB_PCI_PSEUDO_MEMBASE && (*ppos) <
SISUSB_PCI_PSEUDO_MEMBASE + sisusb->vramsize) {
address = (*ppos) - SISUSB_PCI_PSEUDO_MEMBASE +
SISUSB_PCI_MEMBASE;
/* Read video ram
* Remember: Data delivered is never endian-corrected
*/
errno = sisusb_read_mem_bulk(sisusb, address,
NULL, count, buffer, &bytes_read);
if (bytes_read)
errno = bytes_read;
} else if ((*ppos) >= SISUSB_PCI_PSEUDO_MMIOBASE &&
(*ppos) < SISUSB_PCI_PSEUDO_MMIOBASE +
SISUSB_PCI_MMIOSIZE) {
address = (*ppos) - SISUSB_PCI_PSEUDO_MMIOBASE +
SISUSB_PCI_MMIOBASE;
/* Read MMIO
* Remember: Data delivered is never endian-corrected
*/
errno = sisusb_read_mem_bulk(sisusb, address,
NULL, count, buffer, &bytes_read);
if (bytes_read)
errno = bytes_read;
} else if ((*ppos) >= SISUSB_PCI_PSEUDO_PCIBASE &&
(*ppos) <= SISUSB_PCI_PSEUDO_PCIBASE + 0x5c) {
if (count != 4) {
mutex_unlock(&sisusb->lock);
return -EINVAL;
}
address = (*ppos) - SISUSB_PCI_PSEUDO_PCIBASE;
/* Read PCI config register
* Return value delivered in machine endianness.
*/
if (sisusb_read_pci_config(sisusb, address, &buf32))
errno = -EIO;
else if (put_user(buf32, (u32 __user *)buffer))
errno = -EFAULT;
else
bytes_read = 4;
} else {
errno = -EBADFD;
}
(*ppos) += bytes_read;
mutex_unlock(&sisusb->lock);
return errno ? errno : bytes_read;
}
static ssize_t sisusb_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
struct sisusb_usb_data *sisusb;
int errno = 0;
ssize_t bytes_written = 0;
u8 buf8;
u16 buf16;
u32 buf32, address;
sisusb = file->private_data;
if (!sisusb)
return -ENODEV;
mutex_lock(&sisusb->lock);
/* Sanity check */
if (!sisusb->present || !sisusb->ready || !sisusb->sisusb_dev) {
mutex_unlock(&sisusb->lock);
return -ENODEV;
}
if ((*ppos) >= SISUSB_PCI_PSEUDO_IOPORTBASE &&
(*ppos) < SISUSB_PCI_PSEUDO_IOPORTBASE + 128) {
address = (*ppos) - SISUSB_PCI_PSEUDO_IOPORTBASE +
SISUSB_PCI_IOPORTBASE;
/* Write i/o ports
* Byte, word and long(32) can be written. As this
* emulates outX instructions, the data is expected
* in machine-endianness.
*/
switch (count) {
case 1:
if (get_user(buf8, (u8 __user *)buffer))
errno = -EFAULT;
else if (sisusb_write_memio_byte(sisusb,
SISUSB_TYPE_IO, address, buf8))
errno = -EIO;
else
bytes_written = 1;
break;
case 2:
if (get_user(buf16, (u16 __user *)buffer))
errno = -EFAULT;
else if (sisusb_write_memio_word(sisusb,
SISUSB_TYPE_IO, address, buf16))
errno = -EIO;
else
bytes_written = 2;
break;
case 4:
if (get_user(buf32, (u32 __user *)buffer))
errno = -EFAULT;
else if (sisusb_write_memio_long(sisusb,
SISUSB_TYPE_IO, address, buf32))
errno = -EIO;
else
bytes_written = 4;
break;
default:
errno = -EIO;
}
} else if ((*ppos) >= SISUSB_PCI_PSEUDO_MEMBASE &&
(*ppos) < SISUSB_PCI_PSEUDO_MEMBASE +
sisusb->vramsize) {
address = (*ppos) - SISUSB_PCI_PSEUDO_MEMBASE +
SISUSB_PCI_MEMBASE;
/* Write video ram.
* Buffer is copied 1:1, therefore, on big-endian
* machines, the data must be swapped by userland
* in advance (if applicable; no swapping in 8bpp
* mode or if YUV data is being transferred).
*/
errno = sisusb_write_mem_bulk(sisusb, address, NULL,
count, buffer, 0, &bytes_written);
if (bytes_written)
errno = bytes_written;
} else if ((*ppos) >= SISUSB_PCI_PSEUDO_MMIOBASE &&
(*ppos) < SISUSB_PCI_PSEUDO_MMIOBASE +
SISUSB_PCI_MMIOSIZE) {
address = (*ppos) - SISUSB_PCI_PSEUDO_MMIOBASE +
SISUSB_PCI_MMIOBASE;
/* Write MMIO.
* Buffer is copied 1:1, therefore, on big-endian
* machines, the data must be swapped by userland
* in advance.
*/
errno = sisusb_write_mem_bulk(sisusb, address, NULL,
count, buffer, 0, &bytes_written);
if (bytes_written)
errno = bytes_written;
} else if ((*ppos) >= SISUSB_PCI_PSEUDO_PCIBASE &&
(*ppos) <= SISUSB_PCI_PSEUDO_PCIBASE +
SISUSB_PCI_PCONFSIZE) {
if (count != 4) {
mutex_unlock(&sisusb->lock);
return -EINVAL;
}
address = (*ppos) - SISUSB_PCI_PSEUDO_PCIBASE;
/* Write PCI config register.
* Given value expected in machine endianness.
*/
if (get_user(buf32, (u32 __user *)buffer))
errno = -EFAULT;
else if (sisusb_write_pci_config(sisusb, address, buf32))
errno = -EIO;
else
bytes_written = 4;
} else {
/* Error */
errno = -EBADFD;
}
(*ppos) += bytes_written;
mutex_unlock(&sisusb->lock);
return errno ? errno : bytes_written;
}
static loff_t sisusb_lseek(struct file *file, loff_t offset, int orig)
{
struct sisusb_usb_data *sisusb;
loff_t ret;
sisusb = file->private_data;
if (!sisusb)
return -ENODEV;
mutex_lock(&sisusb->lock);
/* Sanity check */
if (!sisusb->present || !sisusb->ready || !sisusb->sisusb_dev) {
mutex_unlock(&sisusb->lock);
return -ENODEV;
}
ret = no_seek_end_llseek(file, offset, orig);
mutex_unlock(&sisusb->lock);
return ret;
}
static int sisusb_handle_command(struct sisusb_usb_data *sisusb,
struct sisusb_command *y, unsigned long arg)
{
int retval, length;
u32 port, address;
/* All our commands require the device
* to be initialized.
*/
if (!sisusb->devinit)
return -ENODEV;
port = y->data3 -
SISUSB_PCI_PSEUDO_IOPORTBASE +
SISUSB_PCI_IOPORTBASE;
switch (y->operation) {
case SUCMD_GET:
retval = sisusb_getidxreg(sisusb, port, y->data0, &y->data1);
if (!retval) {
if (copy_to_user((void __user *)arg, y, sizeof(*y)))
retval = -EFAULT;
}
break;
case SUCMD_SET:
retval = sisusb_setidxreg(sisusb, port, y->data0, y->data1);
break;
case SUCMD_SETOR:
retval = sisusb_setidxregor(sisusb, port, y->data0, y->data1);
break;
case SUCMD_SETAND:
retval = sisusb_setidxregand(sisusb, port, y->data0, y->data1);
break;
case SUCMD_SETANDOR:
retval = sisusb_setidxregandor(sisusb, port, y->data0,
y->data1, y->data2);
break;
case SUCMD_SETMASK:
retval = sisusb_setidxregmask(sisusb, port, y->data0,
y->data1, y->data2);
break;
case SUCMD_CLRSCR:
/* Gfx core must be initialized */
if (!sisusb->gfxinit)
return -ENODEV;
length = (y->data0 << 16) | (y->data1 << 8) | y->data2;
address = y->data3 - SISUSB_PCI_PSEUDO_MEMBASE +
SISUSB_PCI_MEMBASE;
retval = sisusb_clear_vram(sisusb, address, length);
break;
case SUCMD_HANDLETEXTMODE:
retval = 0;
break;
default:
retval = -EINVAL;
}
if (retval > 0)
retval = -EIO;
return retval;
}
static long sisusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct sisusb_usb_data *sisusb;
struct sisusb_info x;
struct sisusb_command y;
long retval = 0;
u32 __user *argp = (u32 __user *)arg;
sisusb = file->private_data;
if (!sisusb)
return -ENODEV;
mutex_lock(&sisusb->lock);
/* Sanity check */
if (!sisusb->present || !sisusb->ready || !sisusb->sisusb_dev) {
retval = -ENODEV;
goto err_out;
}
switch (cmd) {
case SISUSB_GET_CONFIG_SIZE:
if (put_user(sizeof(x), argp))
retval = -EFAULT;
break;
case SISUSB_GET_CONFIG:
x.sisusb_id = SISUSB_ID;
x.sisusb_version = SISUSB_VERSION;
x.sisusb_revision = SISUSB_REVISION;
x.sisusb_patchlevel = SISUSB_PATCHLEVEL;
x.sisusb_gfxinit = sisusb->gfxinit;
x.sisusb_vrambase = SISUSB_PCI_PSEUDO_MEMBASE;
x.sisusb_mmiobase = SISUSB_PCI_PSEUDO_MMIOBASE;
x.sisusb_iobase = SISUSB_PCI_PSEUDO_IOPORTBASE;
x.sisusb_pcibase = SISUSB_PCI_PSEUDO_PCIBASE;
x.sisusb_vramsize = sisusb->vramsize;
x.sisusb_minor = sisusb->minor;
x.sisusb_fbdevactive = 0;
x.sisusb_conactive = 0;
memset(x.sisusb_reserved, 0, sizeof(x.sisusb_reserved));
if (copy_to_user((void __user *)arg, &x, sizeof(x)))
retval = -EFAULT;
break;
case SISUSB_COMMAND:
if (copy_from_user(&y, (void __user *)arg, sizeof(y)))
retval = -EFAULT;
else
retval = sisusb_handle_command(sisusb, &y, arg);
break;
default:
retval = -ENOTTY;
break;
}
err_out:
mutex_unlock(&sisusb->lock);
return retval;
}
#ifdef CONFIG_COMPAT
static long sisusb_compat_ioctl(struct file *f, unsigned int cmd,
unsigned long arg)
{
switch (cmd) {
case SISUSB_GET_CONFIG_SIZE:
case SISUSB_GET_CONFIG:
case SISUSB_COMMAND:
return sisusb_ioctl(f, cmd, arg);
default:
return -ENOIOCTLCMD;
}
}
#endif
static const struct file_operations usb_sisusb_fops = {
.owner = THIS_MODULE,
.open = sisusb_open,
.release = sisusb_release,
.read = sisusb_read,
.write = sisusb_write,
.llseek = sisusb_lseek,
#ifdef CONFIG_COMPAT
.compat_ioctl = sisusb_compat_ioctl,
#endif
.unlocked_ioctl = sisusb_ioctl
};
static struct usb_class_driver usb_sisusb_class = {
.name = "sisusbvga%d",
.fops = &usb_sisusb_fops,
.minor_base = SISUSB_MINOR
};
static int sisusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *dev = interface_to_usbdev(intf);
struct sisusb_usb_data *sisusb;
int retval = 0, i;
static const u8 ep_addresses[] = {
SISUSB_EP_GFX_IN | USB_DIR_IN,
SISUSB_EP_GFX_OUT | USB_DIR_OUT,
SISUSB_EP_GFX_BULK_OUT | USB_DIR_OUT,
SISUSB_EP_GFX_LBULK_OUT | USB_DIR_OUT,
SISUSB_EP_BRIDGE_IN | USB_DIR_IN,
SISUSB_EP_BRIDGE_OUT | USB_DIR_OUT,
0};
/* Are the expected endpoints present? */
if (!usb_check_bulk_endpoints(intf, ep_addresses)) {
dev_err(&intf->dev, "Invalid USB2VGA device\n");
return -EINVAL;
}
dev_info(&dev->dev, "USB2VGA dongle found at address %d\n",
dev->devnum);
/* Allocate memory for our private */
sisusb = kzalloc(sizeof(*sisusb), GFP_KERNEL);
if (!sisusb)
return -ENOMEM;
kref_init(&sisusb->kref);
mutex_init(&(sisusb->lock));
sisusb->sisusb_dev = dev;
sisusb->vrambase = SISUSB_PCI_MEMBASE;
sisusb->mmiobase = SISUSB_PCI_MMIOBASE;
sisusb->mmiosize = SISUSB_PCI_MMIOSIZE;
sisusb->ioportbase = SISUSB_PCI_IOPORTBASE;
/* Everything else is zero */
/* Register device */
retval = usb_register_dev(intf, &usb_sisusb_class);
if (retval) {
dev_err(&sisusb->sisusb_dev->dev,
"Failed to get a minor for device %d\n",
dev->devnum);
retval = -ENODEV;
goto error_1;
}
sisusb->minor = intf->minor;
/* Allocate buffers */
sisusb->ibufsize = SISUSB_IBUF_SIZE;
sisusb->ibuf = kmalloc(SISUSB_IBUF_SIZE, GFP_KERNEL);
if (!sisusb->ibuf) {
retval = -ENOMEM;
goto error_2;
}
sisusb->numobufs = 0;
sisusb->obufsize = SISUSB_OBUF_SIZE;
for (i = 0; i < NUMOBUFS; i++) {
sisusb->obuf[i] = kmalloc(SISUSB_OBUF_SIZE, GFP_KERNEL);
if (!sisusb->obuf[i]) {
if (i == 0) {
retval = -ENOMEM;
goto error_3;
}
break;
}
sisusb->numobufs++;
}
/* Allocate URBs */
sisusb->sisurbin = usb_alloc_urb(0, GFP_KERNEL);
if (!sisusb->sisurbin) {
retval = -ENOMEM;
goto error_3;
}
sisusb->completein = 1;
for (i = 0; i < sisusb->numobufs; i++) {
sisusb->sisurbout[i] = usb_alloc_urb(0, GFP_KERNEL);
if (!sisusb->sisurbout[i]) {
retval = -ENOMEM;
goto error_4;
}
sisusb->urbout_context[i].sisusb = (void *)sisusb;
sisusb->urbout_context[i].urbindex = i;
sisusb->urbstatus[i] = 0;
}
dev_info(&sisusb->sisusb_dev->dev, "Allocated %d output buffers\n",
sisusb->numobufs);
/* Do remaining init stuff */
init_waitqueue_head(&sisusb->wait_q);
usb_set_intfdata(intf, sisusb);
usb_get_dev(sisusb->sisusb_dev);
sisusb->present = 1;
if (dev->speed == USB_SPEED_HIGH || dev->speed >= USB_SPEED_SUPER) {
int initscreen = 1;
if (sisusb_init_gfxdevice(sisusb, initscreen))
dev_err(&sisusb->sisusb_dev->dev,
"Failed to early initialize device\n");
} else
dev_info(&sisusb->sisusb_dev->dev,
"Not attached to USB 2.0 hub, deferring init\n");
sisusb->ready = 1;
#ifdef SISUSBENDIANTEST
dev_dbg(&sisusb->sisusb_dev->dev, "*** RWTEST ***\n");
sisusb_testreadwrite(sisusb);
dev_dbg(&sisusb->sisusb_dev->dev, "*** RWTEST END ***\n");
#endif
return 0;
error_4:
sisusb_free_urbs(sisusb);
error_3:
sisusb_free_buffers(sisusb);
error_2:
usb_deregister_dev(intf, &usb_sisusb_class);
error_1:
kfree(sisusb);
return retval;
}
static void sisusb_disconnect(struct usb_interface *intf)
{
struct sisusb_usb_data *sisusb;
/* This should *not* happen */
sisusb = usb_get_intfdata(intf);
if (!sisusb)
return;
usb_deregister_dev(intf, &usb_sisusb_class);
mutex_lock(&sisusb->lock);
	/* Wait for all URBs to complete and kill any still busy (MUST do) */
if (!sisusb_wait_all_out_complete(sisusb))
sisusb_kill_all_busy(sisusb);
usb_set_intfdata(intf, NULL);
sisusb->present = 0;
sisusb->ready = 0;
mutex_unlock(&sisusb->lock);
/* decrement our usage count */
kref_put(&sisusb->kref, sisusb_delete);
}
static const struct usb_device_id sisusb_table[] = {
{ USB_DEVICE(0x0711, 0x0550) },
{ USB_DEVICE(0x0711, 0x0900) },
{ USB_DEVICE(0x0711, 0x0901) },
{ USB_DEVICE(0x0711, 0x0902) },
{ USB_DEVICE(0x0711, 0x0903) },
{ USB_DEVICE(0x0711, 0x0918) },
{ USB_DEVICE(0x0711, 0x0920) },
{ USB_DEVICE(0x0711, 0x0950) },
{ USB_DEVICE(0x0711, 0x5200) },
{ USB_DEVICE(0x182d, 0x021c) },
{ USB_DEVICE(0x182d, 0x0269) },
{ }
};
MODULE_DEVICE_TABLE(usb, sisusb_table);
static struct usb_driver sisusb_driver = {
.name = "sisusb",
.probe = sisusb_probe,
.disconnect = sisusb_disconnect,
.id_table = sisusb_table,
};
module_usb_driver(sisusb_driver);
MODULE_AUTHOR("Thomas Winischhofer <[email protected]>");
MODULE_DESCRIPTION("sisusbvga - Driver for Net2280/SiS315-based USB2VGA dongles");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/misc/sisusbvga/sisusbvga.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Standalone EHCI usb debug driver
*
* Originally written by:
* Eric W. Biederman" <[email protected]> and
* Yinghai Lu <[email protected]>
*
* Changes for early/late printk and HW errata:
* Jason Wessel <[email protected]>
* Copyright (C) 2009 Wind River Systems, Inc.
*
*/
#include <linux/console.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/pci_regs.h>
#include <linux/pci_ids.h>
#include <linux/usb/ch9.h>
#include <linux/usb/ehci_def.h>
#include <linux/delay.h>
#include <linux/serial_core.h>
#include <linux/kgdb.h>
#include <linux/kthread.h>
#include <asm/io.h>
#include <asm/pci-direct.h>
#include <asm/fixmap.h>
/* The code here is intended to talk directly to the EHCI debug port
* and does not require that you have any kind of USB host controller
* drivers or USB device drivers compiled into the kernel.
*
* If you make a change to anything in here, the following test cases
* need to pass where a USB debug device works in the following
* configurations.
*
* 1. boot args: earlyprintk=dbgp
* o kernel compiled with # CONFIG_USB_EHCI_HCD is not set
* o kernel compiled with CONFIG_USB_EHCI_HCD=y
* 2. boot args: earlyprintk=dbgp,keep
* o kernel compiled with # CONFIG_USB_EHCI_HCD is not set
* o kernel compiled with CONFIG_USB_EHCI_HCD=y
* 3. boot args: earlyprintk=dbgp console=ttyUSB0
* o kernel has CONFIG_USB_EHCI_HCD=y and
* CONFIG_USB_SERIAL_DEBUG=y
* 4. boot args: earlyprintk=vga,dbgp
* o kernel compiled with # CONFIG_USB_EHCI_HCD is not set
* o kernel compiled with CONFIG_USB_EHCI_HCD=y
*
* For the 4th configuration you can turn on or off the DBGP_DEBUG
* such that you can debug the dbgp device's driver code.
*/
static int dbgp_phys_port = 1;
static struct ehci_caps __iomem *ehci_caps;
static struct ehci_regs __iomem *ehci_regs;
static struct ehci_dbg_port __iomem *ehci_debug;
static int dbgp_not_safe; /* Cannot use debug device during ehci reset */
static unsigned int dbgp_endpoint_out;
static unsigned int dbgp_endpoint_in;
struct ehci_dev {
u32 bus;
u32 slot;
u32 func;
};
static struct ehci_dev ehci_dev;
#define USB_DEBUG_DEVNUM 127
#ifdef DBGP_DEBUG
#define dbgp_printk printk
static void dbgp_ehci_status(char *str)
{
if (!ehci_debug)
return;
dbgp_printk("dbgp: %s\n", str);
dbgp_printk(" Debug control: %08x", readl(&ehci_debug->control));
dbgp_printk(" ehci cmd : %08x", readl(&ehci_regs->command));
dbgp_printk(" ehci conf flg: %08x\n",
readl(&ehci_regs->configured_flag));
dbgp_printk(" ehci status : %08x", readl(&ehci_regs->status));
dbgp_printk(" ehci portsc : %08x\n",
readl(&ehci_regs->port_status[dbgp_phys_port - 1]));
}
#else
static inline void dbgp_ehci_status(char *str) { }
static inline void dbgp_printk(const char *fmt, ...) { }
#endif
static inline u32 dbgp_len_update(u32 x, u32 len)
{
return (x & ~0x0f) | (len & 0x0f);
}
#ifdef CONFIG_KGDB
static struct kgdb_io kgdbdbgp_io_ops;
#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
#else
#define dbgp_kgdb_mode (0)
#endif
/* Local version of HC_LENGTH macro as ehci struct is not available here */
#define EARLY_HC_LENGTH(p) (0x00ff & (p)) /* bits 7 : 0 */
/*
* USB Packet IDs (PIDs)
*/
/* token */
#define USB_PID_OUT 0xe1
#define USB_PID_IN 0x69
#define USB_PID_SOF 0xa5
#define USB_PID_SETUP 0x2d
/* handshake */
#define USB_PID_ACK 0xd2
#define USB_PID_NAK 0x5a
#define USB_PID_STALL 0x1e
#define USB_PID_NYET 0x96
/* data */
#define USB_PID_DATA0 0xc3
#define USB_PID_DATA1 0x4b
#define USB_PID_DATA2 0x87
#define USB_PID_MDATA 0x0f
/* Special */
#define USB_PID_PREAMBLE 0x3c
#define USB_PID_ERR 0x3c
#define USB_PID_SPLIT 0x78
#define USB_PID_PING 0xb4
#define USB_PID_UNDEF_0 0xf0
#define USB_PID_DATA_TOGGLE 0x88
#define DBGP_CLAIM (DBGP_OWNER | DBGP_ENABLED | DBGP_INUSE)
#define PCI_CAP_ID_EHCI_DEBUG 0xa
#define HUB_ROOT_RESET_TIME 50 /* times are in msec */
#define HUB_SHORT_RESET_TIME 10
#define HUB_LONG_RESET_TIME 200
#define HUB_RESET_TIMEOUT 500
#define DBGP_MAX_PACKET 8
#define DBGP_TIMEOUT (250 * 1000)
#define DBGP_LOOPS 1000
static inline u32 dbgp_pid_write_update(u32 x, u32 tok)
{
static int data0 = USB_PID_DATA1;
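	/*
	 * DATA0 (0xc3) and DATA1 (0x4b) differ exactly in the bits of
	 * USB_PID_DATA_TOGGLE (0x88), so the XOR flips the data toggle
	 * on every write.
	 */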
data0 ^= USB_PID_DATA_TOGGLE;
return (x & 0xffff0000) | (data0 << 8) | (tok & 0xff);
}
static inline u32 dbgp_pid_read_update(u32 x, u32 tok)
{
return (x & 0xffff0000) | (USB_PID_DATA0 << 8) | (tok & 0xff);
}
static int dbgp_wait_until_complete(void)
{
u32 ctrl;
int ret;
ret = readl_poll_timeout_atomic(&ehci_debug->control, ctrl,
(ctrl & DBGP_DONE), 1, DBGP_TIMEOUT);
if (ret)
return -DBGP_TIMEOUT;
/*
* Now that we have observed the completed transaction,
* clear the done bit.
*/
writel(ctrl | DBGP_DONE, &ehci_debug->control);
return (ctrl & DBGP_ERROR) ? -DBGP_ERRCODE(ctrl) : DBGP_LEN(ctrl);
}
static inline void dbgp_mdelay(int ms)
{
int i;
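	/*
	 * A write to port 0x80 takes roughly 1us on PC hardware, giving
	 * a delay that works before the timer infrastructure is up.
	 */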
while (ms--) {
for (i = 0; i < 1000; i++)
outb(0x1, 0x80);
}
}
static void dbgp_breath(void)
{
/* Sleep to give the debug port a chance to breathe */
}
static int dbgp_wait_until_done(unsigned ctrl, int loop)
{
u32 pids, lpid;
int ret;
retry:
writel(ctrl | DBGP_GO, &ehci_debug->control);
ret = dbgp_wait_until_complete();
pids = readl(&ehci_debug->pids);
lpid = DBGP_PID_GET(pids);
if (ret < 0) {
/* A -DBGP_TIMEOUT failure here means the device has
* failed, perhaps because it was unplugged, in which
* case we do not want to hang the system so the dbgp
* will be marked as unsafe to use. EHCI reset is the
* only way to recover if you unplug the dbgp device.
*/
if (ret == -DBGP_TIMEOUT && !dbgp_not_safe)
dbgp_not_safe = 1;
if (ret == -DBGP_ERR_BAD && --loop > 0)
goto retry;
return ret;
}
/*
* If the port is getting full or it has dropped data
* start pacing ourselves, not necessary but it's friendly.
*/
if ((lpid == USB_PID_NAK) || (lpid == USB_PID_NYET))
dbgp_breath();
	/* If we get a NAK, reissue the transmission */
if (lpid == USB_PID_NAK) {
if (--loop > 0)
goto retry;
}
return ret;
}
static inline void dbgp_set_data(const void *buf, int size)
{
const unsigned char *bytes = buf;
u32 lo, hi;
int i;
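	/*
	 * The debug port exposes its 8-byte FIFO as two 32-bit
	 * registers; pack the buffer little-endian into data03/data47.
	 */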
lo = hi = 0;
for (i = 0; i < 4 && i < size; i++)
lo |= bytes[i] << (8*i);
for (; i < 8 && i < size; i++)
hi |= bytes[i] << (8*(i - 4));
writel(lo, &ehci_debug->data03);
writel(hi, &ehci_debug->data47);
}
static inline void dbgp_get_data(void *buf, int size)
{
unsigned char *bytes = buf;
u32 lo, hi;
int i;
lo = readl(&ehci_debug->data03);
hi = readl(&ehci_debug->data47);
for (i = 0; i < 4 && i < size; i++)
bytes[i] = (lo >> (8*i)) & 0xff;
for (; i < 8 && i < size; i++)
bytes[i] = (hi >> (8*(i - 4))) & 0xff;
}
static int dbgp_bulk_write(unsigned devnum, unsigned endpoint,
const char *bytes, int size)
{
int ret;
u32 addr;
u32 pids, ctrl;
if (size > DBGP_MAX_PACKET)
return -1;
addr = DBGP_EPADDR(devnum, endpoint);
pids = readl(&ehci_debug->pids);
pids = dbgp_pid_write_update(pids, USB_PID_OUT);
ctrl = readl(&ehci_debug->control);
ctrl = dbgp_len_update(ctrl, size);
ctrl |= DBGP_OUT;
ctrl |= DBGP_GO;
dbgp_set_data(bytes, size);
writel(addr, &ehci_debug->address);
writel(pids, &ehci_debug->pids);
ret = dbgp_wait_until_done(ctrl, DBGP_LOOPS);
return ret;
}
static int dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data,
int size, int loops)
{
u32 pids, addr, ctrl;
int ret;
if (size > DBGP_MAX_PACKET)
return -1;
addr = DBGP_EPADDR(devnum, endpoint);
pids = readl(&ehci_debug->pids);
pids = dbgp_pid_read_update(pids, USB_PID_IN);
ctrl = readl(&ehci_debug->control);
ctrl = dbgp_len_update(ctrl, size);
ctrl &= ~DBGP_OUT;
ctrl |= DBGP_GO;
writel(addr, &ehci_debug->address);
writel(pids, &ehci_debug->pids);
ret = dbgp_wait_until_done(ctrl, loops);
if (ret < 0)
return ret;
if (size > ret)
size = ret;
dbgp_get_data(data, size);
return ret;
}
static int dbgp_control_msg(unsigned devnum, int requesttype,
int request, int value, int index, void *data, int size)
{
u32 pids, addr, ctrl;
struct usb_ctrlrequest req;
int read;
int ret;
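	/*
	 * A control transfer on the debug port: send the 8-byte SETUP
	 * packet as an OUT transaction, then run the IN stage to fetch
	 * the result (a zero-length read effectively serves as the
	 * status stage).
	 */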
read = (requesttype & USB_DIR_IN) != 0;
if (size > (read ? DBGP_MAX_PACKET : 0))
return -1;
/* Compute the control message */
req.bRequestType = requesttype;
req.bRequest = request;
req.wValue = cpu_to_le16(value);
req.wIndex = cpu_to_le16(index);
req.wLength = cpu_to_le16(size);
pids = DBGP_PID_SET(USB_PID_DATA0, USB_PID_SETUP);
addr = DBGP_EPADDR(devnum, 0);
ctrl = readl(&ehci_debug->control);
ctrl = dbgp_len_update(ctrl, sizeof(req));
ctrl |= DBGP_OUT;
ctrl |= DBGP_GO;
/* Send the setup message */
dbgp_set_data(&req, sizeof(req));
writel(addr, &ehci_debug->address);
writel(pids, &ehci_debug->pids);
ret = dbgp_wait_until_done(ctrl, DBGP_LOOPS);
if (ret < 0)
return ret;
/* Read the result */
return dbgp_bulk_read(devnum, 0, data, size, DBGP_LOOPS);
}
/* Find a PCI capability */
static u32 __init find_cap(u32 num, u32 slot, u32 func, int cap)
{
u8 pos;
int bytes;
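	/*
	 * Walk the PCI capability linked list; the loop is bounded to 48
	 * entries, and pos must stay above the standard header (0x40),
	 * to protect against malformed lists.
	 */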
if (!(read_pci_config_16(num, slot, func, PCI_STATUS) &
PCI_STATUS_CAP_LIST))
return 0;
pos = read_pci_config_byte(num, slot, func, PCI_CAPABILITY_LIST);
for (bytes = 0; bytes < 48 && pos >= 0x40; bytes++) {
u8 id;
pos &= ~3;
id = read_pci_config_byte(num, slot, func, pos+PCI_CAP_LIST_ID);
if (id == 0xff)
break;
if (id == cap)
return pos;
pos = read_pci_config_byte(num, slot, func,
pos+PCI_CAP_LIST_NEXT);
}
return 0;
}
static u32 __init __find_dbgp(u32 bus, u32 slot, u32 func)
{
u32 class;
class = read_pci_config(bus, slot, func, PCI_CLASS_REVISION);
if ((class >> 8) != PCI_CLASS_SERIAL_USB_EHCI)
return 0;
return find_cap(bus, slot, func, PCI_CAP_ID_EHCI_DEBUG);
}
static u32 __init find_dbgp(int ehci_num, u32 *rbus, u32 *rslot, u32 *rfunc)
{
u32 bus, slot, func;
for (bus = 0; bus < 256; bus++) {
for (slot = 0; slot < 32; slot++) {
for (func = 0; func < 8; func++) {
unsigned cap;
cap = __find_dbgp(bus, slot, func);
if (!cap)
continue;
if (ehci_num-- != 0)
continue;
*rbus = bus;
*rslot = slot;
*rfunc = func;
return cap;
}
}
}
return 0;
}
static int dbgp_ehci_startup(void)
{
u32 ctrl, cmd, status;
int loop;
/* Claim ownership, but do not enable yet */
ctrl = readl(&ehci_debug->control);
ctrl |= DBGP_OWNER;
ctrl &= ~(DBGP_ENABLED | DBGP_INUSE);
writel(ctrl, &ehci_debug->control);
udelay(1);
dbgp_ehci_status("EHCI startup");
/* Start the ehci running */
cmd = readl(&ehci_regs->command);
cmd &= ~(CMD_LRESET | CMD_IAAD | CMD_PSE | CMD_ASE | CMD_RESET);
cmd |= CMD_RUN;
writel(cmd, &ehci_regs->command);
/* Ensure everything is routed to the EHCI */
writel(FLAG_CF, &ehci_regs->configured_flag);
/* Wait until the controller is no longer halted */
loop = 1000;
do {
status = readl(&ehci_regs->status);
if (!(status & STS_HALT))
break;
udelay(1);
} while (--loop > 0);
if (!loop) {
dbgp_printk("ehci can not be started\n");
return -ENODEV;
}
dbgp_printk("ehci started\n");
return 0;
}
static int dbgp_ehci_controller_reset(void)
{
int loop = 250 * 1000;
u32 cmd;
/* Reset the EHCI controller */
cmd = readl(&ehci_regs->command);
cmd |= CMD_RESET;
writel(cmd, &ehci_regs->command);
do {
cmd = readl(&ehci_regs->command);
} while ((cmd & CMD_RESET) && (--loop > 0));
if (!loop) {
dbgp_printk("can not reset ehci\n");
return -1;
}
dbgp_ehci_status("ehci reset done");
return 0;
}
static int ehci_wait_for_port(int port);
/* Return 0 on success
* Return -ENODEV for any general failure
* Return -EIO if wait for port fails
*/
static int _dbgp_external_startup(void)
{
int devnum;
struct usb_debug_descriptor dbgp_desc;
int ret;
u32 ctrl, portsc, cmd;
int dbg_port = dbgp_phys_port;
int tries = 3;
int reset_port_tries = 1;
int try_hard_once = 1;
try_port_reset_again:
ret = dbgp_ehci_startup();
if (ret)
return ret;
/* Wait for a device to show up in the debug port */
ret = ehci_wait_for_port(dbg_port);
if (ret < 0) {
portsc = readl(&ehci_regs->port_status[dbg_port - 1]);
if (!(portsc & PORT_CONNECT) && try_hard_once) {
			/* Last-ditch effort to force-enable the debug
			 * device by using the EHCI packet-test command
			 * to wake it up. */
try_hard_once = 0;
cmd = readl(&ehci_regs->command);
cmd &= ~CMD_RUN;
writel(cmd, &ehci_regs->command);
portsc = readl(&ehci_regs->port_status[dbg_port - 1]);
portsc |= PORT_TEST_PKT;
writel(portsc, &ehci_regs->port_status[dbg_port - 1]);
dbgp_ehci_status("Trying to force debug port online");
mdelay(50);
dbgp_ehci_controller_reset();
goto try_port_reset_again;
} else if (reset_port_tries--) {
goto try_port_reset_again;
}
dbgp_printk("No device found in debug port\n");
return -EIO;
}
dbgp_ehci_status("wait for port done");
/* Enable the debug port */
ctrl = readl(&ehci_debug->control);
ctrl |= DBGP_CLAIM;
writel(ctrl, &ehci_debug->control);
ctrl = readl(&ehci_debug->control);
if ((ctrl & DBGP_CLAIM) != DBGP_CLAIM) {
dbgp_printk("No device in debug port\n");
writel(ctrl & ~DBGP_CLAIM, &ehci_debug->control);
return -ENODEV;
}
dbgp_ehci_status("debug ported enabled");
/* Completely transfer the debug device to the debug controller */
portsc = readl(&ehci_regs->port_status[dbg_port - 1]);
portsc &= ~PORT_PE;
writel(portsc, &ehci_regs->port_status[dbg_port - 1]);
dbgp_mdelay(100);
try_again:
/* Find the debug device and make it device number 127 */
for (devnum = 0; devnum <= 127; devnum++) {
ret = dbgp_control_msg(devnum,
USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
USB_REQ_GET_DESCRIPTOR, (USB_DT_DEBUG << 8), 0,
&dbgp_desc, sizeof(dbgp_desc));
if (ret > 0)
break;
}
if (devnum > 127) {
dbgp_printk("Could not find attached debug device\n");
goto err;
}
dbgp_endpoint_out = dbgp_desc.bDebugOutEndpoint;
dbgp_endpoint_in = dbgp_desc.bDebugInEndpoint;
/* Move the device to 127 if it isn't already there */
if (devnum != USB_DEBUG_DEVNUM) {
ret = dbgp_control_msg(devnum,
USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
USB_REQ_SET_ADDRESS, USB_DEBUG_DEVNUM, 0, NULL, 0);
if (ret < 0) {
dbgp_printk("Could not move attached device to %d\n",
USB_DEBUG_DEVNUM);
goto err;
}
dbgp_printk("debug device renamed to 127\n");
}
/* Enable the debug interface */
ret = dbgp_control_msg(USB_DEBUG_DEVNUM,
USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
USB_REQ_SET_FEATURE, USB_DEVICE_DEBUG_MODE, 0, NULL, 0);
if (ret < 0) {
dbgp_printk(" Could not enable the debug device\n");
goto err;
}
dbgp_printk("debug interface enabled\n");
/* Perform a small write to get the even/odd data state in sync
*/
ret = dbgp_bulk_write(USB_DEBUG_DEVNUM, dbgp_endpoint_out, " ", 1);
if (ret < 0) {
dbgp_printk("dbgp_bulk_write failed: %d\n", ret);
goto err;
}
dbgp_printk("small write done\n");
dbgp_not_safe = 0;
return 0;
err:
if (tries--)
goto try_again;
return -ENODEV;
}
static int ehci_reset_port(int port)
{
u32 portsc;
u32 delay_time, delay;
int loop;
dbgp_ehci_status("reset port");
/* Reset the usb debug port */
portsc = readl(&ehci_regs->port_status[port - 1]);
portsc &= ~PORT_PE;
portsc |= PORT_RESET;
writel(portsc, &ehci_regs->port_status[port - 1]);
delay = HUB_ROOT_RESET_TIME;
for (delay_time = 0; delay_time < HUB_RESET_TIMEOUT;
delay_time += delay) {
dbgp_mdelay(delay);
portsc = readl(&ehci_regs->port_status[port - 1]);
if (!(portsc & PORT_RESET))
break;
}
if (portsc & PORT_RESET) {
/* force reset to complete */
loop = 100 * 1000;
writel(portsc & ~(PORT_RWC_BITS | PORT_RESET),
&ehci_regs->port_status[port - 1]);
do {
udelay(1);
portsc = readl(&ehci_regs->port_status[port-1]);
} while ((portsc & PORT_RESET) && (--loop > 0));
}
/* Device went away? */
if (!(portsc & PORT_CONNECT))
return -ENOTCONN;
/* bomb out completely if something weird happened */
if ((portsc & PORT_CSC))
return -EINVAL;
/* If we've finished resetting, then break out of the loop */
if (!(portsc & PORT_RESET) && (portsc & PORT_PE))
return 0;
return -EBUSY;
}
static int ehci_wait_for_port(int port)
{
u32 status;
int ret, reps;
for (reps = 0; reps < 300; reps++) {
status = readl(&ehci_regs->status);
if (status & STS_PCD)
break;
dbgp_mdelay(1);
}
ret = ehci_reset_port(port);
if (ret == 0)
return 0;
return -ENOTCONN;
}
typedef void (*set_debug_port_t)(int port);
static void __init default_set_debug_port(int port)
{
}
static set_debug_port_t __initdata set_debug_port = default_set_debug_port;
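/*
 * On NVIDIA chipsets the EHCI debug port number is selected by
 * bits 15:12 of PCI config register 0x74.
 */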
static void __init nvidia_set_debug_port(int port)
{
u32 dword;
dword = read_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func,
0x74);
dword &= ~(0x0f<<12);
dword |= ((port & 0x0f)<<12);
write_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func, 0x74,
dword);
dbgp_printk("set debug port to %d\n", port);
}
static void __init detect_set_debug_port(void)
{
u32 vendorid;
vendorid = read_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func,
0x00);
if ((vendorid & 0xffff) == 0x10de) {
dbgp_printk("using nvidia set_debug_port\n");
set_debug_port = nvidia_set_debug_port;
}
}
/* The code in early_ehci_bios_handoff() is derived from the usb pci
* quirk initialization, but altered so as to use the early PCI
* routines. */
#define EHCI_USBLEGSUP_BIOS (1 << 16) /* BIOS semaphore */
#define EHCI_USBLEGCTLSTS 4 /* legacy control/status */
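/*
 * The EHCI Legacy Support capability (found via the EECP field of
 * HCCPARAMS) carries the capability ID in byte 0, the BIOS-owned
 * semaphore in byte 2 (bit 16) and the OS-owned semaphore in byte 3
 * (bit 24). Writing 1 to byte 3 requests ownership from the BIOS.
 */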
static void __init early_ehci_bios_handoff(void)
{
u32 hcc_params = readl(&ehci_caps->hcc_params);
int offset = (hcc_params >> 8) & 0xff;
u32 cap;
int msec;
if (!offset)
return;
cap = read_pci_config(ehci_dev.bus, ehci_dev.slot,
ehci_dev.func, offset);
dbgp_printk("dbgp: ehci BIOS state %08x\n", cap);
if ((cap & 0xff) == 1 && (cap & EHCI_USBLEGSUP_BIOS)) {
dbgp_printk("dbgp: BIOS handoff\n");
write_pci_config_byte(ehci_dev.bus, ehci_dev.slot,
ehci_dev.func, offset + 3, 1);
}
/* if boot firmware now owns EHCI, spin till it hands it over. */
msec = 1000;
while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) {
mdelay(10);
msec -= 10;
cap = read_pci_config(ehci_dev.bus, ehci_dev.slot,
ehci_dev.func, offset);
}
if (cap & EHCI_USBLEGSUP_BIOS) {
/* well, possibly buggy BIOS... try to shut it down,
* and hope nothing goes too wrong */
dbgp_printk("dbgp: BIOS handoff failed: %08x\n", cap);
write_pci_config_byte(ehci_dev.bus, ehci_dev.slot,
ehci_dev.func, offset + 2, 0);
}
/* just in case, always disable EHCI SMIs */
write_pci_config_byte(ehci_dev.bus, ehci_dev.slot, ehci_dev.func,
offset + EHCI_USBLEGCTLSTS, 0);
}
static int __init ehci_setup(void)
{
u32 ctrl, portsc, hcs_params;
u32 debug_port, new_debug_port = 0, n_ports;
int ret, i;
int port_map_tried;
int playtimes = 3;
early_ehci_bios_handoff();
try_next_time:
port_map_tried = 0;
try_next_port:
hcs_params = readl(&ehci_caps->hcs_params);
debug_port = HCS_DEBUG_PORT(hcs_params);
dbgp_phys_port = debug_port;
n_ports = HCS_N_PORTS(hcs_params);
dbgp_printk("debug_port: %d\n", debug_port);
dbgp_printk("n_ports: %d\n", n_ports);
dbgp_ehci_status("");
for (i = 1; i <= n_ports; i++) {
portsc = readl(&ehci_regs->port_status[i-1]);
dbgp_printk("portstatus%d: %08x\n", i, portsc);
}
if (port_map_tried && (new_debug_port != debug_port)) {
if (--playtimes) {
set_debug_port(new_debug_port);
goto try_next_time;
}
return -1;
}
/* Only reset the controller if it is not already in the
* configured state */
if (!(readl(&ehci_regs->configured_flag) & FLAG_CF)) {
if (dbgp_ehci_controller_reset() != 0)
return -1;
} else {
dbgp_ehci_status("ehci skip - already configured");
}
ret = _dbgp_external_startup();
if (ret == -EIO)
goto next_debug_port;
if (ret < 0) {
/* Things didn't work so remove my claim */
ctrl = readl(&ehci_debug->control);
ctrl &= ~(DBGP_CLAIM | DBGP_OUT);
writel(ctrl, &ehci_debug->control);
return -1;
}
return 0;
next_debug_port:
port_map_tried |= (1<<(debug_port - 1));
	new_debug_port = (debug_port % n_ports) + 1;
if (port_map_tried != ((1<<n_ports) - 1)) {
set_debug_port(new_debug_port);
goto try_next_port;
}
if (--playtimes) {
set_debug_port(new_debug_port);
goto try_next_time;
}
return -1;
}
int __init early_dbgp_init(char *s)
{
u32 debug_port, bar, offset;
u32 bus, slot, func, cap;
void __iomem *ehci_bar;
u32 dbgp_num;
u32 bar_val;
char *e;
int ret;
u8 byte;
if (!early_pci_allowed())
return -1;
dbgp_num = 0;
if (*s)
dbgp_num = simple_strtoul(s, &e, 10);
dbgp_printk("dbgp_num: %d\n", dbgp_num);
cap = find_dbgp(dbgp_num, &bus, &slot, &func);
if (!cap)
return -1;
dbgp_printk("Found EHCI debug port on %02x:%02x.%1x\n", bus, slot,
func);
debug_port = read_pci_config(bus, slot, func, cap);
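	/* The debug-port capability register holds a 1-based BAR number
	 * in bits 31:29 and the offset of the debug registers within
	 * that BAR in bits 27:16; BAR n lives at config offset
	 * 0xc + 4 * n, so n == 1 is PCI_BASE_ADDRESS_0. */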
bar = (debug_port >> 29) & 0x7;
bar = (bar * 4) + 0xc;
offset = (debug_port >> 16) & 0xfff;
dbgp_printk("bar: %02x offset: %03x\n", bar, offset);
if (bar != PCI_BASE_ADDRESS_0) {
dbgp_printk("only debug ports on bar 1 handled.\n");
return -1;
}
bar_val = read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_0);
dbgp_printk("bar_val: %02x offset: %03x\n", bar_val, offset);
if (bar_val & ~PCI_BASE_ADDRESS_MEM_MASK) {
dbgp_printk("only simple 32bit mmio bars supported\n");
return -1;
}
/* double check if the mem space is enabled */
byte = read_pci_config_byte(bus, slot, func, 0x04);
if (!(byte & 0x2)) {
byte |= 0x02;
write_pci_config_byte(bus, slot, func, 0x04, byte);
dbgp_printk("mmio for ehci enabled\n");
}
	/*
	 * FIXME: the BAR size is unknown, so just guess that PAGE_SIZE is
	 * more than enough. 1K is the biggest seen so far.
	 */
set_fixmap_nocache(FIX_DBGP_BASE, bar_val & PAGE_MASK);
ehci_bar = (void __iomem *)__fix_to_virt(FIX_DBGP_BASE);
ehci_bar += bar_val & ~PAGE_MASK;
dbgp_printk("ehci_bar: %p\n", ehci_bar);
ehci_caps = ehci_bar;
ehci_regs = ehci_bar + EARLY_HC_LENGTH(readl(&ehci_caps->hc_capbase));
ehci_debug = ehci_bar + offset;
ehci_dev.bus = bus;
ehci_dev.slot = slot;
ehci_dev.func = func;
detect_set_debug_port();
ret = ehci_setup();
if (ret < 0) {
dbgp_printk("ehci_setup failed\n");
ehci_debug = NULL;
return -1;
}
dbgp_ehci_status("early_init_complete");
return 0;
}
static void early_dbgp_write(struct console *con, const char *str, u32 n)
{
int chunk;
char buf[DBGP_MAX_PACKET];
int use_cr = 0;
u32 cmd, ctrl;
int reset_run = 0;
if (!ehci_debug || dbgp_not_safe)
return;
cmd = readl(&ehci_regs->command);
if (unlikely(!(cmd & CMD_RUN))) {
		/* If the EHCI controller is not in the run state, do extended
		 * checks to see whether ACPI or some other initialization also
		 * reset the EHCI debug port */
ctrl = readl(&ehci_debug->control);
if (!(ctrl & DBGP_ENABLED)) {
dbgp_not_safe = 1;
_dbgp_external_startup();
} else {
cmd |= CMD_RUN;
writel(cmd, &ehci_regs->command);
reset_run = 1;
}
}
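	/* Expand "\n" to "\r\n": on seeing '\n' emit '\r' first, then
	 * back up (str--, n++) so the '\n' itself is copied on the
	 * next pass. */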
while (n > 0) {
for (chunk = 0; chunk < DBGP_MAX_PACKET && n > 0;
str++, chunk++, n--) {
if (!use_cr && *str == '\n') {
use_cr = 1;
buf[chunk] = '\r';
str--;
n++;
continue;
}
if (use_cr)
use_cr = 0;
buf[chunk] = *str;
}
if (chunk > 0) {
dbgp_bulk_write(USB_DEBUG_DEVNUM,
dbgp_endpoint_out, buf, chunk);
}
}
if (unlikely(reset_run)) {
cmd = readl(&ehci_regs->command);
cmd &= ~CMD_RUN;
writel(cmd, &ehci_regs->command);
}
}
struct console early_dbgp_console = {
.name = "earlydbg",
.write = early_dbgp_write,
.flags = CON_PRINTBUFFER,
.index = -1,
};
#if IS_ENABLED(CONFIG_USB)
int dbgp_reset_prep(struct usb_hcd *hcd)
{
int ret = xen_dbgp_reset_prep(hcd);
u32 ctrl;
if (ret)
return ret;
dbgp_not_safe = 1;
if (!ehci_debug)
return 0;
if ((early_dbgp_console.index != -1 &&
!(early_dbgp_console.flags & CON_BOOT)) ||
dbgp_kgdb_mode)
return 1;
	/* The console is not initialized, or should be shut down to
	 * allow reuse of the USB device; either way it is time to
	 * shut down the USB debug port. */
ctrl = readl(&ehci_debug->control);
if (ctrl & DBGP_ENABLED) {
ctrl &= ~(DBGP_CLAIM);
writel(ctrl, &ehci_debug->control);
}
return 0;
}
EXPORT_SYMBOL_GPL(dbgp_reset_prep);
int dbgp_external_startup(struct usb_hcd *hcd)
{
return xen_dbgp_external_startup(hcd) ?: _dbgp_external_startup();
}
EXPORT_SYMBOL_GPL(dbgp_external_startup);
#endif /* USB */
#ifdef CONFIG_KGDB
static char kgdbdbgp_buf[DBGP_MAX_PACKET];
static int kgdbdbgp_buf_sz;
static int kgdbdbgp_buf_idx;
static int kgdbdbgp_loop_cnt = DBGP_LOOPS;
static int kgdbdbgp_read_char(void)
{
int ret;
if (kgdbdbgp_buf_idx < kgdbdbgp_buf_sz) {
char ch = kgdbdbgp_buf[kgdbdbgp_buf_idx++];
return ch;
}
ret = dbgp_bulk_read(USB_DEBUG_DEVNUM, dbgp_endpoint_in,
&kgdbdbgp_buf, DBGP_MAX_PACKET,
kgdbdbgp_loop_cnt);
if (ret <= 0)
return NO_POLL_CHAR;
kgdbdbgp_buf_sz = ret;
kgdbdbgp_buf_idx = 1;
return kgdbdbgp_buf[0];
}
static void kgdbdbgp_write_char(u8 chr)
{
early_dbgp_write(NULL, &chr, 1);
}
static struct kgdb_io kgdbdbgp_io_ops = {
.name = "kgdbdbgp",
.read_char = kgdbdbgp_read_char,
.write_char = kgdbdbgp_write_char,
};
static int kgdbdbgp_wait_time;
static int __init kgdbdbgp_parse_config(char *str)
{
char *ptr;
if (!ehci_debug) {
if (early_dbgp_init(str))
return -1;
}
ptr = strchr(str, ',');
if (ptr) {
ptr++;
kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
}
kgdb_register_io_module(&kgdbdbgp_io_ops);
if (early_dbgp_console.index != -1)
kgdbdbgp_io_ops.cons = &early_dbgp_console;
return 0;
}
early_param("kgdbdbgp", kgdbdbgp_parse_config);
static int kgdbdbgp_reader_thread(void *ptr)
{
int ret;
while (readl(&ehci_debug->control) & DBGP_ENABLED) {
kgdbdbgp_loop_cnt = 1;
ret = kgdbdbgp_read_char();
kgdbdbgp_loop_cnt = DBGP_LOOPS;
if (ret != NO_POLL_CHAR) {
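			/* 0x3 is Ctrl-C; '$' starts a GDB remote packet and
			 * is pushed back (buf_idx--) for the kgdb core to
			 * re-read. */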
if (ret == 0x3 || ret == '$') {
if (ret == '$')
kgdbdbgp_buf_idx--;
kgdb_breakpoint();
}
continue;
}
schedule_timeout_interruptible(kgdbdbgp_wait_time * HZ);
}
return 0;
}
static int __init kgdbdbgp_start_thread(void)
{
if (dbgp_kgdb_mode && kgdbdbgp_wait_time)
kthread_run(kgdbdbgp_reader_thread, NULL, "%s", "dbgp");
return 0;
}
device_initcall(kgdbdbgp_start_thread);
#endif /* CONFIG_KGDB */
| linux-master | drivers/usb/early/ehci-dbgp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* xhci-dbc.c - xHCI debug capability early driver
*
* Copyright (C) 2016 Intel Corporation
*
* Author: Lu Baolu <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/console.h>
#include <linux/pci_regs.h>
#include <linux/pci_ids.h>
#include <linux/memblock.h>
#include <linux/io.h>
#include <asm/pci-direct.h>
#include <asm/fixmap.h>
#include <linux/bcd.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/usb/xhci-dbgp.h>
#include "../host/xhci.h"
#include "xhci-dbc.h"
static struct xdbc_state xdbc;
static bool early_console_keep;
#ifdef XDBC_TRACE
#define xdbc_trace trace_printk
#else
static inline void xdbc_trace(const char *fmt, ...) { }
#endif /* XDBC_TRACE */
static void __iomem * __init xdbc_map_pci_mmio(u32 bus, u32 dev, u32 func)
{
u64 val64, sz64, mask64;
void __iomem *base;
u32 val, sz;
u8 byte;
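	/*
	 * Standard PCI BAR sizing: write all ones, read back the size
	 * mask, then restore the original value. The lowest set address
	 * bit gives the region size (sz64 = 1 << __ffs64 below).
	 */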
val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, ~0);
sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, val);
if (val == 0xffffffff || sz == 0xffffffff) {
pr_notice("invalid mmio bar\n");
return NULL;
}
val64 = val & PCI_BASE_ADDRESS_MEM_MASK;
sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
mask64 = PCI_BASE_ADDRESS_MEM_MASK;
if ((val & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64) {
val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, ~0);
sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, val);
val64 |= (u64)val << 32;
sz64 |= (u64)sz << 32;
mask64 |= ~0ULL << 32;
}
sz64 &= mask64;
if (!sz64) {
pr_notice("invalid mmio address\n");
return NULL;
}
sz64 = 1ULL << __ffs64(sz64);
/* Check if the mem space is enabled: */
byte = read_pci_config_byte(bus, dev, func, PCI_COMMAND);
if (!(byte & PCI_COMMAND_MEMORY)) {
byte |= PCI_COMMAND_MEMORY;
write_pci_config_byte(bus, dev, func, PCI_COMMAND, byte);
}
xdbc.xhci_start = val64;
xdbc.xhci_length = sz64;
base = early_ioremap(val64, sz64);
return base;
}
static void * __init xdbc_get_page(dma_addr_t *dma_addr)
{
void *virt;
virt = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!virt)
return NULL;
if (dma_addr)
*dma_addr = (dma_addr_t)__pa(virt);
return virt;
}
static u32 __init xdbc_find_dbgp(int xdbc_num, u32 *b, u32 *d, u32 *f)
{
u32 bus, dev, func, class;
for (bus = 0; bus < XDBC_PCI_MAX_BUSES; bus++) {
for (dev = 0; dev < XDBC_PCI_MAX_DEVICES; dev++) {
for (func = 0; func < XDBC_PCI_MAX_FUNCTION; func++) {
class = read_pci_config(bus, dev, func, PCI_CLASS_REVISION);
if ((class >> 8) != PCI_CLASS_SERIAL_USB_XHCI)
continue;
if (xdbc_num-- != 0)
continue;
*b = bus;
*d = dev;
*f = func;
return 0;
}
}
}
return -1;
}
static int handshake(void __iomem *ptr, u32 mask, u32 done, int wait, int delay)
{
u32 result;
	/* Cannot use readl_poll_timeout_atomic() this early in boot */
do {
result = readl(ptr);
result &= mask;
if (result == done)
return 0;
udelay(delay);
wait -= delay;
} while (wait > 0);
return -ETIMEDOUT;
}
static void __init xdbc_bios_handoff(void)
{
int offset, timeout;
u32 val;
offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_LEGACY);
val = readl(xdbc.xhci_base + offset);
if (val & XHCI_HC_BIOS_OWNED) {
writel(val | XHCI_HC_OS_OWNED, xdbc.xhci_base + offset);
timeout = handshake(xdbc.xhci_base + offset, XHCI_HC_BIOS_OWNED, 0, 5000, 10);
if (timeout) {
pr_notice("failed to hand over xHCI control from BIOS\n");
writel(val & ~XHCI_HC_BIOS_OWNED, xdbc.xhci_base + offset);
}
}
/* Disable BIOS SMIs and clear all SMI events: */
val = readl(xdbc.xhci_base + offset + XHCI_LEGACY_CONTROL_OFFSET);
val &= XHCI_LEGACY_DISABLE_SMI;
val |= XHCI_LEGACY_SMI_EVENTS;
writel(val, xdbc.xhci_base + offset + XHCI_LEGACY_CONTROL_OFFSET);
}
static int __init
xdbc_alloc_ring(struct xdbc_segment *seg, struct xdbc_ring *ring)
{
seg->trbs = xdbc_get_page(&seg->dma);
if (!seg->trbs)
return -ENOMEM;
ring->segment = seg;
return 0;
}
static void __init xdbc_free_ring(struct xdbc_ring *ring)
{
struct xdbc_segment *seg = ring->segment;
if (!seg)
return;
memblock_phys_free(seg->dma, PAGE_SIZE);
ring->segment = NULL;
}
static void xdbc_reset_ring(struct xdbc_ring *ring)
{
struct xdbc_segment *seg = ring->segment;
struct xdbc_trb *link_trb;
memset(seg->trbs, 0, PAGE_SIZE);
ring->enqueue = seg->trbs;
ring->dequeue = seg->trbs;
ring->cycle_state = 1;
if (ring != &xdbc.evt_ring) {
link_trb = &seg->trbs[XDBC_TRBS_PER_SEGMENT - 1];
link_trb->field[0] = cpu_to_le32(lower_32_bits(seg->dma));
link_trb->field[1] = cpu_to_le32(upper_32_bits(seg->dma));
link_trb->field[3] = cpu_to_le32(TRB_TYPE(TRB_LINK)) | cpu_to_le32(LINK_TOGGLE);
}
}
static inline void xdbc_put_utf16(u16 *s, const char *c, size_t size)
{
int i;
for (i = 0; i < size; i++)
s[i] = cpu_to_le16(c[i]);
}
static void xdbc_mem_init(void)
{
struct xdbc_ep_context *ep_in, *ep_out;
struct usb_string_descriptor *s_desc;
struct xdbc_erst_entry *entry;
struct xdbc_strings *strings;
struct xdbc_context *ctx;
unsigned int max_burst;
u32 string_length;
int index = 0;
u32 dev_info;
xdbc_reset_ring(&xdbc.evt_ring);
xdbc_reset_ring(&xdbc.in_ring);
xdbc_reset_ring(&xdbc.out_ring);
memset(xdbc.table_base, 0, PAGE_SIZE);
memset(xdbc.out_buf, 0, PAGE_SIZE);
/* Initialize event ring segment table: */
xdbc.erst_size = 16;
xdbc.erst_base = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
xdbc.erst_dma = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;
index += XDBC_ERST_ENTRY_NUM;
entry = (struct xdbc_erst_entry *)xdbc.erst_base;
entry->seg_addr = cpu_to_le64(xdbc.evt_seg.dma);
entry->seg_size = cpu_to_le32(XDBC_TRBS_PER_SEGMENT);
entry->__reserved_0 = 0;
/* Initialize ERST registers: */
writel(1, &xdbc.xdbc_reg->ersts);
xdbc_write64(xdbc.erst_dma, &xdbc.xdbc_reg->erstba);
xdbc_write64(xdbc.evt_seg.dma, &xdbc.xdbc_reg->erdp);
/* Debug capability contexts: */
xdbc.dbcc_size = 64 * 3;
xdbc.dbcc_base = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
xdbc.dbcc_dma = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;
index += XDBC_DBCC_ENTRY_NUM;
	/* Populate the strings: */
xdbc.string_size = sizeof(struct xdbc_strings);
xdbc.string_base = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
xdbc.string_dma = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;
strings = (struct xdbc_strings *)xdbc.string_base;
index += XDBC_STRING_ENTRY_NUM;
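	/*
	 * The shifts below pack one descriptor length per byte of the
	 * Info Context string-length field: serial ends up in bits
	 * 31:24, product in 23:16, manufacturer in 15:8 and string0
	 * in 7:0.
	 */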
/* Serial string: */
s_desc = (struct usb_string_descriptor *)strings->serial;
s_desc->bLength = (strlen(XDBC_STRING_SERIAL) + 1) * 2;
s_desc->bDescriptorType = USB_DT_STRING;
xdbc_put_utf16(s_desc->wData, XDBC_STRING_SERIAL, strlen(XDBC_STRING_SERIAL));
string_length = s_desc->bLength;
string_length <<= 8;
/* Product string: */
s_desc = (struct usb_string_descriptor *)strings->product;
s_desc->bLength = (strlen(XDBC_STRING_PRODUCT) + 1) * 2;
s_desc->bDescriptorType = USB_DT_STRING;
xdbc_put_utf16(s_desc->wData, XDBC_STRING_PRODUCT, strlen(XDBC_STRING_PRODUCT));
string_length += s_desc->bLength;
string_length <<= 8;
	/* Manufacturer string: */
s_desc = (struct usb_string_descriptor *)strings->manufacturer;
s_desc->bLength = (strlen(XDBC_STRING_MANUFACTURER) + 1) * 2;
s_desc->bDescriptorType = USB_DT_STRING;
xdbc_put_utf16(s_desc->wData, XDBC_STRING_MANUFACTURER, strlen(XDBC_STRING_MANUFACTURER));
string_length += s_desc->bLength;
string_length <<= 8;
/* String0: */
strings->string0[0] = 4;
strings->string0[1] = USB_DT_STRING;
strings->string0[2] = 0x09;
strings->string0[3] = 0x04;
string_length += 4;
/* Populate info Context: */
ctx = (struct xdbc_context *)xdbc.dbcc_base;
ctx->info.string0 = cpu_to_le64(xdbc.string_dma);
ctx->info.manufacturer = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH);
ctx->info.product = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH * 2);
ctx->info.serial = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH * 3);
ctx->info.length = cpu_to_le32(string_length);
/* Populate bulk out endpoint context: */
max_burst = DEBUG_MAX_BURST(readl(&xdbc.xdbc_reg->control));
ep_out = (struct xdbc_ep_context *)&ctx->out;
ep_out->ep_info1 = 0;
ep_out->ep_info2 = cpu_to_le32(EP_TYPE(BULK_OUT_EP) | MAX_PACKET(1024) | MAX_BURST(max_burst));
ep_out->deq = cpu_to_le64(xdbc.out_seg.dma | xdbc.out_ring.cycle_state);
/* Populate bulk in endpoint context: */
ep_in = (struct xdbc_ep_context *)&ctx->in;
ep_in->ep_info1 = 0;
ep_in->ep_info2 = cpu_to_le32(EP_TYPE(BULK_IN_EP) | MAX_PACKET(1024) | MAX_BURST(max_burst));
ep_in->deq = cpu_to_le64(xdbc.in_seg.dma | xdbc.in_ring.cycle_state);
/* Set DbC context and info registers: */
xdbc_write64(xdbc.dbcc_dma, &xdbc.xdbc_reg->dccp);
dev_info = cpu_to_le32((XDBC_VENDOR_ID << 16) | XDBC_PROTOCOL);
writel(dev_info, &xdbc.xdbc_reg->devinfo1);
dev_info = cpu_to_le32((XDBC_DEVICE_REV << 16) | XDBC_PRODUCT_ID);
writel(dev_info, &xdbc.xdbc_reg->devinfo2);
xdbc.in_buf = xdbc.out_buf + XDBC_MAX_PACKET;
xdbc.in_dma = xdbc.out_dma + XDBC_MAX_PACKET;
}
static void xdbc_do_reset_debug_port(u32 id, u32 count)
{
void __iomem *ops_reg;
void __iomem *portsc;
u32 val, cap_length;
int i;
cap_length = readl(xdbc.xhci_base) & 0xff;
ops_reg = xdbc.xhci_base + cap_length;
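	/* xHCI port registers start at operational base + 0x400, with
	 * one 0x10-byte register set per port. */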
id--;
for (i = id; i < (id + count); i++) {
portsc = ops_reg + 0x400 + i * 0x10;
val = readl(portsc);
if (!(val & PORT_CONNECT))
writel(val | PORT_RESET, portsc);
}
}
static void xdbc_reset_debug_port(void)
{
u32 val, port_offset, port_count;
int offset = 0;
do {
offset = xhci_find_next_ext_cap(xdbc.xhci_base, offset, XHCI_EXT_CAPS_PROTOCOL);
if (!offset)
break;
val = readl(xdbc.xhci_base + offset);
if (XHCI_EXT_PORT_MAJOR(val) != 0x3)
continue;
val = readl(xdbc.xhci_base + offset + 8);
port_offset = XHCI_EXT_PORT_OFF(val);
port_count = XHCI_EXT_PORT_COUNT(val);
xdbc_do_reset_debug_port(port_offset, port_count);
} while (1);
}
static void
xdbc_queue_trb(struct xdbc_ring *ring, u32 field1, u32 field2, u32 field3, u32 field4)
{
struct xdbc_trb *trb, *link_trb;
trb = ring->enqueue;
trb->field[0] = cpu_to_le32(field1);
trb->field[1] = cpu_to_le32(field2);
trb->field[2] = cpu_to_le32(field3);
trb->field[3] = cpu_to_le32(field4);
++(ring->enqueue);
if (ring->enqueue >= &ring->segment->trbs[TRBS_PER_SEGMENT - 1]) {
link_trb = ring->enqueue;
if (ring->cycle_state)
link_trb->field[3] |= cpu_to_le32(TRB_CYCLE);
else
link_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
ring->enqueue = ring->segment->trbs;
ring->cycle_state ^= 1;
}
}
static void xdbc_ring_doorbell(int target)
{
writel(DOOR_BELL_TARGET(target), &xdbc.xdbc_reg->doorbell);
}
static int xdbc_start(void)
{
u32 ctrl, status;
int ret;
ctrl = readl(&xdbc.xdbc_reg->control);
writel(ctrl | CTRL_DBC_ENABLE | CTRL_PORT_ENABLE, &xdbc.xdbc_reg->control);
ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, CTRL_DBC_ENABLE, 100000, 100);
if (ret) {
xdbc_trace("failed to initialize hardware\n");
return ret;
}
/* Reset port to avoid bus hang: */
if (xdbc.vendor == PCI_VENDOR_ID_INTEL)
xdbc_reset_debug_port();
/* Wait for port connection: */
ret = handshake(&xdbc.xdbc_reg->portsc, PORTSC_CONN_STATUS, PORTSC_CONN_STATUS, 5000000, 100);
if (ret) {
xdbc_trace("waiting for connection timed out\n");
return ret;
}
/* Wait for debug device to be configured: */
ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_RUN, CTRL_DBC_RUN, 5000000, 100);
if (ret) {
xdbc_trace("waiting for device configuration timed out\n");
return ret;
}
/* Check port number: */
status = readl(&xdbc.xdbc_reg->status);
if (!DCST_DEBUG_PORT(status)) {
xdbc_trace("invalid root hub port number\n");
return -ENODEV;
}
xdbc.port_number = DCST_DEBUG_PORT(status);
xdbc_trace("DbC is running now, control 0x%08x port ID %d\n",
readl(&xdbc.xdbc_reg->control), xdbc.port_number);
return 0;
}
static int xdbc_bulk_transfer(void *data, int size, bool read)
{
struct xdbc_ring *ring;
struct xdbc_trb *trb;
u32 length, control;
u32 cycle;
u64 addr;
if (size > XDBC_MAX_PACKET) {
xdbc_trace("bad parameter, size %d\n", size);
return -EINVAL;
}
if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED) ||
!(xdbc.flags & XDBC_FLAGS_CONFIGURED) ||
(!read && (xdbc.flags & XDBC_FLAGS_OUT_STALL)) ||
(read && (xdbc.flags & XDBC_FLAGS_IN_STALL))) {
xdbc_trace("connection not ready, flags %08x\n", xdbc.flags);
return -EIO;
}
ring = (read ? &xdbc.in_ring : &xdbc.out_ring);
trb = ring->enqueue;
cycle = ring->cycle_state;
length = TRB_LEN(size);
control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;
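	/*
	 * Queue the TRB with the cycle bit inverted so the controller
	 * ignores it until the bit is flipped after the barrier below.
	 */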
if (cycle)
control &= cpu_to_le32(~TRB_CYCLE);
else
control |= cpu_to_le32(TRB_CYCLE);
if (read) {
memset(xdbc.in_buf, 0, XDBC_MAX_PACKET);
addr = xdbc.in_dma;
xdbc.flags |= XDBC_FLAGS_IN_PROCESS;
} else {
memcpy_and_pad(xdbc.out_buf, XDBC_MAX_PACKET, data, size, 0);
addr = xdbc.out_dma;
xdbc.flags |= XDBC_FLAGS_OUT_PROCESS;
}
xdbc_queue_trb(ring, lower_32_bits(addr), upper_32_bits(addr), length, control);
/*
* Add a barrier between writes of trb fields and flipping
* the cycle bit:
*/
wmb();
if (cycle)
trb->field[3] |= cpu_to_le32(cycle);
else
trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
xdbc_ring_doorbell(read ? IN_EP_DOORBELL : OUT_EP_DOORBELL);
return size;
}
static int xdbc_handle_external_reset(void)
{
int ret = 0;
xdbc.flags = 0;
writel(0, &xdbc.xdbc_reg->control);
ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, 0, 100000, 10);
if (ret)
goto reset_out;
xdbc_mem_init();
ret = xdbc_start();
if (ret < 0)
goto reset_out;
xdbc_trace("dbc recovered\n");
xdbc.flags |= XDBC_FLAGS_INITIALIZED | XDBC_FLAGS_CONFIGURED;
xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
return 0;
reset_out:
xdbc_trace("failed to recover from external reset\n");
return ret;
}
static int __init xdbc_early_setup(void)
{
int ret;
writel(0, &xdbc.xdbc_reg->control);
ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, 0, 100000, 100);
if (ret)
return ret;
/* Allocate the table page: */
xdbc.table_base = xdbc_get_page(&xdbc.table_dma);
if (!xdbc.table_base)
return -ENOMEM;
/* Get and store the transfer buffer: */
xdbc.out_buf = xdbc_get_page(&xdbc.out_dma);
if (!xdbc.out_buf)
return -ENOMEM;
/* Allocate the event ring: */
ret = xdbc_alloc_ring(&xdbc.evt_seg, &xdbc.evt_ring);
if (ret < 0)
return ret;
/* Allocate IN/OUT endpoint transfer rings: */
ret = xdbc_alloc_ring(&xdbc.in_seg, &xdbc.in_ring);
if (ret < 0)
return ret;
ret = xdbc_alloc_ring(&xdbc.out_seg, &xdbc.out_ring);
if (ret < 0)
return ret;
xdbc_mem_init();
ret = xdbc_start();
if (ret < 0) {
writel(0, &xdbc.xdbc_reg->control);
return ret;
}
xdbc.flags |= XDBC_FLAGS_INITIALIZED | XDBC_FLAGS_CONFIGURED;
xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
return 0;
}
int __init early_xdbc_parse_parameter(char *s, int keep_early)
{
unsigned long dbgp_num = 0;
u32 bus, dev, func, offset;
char *e;
int ret;
if (!early_pci_allowed())
return -EPERM;
early_console_keep = keep_early;
if (xdbc.xdbc_reg)
return 0;
if (*s) {
dbgp_num = simple_strtoul(s, &e, 10);
if (s == e)
dbgp_num = 0;
}
pr_notice("dbgp_num: %lu\n", dbgp_num);
/* Locate the host controller: */
ret = xdbc_find_dbgp(dbgp_num, &bus, &dev, &func);
if (ret) {
pr_notice("failed to locate xhci host\n");
return -ENODEV;
}
xdbc.vendor = read_pci_config_16(bus, dev, func, PCI_VENDOR_ID);
xdbc.device = read_pci_config_16(bus, dev, func, PCI_DEVICE_ID);
xdbc.bus = bus;
xdbc.dev = dev;
xdbc.func = func;
/* Map the IO memory: */
xdbc.xhci_base = xdbc_map_pci_mmio(bus, dev, func);
if (!xdbc.xhci_base)
return -EINVAL;
/* Locate DbC registers: */
offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_DEBUG);
if (!offset) {
pr_notice("xhci host doesn't support debug capability\n");
early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
xdbc.xhci_base = NULL;
xdbc.xhci_length = 0;
return -ENODEV;
}
xdbc.xdbc_reg = (struct xdbc_regs __iomem *)(xdbc.xhci_base + offset);
return 0;
}
int __init early_xdbc_setup_hardware(void)
{
int ret;
if (!xdbc.xdbc_reg)
return -ENODEV;
xdbc_bios_handoff();
raw_spin_lock_init(&xdbc.lock);
ret = xdbc_early_setup();
if (ret) {
pr_notice("failed to setup the connection to host\n");
xdbc_free_ring(&xdbc.evt_ring);
xdbc_free_ring(&xdbc.out_ring);
xdbc_free_ring(&xdbc.in_ring);
if (xdbc.table_dma)
memblock_phys_free(xdbc.table_dma, PAGE_SIZE);
if (xdbc.out_dma)
memblock_phys_free(xdbc.out_dma, PAGE_SIZE);
xdbc.table_base = NULL;
xdbc.out_buf = NULL;
}
return ret;
}
static void xdbc_handle_port_status(struct xdbc_trb *evt_trb)
{
u32 port_reg;
port_reg = readl(&xdbc.xdbc_reg->portsc);
if (port_reg & PORTSC_CONN_CHANGE) {
xdbc_trace("connect status change event\n");
/* Check whether cable unplugged: */
if (!(port_reg & PORTSC_CONN_STATUS)) {
xdbc.flags = 0;
xdbc_trace("cable unplugged\n");
}
}
if (port_reg & PORTSC_RESET_CHANGE)
xdbc_trace("port reset change event\n");
if (port_reg & PORTSC_LINK_CHANGE)
xdbc_trace("port link status change event\n");
if (port_reg & PORTSC_CONFIG_CHANGE)
xdbc_trace("config error change\n");
/* Write back the value to clear RW1C bits: */
writel(port_reg, &xdbc.xdbc_reg->portsc);
}
static void xdbc_handle_tx_event(struct xdbc_trb *evt_trb)
{
u32 comp_code;
int ep_id;
comp_code = GET_COMP_CODE(le32_to_cpu(evt_trb->field[2]));
ep_id = TRB_TO_EP_ID(le32_to_cpu(evt_trb->field[3]));
switch (comp_code) {
case COMP_SUCCESS:
case COMP_SHORT_PACKET:
break;
case COMP_TRB_ERROR:
case COMP_BABBLE_DETECTED_ERROR:
case COMP_USB_TRANSACTION_ERROR:
case COMP_STALL_ERROR:
default:
if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL)
xdbc.flags |= XDBC_FLAGS_OUT_STALL;
if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL)
xdbc.flags |= XDBC_FLAGS_IN_STALL;
xdbc_trace("endpoint %d stalled\n", ep_id);
break;
}
if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL) {
xdbc.flags &= ~XDBC_FLAGS_IN_PROCESS;
xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
} else if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL) {
xdbc.flags &= ~XDBC_FLAGS_OUT_PROCESS;
} else {
xdbc_trace("invalid endpoint id %d\n", ep_id);
}
}
static void xdbc_handle_events(void)
{
struct xdbc_trb *evt_trb;
bool update_erdp = false;
u32 reg;
u8 cmd;
cmd = read_pci_config_byte(xdbc.bus, xdbc.dev, xdbc.func, PCI_COMMAND);
if (!(cmd & PCI_COMMAND_MASTER)) {
cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
write_pci_config_byte(xdbc.bus, xdbc.dev, xdbc.func, PCI_COMMAND, cmd);
}
if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED))
return;
/* Handle external reset events: */
reg = readl(&xdbc.xdbc_reg->control);
if (!(reg & CTRL_DBC_ENABLE)) {
if (xdbc_handle_external_reset()) {
xdbc_trace("failed to recover connection\n");
return;
}
}
/* Handle configure-exit event: */
reg = readl(&xdbc.xdbc_reg->control);
if (reg & CTRL_DBC_RUN_CHANGE) {
writel(reg, &xdbc.xdbc_reg->control);
if (reg & CTRL_DBC_RUN)
xdbc.flags |= XDBC_FLAGS_CONFIGURED;
else
xdbc.flags &= ~XDBC_FLAGS_CONFIGURED;
}
/* Handle endpoint stall event: */
reg = readl(&xdbc.xdbc_reg->control);
if (reg & CTRL_HALT_IN_TR) {
xdbc.flags |= XDBC_FLAGS_IN_STALL;
} else {
xdbc.flags &= ~XDBC_FLAGS_IN_STALL;
if (!(xdbc.flags & XDBC_FLAGS_IN_PROCESS))
xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
}
if (reg & CTRL_HALT_OUT_TR)
xdbc.flags |= XDBC_FLAGS_OUT_STALL;
else
xdbc.flags &= ~XDBC_FLAGS_OUT_STALL;
/* Handle the events in the event ring: */
evt_trb = xdbc.evt_ring.dequeue;
while ((le32_to_cpu(evt_trb->field[3]) & TRB_CYCLE) == xdbc.evt_ring.cycle_state) {
/*
* Add a barrier between reading the cycle flag and any
* reads of the event's flags/data below:
*/
rmb();
switch ((le32_to_cpu(evt_trb->field[3]) & TRB_TYPE_BITMASK)) {
case TRB_TYPE(TRB_PORT_STATUS):
xdbc_handle_port_status(evt_trb);
break;
case TRB_TYPE(TRB_TRANSFER):
xdbc_handle_tx_event(evt_trb);
break;
default:
break;
}
++(xdbc.evt_ring.dequeue);
if (xdbc.evt_ring.dequeue == &xdbc.evt_seg.trbs[TRBS_PER_SEGMENT]) {
xdbc.evt_ring.dequeue = xdbc.evt_seg.trbs;
xdbc.evt_ring.cycle_state ^= 1;
}
evt_trb = xdbc.evt_ring.dequeue;
update_erdp = true;
}
/* Update event ring dequeue pointer: */
if (update_erdp)
xdbc_write64(__pa(xdbc.evt_ring.dequeue), &xdbc.xdbc_reg->erdp);
}
static int xdbc_bulk_write(const char *bytes, int size)
{
int ret, timeout = 0;
unsigned long flags;
retry:
if (in_nmi()) {
if (!raw_spin_trylock_irqsave(&xdbc.lock, flags))
return -EAGAIN;
} else {
raw_spin_lock_irqsave(&xdbc.lock, flags);
}
xdbc_handle_events();
/* Check completion of the previous request: */
if ((xdbc.flags & XDBC_FLAGS_OUT_PROCESS) && (timeout < 2000000)) {
raw_spin_unlock_irqrestore(&xdbc.lock, flags);
udelay(100);
timeout += 100;
goto retry;
}
if (xdbc.flags & XDBC_FLAGS_OUT_PROCESS) {
raw_spin_unlock_irqrestore(&xdbc.lock, flags);
xdbc_trace("previous transfer not completed yet\n");
return -ETIMEDOUT;
}
ret = xdbc_bulk_transfer((void *)bytes, size, false);
raw_spin_unlock_irqrestore(&xdbc.lock, flags);
return ret;
}
static void early_xdbc_write(struct console *con, const char *str, u32 n)
{
	/* static variables are zeroed, so buf is always NUL-terminated */
static char buf[XDBC_MAX_PACKET + 1];
int chunk, ret;
int use_cr = 0;
if (!xdbc.xdbc_reg)
return;
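	/* Same "\n" -> "\r\n" expansion as the EHCI early console. */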
while (n > 0) {
for (chunk = 0; chunk < XDBC_MAX_PACKET && n > 0; str++, chunk++, n--) {
if (!use_cr && *str == '\n') {
use_cr = 1;
buf[chunk] = '\r';
str--;
n++;
continue;
}
if (use_cr)
use_cr = 0;
buf[chunk] = *str;
}
if (chunk > 0) {
ret = xdbc_bulk_write(buf, chunk);
if (ret < 0)
xdbc_trace("missed message {%s}\n", buf);
}
}
}
static struct console early_xdbc_console = {
.name = "earlyxdbc",
.write = early_xdbc_write,
.flags = CON_PRINTBUFFER,
.index = -1,
};
void __init early_xdbc_register_console(void)
{
if (early_console)
return;
early_console = &early_xdbc_console;
if (early_console_keep)
early_console->flags &= ~CON_BOOT;
else
early_console->flags |= CON_BOOT;
register_console(early_console);
}
static void xdbc_unregister_console(void)
{
if (console_is_registered(&early_xdbc_console))
unregister_console(&early_xdbc_console);
}
static int xdbc_scrub_function(void *ptr)
{
unsigned long flags;
while (true) {
raw_spin_lock_irqsave(&xdbc.lock, flags);
xdbc_handle_events();
if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED)) {
raw_spin_unlock_irqrestore(&xdbc.lock, flags);
break;
}
raw_spin_unlock_irqrestore(&xdbc.lock, flags);
schedule_timeout_interruptible(1);
}
xdbc_unregister_console();
writel(0, &xdbc.xdbc_reg->control);
xdbc_trace("dbc scrub function exits\n");
return 0;
}
static int __init xdbc_init(void)
{
unsigned long flags;
void __iomem *base;
int ret = 0;
u32 offset;
if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED))
return 0;
/*
* It's time to shut down the DbC, so that the debug
* port can be reused by the host controller:
*/
if (early_xdbc_console.index == -1 ||
(early_xdbc_console.flags & CON_BOOT)) {
xdbc_trace("hardware not used anymore\n");
goto free_and_quit;
}
base = ioremap(xdbc.xhci_start, xdbc.xhci_length);
if (!base) {
xdbc_trace("failed to remap the io address\n");
ret = -ENOMEM;
goto free_and_quit;
}
raw_spin_lock_irqsave(&xdbc.lock, flags);
early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
xdbc.xhci_base = base;
offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_DEBUG);
xdbc.xdbc_reg = (struct xdbc_regs __iomem *)(xdbc.xhci_base + offset);
raw_spin_unlock_irqrestore(&xdbc.lock, flags);
kthread_run(xdbc_scrub_function, NULL, "%s", "xdbc");
return 0;
free_and_quit:
xdbc_free_ring(&xdbc.evt_ring);
xdbc_free_ring(&xdbc.out_ring);
xdbc_free_ring(&xdbc.in_ring);
memblock_phys_free(xdbc.table_dma, PAGE_SIZE);
memblock_phys_free(xdbc.out_dma, PAGE_SIZE);
writel(0, &xdbc.xdbc_reg->control);
early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
return ret;
}
subsys_initcall(xdbc_init);
| linux-master | drivers/usb/early/xhci-dbc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Glue code for the ISP1760 driver and bus
* Currently there is support for
* - OpenFirmware
* - PCI
* - PDEV (generic platform device centralized driver model)
*
* (c) 2007 Sebastian Siewior <[email protected]>
* Copyright 2021 Linaro, Rui Miguel Silva <[email protected]>
*
*/
#include <linux/usb.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb/hcd.h>
#include <linux/usb/otg.h>
#include "isp1760-core.h"
#include "isp1760-regs.h"
#ifdef CONFIG_USB_PCI
#include <linux/pci.h>
#endif
#ifdef CONFIG_USB_PCI
static int isp1761_pci_init(struct pci_dev *dev)
{
resource_size_t mem_start;
resource_size_t mem_length;
u8 __iomem *iobase;
u8 latency, limit;
int retry_count;
u32 reg_data;
/* Grab the PLX PCI shared memory of the ISP 1761 we need */
mem_start = pci_resource_start(dev, 3);
mem_length = pci_resource_len(dev, 3);
if (mem_length < 0xffff) {
printk(KERN_ERR "memory length for this resource is wrong\n");
return -ENOMEM;
}
if (!request_mem_region(mem_start, mem_length, "ISP-PCI")) {
printk(KERN_ERR "host controller already in use\n");
return -EBUSY;
}
/* map available memory */
iobase = ioremap(mem_start, mem_length);
if (!iobase) {
printk(KERN_ERR "Error ioremap failed\n");
release_mem_region(mem_start, mem_length);
return -ENOMEM;
}
/* bad pci latencies can contribute to overruns */
pci_read_config_byte(dev, PCI_LATENCY_TIMER, &latency);
if (latency) {
pci_read_config_byte(dev, PCI_MAX_LAT, &limit);
if (limit && limit < latency)
pci_write_config_byte(dev, PCI_LATENCY_TIMER, limit);
}
	/* Check whether the Host Controller's scratch register is
	 * accessible. The initial PCI access is retried until local
	 * init of the PCI bridge has completed.
	 */
retry_count = 20;
reg_data = 0;
while ((reg_data != 0xFACE) && retry_count) {
		/*
		 * By default the host is in 16-bit mode, so I/O operations
		 * at this stage must be 16-bit.
		 */
writel(0xface, iobase + ISP176x_HC_SCRATCH);
udelay(100);
reg_data = readl(iobase + ISP176x_HC_SCRATCH) & 0x0000ffff;
retry_count--;
}
iounmap(iobase);
release_mem_region(mem_start, mem_length);
	/* Host Controller presence is detected by writing to the scratch
	 * register and checking that the value read back matches.
	 */
if (reg_data != 0xFACE) {
dev_err(&dev->dev, "scratch register mismatch %x\n", reg_data);
return -ENOMEM;
}
	/* Grab the PLX PCI memory-mapped port start address we need */
mem_start = pci_resource_start(dev, 0);
mem_length = pci_resource_len(dev, 0);
if (!request_mem_region(mem_start, mem_length, "ISP1761 IO MEM")) {
printk(KERN_ERR "request region #1\n");
return -EBUSY;
}
iobase = ioremap(mem_start, mem_length);
if (!iobase) {
printk(KERN_ERR "ioremap #1\n");
release_mem_region(mem_start, mem_length);
return -ENOMEM;
}
/* configure PLX PCI chip to pass interrupts */
#define PLX_INT_CSR_REG 0x68
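	/* Bits 8 and 11: presumably PCI interrupt enable and local
	 * interrupt input enable in the PLX PCI9054 INTCSR. */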
reg_data = readl(iobase + PLX_INT_CSR_REG);
reg_data |= 0x900;
writel(reg_data, iobase + PLX_INT_CSR_REG);
/* done with PLX IO access */
iounmap(iobase);
release_mem_region(mem_start, mem_length);
return 0;
}
static int isp1761_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
unsigned int devflags = 0;
int ret;
if (!dev->irq)
return -ENODEV;
if (pci_enable_device(dev) < 0)
return -ENODEV;
ret = isp1761_pci_init(dev);
if (ret < 0)
goto error;
pci_set_master(dev);
ret = isp1760_register(&dev->resource[3], dev->irq, 0, &dev->dev,
devflags);
if (ret < 0)
goto error;
return 0;
error:
pci_disable_device(dev);
return ret;
}
static void isp1761_pci_remove(struct pci_dev *dev)
{
isp1760_unregister(&dev->dev);
pci_disable_device(dev);
}
static void isp1761_pci_shutdown(struct pci_dev *dev)
{
printk(KERN_ERR "ips1761_pci_shutdown\n");
}
static const struct pci_device_id isp1760_plx[] = {
{
.class = PCI_CLASS_BRIDGE_OTHER << 8,
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_PLX,
.device = 0x5406,
.subvendor = PCI_VENDOR_ID_PLX,
.subdevice = 0x9054,
},
{ }
};
MODULE_DEVICE_TABLE(pci, isp1760_plx);
static struct pci_driver isp1761_pci_driver = {
.name = "isp1760",
.id_table = isp1760_plx,
.probe = isp1761_pci_probe,
.remove = isp1761_pci_remove,
.shutdown = isp1761_pci_shutdown,
};
#endif
static int isp1760_plat_probe(struct platform_device *pdev)
{
unsigned long irqflags;
unsigned int devflags = 0;
struct resource *mem_res;
int irq;
int ret;
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
irqflags = irq_get_trigger_type(irq);
if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
struct device_node *dp = pdev->dev.of_node;
u32 bus_width = 0;
if (of_device_is_compatible(dp, "nxp,usb-isp1761"))
devflags |= ISP1760_FLAG_ISP1761;
if (of_device_is_compatible(dp, "nxp,usb-isp1763"))
devflags |= ISP1760_FLAG_ISP1763;
		/*
		 * Some systems wire up only 8 of the 16 data lines, or
		 * 16 of the 32 data lines.
		 */
of_property_read_u32(dp, "bus-width", &bus_width);
if (bus_width == 16)
devflags |= ISP1760_FLAG_BUS_WIDTH_16;
else if (bus_width == 8)
devflags |= ISP1760_FLAG_BUS_WIDTH_8;
if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL)
devflags |= ISP1760_FLAG_PERIPHERAL_EN;
if (of_property_read_bool(dp, "analog-oc"))
devflags |= ISP1760_FLAG_ANALOG_OC;
if (of_property_read_bool(dp, "dack-polarity"))
devflags |= ISP1760_FLAG_DACK_POL_HIGH;
if (of_property_read_bool(dp, "dreq-polarity"))
devflags |= ISP1760_FLAG_DREQ_POL_HIGH;
} else {
pr_err("isp1760: no platform data\n");
return -ENXIO;
}
ret = isp1760_register(mem_res, irq, irqflags, &pdev->dev, devflags);
if (ret < 0)
return ret;
pr_info("ISP1760 USB device initialised\n");
return 0;
}
static void isp1760_plat_remove(struct platform_device *pdev)
{
isp1760_unregister(&pdev->dev);
}
#ifdef CONFIG_OF
static const struct of_device_id isp1760_of_match[] = {
{ .compatible = "nxp,usb-isp1760", },
{ .compatible = "nxp,usb-isp1761", },
{ .compatible = "nxp,usb-isp1763", },
{ },
};
MODULE_DEVICE_TABLE(of, isp1760_of_match);
#endif
static struct platform_driver isp1760_plat_driver = {
.probe = isp1760_plat_probe,
.remove_new = isp1760_plat_remove,
.driver = {
.name = "isp1760",
.of_match_table = of_match_ptr(isp1760_of_match),
},
};
static int __init isp1760_init(void)
{
int ret, any_ret = -ENODEV;
isp1760_init_kmem_once();
ret = platform_driver_register(&isp1760_plat_driver);
if (!ret)
any_ret = 0;
#ifdef CONFIG_USB_PCI
ret = pci_register_driver(&isp1761_pci_driver);
if (!ret)
any_ret = 0;
#endif
if (any_ret)
isp1760_deinit_kmem_cache();
return any_ret;
}
module_init(isp1760_init);
static void __exit isp1760_exit(void)
{
platform_driver_unregister(&isp1760_plat_driver);
#ifdef CONFIG_USB_PCI
pci_unregister_driver(&isp1761_pci_driver);
#endif
isp1760_deinit_kmem_cache();
}
module_exit(isp1760_exit);
| linux-master | drivers/usb/isp1760/isp1760-if.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the NXP ISP1761 device controller
*
* Copyright 2021 Linaro, Rui Miguel Silva
* Copyright 2014 Ideas on Board Oy
*
* Contacts:
* Laurent Pinchart <[email protected]>
* Rui Miguel Silva <[email protected]>
*/
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/usb.h>
#include "isp1760-core.h"
#include "isp1760-regs.h"
#include "isp1760-udc.h"
#define ISP1760_VBUS_POLL_INTERVAL msecs_to_jiffies(500)
struct isp1760_request {
struct usb_request req;
struct list_head queue;
struct isp1760_ep *ep;
unsigned int packet_size;
};
static inline struct isp1760_udc *gadget_to_udc(struct usb_gadget *gadget)
{
return container_of(gadget, struct isp1760_udc, gadget);
}
static inline struct isp1760_ep *ep_to_udc_ep(struct usb_ep *ep)
{
return container_of(ep, struct isp1760_ep, ep);
}
static inline struct isp1760_request *req_to_udc_req(struct usb_request *req)
{
return container_of(req, struct isp1760_request, req);
}
static u32 isp1760_udc_read(struct isp1760_udc *udc, u16 field)
{
return isp1760_field_read(udc->fields, field);
}
static void isp1760_udc_write(struct isp1760_udc *udc, u16 field, u32 val)
{
isp1760_field_write(udc->fields, field, val);
}
static u32 isp1760_udc_read_raw(struct isp1760_udc *udc, u16 reg)
{
__le32 val;
regmap_raw_read(udc->regs, reg, &val, 4);
return le32_to_cpu(val);
}
static u16 isp1760_udc_read_raw16(struct isp1760_udc *udc, u16 reg)
{
__le16 val;
regmap_raw_read(udc->regs, reg, &val, 2);
return le16_to_cpu(val);
}
static void isp1760_udc_write_raw(struct isp1760_udc *udc, u16 reg, u32 val)
{
__le32 val_le = cpu_to_le32(val);
regmap_raw_write(udc->regs, reg, &val_le, 4);
}
static void isp1760_udc_write_raw16(struct isp1760_udc *udc, u16 reg, u16 val)
{
__le16 val_le = cpu_to_le16(val);
regmap_raw_write(udc->regs, reg, &val_le, 2);
}
static void isp1760_udc_set(struct isp1760_udc *udc, u32 field)
{
isp1760_udc_write(udc, field, 0xFFFFFFFF);
}
static void isp1760_udc_clear(struct isp1760_udc *udc, u32 field)
{
isp1760_udc_write(udc, field, 0);
}
static bool isp1760_udc_is_set(struct isp1760_udc *udc, u32 field)
{
return !!isp1760_udc_read(udc, field);
}
/* -----------------------------------------------------------------------------
* Endpoint Management
*/
static struct isp1760_ep *isp1760_udc_find_ep(struct isp1760_udc *udc,
u16 index)
{
unsigned int i;
if (index == 0)
return &udc->ep[0];
for (i = 1; i < ARRAY_SIZE(udc->ep); ++i) {
if (udc->ep[i].addr == index)
return udc->ep[i].desc ? &udc->ep[i] : NULL;
}
return NULL;
}
static void __isp1760_udc_select_ep(struct isp1760_udc *udc,
struct isp1760_ep *ep, int dir)
{
isp1760_udc_write(udc, DC_ENDPIDX, ep->addr & USB_ENDPOINT_NUMBER_MASK);
if (dir == USB_DIR_IN)
isp1760_udc_set(udc, DC_EPDIR);
else
isp1760_udc_clear(udc, DC_EPDIR);
}
/**
* isp1760_udc_select_ep - Select an endpoint for register access
 * @udc: Reference to the device controller
 * @ep: The endpoint
*
* The ISP1761 endpoint registers are banked. This function selects the target
* endpoint for banked register access. The selection remains valid until the
* next call to this function, the next direct access to the EPINDEX register
* or the next reset, whichever comes first.
*
* Called with the UDC spinlock held.
*/
static void isp1760_udc_select_ep(struct isp1760_udc *udc,
struct isp1760_ep *ep)
{
__isp1760_udc_select_ep(udc, ep, ep->addr & USB_ENDPOINT_DIR_MASK);
}
/* Called with the UDC spinlock held. */
static void isp1760_udc_ctrl_send_status(struct isp1760_ep *ep, int dir)
{
struct isp1760_udc *udc = ep->udc;
/*
* Proceed to the status stage. The status stage data packet flows in
	 * the direction opposite to the data stage data packets; we thus need
* to select the OUT/IN endpoint for IN/OUT transfers.
*/
if (dir == USB_DIR_IN)
isp1760_udc_clear(udc, DC_EPDIR);
else
isp1760_udc_set(udc, DC_EPDIR);
isp1760_udc_write(udc, DC_ENDPIDX, 1);
isp1760_udc_set(udc, DC_STATUS);
/*
* The hardware will terminate the request automatically and go back to
* the setup stage without notifying us.
*/
udc->ep0_state = ISP1760_CTRL_SETUP;
}
/* Called without the UDC spinlock held. */
static void isp1760_udc_request_complete(struct isp1760_ep *ep,
struct isp1760_request *req,
int status)
{
struct isp1760_udc *udc = ep->udc;
unsigned long flags;
dev_dbg(ep->udc->isp->dev, "completing request %p with status %d\n",
req, status);
req->ep = NULL;
req->req.status = status;
req->req.complete(&ep->ep, &req->req);
spin_lock_irqsave(&udc->lock, flags);
/*
* When completing control OUT requests, move to the status stage after
* calling the request complete callback. This gives the gadget an
* opportunity to stall the control transfer if needed.
*/
if (status == 0 && ep->addr == 0 && udc->ep0_dir == USB_DIR_OUT)
isp1760_udc_ctrl_send_status(ep, USB_DIR_OUT);
spin_unlock_irqrestore(&udc->lock, flags);
}
static void isp1760_udc_ctrl_send_stall(struct isp1760_ep *ep)
{
struct isp1760_udc *udc = ep->udc;
unsigned long flags;
dev_dbg(ep->udc->isp->dev, "%s(ep%02x)\n", __func__, ep->addr);
spin_lock_irqsave(&udc->lock, flags);
/* Stall both the IN and OUT endpoints. */
__isp1760_udc_select_ep(udc, ep, USB_DIR_OUT);
isp1760_udc_set(udc, DC_STALL);
__isp1760_udc_select_ep(udc, ep, USB_DIR_IN);
isp1760_udc_set(udc, DC_STALL);
/* A protocol stall completes the control transaction. */
udc->ep0_state = ISP1760_CTRL_SETUP;
spin_unlock_irqrestore(&udc->lock, flags);
}
/* -----------------------------------------------------------------------------
* Data Endpoints
*/
/* Called with the UDC spinlock held. */
static bool isp1760_udc_receive(struct isp1760_ep *ep,
struct isp1760_request *req)
{
struct isp1760_udc *udc = ep->udc;
unsigned int len;
u32 *buf;
int i;
isp1760_udc_select_ep(udc, ep);
len = isp1760_udc_read(udc, DC_BUFLEN);
dev_dbg(udc->isp->dev, "%s: received %u bytes (%u/%u done)\n",
__func__, len, req->req.actual, req->req.length);
len = min(len, req->req.length - req->req.actual);
if (!len) {
/*
* There's no data to be read from the FIFO, acknowledge the RX
* interrupt by clearing the buffer.
*
		 * TODO: What if another packet arrives in the meantime? The
* datasheet doesn't clearly document how this should be
* handled.
*/
isp1760_udc_set(udc, DC_CLBUF);
return false;
}
buf = req->req.buf + req->req.actual;
/*
* Make sure not to read more than one extra byte, otherwise data from
* the next packet might be removed from the FIFO.
*/
for (i = len; i > 2; i -= 4, ++buf)
*buf = isp1760_udc_read_raw(udc, ISP176x_DC_DATAPORT);
if (i > 0)
*(u16 *)buf = isp1760_udc_read_raw16(udc, ISP176x_DC_DATAPORT);
req->req.actual += len;
/*
* TODO: The short_not_ok flag isn't supported yet, but isn't used by
* any gadget driver either.
*/
dev_dbg(udc->isp->dev,
"%s: req %p actual/length %u/%u maxpacket %u packet size %u\n",
__func__, req, req->req.actual, req->req.length, ep->maxpacket,
len);
ep->rx_pending = false;
/*
* Complete the request if all data has been received or if a short
* packet has been received.
*/
if (req->req.actual == req->req.length || len < ep->maxpacket) {
list_del(&req->queue);
return true;
}
return false;
}
static void isp1760_udc_transmit(struct isp1760_ep *ep,
struct isp1760_request *req)
{
struct isp1760_udc *udc = ep->udc;
u32 *buf = req->req.buf + req->req.actual;
int i;
req->packet_size = min(req->req.length - req->req.actual,
ep->maxpacket);
dev_dbg(udc->isp->dev, "%s: transferring %u bytes (%u/%u done)\n",
__func__, req->packet_size, req->req.actual,
req->req.length);
__isp1760_udc_select_ep(udc, ep, USB_DIR_IN);
if (req->packet_size)
isp1760_udc_write(udc, DC_BUFLEN, req->packet_size);
/*
* Make sure not to write more than one extra byte, otherwise extra data
* will stay in the FIFO and will be transmitted during the next control
* request. The endpoint control CLBUF bit is supposed to allow flushing
* the FIFO for this kind of conditions, but doesn't seem to work.
*/
for (i = req->packet_size; i > 2; i -= 4, ++buf)
isp1760_udc_write_raw(udc, ISP176x_DC_DATAPORT, *buf);
if (i > 0)
isp1760_udc_write_raw16(udc, ISP176x_DC_DATAPORT, *(u16 *)buf);
if (ep->addr == 0)
isp1760_udc_set(udc, DC_DSEN);
if (!req->packet_size)
isp1760_udc_set(udc, DC_VENDP);
}
static void isp1760_ep_rx_ready(struct isp1760_ep *ep)
{
struct isp1760_udc *udc = ep->udc;
struct isp1760_request *req;
bool complete;
spin_lock(&udc->lock);
if (ep->addr == 0 && udc->ep0_state != ISP1760_CTRL_DATA_OUT) {
spin_unlock(&udc->lock);
dev_dbg(udc->isp->dev, "%s: invalid ep0 state %u\n", __func__,
udc->ep0_state);
return;
}
if (ep->addr != 0 && !ep->desc) {
spin_unlock(&udc->lock);
dev_dbg(udc->isp->dev, "%s: ep%02x is disabled\n", __func__,
ep->addr);
return;
}
if (list_empty(&ep->queue)) {
ep->rx_pending = true;
spin_unlock(&udc->lock);
dev_dbg(udc->isp->dev, "%s: ep%02x (%p) has no request queued\n",
__func__, ep->addr, ep);
return;
}
req = list_first_entry(&ep->queue, struct isp1760_request,
queue);
complete = isp1760_udc_receive(ep, req);
spin_unlock(&udc->lock);
if (complete)
isp1760_udc_request_complete(ep, req, 0);
}
static void isp1760_ep_tx_complete(struct isp1760_ep *ep)
{
struct isp1760_udc *udc = ep->udc;
struct isp1760_request *complete = NULL;
struct isp1760_request *req;
bool need_zlp;
spin_lock(&udc->lock);
if (ep->addr == 0 && udc->ep0_state != ISP1760_CTRL_DATA_IN) {
spin_unlock(&udc->lock);
dev_dbg(udc->isp->dev, "TX IRQ: invalid endpoint state %u\n",
udc->ep0_state);
return;
}
if (list_empty(&ep->queue)) {
/*
* This can happen for the control endpoint when the reply to
* the GET_STATUS IN control request is sent directly by the
* setup IRQ handler. Just proceed to the status stage.
*/
if (ep->addr == 0) {
isp1760_udc_ctrl_send_status(ep, USB_DIR_IN);
spin_unlock(&udc->lock);
return;
}
spin_unlock(&udc->lock);
dev_dbg(udc->isp->dev, "%s: ep%02x has no request queued\n",
__func__, ep->addr);
return;
}
req = list_first_entry(&ep->queue, struct isp1760_request,
queue);
req->req.actual += req->packet_size;
need_zlp = req->req.actual == req->req.length &&
!(req->req.length % ep->maxpacket) &&
req->packet_size && req->req.zero;
dev_dbg(udc->isp->dev,
"TX IRQ: req %p actual/length %u/%u maxpacket %u packet size %u zero %u need zlp %u\n",
req, req->req.actual, req->req.length, ep->maxpacket,
req->packet_size, req->req.zero, need_zlp);
/*
* Complete the request if all data has been sent and we don't need to
* transmit a zero length packet.
*/
if (req->req.actual == req->req.length && !need_zlp) {
complete = req;
list_del(&req->queue);
if (ep->addr == 0)
isp1760_udc_ctrl_send_status(ep, USB_DIR_IN);
if (!list_empty(&ep->queue))
req = list_first_entry(&ep->queue,
struct isp1760_request, queue);
else
req = NULL;
}
/*
* Transmit the next packet or start the next request, if any.
*
* TODO: If the endpoint is stalled the next request shouldn't be
	 * started, but what about the next packet?
*/
if (req)
isp1760_udc_transmit(ep, req);
spin_unlock(&udc->lock);
if (complete)
isp1760_udc_request_complete(ep, complete, 0);
}
static int __isp1760_udc_set_halt(struct isp1760_ep *ep, bool halt)
{
struct isp1760_udc *udc = ep->udc;
dev_dbg(udc->isp->dev, "%s: %s halt on ep%02x\n", __func__,
halt ? "set" : "clear", ep->addr);
if (ep->desc && usb_endpoint_xfer_isoc(ep->desc)) {
dev_dbg(udc->isp->dev, "%s: ep%02x is isochronous\n", __func__,
ep->addr);
return -EINVAL;
}
isp1760_udc_select_ep(udc, ep);
if (halt)
isp1760_udc_set(udc, DC_STALL);
else
isp1760_udc_clear(udc, DC_STALL);
if (ep->addr == 0) {
/* When halting the control endpoint, stall both IN and OUT. */
__isp1760_udc_select_ep(udc, ep, USB_DIR_IN);
if (halt)
isp1760_udc_set(udc, DC_STALL);
else
isp1760_udc_clear(udc, DC_STALL);
} else if (!halt) {
/* Reset the data PID by cycling the endpoint enable bit. */
isp1760_udc_clear(udc, DC_EPENABLE);
isp1760_udc_set(udc, DC_EPENABLE);
/*
* Disabling the endpoint emptied the transmit FIFO, fill it
* again if a request is pending.
*
		 * TODO: Does the gadget framework require synchronization with
		 * the TX IRQ handler?
*/
if ((ep->addr & USB_DIR_IN) && !list_empty(&ep->queue)) {
struct isp1760_request *req;
req = list_first_entry(&ep->queue,
struct isp1760_request, queue);
isp1760_udc_transmit(ep, req);
}
}
ep->halted = halt;
return 0;
}
/* -----------------------------------------------------------------------------
* Control Endpoint
*/
static int isp1760_udc_get_status(struct isp1760_udc *udc,
const struct usb_ctrlrequest *req)
{
struct isp1760_ep *ep;
u16 status;
if (req->wLength != cpu_to_le16(2) || req->wValue != cpu_to_le16(0))
return -EINVAL;
switch (req->bRequestType) {
case USB_DIR_IN | USB_RECIP_DEVICE:
status = udc->devstatus;
break;
case USB_DIR_IN | USB_RECIP_INTERFACE:
status = 0;
break;
case USB_DIR_IN | USB_RECIP_ENDPOINT:
ep = isp1760_udc_find_ep(udc, le16_to_cpu(req->wIndex));
if (!ep)
return -EINVAL;
status = 0;
if (ep->halted)
status |= 1 << USB_ENDPOINT_HALT;
break;
default:
return -EINVAL;
}
isp1760_udc_set(udc, DC_EPDIR);
isp1760_udc_write(udc, DC_ENDPIDX, 1);
isp1760_udc_write(udc, DC_BUFLEN, 2);
isp1760_udc_write_raw16(udc, ISP176x_DC_DATAPORT, status);
isp1760_udc_set(udc, DC_DSEN);
dev_dbg(udc->isp->dev, "%s: status 0x%04x\n", __func__, status);
return 0;
}
static int isp1760_udc_set_address(struct isp1760_udc *udc, u16 addr)
{
if (addr > 127) {
dev_dbg(udc->isp->dev, "invalid device address %u\n", addr);
return -EINVAL;
}
if (udc->gadget.state != USB_STATE_DEFAULT &&
udc->gadget.state != USB_STATE_ADDRESS) {
dev_dbg(udc->isp->dev, "can't set address in state %u\n",
udc->gadget.state);
return -EINVAL;
}
usb_gadget_set_state(&udc->gadget, addr ? USB_STATE_ADDRESS :
USB_STATE_DEFAULT);
isp1760_udc_write(udc, DC_DEVADDR, addr);
isp1760_udc_set(udc, DC_DEVEN);
spin_lock(&udc->lock);
isp1760_udc_ctrl_send_status(&udc->ep[0], USB_DIR_OUT);
spin_unlock(&udc->lock);
return 0;
}
static bool isp1760_ep0_setup_standard(struct isp1760_udc *udc,
struct usb_ctrlrequest *req)
{
bool stall;
switch (req->bRequest) {
case USB_REQ_GET_STATUS:
return isp1760_udc_get_status(udc, req);
case USB_REQ_CLEAR_FEATURE:
switch (req->bRequestType) {
case USB_DIR_OUT | USB_RECIP_DEVICE: {
/* TODO: Handle remote wakeup feature. */
return true;
}
case USB_DIR_OUT | USB_RECIP_ENDPOINT: {
u16 index = le16_to_cpu(req->wIndex);
struct isp1760_ep *ep;
if (req->wLength != cpu_to_le16(0) ||
req->wValue != cpu_to_le16(USB_ENDPOINT_HALT))
return true;
ep = isp1760_udc_find_ep(udc, index);
if (!ep)
return true;
spin_lock(&udc->lock);
/*
* If the endpoint is wedged only the gadget can clear
* the halt feature. Pretend success in that case, but
* keep the endpoint halted.
*/
if (!ep->wedged)
stall = __isp1760_udc_set_halt(ep, false);
else
stall = false;
if (!stall)
isp1760_udc_ctrl_send_status(&udc->ep[0],
USB_DIR_OUT);
spin_unlock(&udc->lock);
return stall;
}
default:
return true;
}
break;
case USB_REQ_SET_FEATURE:
switch (req->bRequestType) {
case USB_DIR_OUT | USB_RECIP_DEVICE: {
/* TODO: Handle remote wakeup and test mode features */
return true;
}
case USB_DIR_OUT | USB_RECIP_ENDPOINT: {
u16 index = le16_to_cpu(req->wIndex);
struct isp1760_ep *ep;
if (req->wLength != cpu_to_le16(0) ||
req->wValue != cpu_to_le16(USB_ENDPOINT_HALT))
return true;
ep = isp1760_udc_find_ep(udc, index);
if (!ep)
return true;
spin_lock(&udc->lock);
stall = __isp1760_udc_set_halt(ep, true);
if (!stall)
isp1760_udc_ctrl_send_status(&udc->ep[0],
USB_DIR_OUT);
spin_unlock(&udc->lock);
return stall;
}
default:
return true;
}
break;
case USB_REQ_SET_ADDRESS:
if (req->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE))
return true;
return isp1760_udc_set_address(udc, le16_to_cpu(req->wValue));
case USB_REQ_SET_CONFIGURATION:
if (req->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE))
return true;
if (udc->gadget.state != USB_STATE_ADDRESS &&
udc->gadget.state != USB_STATE_CONFIGURED)
return true;
stall = udc->driver->setup(&udc->gadget, req) < 0;
if (stall)
return true;
usb_gadget_set_state(&udc->gadget, req->wValue ?
USB_STATE_CONFIGURED : USB_STATE_ADDRESS);
/*
* SET_CONFIGURATION (and SET_INTERFACE) must reset the halt
* feature on all endpoints. There is however no need to do so
* explicitly here as the gadget driver will disable and
* reenable endpoints, clearing the halt feature.
*/
return false;
default:
return udc->driver->setup(&udc->gadget, req) < 0;
}
}
static void isp1760_ep0_setup(struct isp1760_udc *udc)
{
union {
struct usb_ctrlrequest r;
u32 data[2];
} req;
unsigned int count;
bool stall = false;
spin_lock(&udc->lock);
isp1760_udc_set(udc, DC_EP0SETUP);
count = isp1760_udc_read(udc, DC_BUFLEN);
if (count != sizeof(req)) {
spin_unlock(&udc->lock);
dev_err(udc->isp->dev, "invalid length %u for setup packet\n",
count);
isp1760_udc_ctrl_send_stall(&udc->ep[0]);
return;
}
req.data[0] = isp1760_udc_read_raw(udc, ISP176x_DC_DATAPORT);
req.data[1] = isp1760_udc_read_raw(udc, ISP176x_DC_DATAPORT);
if (udc->ep0_state != ISP1760_CTRL_SETUP) {
spin_unlock(&udc->lock);
dev_dbg(udc->isp->dev, "unexpected SETUP packet\n");
return;
}
/* Move to the data stage. */
if (!req.r.wLength)
udc->ep0_state = ISP1760_CTRL_STATUS;
else if (req.r.bRequestType & USB_DIR_IN)
udc->ep0_state = ISP1760_CTRL_DATA_IN;
else
udc->ep0_state = ISP1760_CTRL_DATA_OUT;
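/*
 * In short: a request without a data stage goes straight to the status
 * stage; otherwise the direction bit of bRequestType selects the
 * DATA_IN or DATA_OUT stage.
 */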
udc->ep0_dir = req.r.bRequestType & USB_DIR_IN;
udc->ep0_length = le16_to_cpu(req.r.wLength);
spin_unlock(&udc->lock);
dev_dbg(udc->isp->dev,
"%s: bRequestType 0x%02x bRequest 0x%02x wValue 0x%04x wIndex 0x%04x wLength 0x%04x\n",
__func__, req.r.bRequestType, req.r.bRequest,
le16_to_cpu(req.r.wValue), le16_to_cpu(req.r.wIndex),
le16_to_cpu(req.r.wLength));
if ((req.r.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
stall = isp1760_ep0_setup_standard(udc, &req.r);
else
stall = udc->driver->setup(&udc->gadget, &req.r) < 0;
if (stall)
isp1760_udc_ctrl_send_stall(&udc->ep[0]);
}
/* -----------------------------------------------------------------------------
* Gadget Endpoint Operations
*/
static int isp1760_ep_enable(struct usb_ep *ep,
const struct usb_endpoint_descriptor *desc)
{
struct isp1760_ep *uep = ep_to_udc_ep(ep);
struct isp1760_udc *udc = uep->udc;
unsigned long flags;
unsigned int type;
dev_dbg(uep->udc->isp->dev, "%s\n", __func__);
/*
* Validate the descriptor. The control endpoint can't be enabled
* manually.
*/
if (desc->bDescriptorType != USB_DT_ENDPOINT ||
desc->bEndpointAddress == 0 ||
desc->bEndpointAddress != uep->addr ||
le16_to_cpu(desc->wMaxPacketSize) > ep->maxpacket) {
dev_dbg(udc->isp->dev,
"%s: invalid descriptor type %u addr %02x ep addr %02x max packet size %u/%u\n",
__func__, desc->bDescriptorType,
desc->bEndpointAddress, uep->addr,
le16_to_cpu(desc->wMaxPacketSize), ep->maxpacket);
return -EINVAL;
}
switch (usb_endpoint_type(desc)) {
case USB_ENDPOINT_XFER_ISOC:
type = ISP176x_DC_ENDPTYP_ISOC;
break;
case USB_ENDPOINT_XFER_BULK:
type = ISP176x_DC_ENDPTYP_BULK;
break;
case USB_ENDPOINT_XFER_INT:
type = ISP176x_DC_ENDPTYP_INTERRUPT;
break;
case USB_ENDPOINT_XFER_CONTROL:
default:
dev_dbg(udc->isp->dev, "%s: control endpoints unsupported\n",
__func__);
return -EINVAL;
}
spin_lock_irqsave(&udc->lock, flags);
uep->desc = desc;
uep->maxpacket = le16_to_cpu(desc->wMaxPacketSize);
uep->rx_pending = false;
uep->halted = false;
uep->wedged = false;
isp1760_udc_select_ep(udc, uep);
isp1760_udc_write(udc, DC_FFOSZ, uep->maxpacket);
isp1760_udc_write(udc, DC_BUFLEN, uep->maxpacket);
isp1760_udc_write(udc, DC_ENDPTYP, type);
isp1760_udc_set(udc, DC_EPENABLE);
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
}
static int isp1760_ep_disable(struct usb_ep *ep)
{
struct isp1760_ep *uep = ep_to_udc_ep(ep);
struct isp1760_udc *udc = uep->udc;
struct isp1760_request *req, *nreq;
LIST_HEAD(req_list);
unsigned long flags;
dev_dbg(udc->isp->dev, "%s\n", __func__);
spin_lock_irqsave(&udc->lock, flags);
if (!uep->desc) {
dev_dbg(udc->isp->dev, "%s: endpoint not enabled\n", __func__);
spin_unlock_irqrestore(&udc->lock, flags);
return -EINVAL;
}
uep->desc = NULL;
uep->maxpacket = 0;
isp1760_udc_select_ep(udc, uep);
isp1760_udc_clear(udc, DC_EPENABLE);
isp1760_udc_clear(udc, DC_ENDPTYP);
/* TODO Synchronize with the IRQ handler */
list_splice_init(&uep->queue, &req_list);
spin_unlock_irqrestore(&udc->lock, flags);
list_for_each_entry_safe(req, nreq, &req_list, queue) {
list_del(&req->queue);
isp1760_udc_request_complete(uep, req, -ESHUTDOWN);
}
return 0;
}
static struct usb_request *isp1760_ep_alloc_request(struct usb_ep *ep,
gfp_t gfp_flags)
{
struct isp1760_request *req;
req = kzalloc(sizeof(*req), gfp_flags);
if (!req)
return NULL;
return &req->req;
}
static void isp1760_ep_free_request(struct usb_ep *ep, struct usb_request *_req)
{
struct isp1760_request *req = req_to_udc_req(_req);
kfree(req);
}
static int isp1760_ep_queue(struct usb_ep *ep, struct usb_request *_req,
gfp_t gfp_flags)
{
struct isp1760_request *req = req_to_udc_req(_req);
struct isp1760_ep *uep = ep_to_udc_ep(ep);
struct isp1760_udc *udc = uep->udc;
bool complete = false;
unsigned long flags;
int ret = 0;
_req->status = -EINPROGRESS;
_req->actual = 0;
spin_lock_irqsave(&udc->lock, flags);
dev_dbg(udc->isp->dev,
"%s: req %p (%u bytes%s) ep %p(0x%02x)\n", __func__, _req,
_req->length, _req->zero ? " (zlp)" : "", uep, uep->addr);
req->ep = uep;
if (uep->addr == 0) {
if (_req->length != udc->ep0_length &&
udc->ep0_state != ISP1760_CTRL_DATA_IN) {
dev_dbg(udc->isp->dev,
"%s: invalid length %u for req %p\n",
__func__, _req->length, req);
ret = -EINVAL;
goto done;
}
switch (udc->ep0_state) {
case ISP1760_CTRL_DATA_IN:
dev_dbg(udc->isp->dev, "%s: transmitting req %p\n",
__func__, req);
list_add_tail(&req->queue, &uep->queue);
isp1760_udc_transmit(uep, req);
break;
case ISP1760_CTRL_DATA_OUT:
list_add_tail(&req->queue, &uep->queue);
__isp1760_udc_select_ep(udc, uep, USB_DIR_OUT);
isp1760_udc_set(udc, DC_DSEN);
break;
case ISP1760_CTRL_STATUS:
complete = true;
break;
default:
dev_dbg(udc->isp->dev, "%s: invalid ep0 state\n",
__func__);
ret = -EINVAL;
break;
}
} else if (uep->desc) {
bool empty = list_empty(&uep->queue);
list_add_tail(&req->queue, &uep->queue);
if ((uep->addr & USB_DIR_IN) && !uep->halted && empty)
isp1760_udc_transmit(uep, req);
else if (!(uep->addr & USB_DIR_IN) && uep->rx_pending)
complete = isp1760_udc_receive(uep, req);
} else {
dev_dbg(udc->isp->dev,
"%s: can't queue request to disabled ep%02x\n",
__func__, uep->addr);
ret = -ESHUTDOWN;
}
done:
if (ret < 0)
req->ep = NULL;
spin_unlock_irqrestore(&udc->lock, flags);
if (complete)
isp1760_udc_request_complete(uep, req, 0);
return ret;
}
static int isp1760_ep_dequeue(struct usb_ep *ep, struct usb_request *_req)
{
struct isp1760_request *req = req_to_udc_req(_req);
struct isp1760_ep *uep = ep_to_udc_ep(ep);
struct isp1760_udc *udc = uep->udc;
unsigned long flags;
dev_dbg(uep->udc->isp->dev, "%s(ep%02x)\n", __func__, uep->addr);
spin_lock_irqsave(&udc->lock, flags);
if (req->ep != uep)
req = NULL;
else
list_del(&req->queue);
spin_unlock_irqrestore(&udc->lock, flags);
if (!req)
return -EINVAL;
isp1760_udc_request_complete(uep, req, -ECONNRESET);
return 0;
}
static int __isp1760_ep_set_halt(struct isp1760_ep *uep, bool stall, bool wedge)
{
struct isp1760_udc *udc = uep->udc;
int ret;
if (!uep->addr) {
/*
* Halting the control endpoint is only valid as a delayed error
* response to a SETUP packet. Make sure EP0 is in the right
* stage and that the gadget isn't trying to clear the halt
* condition.
*/
if (WARN_ON(udc->ep0_state == ISP1760_CTRL_SETUP || !stall ||
wedge)) {
return -EINVAL;
}
}
if (uep->addr && !uep->desc) {
dev_dbg(udc->isp->dev, "%s: ep%02x is disabled\n", __func__,
uep->addr);
return -EINVAL;
}
if (uep->addr & USB_DIR_IN) {
/* Refuse to halt IN endpoints with active transfers. */
if (!list_empty(&uep->queue)) {
dev_dbg(udc->isp->dev,
"%s: ep%02x has request pending\n", __func__,
uep->addr);
return -EAGAIN;
}
}
ret = __isp1760_udc_set_halt(uep, stall);
if (ret < 0)
return ret;
if (!uep->addr) {
/*
* Stalling EP0 completes the control transaction, move back to
* the SETUP state.
*/
udc->ep0_state = ISP1760_CTRL_SETUP;
return 0;
}
if (wedge)
uep->wedged = true;
else if (!stall)
uep->wedged = false;
return 0;
}
static int isp1760_ep_set_halt(struct usb_ep *ep, int value)
{
struct isp1760_ep *uep = ep_to_udc_ep(ep);
unsigned long flags;
int ret;
dev_dbg(uep->udc->isp->dev, "%s: %s halt on ep%02x\n", __func__,
value ? "set" : "clear", uep->addr);
spin_lock_irqsave(&uep->udc->lock, flags);
ret = __isp1760_ep_set_halt(uep, value, false);
spin_unlock_irqrestore(&uep->udc->lock, flags);
return ret;
}
static int isp1760_ep_set_wedge(struct usb_ep *ep)
{
struct isp1760_ep *uep = ep_to_udc_ep(ep);
unsigned long flags;
int ret;
dev_dbg(uep->udc->isp->dev, "%s: set wedge on ep%02x\n", __func__,
uep->addr);
spin_lock_irqsave(&uep->udc->lock, flags);
ret = __isp1760_ep_set_halt(uep, true, true);
spin_unlock_irqrestore(&uep->udc->lock, flags);
return ret;
}
static void isp1760_ep_fifo_flush(struct usb_ep *ep)
{
struct isp1760_ep *uep = ep_to_udc_ep(ep);
struct isp1760_udc *udc = uep->udc;
unsigned long flags;
spin_lock_irqsave(&udc->lock, flags);
isp1760_udc_select_ep(udc, uep);
/*
* Set the CLBUF bit twice to flush both buffers in case double
* buffering is enabled.
*/
isp1760_udc_set(udc, DC_CLBUF);
isp1760_udc_set(udc, DC_CLBUF);
spin_unlock_irqrestore(&udc->lock, flags);
}
static const struct usb_ep_ops isp1760_ep_ops = {
.enable = isp1760_ep_enable,
.disable = isp1760_ep_disable,
.alloc_request = isp1760_ep_alloc_request,
.free_request = isp1760_ep_free_request,
.queue = isp1760_ep_queue,
.dequeue = isp1760_ep_dequeue,
.set_halt = isp1760_ep_set_halt,
.set_wedge = isp1760_ep_set_wedge,
.fifo_flush = isp1760_ep_fifo_flush,
};
/* -----------------------------------------------------------------------------
* Device States
*/
/* Called with the UDC spinlock held. */
static void isp1760_udc_connect(struct isp1760_udc *udc)
{
usb_gadget_set_state(&udc->gadget, USB_STATE_POWERED);
mod_timer(&udc->vbus_timer, jiffies + ISP1760_VBUS_POLL_INTERVAL);
}
/* Called with the UDC spinlock held. */
static void isp1760_udc_disconnect(struct isp1760_udc *udc)
{
if (udc->gadget.state < USB_STATE_POWERED)
return;
dev_dbg(udc->isp->dev, "Device disconnected in state %u\n",
udc->gadget.state);
udc->gadget.speed = USB_SPEED_UNKNOWN;
usb_gadget_set_state(&udc->gadget, USB_STATE_ATTACHED);
if (udc->driver->disconnect)
udc->driver->disconnect(&udc->gadget);
del_timer(&udc->vbus_timer);
/* TODO Reset all endpoints ? */
}
static void isp1760_udc_init_hw(struct isp1760_udc *udc)
{
u32 intconf = udc->is_isp1763 ? ISP1763_DC_INTCONF : ISP176x_DC_INTCONF;
u32 intena = udc->is_isp1763 ? ISP1763_DC_INTENABLE :
ISP176x_DC_INTENABLE;
/*
* The device controller currently shares its interrupt with the host
* controller, the DC_IRQ polarity and signaling mode are ignored. Set
* them to active-low, level-triggered.
*
* Configure the control, in and out pipes to generate interrupts on
* ACK tokens only (and NYET for the out pipe). The default
* configuration also generates an interrupt on the first NAK token.
*/
isp1760_reg_write(udc->regs, intconf,
ISP176x_DC_CDBGMOD_ACK | ISP176x_DC_DDBGMODIN_ACK |
ISP176x_DC_DDBGMODOUT_ACK);
isp1760_reg_write(udc->regs, intena, DC_IEPRXTX(7) |
DC_IEPRXTX(6) | DC_IEPRXTX(5) | DC_IEPRXTX(4) |
DC_IEPRXTX(3) | DC_IEPRXTX(2) | DC_IEPRXTX(1) |
DC_IEPRXTX(0) | ISP176x_DC_IEP0SETUP |
ISP176x_DC_IEVBUS | ISP176x_DC_IERESM |
ISP176x_DC_IESUSP | ISP176x_DC_IEHS_STA |
ISP176x_DC_IEBRST);
if (udc->connected)
isp1760_set_pullup(udc->isp, true);
isp1760_udc_set(udc, DC_DEVEN);
}
static void isp1760_udc_reset(struct isp1760_udc *udc)
{
unsigned long flags;
spin_lock_irqsave(&udc->lock, flags);
/*
* The bus reset has reset most registers to their default value,
* reinitialize the UDC hardware.
*/
isp1760_udc_init_hw(udc);
udc->ep0_state = ISP1760_CTRL_SETUP;
udc->gadget.speed = USB_SPEED_FULL;
usb_gadget_udc_reset(&udc->gadget, udc->driver);
spin_unlock_irqrestore(&udc->lock, flags);
}
static void isp1760_udc_suspend(struct isp1760_udc *udc)
{
if (udc->gadget.state < USB_STATE_DEFAULT)
return;
if (udc->driver->suspend)
udc->driver->suspend(&udc->gadget);
}
static void isp1760_udc_resume(struct isp1760_udc *udc)
{
if (udc->gadget.state < USB_STATE_DEFAULT)
return;
if (udc->driver->resume)
udc->driver->resume(&udc->gadget);
}
/* -----------------------------------------------------------------------------
* Gadget Operations
*/
static int isp1760_udc_get_frame(struct usb_gadget *gadget)
{
struct isp1760_udc *udc = gadget_to_udc(gadget);
return isp1760_udc_read(udc, DC_FRAMENUM);
}
static int isp1760_udc_wakeup(struct usb_gadget *gadget)
{
struct isp1760_udc *udc = gadget_to_udc(gadget);
dev_dbg(udc->isp->dev, "%s\n", __func__);
return -ENOTSUPP;
}
static int isp1760_udc_set_selfpowered(struct usb_gadget *gadget,
int is_selfpowered)
{
struct isp1760_udc *udc = gadget_to_udc(gadget);
if (is_selfpowered)
udc->devstatus |= 1 << USB_DEVICE_SELF_POWERED;
else
udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
return 0;
}
static int isp1760_udc_pullup(struct usb_gadget *gadget, int is_on)
{
struct isp1760_udc *udc = gadget_to_udc(gadget);
isp1760_set_pullup(udc->isp, is_on);
udc->connected = is_on;
return 0;
}
static int isp1760_udc_start(struct usb_gadget *gadget,
struct usb_gadget_driver *driver)
{
struct isp1760_udc *udc = gadget_to_udc(gadget);
unsigned long flags;
/* The hardware doesn't support low speed. */
if (driver->max_speed < USB_SPEED_FULL) {
dev_err(udc->isp->dev, "Invalid gadget driver\n");
return -EINVAL;
}
spin_lock_irqsave(&udc->lock, flags);
if (udc->driver) {
dev_err(udc->isp->dev, "UDC already has a gadget driver\n");
spin_unlock_irqrestore(&udc->lock, flags);
return -EBUSY;
}
udc->driver = driver;
spin_unlock_irqrestore(&udc->lock, flags);
dev_dbg(udc->isp->dev, "starting UDC with driver %s\n",
driver->function);
udc->devstatus = 0;
udc->connected = true;
usb_gadget_set_state(&udc->gadget, USB_STATE_ATTACHED);
/* DMA isn't supported yet, don't enable the DMA clock. */
isp1760_udc_set(udc, DC_GLINTENA);
isp1760_udc_init_hw(udc);
dev_dbg(udc->isp->dev, "UDC started with driver %s\n",
driver->function);
return 0;
}
static int isp1760_udc_stop(struct usb_gadget *gadget)
{
struct isp1760_udc *udc = gadget_to_udc(gadget);
u32 mode_reg = udc->is_isp1763 ? ISP1763_DC_MODE : ISP176x_DC_MODE;
unsigned long flags;
dev_dbg(udc->isp->dev, "%s\n", __func__);
del_timer_sync(&udc->vbus_timer);
isp1760_reg_write(udc->regs, mode_reg, 0);
spin_lock_irqsave(&udc->lock, flags);
udc->driver = NULL;
spin_unlock_irqrestore(&udc->lock, flags);
return 0;
}
static const struct usb_gadget_ops isp1760_udc_ops = {
.get_frame = isp1760_udc_get_frame,
.wakeup = isp1760_udc_wakeup,
.set_selfpowered = isp1760_udc_set_selfpowered,
.pullup = isp1760_udc_pullup,
.udc_start = isp1760_udc_start,
.udc_stop = isp1760_udc_stop,
};
/* -----------------------------------------------------------------------------
* Interrupt Handling
*/
static u32 isp1760_udc_irq_get_status(struct isp1760_udc *udc)
{
u32 status;
if (udc->is_isp1763) {
status = isp1760_reg_read(udc->regs, ISP1763_DC_INTERRUPT)
& isp1760_reg_read(udc->regs, ISP1763_DC_INTENABLE);
isp1760_reg_write(udc->regs, ISP1763_DC_INTERRUPT, status);
} else {
status = isp1760_reg_read(udc->regs, ISP176x_DC_INTERRUPT)
& isp1760_reg_read(udc->regs, ISP176x_DC_INTENABLE);
isp1760_reg_write(udc->regs, ISP176x_DC_INTERRUPT, status);
}
return status;
}
static irqreturn_t isp1760_udc_irq(int irq, void *dev)
{
struct isp1760_udc *udc = dev;
unsigned int i;
u32 status;
status = isp1760_udc_irq_get_status(udc);
if (status & ISP176x_DC_IEVBUS) {
dev_dbg(udc->isp->dev, "%s(VBUS)\n", __func__);
/* The VBUS interrupt is only triggered when VBUS appears. */
spin_lock(&udc->lock);
isp1760_udc_connect(udc);
spin_unlock(&udc->lock);
}
if (status & ISP176x_DC_IEBRST) {
dev_dbg(udc->isp->dev, "%s(BRST)\n", __func__);
isp1760_udc_reset(udc);
}
for (i = 0; i <= 7; ++i) {
struct isp1760_ep *ep = &udc->ep[i*2];
if (status & DC_IEPTX(i)) {
dev_dbg(udc->isp->dev, "%s(EPTX%u)\n", __func__, i);
isp1760_ep_tx_complete(ep);
}
if (status & DC_IEPRX(i)) {
dev_dbg(udc->isp->dev, "%s(EPRX%u)\n", __func__, i);
isp1760_ep_rx_ready(i ? ep - 1 : ep);
}
}
if (status & ISP176x_DC_IEP0SETUP) {
dev_dbg(udc->isp->dev, "%s(EP0SETUP)\n", __func__);
isp1760_ep0_setup(udc);
}
if (status & ISP176x_DC_IERESM) {
dev_dbg(udc->isp->dev, "%s(RESM)\n", __func__);
isp1760_udc_resume(udc);
}
if (status & ISP176x_DC_IESUSP) {
dev_dbg(udc->isp->dev, "%s(SUSP)\n", __func__);
spin_lock(&udc->lock);
if (!isp1760_udc_is_set(udc, DC_VBUSSTAT))
isp1760_udc_disconnect(udc);
else
isp1760_udc_suspend(udc);
spin_unlock(&udc->lock);
}
if (status & ISP176x_DC_IEHS_STA) {
dev_dbg(udc->isp->dev, "%s(HS_STA)\n", __func__);
udc->gadget.speed = USB_SPEED_HIGH;
}
return status ? IRQ_HANDLED : IRQ_NONE;
}
static void isp1760_udc_vbus_poll(struct timer_list *t)
{
struct isp1760_udc *udc = from_timer(udc, t, vbus_timer);
unsigned long flags;
spin_lock_irqsave(&udc->lock, flags);
if (!(isp1760_udc_is_set(udc, DC_VBUSSTAT)))
isp1760_udc_disconnect(udc);
else if (udc->gadget.state >= USB_STATE_POWERED)
mod_timer(&udc->vbus_timer,
jiffies + ISP1760_VBUS_POLL_INTERVAL);
spin_unlock_irqrestore(&udc->lock, flags);
}
/* -----------------------------------------------------------------------------
* Registration
*/
static void isp1760_udc_init_eps(struct isp1760_udc *udc)
{
unsigned int i;
INIT_LIST_HEAD(&udc->gadget.ep_list);
for (i = 0; i < ARRAY_SIZE(udc->ep); ++i) {
struct isp1760_ep *ep = &udc->ep[i];
unsigned int ep_num = (i + 1) / 2;
bool is_in = !(i & 1);
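/*
 * The resulting index layout is ep[0] = ep0, ep[2n - 1] = epN-out and
 * ep[2n] = epN-in for n >= 1; the interrupt handler relies on this
 * when it computes &udc->ep[i * 2].
 */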
ep->udc = udc;
INIT_LIST_HEAD(&ep->queue);
ep->addr = (ep_num && is_in ? USB_DIR_IN : USB_DIR_OUT)
| ep_num;
ep->desc = NULL;
sprintf(ep->name, "ep%u%s", ep_num,
ep_num ? (is_in ? "in" : "out") : "");
ep->ep.ops = &isp1760_ep_ops;
ep->ep.name = ep->name;
/*
* Hardcode the maximum packet sizes for now, to 64 bytes for
* the control endpoint and 512 bytes for all other endpoints.
* This fits in the 8kB FIFO without double-buffering.
*/
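/*
 * Sanity check of that claim, assuming one control endpoint plus 14
 * data endpoints as the ep[] array layout suggests: 64 + 14 * 512 =
 * 7232 bytes, which indeed fits in the 8kB FIFO without
 * double-buffering.
 */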
if (ep_num == 0) {
usb_ep_set_maxpacket_limit(&ep->ep, 64);
ep->ep.caps.type_control = true;
ep->ep.caps.dir_in = true;
ep->ep.caps.dir_out = true;
ep->maxpacket = 64;
udc->gadget.ep0 = &ep->ep;
} else {
usb_ep_set_maxpacket_limit(&ep->ep, 512);
ep->ep.caps.type_iso = true;
ep->ep.caps.type_bulk = true;
ep->ep.caps.type_int = true;
ep->maxpacket = 0;
list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
}
if (is_in)
ep->ep.caps.dir_in = true;
else
ep->ep.caps.dir_out = true;
}
}
static int isp1760_udc_init(struct isp1760_udc *udc)
{
u32 mode_reg = udc->is_isp1763 ? ISP1763_DC_MODE : ISP176x_DC_MODE;
u16 scratch;
u32 chipid;
/*
* Check that the controller is present by writing to the scratch
* register, modifying the bus pattern by reading from the chip ID
* register, and reading the scratch register value back. The chip ID
* and scratch register contents must match the expected values.
*/
isp1760_udc_write(udc, DC_SCRATCH, 0xbabe);
chipid = isp1760_udc_read(udc, DC_CHIP_ID_HIGH) << 16;
chipid |= isp1760_udc_read(udc, DC_CHIP_ID_LOW);
scratch = isp1760_udc_read(udc, DC_SCRATCH);
if (scratch != 0xbabe) {
dev_err(udc->isp->dev,
"udc: scratch test failed (0x%04x/0x%08x)\n",
scratch, chipid);
return -ENODEV;
}
if (chipid != 0x00011582 && chipid != 0x00158210 &&
chipid != 0x00176320) {
dev_err(udc->isp->dev, "udc: invalid chip ID 0x%08x\n", chipid);
return -ENODEV;
}
/* Reset the device controller. */
isp1760_udc_set(udc, DC_SFRESET);
usleep_range(10000, 11000);
isp1760_reg_write(udc->regs, mode_reg, 0);
usleep_range(10000, 11000);
return 0;
}
int isp1760_udc_register(struct isp1760_device *isp, int irq,
unsigned long irqflags)
{
struct isp1760_udc *udc = &isp->udc;
int ret;
udc->irq = -1;
udc->isp = isp;
spin_lock_init(&udc->lock);
timer_setup(&udc->vbus_timer, isp1760_udc_vbus_poll, 0);
ret = isp1760_udc_init(udc);
if (ret < 0)
return ret;
udc->irqname = kasprintf(GFP_KERNEL, "%s (udc)", dev_name(isp->dev));
if (!udc->irqname)
return -ENOMEM;
ret = request_irq(irq, isp1760_udc_irq, IRQF_SHARED | irqflags,
udc->irqname, udc);
if (ret < 0)
goto error;
udc->irq = irq;
/*
* Initialize the gadget static fields and register its device. Gadget
* fields that vary during the life time of the gadget are initialized
* by the UDC core.
*/
udc->gadget.ops = &isp1760_udc_ops;
udc->gadget.speed = USB_SPEED_UNKNOWN;
udc->gadget.max_speed = USB_SPEED_HIGH;
udc->gadget.name = "isp1761_udc";
isp1760_udc_init_eps(udc);
ret = usb_add_gadget_udc(isp->dev, &udc->gadget);
if (ret < 0)
goto error;
return 0;
error:
if (udc->irq >= 0)
free_irq(udc->irq, udc);
kfree(udc->irqname);
return ret;
}
void isp1760_udc_unregister(struct isp1760_device *isp)
{
struct isp1760_udc *udc = &isp->udc;
if (!udc->isp)
return;
usb_del_gadget_udc(&udc->gadget);
free_irq(udc->irq, udc);
kfree(udc->irqname);
}
| linux-master | drivers/usb/isp1760/isp1760-udc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the NXP ISP1760 chip
*
* However, the code might contain some bugs. What doesn't work for sure is:
* - ISO
* - OTG
* The interrupt line is configured as active low, level-triggered.
*
* (c) 2007 Sebastian Siewior <[email protected]>
*
* (c) 2011 Arvid Brodin <[email protected]>
*
* Copyright 2021 Linaro, Rui Miguel Silva <[email protected]>
*
*/
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/mm.h>
#include <linux/timer.h>
#include <asm/unaligned.h>
#include <asm/cacheflush.h>
#include "isp1760-core.h"
#include "isp1760-hcd.h"
#include "isp1760-regs.h"
static struct kmem_cache *qtd_cachep;
static struct kmem_cache *qh_cachep;
static struct kmem_cache *urb_listitem_cachep;
typedef void (packet_enqueue)(struct usb_hcd *hcd, struct isp1760_qh *qh,
struct isp1760_qtd *qtd);
static inline struct isp1760_hcd *hcd_to_priv(struct usb_hcd *hcd)
{
return *(struct isp1760_hcd **)hcd->hcd_priv;
}
#define dw_to_le32(x) (cpu_to_le32((__force u32)x))
#define le32_to_dw(x) ((__force __dw)(le32_to_cpu(x)))
/* urb state */
#define DELETE_URB (0x0008)
#define NO_TRANSFER_ACTIVE (0xffffffff)
/* Philips Proprietary Transfer Descriptor (PTD) */
typedef __u32 __bitwise __dw;
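/*
 * The __bitwise annotation makes sparse flag any mixing of raw u32
 * values and PTD words that lacks an explicit __force cast (see the
 * TO_DW()/TO_U32() helpers below).
 */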
struct ptd {
__dw dw0;
__dw dw1;
__dw dw2;
__dw dw3;
__dw dw4;
__dw dw5;
__dw dw6;
__dw dw7;
};
struct ptd_le32 {
__le32 dw0;
__le32 dw1;
__le32 dw2;
__le32 dw3;
__le32 dw4;
__le32 dw5;
__le32 dw6;
__le32 dw7;
};
#define PTD_OFFSET 0x0400
#define ISO_PTD_OFFSET 0x0400
#define INT_PTD_OFFSET 0x0800
#define ATL_PTD_OFFSET 0x0c00
#define PAYLOAD_OFFSET 0x1000
#define ISP_BANK_0 0x00
#define ISP_BANK_1 0x01
#define ISP_BANK_2 0x02
#define ISP_BANK_3 0x03
#define TO_DW(x) ((__force __dw)x)
#define TO_U32(x) ((__force u32)x)
/* ATL */
/* DW0 */
#define DW0_VALID_BIT TO_DW(1)
#define FROM_DW0_VALID(x) (TO_U32(x) & 0x01)
#define TO_DW0_LENGTH(x) TO_DW((((u32)x) << 3))
#define TO_DW0_MAXPACKET(x) TO_DW((((u32)x) << 18))
#define TO_DW0_MULTI(x) TO_DW((((u32)x) << 29))
#define TO_DW0_ENDPOINT(x) TO_DW((((u32)x) << 31))
/* DW1 */
#define TO_DW1_DEVICE_ADDR(x) TO_DW((((u32)x) << 3))
#define TO_DW1_PID_TOKEN(x) TO_DW((((u32)x) << 10))
#define DW1_TRANS_BULK TO_DW(((u32)2 << 12))
#define DW1_TRANS_INT TO_DW(((u32)3 << 12))
#define DW1_TRANS_SPLIT TO_DW(((u32)1 << 14))
#define DW1_SE_USB_LOSPEED TO_DW(((u32)2 << 16))
#define TO_DW1_PORT_NUM(x) TO_DW((((u32)x) << 18))
#define TO_DW1_HUB_NUM(x) TO_DW((((u32)x) << 25))
/* DW2 */
#define TO_DW2_DATA_START_ADDR(x) TO_DW((((u32)x) << 8))
#define TO_DW2_RL(x) TO_DW(((x) << 25))
#define FROM_DW2_RL(x) ((TO_U32(x) >> 25) & 0xf)
/* DW3 */
#define FROM_DW3_NRBYTESTRANSFERRED(x) TO_U32((x) & 0x3fff)
#define FROM_DW3_SCS_NRBYTESTRANSFERRED(x) TO_U32((x) & 0x07ff)
#define TO_DW3_NAKCOUNT(x) TO_DW(((x) << 19))
#define FROM_DW3_NAKCOUNT(x) ((TO_U32(x) >> 19) & 0xf)
#define TO_DW3_CERR(x) TO_DW(((x) << 23))
#define FROM_DW3_CERR(x) ((TO_U32(x) >> 23) & 0x3)
#define TO_DW3_DATA_TOGGLE(x) TO_DW(((x) << 25))
#define FROM_DW3_DATA_TOGGLE(x) ((TO_U32(x) >> 25) & 0x1)
#define TO_DW3_PING(x) TO_DW(((x) << 26))
#define FROM_DW3_PING(x) ((TO_U32(x) >> 26) & 0x1)
#define DW3_ERROR_BIT TO_DW((1 << 28))
#define DW3_BABBLE_BIT TO_DW((1 << 29))
#define DW3_HALT_BIT TO_DW((1 << 30))
#define DW3_ACTIVE_BIT TO_DW((1 << 31))
#define FROM_DW3_ACTIVE(x) ((TO_U32(x) >> 31) & 0x01)
#define INT_UNDERRUN (1 << 2)
#define INT_BABBLE (1 << 1)
#define INT_EXACT (1 << 0)
#define SETUP_PID (2)
#define IN_PID (1)
#define OUT_PID (0)
/* Errata 1 */
#define RL_COUNTER (0)
#define NAK_COUNTER (0)
#define ERR_COUNTER (3)
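/*
 * A minimal sketch (illustrative, not part of the driver) of how the
 * DW0 helpers above compose a PTD word. The shift by 31 in
 * TO_DW0_ENDPOINT() keeps only the low bit of the endpoint number;
 * create_ptd_atl() below stores the remaining bits in DW1.
 */
static inline __dw example_compose_dw0(u32 length, u32 maxpacket, u32 epnum)
{
return DW0_VALID_BIT | TO_DW0_LENGTH(length) |
TO_DW0_MAXPACKET(maxpacket) | TO_DW0_ENDPOINT(epnum);
}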
struct isp1760_qtd {
u8 packet_type;
void *data_buffer;
u32 payload_addr;
/* the rest is HCD-private */
struct list_head qtd_list;
struct urb *urb;
size_t length;
size_t actual_length;
/* QTD_ENQUEUED: waiting for transfer (inactive) */
/* QTD_PAYLOAD_ALLOC: chip mem has been allocated for payload */
/* QTD_XFER_STARTED: valid ptd has been written to isp176x - only
interrupt handler may touch this qtd! */
/* QTD_XFER_COMPLETE: payload has been transferred successfully */
/* QTD_RETIRE: transfer error/abort qtd */
#define QTD_ENQUEUED 0
#define QTD_PAYLOAD_ALLOC 1
#define QTD_XFER_STARTED 2
#define QTD_XFER_COMPLETE 3
#define QTD_RETIRE 4
u32 status;
};
/* Queue head, one for each active endpoint */
struct isp1760_qh {
struct list_head qh_list;
struct list_head qtd_list;
u32 toggle;
u32 ping;
int slot;
int tt_buffer_dirty; /* See USB2.0 spec section 11.17.5 */
};
struct urb_listitem {
struct list_head urb_list;
struct urb *urb;
};
static const u32 isp176x_hc_portsc1_fields[] = {
[PORT_OWNER] = BIT(13),
[PORT_POWER] = BIT(12),
[PORT_LSTATUS] = BIT(10),
[PORT_RESET] = BIT(8),
[PORT_SUSPEND] = BIT(7),
[PORT_RESUME] = BIT(6),
[PORT_PE] = BIT(2),
[PORT_CSC] = BIT(1),
[PORT_CONNECT] = BIT(0),
};
/*
* Access functions for isp176x registers regmap fields
*/
static u32 isp1760_hcd_read(struct usb_hcd *hcd, u32 field)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
return isp1760_field_read(priv->fields, field);
}
/*
* We need, on the isp176x, to write the values directly to the portsc1
* register so that the other bits trigger as intended.
*/
static void isp1760_hcd_portsc1_set_clear(struct isp1760_hcd *priv, u32 field,
u32 val)
{
u32 bit = isp176x_hc_portsc1_fields[field];
u16 portsc1_reg = priv->is_isp1763 ? ISP1763_HC_PORTSC1 :
ISP176x_HC_PORTSC1;
u32 port_status = readl(priv->base + portsc1_reg);
if (val)
writel(port_status | bit, priv->base + portsc1_reg);
else
writel(port_status & ~bit, priv->base + portsc1_reg);
}
static void isp1760_hcd_write(struct usb_hcd *hcd, u32 field, u32 val)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
if (unlikely((field >= PORT_OWNER && field <= PORT_CONNECT)))
return isp1760_hcd_portsc1_set_clear(priv, field, val);
isp1760_field_write(priv->fields, field, val);
}
static void isp1760_hcd_set(struct usb_hcd *hcd, u32 field)
{
isp1760_hcd_write(hcd, field, 0xFFFFFFFF);
}
static void isp1760_hcd_clear(struct usb_hcd *hcd, u32 field)
{
isp1760_hcd_write(hcd, field, 0);
}
static int isp1760_hcd_set_and_wait(struct usb_hcd *hcd, u32 field,
u32 timeout_us)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
u32 val;
isp1760_hcd_set(hcd, field);
return regmap_field_read_poll_timeout(priv->fields[field], val,
val, 0, timeout_us);
}
static int isp1760_hcd_set_and_wait_swap(struct usb_hcd *hcd, u32 field,
u32 timeout_us)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
u32 val;
isp1760_hcd_set(hcd, field);
return regmap_field_read_poll_timeout(priv->fields[field], val,
!val, 0, timeout_us);
}
static int isp1760_hcd_clear_and_wait(struct usb_hcd *hcd, u32 field,
u32 timeout_us)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
u32 val;
isp1760_hcd_clear(hcd, field);
return regmap_field_read_poll_timeout(priv->fields[field], val,
!val, 0, timeout_us);
}
static bool isp1760_hcd_is_set(struct usb_hcd *hcd, u32 field)
{
return !!isp1760_hcd_read(hcd, field);
}
static bool isp1760_hcd_ppc_is_set(struct usb_hcd *hcd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
if (priv->is_isp1763)
return true;
return isp1760_hcd_is_set(hcd, HCS_PPC);
}
static u32 isp1760_hcd_n_ports(struct usb_hcd *hcd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
if (priv->is_isp1763)
return 1;
return isp1760_hcd_read(hcd, HCS_N_PORTS);
}
/*
* Access functions for isp176x memory (offset >= 0x0400).
*
* bank_reads8() reads memory locations prefetched by an earlier write to
* HC_MEMORY_REG (see isp176x datasheet). Unless you want to do fancy multi-
* bank optimizations, you should use the more generic mem_read() below.
*
* For access to ptd memory, use the specialized ptd_read() and ptd_write()
* below.
*
* These functions copy via MMIO data to/from the device. memcpy_{to|from}io()
* doesn't quite work because some platforms have to enforce 32-bit access
*/
static void bank_reads8(void __iomem *src_base, u32 src_offset, u32 bank_addr,
__u32 *dst, u32 bytes)
{
__u32 __iomem *src;
u32 val;
__u8 *src_byteptr;
__u8 *dst_byteptr;
src = src_base + (bank_addr | src_offset);
if (src_offset < PAYLOAD_OFFSET) {
while (bytes >= 4) {
*dst = readl_relaxed(src);
bytes -= 4;
src++;
dst++;
}
} else {
while (bytes >= 4) {
*dst = __raw_readl(src);
bytes -= 4;
src++;
dst++;
}
}
if (!bytes)
return;
/* In case we have 3, 2 or 1 bytes left. The dst buffer may not be fully
* allocated.
*/
if (src_offset < PAYLOAD_OFFSET)
val = readl_relaxed(src);
else
val = __raw_readl(src);
dst_byteptr = (void *) dst;
src_byteptr = (void *) &val;
while (bytes > 0) {
*dst_byteptr = *src_byteptr;
dst_byteptr++;
src_byteptr++;
bytes--;
}
}
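/*
 * A note on the two read paths above (inferred from the use of
 * readl_relaxed() vs __raw_readl()): PTD/register memory below
 * PAYLOAD_OFFSET is little-endian and must be byte-swapped on
 * big-endian hosts, while payload memory is a raw byte stream that
 * must be copied verbatim.
 */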
static void isp1760_mem_read(struct usb_hcd *hcd, u32 src_offset, void *dst,
u32 bytes)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
isp1760_reg_write(priv->regs, ISP176x_HC_MEMORY, src_offset);
ndelay(100);
bank_reads8(priv->base, src_offset, ISP_BANK_0, dst, bytes);
}
/*
* ISP1763 does not have banked direct access to host controller memory
* and needs to use the HC_DATA register instead. Data reads/writes are
* therefore done through HC_DATA, adjusted for 16-bit access.
*/
static void isp1763_mem_read(struct usb_hcd *hcd, u16 srcaddr,
u16 *dstptr, u32 bytes)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
/* Write the starting device address to the hcd memory register */
isp1760_reg_write(priv->regs, ISP1763_HC_MEMORY, srcaddr);
ndelay(100); /* Delay between consecutive access */
/* As long there are at least 16-bit to read ... */
while (bytes >= 2) {
*dstptr = __raw_readw(priv->base + ISP1763_HC_DATA);
bytes -= 2;
dstptr++;
}
/* If there are no more bytes to read, return */
if (bytes <= 0)
return;
*((u8 *)dstptr) = (u8)(readw(priv->base + ISP1763_HC_DATA) & 0xFF);
}
static void mem_read(struct usb_hcd *hcd, u32 src_offset, __u32 *dst,
u32 bytes)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
if (!priv->is_isp1763)
return isp1760_mem_read(hcd, src_offset, (u16 *)dst, bytes);
isp1763_mem_read(hcd, (u16)src_offset, (u16 *)dst, bytes);
}
static void isp1760_mem_write(void __iomem *dst_base, u32 dst_offset,
__u32 const *src, u32 bytes)
{
__u32 __iomem *dst;
dst = dst_base + dst_offset;
if (dst_offset < PAYLOAD_OFFSET) {
while (bytes >= 4) {
writel_relaxed(*src, dst);
bytes -= 4;
src++;
dst++;
}
} else {
while (bytes >= 4) {
__raw_writel(*src, dst);
bytes -= 4;
src++;
dst++;
}
}
if (!bytes)
return;
/* in case we have 3, 2 or 1 bytes left. The buffer is allocated and the
* extra bytes should not be read by the HW.
*/
if (dst_offset < PAYLOAD_OFFSET)
writel_relaxed(*src, dst);
else
__raw_writel(*src, dst);
}
static void isp1763_mem_write(struct usb_hcd *hcd, u16 dstaddr, u16 *src,
u32 bytes)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
/* Write the starting device address to the hcd memory register */
isp1760_reg_write(priv->regs, ISP1763_HC_MEMORY, dstaddr);
ndelay(100); /* Delay between consecutive access */
while (bytes >= 2) {
/* Get and write the data; then adjust the data ptr and len */
__raw_writew(*src, priv->base + ISP1763_HC_DATA);
bytes -= 2;
src++;
}
/* If there are no more bytes to process, return */
if (bytes <= 0)
return;
/*
* The only way to get here is if there is a single byte left,
* get it and write it to the data reg;
*/
writew(*((u8 *)src), priv->base + ISP1763_HC_DATA);
}
static void mem_write(struct usb_hcd *hcd, u32 dst_offset, __u32 *src,
u32 bytes)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
if (!priv->is_isp1763)
return isp1760_mem_write(priv->base, dst_offset, src, bytes);
isp1763_mem_write(hcd, dst_offset, (u16 *)src, bytes);
}
/*
* Read and write ptds. 'ptd_offset' should be one of ISO_PTD_OFFSET,
* INT_PTD_OFFSET, and ATL_PTD_OFFSET. 'slot' should be less than 32.
*/
static void isp1760_ptd_read(struct usb_hcd *hcd, u32 ptd_offset, u32 slot,
struct ptd *ptd)
{
u16 src_offset = ptd_offset + slot * sizeof(*ptd);
struct isp1760_hcd *priv = hcd_to_priv(hcd);
isp1760_reg_write(priv->regs, ISP176x_HC_MEMORY, src_offset);
ndelay(90);
bank_reads8(priv->base, src_offset, ISP_BANK_0, (void *)ptd,
sizeof(*ptd));
}
static void isp1763_ptd_read(struct usb_hcd *hcd, u32 ptd_offset, u32 slot,
struct ptd *ptd)
{
u16 src_offset = ptd_offset + slot * sizeof(*ptd);
struct ptd_le32 le32_ptd;
isp1763_mem_read(hcd, src_offset, (u16 *)&le32_ptd, sizeof(le32_ptd));
/* Normalize the data obtained */
ptd->dw0 = le32_to_dw(le32_ptd.dw0);
ptd->dw1 = le32_to_dw(le32_ptd.dw1);
ptd->dw2 = le32_to_dw(le32_ptd.dw2);
ptd->dw3 = le32_to_dw(le32_ptd.dw3);
ptd->dw4 = le32_to_dw(le32_ptd.dw4);
ptd->dw5 = le32_to_dw(le32_ptd.dw5);
ptd->dw6 = le32_to_dw(le32_ptd.dw6);
ptd->dw7 = le32_to_dw(le32_ptd.dw7);
}
static void ptd_read(struct usb_hcd *hcd, u32 ptd_offset, u32 slot,
struct ptd *ptd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
if (!priv->is_isp1763)
return isp1760_ptd_read(hcd, ptd_offset, slot, ptd);
isp1763_ptd_read(hcd, ptd_offset, slot, ptd);
}
static void isp1763_ptd_write(struct usb_hcd *hcd, u32 ptd_offset, u32 slot,
struct ptd *cpu_ptd)
{
u16 dst_offset = ptd_offset + slot * sizeof(*cpu_ptd);
struct ptd_le32 ptd;
ptd.dw0 = dw_to_le32(cpu_ptd->dw0);
ptd.dw1 = dw_to_le32(cpu_ptd->dw1);
ptd.dw2 = dw_to_le32(cpu_ptd->dw2);
ptd.dw3 = dw_to_le32(cpu_ptd->dw3);
ptd.dw4 = dw_to_le32(cpu_ptd->dw4);
ptd.dw5 = dw_to_le32(cpu_ptd->dw5);
ptd.dw6 = dw_to_le32(cpu_ptd->dw6);
ptd.dw7 = dw_to_le32(cpu_ptd->dw7);
isp1763_mem_write(hcd, dst_offset, (u16 *)&ptd.dw0,
8 * sizeof(ptd.dw0));
}
static void isp1760_ptd_write(void __iomem *base, u32 ptd_offset, u32 slot,
struct ptd *ptd)
{
u32 dst_offset = ptd_offset + slot * sizeof(*ptd);
/*
* Make sure dw0 gets written last (after other dw's and after payload)
* since it contains the enable bit
*/
isp1760_mem_write(base, dst_offset + sizeof(ptd->dw0),
(__force u32 *)&ptd->dw1, 7 * sizeof(ptd->dw1));
wmb();
isp1760_mem_write(base, dst_offset, (__force u32 *)&ptd->dw0,
sizeof(ptd->dw0));
}
static void ptd_write(struct usb_hcd *hcd, u32 ptd_offset, u32 slot,
struct ptd *ptd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
if (!priv->is_isp1763)
return isp1760_ptd_write(priv->base, ptd_offset, slot, ptd);
isp1763_ptd_write(hcd, ptd_offset, slot, ptd);
}
/* Memory management of the 60 KiB on-chip payload memory from 0x1000 to 0xffff */
static void init_memory(struct isp1760_hcd *priv)
{
const struct isp1760_memory_layout *mem = priv->memory_layout;
int i, j, curr;
u32 payload_addr;
payload_addr = PAYLOAD_OFFSET;
for (i = 0, curr = 0; i < ARRAY_SIZE(mem->blocks); i++, curr += j) {
for (j = 0; j < mem->blocks[i]; j++) {
priv->memory_pool[curr + j].start = payload_addr;
priv->memory_pool[curr + j].size = mem->blocks_size[i];
priv->memory_pool[curr + j].free = 1;
payload_addr += priv->memory_pool[curr + j].size;
}
}
WARN_ON(payload_addr - priv->memory_pool[0].start >
mem->payload_area_size);
}
static void alloc_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
const struct isp1760_memory_layout *mem = priv->memory_layout;
int i;
WARN_ON(qtd->payload_addr);
if (!qtd->length)
return;
for (i = 0; i < mem->payload_blocks; i++) {
if (priv->memory_pool[i].size >= qtd->length &&
priv->memory_pool[i].free) {
priv->memory_pool[i].free = 0;
qtd->payload_addr = priv->memory_pool[i].start;
return;
}
}
}
static void free_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
const struct isp1760_memory_layout *mem = priv->memory_layout;
int i;
if (!qtd->payload_addr)
return;
for (i = 0; i < mem->payload_blocks; i++) {
if (priv->memory_pool[i].start == qtd->payload_addr) {
WARN_ON(priv->memory_pool[i].free);
priv->memory_pool[i].free = 1;
qtd->payload_addr = 0;
return;
}
}
dev_err(hcd->self.controller, "%s: Invalid pointer: %08x\n",
__func__, qtd->payload_addr);
WARN_ON(1);
qtd->payload_addr = 0;
}
/* reset a non-running (STS_HALT == 1) controller */
static int ehci_reset(struct usb_hcd *hcd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
hcd->state = HC_STATE_HALT;
priv->next_statechange = jiffies;
return isp1760_hcd_set_and_wait_swap(hcd, CMD_RESET, 250 * 1000);
}
static struct isp1760_qh *qh_alloc(gfp_t flags)
{
struct isp1760_qh *qh;
qh = kmem_cache_zalloc(qh_cachep, flags);
if (!qh)
return NULL;
INIT_LIST_HEAD(&qh->qh_list);
INIT_LIST_HEAD(&qh->qtd_list);
qh->slot = -1;
return qh;
}
static void qh_free(struct isp1760_qh *qh)
{
WARN_ON(!list_empty(&qh->qtd_list));
WARN_ON(qh->slot > -1);
kmem_cache_free(qh_cachep, qh);
}
/* one-time init, only for memory state */
static int priv_init(struct usb_hcd *hcd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
u32 isoc_cache;
u32 isoc_thres;
int i;
spin_lock_init(&priv->lock);
for (i = 0; i < QH_END; i++)
INIT_LIST_HEAD(&priv->qh_list[i]);
/*
* hw default: 1K periodic list heads, one per frame.
* periodic_size can shrink by USBCMD update if hcc_params allows.
*/
priv->periodic_size = DEFAULT_I_TDPS;
if (priv->is_isp1763) {
priv->i_thresh = 2;
return 0;
}
/* controllers may cache some of the periodic schedule ... */
isoc_cache = isp1760_hcd_read(hcd, HCC_ISOC_CACHE);
isoc_thres = isp1760_hcd_read(hcd, HCC_ISOC_THRES);
/* full frame cache */
if (isoc_cache)
priv->i_thresh = 8;
else /* N microframes cached */
priv->i_thresh = 2 + isoc_thres;
return 0;
}
static int isp1760_hc_setup(struct usb_hcd *hcd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
u32 atx_reset;
int result;
u32 scratch;
u32 pattern;
if (priv->is_isp1763)
pattern = 0xcafe;
else
pattern = 0xdeadcafe;
isp1760_hcd_write(hcd, HC_SCRATCH, pattern);
/*
* We do not care about the read value here; we just want to
* change the bus pattern.
*/
isp1760_hcd_read(hcd, HC_CHIP_ID_HIGH);
scratch = isp1760_hcd_read(hcd, HC_SCRATCH);
if (scratch != pattern) {
dev_err(hcd->self.controller, "Scratch test failed. 0x%08x\n",
scratch);
return -ENODEV;
}
/*
* The RESET_HC bit in the SW_RESET register is supposed to reset the
* host controller without touching the CPU interface registers, but at
* least on the ISP1761 it seems to behave as the RESET_ALL bit and
* reset the whole device. We thus can't use it here, so let's reset
* the host controller through the EHCI USB Command register. The device
* has been reset in core code anyway, so this shouldn't matter.
*/
isp1760_hcd_clear(hcd, ISO_BUF_FILL);
isp1760_hcd_clear(hcd, INT_BUF_FILL);
isp1760_hcd_clear(hcd, ATL_BUF_FILL);
isp1760_hcd_set(hcd, HC_ATL_PTD_SKIPMAP);
isp1760_hcd_set(hcd, HC_INT_PTD_SKIPMAP);
isp1760_hcd_set(hcd, HC_ISO_PTD_SKIPMAP);
result = ehci_reset(hcd);
if (result)
return result;
/* Step 11 passed */
/* ATL reset */
if (priv->is_isp1763)
atx_reset = SW_RESET_RESET_ATX;
else
atx_reset = ALL_ATX_RESET;
isp1760_hcd_set(hcd, atx_reset);
mdelay(10);
isp1760_hcd_clear(hcd, atx_reset);
if (priv->is_isp1763) {
isp1760_hcd_set(hcd, HW_OTG_DISABLE);
isp1760_hcd_set(hcd, HW_SW_SEL_HC_DC_CLEAR);
isp1760_hcd_set(hcd, HW_HC_2_DIS_CLEAR);
mdelay(10);
isp1760_hcd_set(hcd, HW_INTF_LOCK);
}
isp1760_hcd_set(hcd, HC_INT_IRQ_ENABLE);
isp1760_hcd_set(hcd, HC_ATL_IRQ_ENABLE);
return priv_init(hcd);
}
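/*
 * PTD data-start addresses appear to be expressed in 8-byte units
 * relative to the 0x400 base of chip memory, hence the subtraction
 * and 3-bit shift below (inferred from the TO_DW2_DATA_START_ADDR()
 * usage in create_ptd_atl()).
 */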
static u32 base_to_chip(u32 base)
{
return ((base - 0x400) >> 3);
}
static int last_qtd_of_urb(struct isp1760_qtd *qtd, struct isp1760_qh *qh)
{
struct urb *urb;
if (list_is_last(&qtd->qtd_list, &qh->qtd_list))
return 1;
urb = qtd->urb;
qtd = list_entry(qtd->qtd_list.next, typeof(*qtd), qtd_list);
return (qtd->urb != urb);
}
/* magic numbers that can affect system performance */
#define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
#define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
#define EHCI_TUNE_RL_TT 0
#define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
#define EHCI_TUNE_MULT_TT 1
#define EHCI_TUNE_FLS 2 /* (small) 256 frame schedule */
static void create_ptd_atl(struct isp1760_qh *qh,
struct isp1760_qtd *qtd, struct ptd *ptd)
{
u32 maxpacket;
u32 multi;
u32 rl = RL_COUNTER;
u32 nak = NAK_COUNTER;
memset(ptd, 0, sizeof(*ptd));
/* according to 3.6.2, max packet len can not be > 0x400 */
maxpacket = usb_maxpacket(qtd->urb->dev, qtd->urb->pipe);
multi = 1 + ((maxpacket >> 11) & 0x3);
maxpacket &= 0x7ff;
/* DW0 */
ptd->dw0 = DW0_VALID_BIT;
ptd->dw0 |= TO_DW0_LENGTH(qtd->length);
ptd->dw0 |= TO_DW0_MAXPACKET(maxpacket);
ptd->dw0 |= TO_DW0_ENDPOINT(usb_pipeendpoint(qtd->urb->pipe));
/* DW1 */
ptd->dw1 = TO_DW((usb_pipeendpoint(qtd->urb->pipe) >> 1));
ptd->dw1 |= TO_DW1_DEVICE_ADDR(usb_pipedevice(qtd->urb->pipe));
ptd->dw1 |= TO_DW1_PID_TOKEN(qtd->packet_type);
if (usb_pipebulk(qtd->urb->pipe))
ptd->dw1 |= DW1_TRANS_BULK;
else if (usb_pipeint(qtd->urb->pipe))
ptd->dw1 |= DW1_TRANS_INT;
if (qtd->urb->dev->speed != USB_SPEED_HIGH) {
/* split transaction */
ptd->dw1 |= DW1_TRANS_SPLIT;
if (qtd->urb->dev->speed == USB_SPEED_LOW)
ptd->dw1 |= DW1_SE_USB_LOSPEED;
ptd->dw1 |= TO_DW1_PORT_NUM(qtd->urb->dev->ttport);
ptd->dw1 |= TO_DW1_HUB_NUM(qtd->urb->dev->tt->hub->devnum);
/* SE bit for Split INT transfers */
if (usb_pipeint(qtd->urb->pipe) &&
(qtd->urb->dev->speed == USB_SPEED_LOW))
ptd->dw1 |= DW1_SE_USB_LOSPEED;
rl = 0;
nak = 0;
} else {
ptd->dw0 |= TO_DW0_MULTI(multi);
if (usb_pipecontrol(qtd->urb->pipe) ||
usb_pipebulk(qtd->urb->pipe))
ptd->dw3 |= TO_DW3_PING(qh->ping);
}
/* DW2 */
ptd->dw2 = 0;
ptd->dw2 |= TO_DW2_DATA_START_ADDR(base_to_chip(qtd->payload_addr));
ptd->dw2 |= TO_DW2_RL(rl);
/* DW3 */
ptd->dw3 |= TO_DW3_NAKCOUNT(nak);
ptd->dw3 |= TO_DW3_DATA_TOGGLE(qh->toggle);
if (usb_pipecontrol(qtd->urb->pipe)) {
if (qtd->data_buffer == qtd->urb->setup_packet)
ptd->dw3 &= ~TO_DW3_DATA_TOGGLE(1);
else if (last_qtd_of_urb(qtd, qh))
ptd->dw3 |= TO_DW3_DATA_TOGGLE(1);
}
ptd->dw3 |= DW3_ACTIVE_BIT;
/* Cerr */
ptd->dw3 |= TO_DW3_CERR(ERR_COUNTER);
}
static void transform_add_int(struct isp1760_qh *qh,
struct isp1760_qtd *qtd, struct ptd *ptd)
{
u32 usof;
u32 period;
/*
* Most of this is guessing. ISP1761 datasheet is quite unclear, and
* the algorithm from the original Philips driver code, which was
* pretty much used in this driver before as well, is quite horrendous
* and, I believe, incorrect. The code below follows the datasheet and
* USB2.0 spec as far as I can tell, and plug/unplug seems to be much
* more reliable this way (fingers crossed...).
*/
if (qtd->urb->dev->speed == USB_SPEED_HIGH) {
/* urb->interval is in units of microframes (1/8 ms) */
period = qtd->urb->interval >> 3;
if (qtd->urb->interval > 4)
usof = 0x01; /* One bit set =>
interval 1 ms * uFrame-match */
else if (qtd->urb->interval > 2)
usof = 0x22; /* Two bits set => interval 1/2 ms */
else if (qtd->urb->interval > 1)
usof = 0x55; /* Four bits set => interval 1/4 ms */
else
usof = 0xff; /* All bits set => interval 1/8 ms */
} else {
/* urb->interval is in units of frames (1 ms) */
period = qtd->urb->interval;
usof = 0x0f; /* Execute Start Split on any of the
four first uFrames */
/*
* First 8 bits in dw5 is uSCS and "specifies which uSOF the
* complete split needs to be sent. Valid only for IN." Also,
* "All bits can be set to one for every transfer." (p 82,
* ISP1761 data sheet.) 0x1c is from Philips driver. Where did
* that number come from? 0xff seems to work fine...
*/
/* ptd->dw5 = 0x1c; */
ptd->dw5 = TO_DW(0xff); /* Execute Complete Split on any uFrame */
}
period = period >> 1; /* Ensure equal or shorter period than requested */
period &= 0xf8; /* Mask off too large values and lowest unused 3 bits */
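/*
 * Worked example (illustrative): a full-speed urb->interval of 64
 * frames gives period = 64 >> 1 = 32, unchanged by the mask -- the
 * PTD is scheduled twice as often as requested, never less often.
 */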
ptd->dw2 |= TO_DW(period);
ptd->dw4 = TO_DW(usof);
}
static void create_ptd_int(struct isp1760_qh *qh,
struct isp1760_qtd *qtd, struct ptd *ptd)
{
create_ptd_atl(qh, qtd, ptd);
transform_add_int(qh, qtd, ptd);
}
static void isp1760_urb_done(struct usb_hcd *hcd, struct urb *urb)
__releases(priv->lock)
__acquires(priv->lock)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
if (!urb->unlinked) {
if (urb->status == -EINPROGRESS)
urb->status = 0;
}
if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) {
void *ptr;
for (ptr = urb->transfer_buffer;
ptr < urb->transfer_buffer + urb->transfer_buffer_length;
ptr += PAGE_SIZE)
flush_dcache_page(virt_to_page(ptr));
}
/* complete() can reenter this HCD */
usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock(&priv->lock);
usb_hcd_giveback_urb(hcd, urb, urb->status);
spin_lock(&priv->lock);
}
static struct isp1760_qtd *qtd_alloc(gfp_t flags, struct urb *urb,
u8 packet_type)
{
struct isp1760_qtd *qtd;
qtd = kmem_cache_zalloc(qtd_cachep, flags);
if (!qtd)
return NULL;
INIT_LIST_HEAD(&qtd->qtd_list);
qtd->urb = urb;
qtd->packet_type = packet_type;
qtd->status = QTD_ENQUEUED;
qtd->actual_length = 0;
return qtd;
}
static void qtd_free(struct isp1760_qtd *qtd)
{
WARN_ON(qtd->payload_addr);
kmem_cache_free(qtd_cachep, qtd);
}
static void start_bus_transfer(struct usb_hcd *hcd, u32 ptd_offset, int slot,
struct isp1760_slotinfo *slots,
struct isp1760_qtd *qtd, struct isp1760_qh *qh,
struct ptd *ptd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
const struct isp1760_memory_layout *mem = priv->memory_layout;
int skip_map;
WARN_ON((slot < 0) || (slot > mem->slot_num - 1));
WARN_ON(qtd->length && !qtd->payload_addr);
WARN_ON(slots[slot].qtd);
WARN_ON(slots[slot].qh);
WARN_ON(qtd->status != QTD_PAYLOAD_ALLOC);
if (priv->is_isp1763)
ndelay(100);
/* Make sure done map has not triggered from some unlinked transfer */
if (ptd_offset == ATL_PTD_OFFSET) {
skip_map = isp1760_hcd_read(hcd, HC_ATL_PTD_SKIPMAP);
isp1760_hcd_write(hcd, HC_ATL_PTD_SKIPMAP,
skip_map | (1 << slot));
priv->atl_done_map |= isp1760_hcd_read(hcd, HC_ATL_PTD_DONEMAP);
priv->atl_done_map &= ~(1 << slot);
} else {
skip_map = isp1760_hcd_read(hcd, HC_INT_PTD_SKIPMAP);
isp1760_hcd_write(hcd, HC_INT_PTD_SKIPMAP,
skip_map | (1 << slot));
priv->int_done_map |= isp1760_hcd_read(hcd, HC_INT_PTD_DONEMAP);
priv->int_done_map &= ~(1 << slot);
}
skip_map &= ~(1 << slot);
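/*
 * The slot's skip bit is cleared here but only written back after
 * ptd_write() below; that final write is what hands the PTD to the
 * controller.
 */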
qh->slot = slot;
qtd->status = QTD_XFER_STARTED;
slots[slot].timestamp = jiffies;
slots[slot].qtd = qtd;
slots[slot].qh = qh;
ptd_write(hcd, ptd_offset, slot, ptd);
if (ptd_offset == ATL_PTD_OFFSET)
isp1760_hcd_write(hcd, HC_ATL_PTD_SKIPMAP, skip_map);
else
isp1760_hcd_write(hcd, HC_INT_PTD_SKIPMAP, skip_map);
}
static int is_short_bulk(struct isp1760_qtd *qtd)
{
return (usb_pipebulk(qtd->urb->pipe) &&
(qtd->actual_length < qtd->length));
}
static void collect_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh,
struct list_head *urb_list)
{
struct isp1760_qtd *qtd, *qtd_next;
struct urb_listitem *urb_listitem;
int last_qtd;
list_for_each_entry_safe(qtd, qtd_next, &qh->qtd_list, qtd_list) {
if (qtd->status < QTD_XFER_COMPLETE)
break;
last_qtd = last_qtd_of_urb(qtd, qh);
if ((!last_qtd) && (qtd->status == QTD_RETIRE))
qtd_next->status = QTD_RETIRE;
if (qtd->status == QTD_XFER_COMPLETE) {
if (qtd->actual_length) {
switch (qtd->packet_type) {
case IN_PID:
mem_read(hcd, qtd->payload_addr,
qtd->data_buffer,
qtd->actual_length);
fallthrough;
case OUT_PID:
qtd->urb->actual_length +=
qtd->actual_length;
fallthrough;
case SETUP_PID:
break;
}
}
if (is_short_bulk(qtd)) {
if (qtd->urb->transfer_flags & URB_SHORT_NOT_OK)
qtd->urb->status = -EREMOTEIO;
if (!last_qtd)
qtd_next->status = QTD_RETIRE;
}
}
if (qtd->payload_addr)
free_mem(hcd, qtd);
if (last_qtd) {
if ((qtd->status == QTD_RETIRE) &&
(qtd->urb->status == -EINPROGRESS))
qtd->urb->status = -EPIPE;
/* Defer calling of urb_done() since it releases lock */
urb_listitem = kmem_cache_zalloc(urb_listitem_cachep,
GFP_ATOMIC);
if (unlikely(!urb_listitem))
break; /* Try again on next call */
urb_listitem->urb = qtd->urb;
list_add_tail(&urb_listitem->urb_list, urb_list);
}
list_del(&qtd->qtd_list);
qtd_free(qtd);
}
}
#define ENQUEUE_DEPTH 2
static void enqueue_qtds(struct usb_hcd *hcd, struct isp1760_qh *qh)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
const struct isp1760_memory_layout *mem = priv->memory_layout;
int slot_num = mem->slot_num;
int ptd_offset;
struct isp1760_slotinfo *slots;
int curr_slot, free_slot;
int n;
struct ptd ptd;
struct isp1760_qtd *qtd;
if (unlikely(list_empty(&qh->qtd_list))) {
WARN_ON(1);
return;
}
/* Make sure this endpoint's TT buffer is clean before queueing ptds */
if (qh->tt_buffer_dirty)
return;
if (usb_pipeint(list_entry(qh->qtd_list.next, struct isp1760_qtd,
qtd_list)->urb->pipe)) {
ptd_offset = INT_PTD_OFFSET;
slots = priv->int_slots;
} else {
ptd_offset = ATL_PTD_OFFSET;
slots = priv->atl_slots;
}
free_slot = -1;
for (curr_slot = 0; curr_slot < slot_num; curr_slot++) {
if ((free_slot == -1) && (slots[curr_slot].qtd == NULL))
free_slot = curr_slot;
if (slots[curr_slot].qh == qh)
break;
}
n = 0;
list_for_each_entry(qtd, &qh->qtd_list, qtd_list) {
if (qtd->status == QTD_ENQUEUED) {
WARN_ON(qtd->payload_addr);
alloc_mem(hcd, qtd);
if ((qtd->length) && (!qtd->payload_addr))
break;
if (qtd->length && (qtd->packet_type == SETUP_PID ||
qtd->packet_type == OUT_PID)) {
mem_write(hcd, qtd->payload_addr,
qtd->data_buffer, qtd->length);
}
qtd->status = QTD_PAYLOAD_ALLOC;
}
if (qtd->status == QTD_PAYLOAD_ALLOC) {
/*
if ((curr_slot > 31) && (free_slot == -1))
dev_dbg(hcd->self.controller, "%s: No slot "
"available for transfer\n", __func__);
*/
/* Start xfer for this endpoint if not already done */
if ((curr_slot > slot_num - 1) && (free_slot > -1)) {
if (usb_pipeint(qtd->urb->pipe))
create_ptd_int(qh, qtd, &ptd);
else
create_ptd_atl(qh, qtd, &ptd);
start_bus_transfer(hcd, ptd_offset, free_slot,
slots, qtd, qh, &ptd);
curr_slot = free_slot;
}
n++;
if (n >= ENQUEUE_DEPTH)
break;
}
}
}
static void schedule_ptds(struct usb_hcd *hcd)
{
struct isp1760_hcd *priv;
struct isp1760_qh *qh, *qh_next;
struct list_head *ep_queue;
LIST_HEAD(urb_list);
struct urb_listitem *urb_listitem, *urb_listitem_next;
int i;
if (!hcd) {
WARN_ON(1);
return;
}
priv = hcd_to_priv(hcd);
/*
* check finished/retired xfers, transfer payloads, call urb_done()
*/
for (i = 0; i < QH_END; i++) {
ep_queue = &priv->qh_list[i];
list_for_each_entry_safe(qh, qh_next, ep_queue, qh_list) {
collect_qtds(hcd, qh, &urb_list);
if (list_empty(&qh->qtd_list))
list_del(&qh->qh_list);
}
}
list_for_each_entry_safe(urb_listitem, urb_listitem_next, &urb_list,
urb_list) {
isp1760_urb_done(hcd, urb_listitem->urb);
kmem_cache_free(urb_listitem_cachep, urb_listitem);
}
/*
* Schedule packets for transfer.
*
* According to USB2.0 specification:
*
* 1st prio: interrupt xfers, up to 80 % of bandwidth
* 2nd prio: control xfers
* 3rd prio: bulk xfers
*
* ... but let's use a simpler scheme here (mostly because ISP1761 doc
* is very unclear on how to prioritize traffic):
*
* 1) Enqueue any queued control transfers, as long as payload chip mem
* and PTD ATL slots are available.
* 2) Enqueue any queued INT transfers, as long as payload chip mem
* and PTD INT slots are available.
* 3) Enqueue any queued bulk transfers, as long as payload chip mem
* and PTD ATL slots are available.
*
* Use double buffering (ENQUEUE_DEPTH==2) as a compromise between
* conservation of chip mem and performance.
*
* I'm sure this scheme could be improved upon!
*/
for (i = 0; i < QH_END; i++) {
ep_queue = &priv->qh_list[i];
list_for_each_entry_safe(qh, qh_next, ep_queue, qh_list)
enqueue_qtds(hcd, qh);
}
}
#define PTD_STATE_QTD_DONE 1
#define PTD_STATE_QTD_RELOAD 2
#define PTD_STATE_URB_RETIRE 3
static int check_int_transfer(struct usb_hcd *hcd, struct ptd *ptd,
struct urb *urb)
{
u32 dw4;
int i;
dw4 = TO_U32(ptd->dw4);
dw4 >>= 8;
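/*
 * The low byte of dw4 holds the uSOF bitmap; the remaining 24 bits
 * encode a 3-bit status code for each of the 8 microframes, examined
 * one at a time in the loop below (layout inferred from the code).
 */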
/* FIXME: ISP1761 datasheet does not say what to do with these. Do we
need to handle these errors? Is it done in hardware? */
if (ptd->dw3 & DW3_HALT_BIT) {
urb->status = -EPROTO; /* Default unknown error */
for (i = 0; i < 8; i++) {
switch (dw4 & 0x7) {
case INT_UNDERRUN:
dev_dbg(hcd->self.controller, "%s: underrun "
"during uFrame %d\n",
__func__, i);
urb->status = -ECOMM; /* Could not write data */
break;
case INT_EXACT:
dev_dbg(hcd->self.controller, "%s: transaction "
"error during uFrame %d\n",
__func__, i);
urb->status = -EPROTO; /* timeout, bad CRC, PID
error etc. */
break;
case INT_BABBLE:
dev_dbg(hcd->self.controller, "%s: babble "
"error during uFrame %d\n",
__func__, i);
urb->status = -EOVERFLOW;
break;
}
dw4 >>= 3;
}
return PTD_STATE_URB_RETIRE;
}
return PTD_STATE_QTD_DONE;
}
static int check_atl_transfer(struct usb_hcd *hcd, struct ptd *ptd,
struct urb *urb)
{
WARN_ON(!ptd);
if (ptd->dw3 & DW3_HALT_BIT) {
if (ptd->dw3 & DW3_BABBLE_BIT)
urb->status = -EOVERFLOW;
else if (FROM_DW3_CERR(ptd->dw3))
urb->status = -EPIPE; /* Stall */
else
urb->status = -EPROTO; /* Unknown */
/*
dev_dbg(hcd->self.controller, "%s: ptd error:\n"
" dw0: %08x dw1: %08x dw2: %08x dw3: %08x\n"
" dw4: %08x dw5: %08x dw6: %08x dw7: %08x\n",
__func__,
ptd->dw0, ptd->dw1, ptd->dw2, ptd->dw3,
ptd->dw4, ptd->dw5, ptd->dw6, ptd->dw7);
*/
return PTD_STATE_URB_RETIRE;
}
if ((ptd->dw3 & DW3_ERROR_BIT) && (ptd->dw3 & DW3_ACTIVE_BIT)) {
/* Transfer Error, *but* active and no HALT -> reload */
dev_dbg(hcd->self.controller, "PID error; reloading ptd\n");
return PTD_STATE_QTD_RELOAD;
}
if (!FROM_DW3_NAKCOUNT(ptd->dw3) && (ptd->dw3 & DW3_ACTIVE_BIT)) {
/*
* NAKs are handled in HW by the chip. Usually if the
* device is not able to send data fast enough.
* This happens mostly on slower hardware.
*/
return PTD_STATE_QTD_RELOAD;
}
return PTD_STATE_QTD_DONE;
}
static void handle_done_ptds(struct usb_hcd *hcd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
struct ptd ptd;
struct isp1760_qh *qh;
int slot;
int state;
struct isp1760_slotinfo *slots;
u32 ptd_offset;
struct isp1760_qtd *qtd;
int modified;
int skip_map;
skip_map = isp1760_hcd_read(hcd, HC_INT_PTD_SKIPMAP);
priv->int_done_map &= ~skip_map;
skip_map = isp1760_hcd_read(hcd, HC_ATL_PTD_SKIPMAP);
priv->atl_done_map &= ~skip_map;
modified = priv->int_done_map || priv->atl_done_map;
while (priv->int_done_map || priv->atl_done_map) {
if (priv->int_done_map) {
/* INT ptd */
slot = __ffs(priv->int_done_map);
priv->int_done_map &= ~(1 << slot);
slots = priv->int_slots;
/* This should not trigger, and could be removed if
no one has any problems with it triggering: */
if (!slots[slot].qh) {
WARN_ON(1);
continue;
}
ptd_offset = INT_PTD_OFFSET;
ptd_read(hcd, INT_PTD_OFFSET, slot, &ptd);
state = check_int_transfer(hcd, &ptd,
slots[slot].qtd->urb);
} else {
/* ATL ptd */
slot = __ffs(priv->atl_done_map);
priv->atl_done_map &= ~(1 << slot);
slots = priv->atl_slots;
/* This should not trigger, and could be removed if
no one has any problems with it triggering: */
if (!slots[slot].qh) {
WARN_ON(1);
continue;
}
ptd_offset = ATL_PTD_OFFSET;
ptd_read(hcd, ATL_PTD_OFFSET, slot, &ptd);
state = check_atl_transfer(hcd, &ptd,
slots[slot].qtd->urb);
}
qtd = slots[slot].qtd;
slots[slot].qtd = NULL;
qh = slots[slot].qh;
slots[slot].qh = NULL;
qh->slot = -1;
WARN_ON(qtd->status != QTD_XFER_STARTED);
switch (state) {
case PTD_STATE_QTD_DONE:
if ((usb_pipeint(qtd->urb->pipe)) &&
(qtd->urb->dev->speed != USB_SPEED_HIGH))
qtd->actual_length =
FROM_DW3_SCS_NRBYTESTRANSFERRED(ptd.dw3);
else
qtd->actual_length =
FROM_DW3_NRBYTESTRANSFERRED(ptd.dw3);
qtd->status = QTD_XFER_COMPLETE;
if (list_is_last(&qtd->qtd_list, &qh->qtd_list) ||
is_short_bulk(qtd))
qtd = NULL;
else
qtd = list_entry(qtd->qtd_list.next,
typeof(*qtd), qtd_list);
qh->toggle = FROM_DW3_DATA_TOGGLE(ptd.dw3);
qh->ping = FROM_DW3_PING(ptd.dw3);
break;
		case PTD_STATE_QTD_RELOAD: /* QTD_RETRY, for ATLs only */
qtd->status = QTD_PAYLOAD_ALLOC;
ptd.dw0 |= DW0_VALID_BIT;
/* RL counter = ERR counter */
ptd.dw3 &= ~TO_DW3_NAKCOUNT(0xf);
ptd.dw3 |= TO_DW3_NAKCOUNT(FROM_DW2_RL(ptd.dw2));
ptd.dw3 &= ~TO_DW3_CERR(3);
ptd.dw3 |= TO_DW3_CERR(ERR_COUNTER);
qh->toggle = FROM_DW3_DATA_TOGGLE(ptd.dw3);
qh->ping = FROM_DW3_PING(ptd.dw3);
break;
case PTD_STATE_URB_RETIRE:
qtd->status = QTD_RETIRE;
if ((qtd->urb->dev->speed != USB_SPEED_HIGH) &&
(qtd->urb->status != -EPIPE) &&
(qtd->urb->status != -EREMOTEIO)) {
qh->tt_buffer_dirty = 1;
if (usb_hub_clear_tt_buffer(qtd->urb))
/* Clear failed; let's hope things work
anyway */
qh->tt_buffer_dirty = 0;
}
qtd = NULL;
qh->toggle = 0;
qh->ping = 0;
break;
default:
WARN_ON(1);
continue;
}
if (qtd && (qtd->status == QTD_PAYLOAD_ALLOC)) {
if (slots == priv->int_slots) {
if (state == PTD_STATE_QTD_RELOAD)
dev_err(hcd->self.controller,
"%s: PTD_STATE_QTD_RELOAD on "
"interrupt packet\n", __func__);
if (state != PTD_STATE_QTD_RELOAD)
create_ptd_int(qh, qtd, &ptd);
} else {
if (state != PTD_STATE_QTD_RELOAD)
create_ptd_atl(qh, qtd, &ptd);
}
start_bus_transfer(hcd, ptd_offset, slot, slots, qtd,
qh, &ptd);
}
}
if (modified)
schedule_ptds(hcd);
}
static irqreturn_t isp1760_irq(struct usb_hcd *hcd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
irqreturn_t irqret = IRQ_NONE;
u32 int_reg;
u32 imask;
spin_lock(&priv->lock);
if (!(hcd->state & HC_STATE_RUNNING))
goto leave;
imask = isp1760_hcd_read(hcd, HC_INTERRUPT);
if (unlikely(!imask))
goto leave;
int_reg = priv->is_isp1763 ? ISP1763_HC_INTERRUPT :
ISP176x_HC_INTERRUPT;
isp1760_reg_write(priv->regs, int_reg, imask);
priv->int_done_map |= isp1760_hcd_read(hcd, HC_INT_PTD_DONEMAP);
priv->atl_done_map |= isp1760_hcd_read(hcd, HC_ATL_PTD_DONEMAP);
handle_done_ptds(hcd);
irqret = IRQ_HANDLED;
leave:
spin_unlock(&priv->lock);
return irqret;
}
/*
* Workaround for problem described in chip errata 2:
*
* Sometimes interrupts are not generated when ATL (not INT?) completion occurs.
* One solution suggested in the errata is to use SOF interrupts _instead_of_
* ATL done interrupts (the "instead of" might be important since it seems
* enabling ATL interrupts also causes the chip to sometimes - rarely - "forget"
* to set the PTD's done bit in addition to not generating an interrupt!).
*
* So if we use SOF + ATL interrupts, we sometimes get stale PTDs since their
* done bit is not being set. This is bad - it blocks the endpoint until reboot.
*
* If we use SOF interrupts only, we get latency between ptd completion and the
 * actual handling. This is very noticeable in testusb runs, which take
 * several minutes longer without ATL interrupts.
*
* A better solution is to run the code below every SLOT_CHECK_PERIOD ms. If it
* finds active ATL slots which are older than SLOT_TIMEOUT ms, it checks the
* slot's ACTIVE and VALID bits. If these are not set, the ptd is considered
* completed and its done map bit is set.
*
* The values of SLOT_TIMEOUT and SLOT_CHECK_PERIOD have been arbitrarily chosen
* not to cause too much lag when this HW bug occurs, while still hopefully
* ensuring that the check does not falsely trigger.
*/
#define SLOT_TIMEOUT 300
#define SLOT_CHECK_PERIOD 200
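/*
 * Worst-case detection latency with these values: a slot becomes eligible
 * for checking SLOT_TIMEOUT ms after it was queued, and the timer fires
 * every SLOT_CHECK_PERIOD ms, so a lost completion is picked up after at
 * most roughly SLOT_TIMEOUT + SLOT_CHECK_PERIOD = 500 ms.
 */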
static struct timer_list errata2_timer;
static struct usb_hcd *errata2_timer_hcd;
static void errata2_function(struct timer_list *unused)
{
struct usb_hcd *hcd = errata2_timer_hcd;
struct isp1760_hcd *priv = hcd_to_priv(hcd);
const struct isp1760_memory_layout *mem = priv->memory_layout;
int slot;
struct ptd ptd;
unsigned long spinflags;
spin_lock_irqsave(&priv->lock, spinflags);
for (slot = 0; slot < mem->slot_num; slot++)
if (priv->atl_slots[slot].qh && time_after(jiffies,
priv->atl_slots[slot].timestamp +
msecs_to_jiffies(SLOT_TIMEOUT))) {
ptd_read(hcd, ATL_PTD_OFFSET, slot, &ptd);
if (!FROM_DW0_VALID(ptd.dw0) &&
!FROM_DW3_ACTIVE(ptd.dw3))
priv->atl_done_map |= 1 << slot;
}
if (priv->atl_done_map)
handle_done_ptds(hcd);
spin_unlock_irqrestore(&priv->lock, spinflags);
errata2_timer.expires = jiffies + msecs_to_jiffies(SLOT_CHECK_PERIOD);
add_timer(&errata2_timer);
}
static int isp1763_run(struct usb_hcd *hcd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
int retval;
u32 chipid_h;
u32 chipid_l;
u32 chip_rev;
u32 ptd_atl_int;
u32 ptd_iso;
hcd->uses_new_polling = 1;
hcd->state = HC_STATE_RUNNING;
chipid_h = isp1760_hcd_read(hcd, HC_CHIP_ID_HIGH);
chipid_l = isp1760_hcd_read(hcd, HC_CHIP_ID_LOW);
chip_rev = isp1760_hcd_read(hcd, HC_CHIP_REV);
dev_info(hcd->self.controller, "USB ISP %02x%02x HW rev. %d started\n",
chipid_h, chipid_l, chip_rev);
isp1760_hcd_clear(hcd, ISO_BUF_FILL);
isp1760_hcd_clear(hcd, INT_BUF_FILL);
isp1760_hcd_clear(hcd, ATL_BUF_FILL);
isp1760_hcd_set(hcd, HC_ATL_PTD_SKIPMAP);
isp1760_hcd_set(hcd, HC_INT_PTD_SKIPMAP);
isp1760_hcd_set(hcd, HC_ISO_PTD_SKIPMAP);
ndelay(100);
isp1760_hcd_clear(hcd, HC_ATL_PTD_DONEMAP);
isp1760_hcd_clear(hcd, HC_INT_PTD_DONEMAP);
isp1760_hcd_clear(hcd, HC_ISO_PTD_DONEMAP);
isp1760_hcd_set(hcd, HW_OTG_DISABLE);
isp1760_reg_write(priv->regs, ISP1763_HC_OTG_CTRL_CLEAR, BIT(7));
isp1760_reg_write(priv->regs, ISP1763_HC_OTG_CTRL_CLEAR, BIT(15));
mdelay(10);
isp1760_hcd_set(hcd, HC_INT_IRQ_ENABLE);
isp1760_hcd_set(hcd, HC_ATL_IRQ_ENABLE);
isp1760_hcd_set(hcd, HW_GLOBAL_INTR_EN);
isp1760_hcd_clear(hcd, HC_ATL_IRQ_MASK_AND);
isp1760_hcd_clear(hcd, HC_INT_IRQ_MASK_AND);
isp1760_hcd_clear(hcd, HC_ISO_IRQ_MASK_AND);
isp1760_hcd_set(hcd, HC_ATL_IRQ_MASK_OR);
isp1760_hcd_set(hcd, HC_INT_IRQ_MASK_OR);
isp1760_hcd_set(hcd, HC_ISO_IRQ_MASK_OR);
ptd_atl_int = 0x8000;
ptd_iso = 0x0001;
isp1760_hcd_write(hcd, HC_ATL_PTD_LASTPTD, ptd_atl_int);
isp1760_hcd_write(hcd, HC_INT_PTD_LASTPTD, ptd_atl_int);
isp1760_hcd_write(hcd, HC_ISO_PTD_LASTPTD, ptd_iso);
isp1760_hcd_set(hcd, ATL_BUF_FILL);
isp1760_hcd_set(hcd, INT_BUF_FILL);
isp1760_hcd_clear(hcd, CMD_LRESET);
isp1760_hcd_clear(hcd, CMD_RESET);
retval = isp1760_hcd_set_and_wait(hcd, CMD_RUN, 250 * 1000);
if (retval)
return retval;
down_write(&ehci_cf_port_reset_rwsem);
retval = isp1760_hcd_set_and_wait(hcd, FLAG_CF, 250 * 1000);
up_write(&ehci_cf_port_reset_rwsem);
if (retval)
return retval;
return 0;
}
static int isp1760_run(struct usb_hcd *hcd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
int retval;
u32 chipid_h;
u32 chipid_l;
u32 chip_rev;
u32 ptd_atl_int;
u32 ptd_iso;
	/*
	 * The ISP1763 has some differences in the setup and in the order used
	 * to enable the ports, disable OTG, set up the buffers, and configure
	 * the ATL, INT and ISO status. So just handle it as a separate
	 * sequence.
	 */
if (priv->is_isp1763)
return isp1763_run(hcd);
hcd->uses_new_polling = 1;
hcd->state = HC_STATE_RUNNING;
/* Set PTD interrupt AND & OR maps */
isp1760_hcd_clear(hcd, HC_ATL_IRQ_MASK_AND);
isp1760_hcd_clear(hcd, HC_INT_IRQ_MASK_AND);
isp1760_hcd_clear(hcd, HC_ISO_IRQ_MASK_AND);
isp1760_hcd_set(hcd, HC_ATL_IRQ_MASK_OR);
isp1760_hcd_set(hcd, HC_INT_IRQ_MASK_OR);
isp1760_hcd_set(hcd, HC_ISO_IRQ_MASK_OR);
/* step 23 passed */
isp1760_hcd_set(hcd, HW_GLOBAL_INTR_EN);
isp1760_hcd_clear(hcd, CMD_LRESET);
isp1760_hcd_clear(hcd, CMD_RESET);
retval = isp1760_hcd_set_and_wait(hcd, CMD_RUN, 250 * 1000);
if (retval)
return retval;
	/*
	 * XXX
	 * Spec says to write FLAG_CF as the last config action; the EHCI code
	 * grabs the semaphore while doing so.
	 */
down_write(&ehci_cf_port_reset_rwsem);
retval = isp1760_hcd_set_and_wait(hcd, FLAG_CF, 250 * 1000);
up_write(&ehci_cf_port_reset_rwsem);
if (retval)
return retval;
errata2_timer_hcd = hcd;
timer_setup(&errata2_timer, errata2_function, 0);
errata2_timer.expires = jiffies + msecs_to_jiffies(SLOT_CHECK_PERIOD);
add_timer(&errata2_timer);
chipid_h = isp1760_hcd_read(hcd, HC_CHIP_ID_HIGH);
chipid_l = isp1760_hcd_read(hcd, HC_CHIP_ID_LOW);
chip_rev = isp1760_hcd_read(hcd, HC_CHIP_REV);
dev_info(hcd->self.controller, "USB ISP %02x%02x HW rev. %d started\n",
chipid_h, chipid_l, chip_rev);
/* PTD Register Init Part 2, Step 28 */
/* Setup registers controlling PTD checking */
ptd_atl_int = 0x80000000;
ptd_iso = 0x00000001;
isp1760_hcd_write(hcd, HC_ATL_PTD_LASTPTD, ptd_atl_int);
isp1760_hcd_write(hcd, HC_INT_PTD_LASTPTD, ptd_atl_int);
isp1760_hcd_write(hcd, HC_ISO_PTD_LASTPTD, ptd_iso);
isp1760_hcd_set(hcd, HC_ATL_PTD_SKIPMAP);
isp1760_hcd_set(hcd, HC_INT_PTD_SKIPMAP);
isp1760_hcd_set(hcd, HC_ISO_PTD_SKIPMAP);
isp1760_hcd_set(hcd, ATL_BUF_FILL);
isp1760_hcd_set(hcd, INT_BUF_FILL);
	/* GRR this is run-once init(), being done every time the HC starts.
	 * So long as they're part of class devices, we can't do it in init()
	 * since the class device isn't created that early.
	 */
return 0;
}
static int qtd_fill(struct isp1760_qtd *qtd, void *databuffer, size_t len)
{
qtd->data_buffer = databuffer;
qtd->length = len;
return qtd->length;
}
static void qtd_list_free(struct list_head *qtd_list)
{
struct isp1760_qtd *qtd, *qtd_next;
list_for_each_entry_safe(qtd, qtd_next, qtd_list, qtd_list) {
list_del(&qtd->qtd_list);
qtd_free(qtd);
}
}
/*
 * Split urb->transfer_buffer into a list of qtds, each no larger than the
 * biggest payload memory block. Also set the PID type (SETUP/IN/OUT) for
 * each qtd.
 */
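/*
 * Example: a control GET_DESCRIPTOR transfer with an 18-byte data stage
 * becomes three qtds: a SETUP qtd carrying the 8-byte usb_ctrlrequest, an
 * IN qtd for the 18 data bytes, and a zero-length OUT qtd appended below
 * as the status-stage ack.
 */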
static void packetize_urb(struct usb_hcd *hcd,
struct urb *urb, struct list_head *head, gfp_t flags)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
const struct isp1760_memory_layout *mem = priv->memory_layout;
struct isp1760_qtd *qtd;
void *buf;
int len, maxpacketsize;
u8 packet_type;
/*
* URBs map to sequences of QTDs: one logical transaction
*/
if (!urb->transfer_buffer && urb->transfer_buffer_length) {
		/* XXX This looks like a usb storage / SCSI bug */
dev_err(hcd->self.controller,
"buf is null, dma is %08lx len is %d\n",
			(unsigned long)urb->transfer_dma,
urb->transfer_buffer_length);
WARN_ON(1);
}
if (usb_pipein(urb->pipe))
packet_type = IN_PID;
else
packet_type = OUT_PID;
if (usb_pipecontrol(urb->pipe)) {
qtd = qtd_alloc(flags, urb, SETUP_PID);
if (!qtd)
goto cleanup;
qtd_fill(qtd, urb->setup_packet, sizeof(struct usb_ctrlrequest));
list_add_tail(&qtd->qtd_list, head);
/* for zero length DATA stages, STATUS is always IN */
if (urb->transfer_buffer_length == 0)
packet_type = IN_PID;
}
maxpacketsize = usb_maxpacket(urb->dev, urb->pipe);
/*
* buffer gets wrapped in one or more qtds;
* last one may be "short" (including zero len)
* and may serve as a control status ack
*/
buf = urb->transfer_buffer;
len = urb->transfer_buffer_length;
for (;;) {
int this_qtd_len;
qtd = qtd_alloc(flags, urb, packet_type);
if (!qtd)
goto cleanup;
if (len > mem->blocks_size[ISP176x_BLOCK_NUM - 1])
this_qtd_len = mem->blocks_size[ISP176x_BLOCK_NUM - 1];
else
this_qtd_len = len;
this_qtd_len = qtd_fill(qtd, buf, this_qtd_len);
list_add_tail(&qtd->qtd_list, head);
len -= this_qtd_len;
buf += this_qtd_len;
if (len <= 0)
break;
}
/*
* control requests may need a terminating data "status" ack;
* bulk ones may need a terminating short packet (zero length).
*/
if (urb->transfer_buffer_length != 0) {
int one_more = 0;
if (usb_pipecontrol(urb->pipe)) {
one_more = 1;
if (packet_type == IN_PID)
packet_type = OUT_PID;
else
packet_type = IN_PID;
} else if (usb_pipebulk(urb->pipe) && maxpacketsize
&& (urb->transfer_flags & URB_ZERO_PACKET)
&& !(urb->transfer_buffer_length %
maxpacketsize)) {
one_more = 1;
}
if (one_more) {
qtd = qtd_alloc(flags, urb, packet_type);
if (!qtd)
goto cleanup;
/* never any data in such packets */
qtd_fill(qtd, NULL, 0);
list_add_tail(&qtd->qtd_list, head);
}
}
return;
cleanup:
qtd_list_free(head);
}
static int isp1760_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
struct list_head *ep_queue;
struct isp1760_qh *qh, *qhit;
unsigned long spinflags;
LIST_HEAD(new_qtds);
int retval;
int qh_in_queue;
switch (usb_pipetype(urb->pipe)) {
case PIPE_CONTROL:
ep_queue = &priv->qh_list[QH_CONTROL];
break;
case PIPE_BULK:
ep_queue = &priv->qh_list[QH_BULK];
break;
case PIPE_INTERRUPT:
if (urb->interval < 0)
return -EINVAL;
/* FIXME: Check bandwidth */
ep_queue = &priv->qh_list[QH_INTERRUPT];
break;
case PIPE_ISOCHRONOUS:
dev_err(hcd->self.controller, "%s: isochronous USB packets "
"not yet supported\n",
__func__);
return -EPIPE;
default:
dev_err(hcd->self.controller, "%s: unknown pipe type\n",
__func__);
return -EPIPE;
}
if (usb_pipein(urb->pipe))
urb->actual_length = 0;
packetize_urb(hcd, urb, &new_qtds, mem_flags);
if (list_empty(&new_qtds))
return -ENOMEM;
spin_lock_irqsave(&priv->lock, spinflags);
if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
retval = -ESHUTDOWN;
qtd_list_free(&new_qtds);
goto out;
}
retval = usb_hcd_link_urb_to_ep(hcd, urb);
if (retval) {
qtd_list_free(&new_qtds);
goto out;
}
qh = urb->ep->hcpriv;
if (qh) {
qh_in_queue = 0;
list_for_each_entry(qhit, ep_queue, qh_list) {
if (qhit == qh) {
qh_in_queue = 1;
break;
}
}
if (!qh_in_queue)
list_add_tail(&qh->qh_list, ep_queue);
} else {
qh = qh_alloc(GFP_ATOMIC);
if (!qh) {
retval = -ENOMEM;
usb_hcd_unlink_urb_from_ep(hcd, urb);
qtd_list_free(&new_qtds);
goto out;
}
list_add_tail(&qh->qh_list, ep_queue);
urb->ep->hcpriv = qh;
}
list_splice_tail(&new_qtds, &qh->qtd_list);
schedule_ptds(hcd);
out:
spin_unlock_irqrestore(&priv->lock, spinflags);
return retval;
}
static void kill_transfer(struct usb_hcd *hcd, struct urb *urb,
struct isp1760_qh *qh)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
int skip_map;
WARN_ON(qh->slot == -1);
/* We need to forcefully reclaim the slot since some transfers never
return, e.g. interrupt transfers and NAKed bulk transfers. */
if (usb_pipecontrol(urb->pipe) || usb_pipebulk(urb->pipe)) {
if (qh->slot != -1) {
skip_map = isp1760_hcd_read(hcd, HC_ATL_PTD_SKIPMAP);
skip_map |= (1 << qh->slot);
isp1760_hcd_write(hcd, HC_ATL_PTD_SKIPMAP, skip_map);
ndelay(100);
}
priv->atl_slots[qh->slot].qh = NULL;
priv->atl_slots[qh->slot].qtd = NULL;
} else {
if (qh->slot != -1) {
skip_map = isp1760_hcd_read(hcd, HC_INT_PTD_SKIPMAP);
skip_map |= (1 << qh->slot);
isp1760_hcd_write(hcd, HC_INT_PTD_SKIPMAP, skip_map);
}
priv->int_slots[qh->slot].qh = NULL;
priv->int_slots[qh->slot].qtd = NULL;
}
qh->slot = -1;
}
/*
 * Retire the qtds beginning at 'qtd', all belonging to the same urb, killing
* any active transfer belonging to the urb in the process.
*/
static void dequeue_urb_from_qtd(struct usb_hcd *hcd, struct isp1760_qh *qh,
struct isp1760_qtd *qtd)
{
struct urb *urb;
int urb_was_running;
urb = qtd->urb;
urb_was_running = 0;
list_for_each_entry_from(qtd, &qh->qtd_list, qtd_list) {
if (qtd->urb != urb)
break;
if (qtd->status >= QTD_XFER_STARTED)
urb_was_running = 1;
if (last_qtd_of_urb(qtd, qh) &&
(qtd->status >= QTD_XFER_COMPLETE))
urb_was_running = 0;
if (qtd->status == QTD_XFER_STARTED)
kill_transfer(hcd, urb, qh);
qtd->status = QTD_RETIRE;
}
if ((urb->dev->speed != USB_SPEED_HIGH) && urb_was_running) {
qh->tt_buffer_dirty = 1;
if (usb_hub_clear_tt_buffer(urb))
/* Clear failed; let's hope things work anyway */
qh->tt_buffer_dirty = 0;
}
}
static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
int status)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
unsigned long spinflags;
struct isp1760_qh *qh;
struct isp1760_qtd *qtd;
int retval = 0;
spin_lock_irqsave(&priv->lock, spinflags);
retval = usb_hcd_check_unlink_urb(hcd, urb, status);
if (retval)
goto out;
qh = urb->ep->hcpriv;
if (!qh) {
retval = -EINVAL;
goto out;
}
list_for_each_entry(qtd, &qh->qtd_list, qtd_list)
if (qtd->urb == urb) {
dequeue_urb_from_qtd(hcd, qh, qtd);
list_move(&qtd->qtd_list, &qh->qtd_list);
break;
}
urb->status = status;
schedule_ptds(hcd);
out:
spin_unlock_irqrestore(&priv->lock, spinflags);
return retval;
}
static void isp1760_endpoint_disable(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
unsigned long spinflags;
struct isp1760_qh *qh, *qh_iter;
int i;
spin_lock_irqsave(&priv->lock, spinflags);
qh = ep->hcpriv;
if (!qh)
goto out;
WARN_ON(!list_empty(&qh->qtd_list));
for (i = 0; i < QH_END; i++)
list_for_each_entry(qh_iter, &priv->qh_list[i], qh_list)
if (qh_iter == qh) {
list_del(&qh_iter->qh_list);
i = QH_END;
break;
}
qh_free(qh);
ep->hcpriv = NULL;
schedule_ptds(hcd);
out:
spin_unlock_irqrestore(&priv->lock, spinflags);
}
static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
u32 status = 0;
int retval = 1;
unsigned long flags;
/* if !PM, root hub timers won't get shut down ... */
if (!HC_IS_RUNNING(hcd->state))
return 0;
/* init status to no-changes */
buf[0] = 0;
spin_lock_irqsave(&priv->lock, flags);
if (isp1760_hcd_is_set(hcd, PORT_OWNER) &&
isp1760_hcd_is_set(hcd, PORT_CSC)) {
isp1760_hcd_clear(hcd, PORT_CSC);
goto done;
}
/*
* Return status information even for ports with OWNER set.
* Otherwise hub_wq wouldn't see the disconnect event when a
* high-speed device is switched over to the companion
* controller by the user.
*/
if (isp1760_hcd_is_set(hcd, PORT_CSC) ||
(isp1760_hcd_is_set(hcd, PORT_RESUME) &&
time_after_eq(jiffies, priv->reset_done))) {
		buf[0] |= 1 << (0 + 1);
status = STS_PCD;
}
/* FIXME autosuspend idle root hubs */
done:
spin_unlock_irqrestore(&priv->lock, flags);
return status ? retval : 0;
}
static void isp1760_hub_descriptor(struct isp1760_hcd *priv,
struct usb_hub_descriptor *desc)
{
int ports;
u16 temp;
ports = isp1760_hcd_n_ports(priv->hcd);
desc->bDescriptorType = USB_DT_HUB;
	/* EHCI 1.0, section 2.3.9 says 20 ms max */
desc->bPwrOn2PwrGood = 10;
desc->bHubContrCurrent = 0;
desc->bNbrPorts = ports;
temp = 1 + (ports / 8);
desc->bDescLength = 7 + 2 * temp;
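	/*
	 * For example, with a single root hub port: temp = 1 + 1/8 = 1, so
	 * bDescLength = 7 + 2 * 1 = 9 bytes.
	 */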
/* ports removable, and usb 1.0 legacy PortPwrCtrlMask */
memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
/* per-port overcurrent reporting */
temp = HUB_CHAR_INDV_PORT_OCPM;
if (isp1760_hcd_ppc_is_set(priv->hcd))
/* per-port power control */
temp |= HUB_CHAR_INDV_PORT_LPSM;
else
/* no power switching */
temp |= HUB_CHAR_NO_LPSM;
desc->wHubCharacteristics = cpu_to_le16(temp);
}
#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
static void check_reset_complete(struct usb_hcd *hcd, int index)
{
if (!(isp1760_hcd_is_set(hcd, PORT_CONNECT)))
return;
/* if reset finished and it's still not enabled -- handoff */
if (!isp1760_hcd_is_set(hcd, PORT_PE)) {
dev_info(hcd->self.controller,
"port %d full speed --> companion\n", index + 1);
isp1760_hcd_set(hcd, PORT_OWNER);
isp1760_hcd_clear(hcd, PORT_CSC);
} else {
dev_info(hcd->self.controller, "port %d high speed\n",
index + 1);
}
return;
}
static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
u16 wValue, u16 wIndex, char *buf, u16 wLength)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
u32 status;
unsigned long flags;
int retval = 0;
int ports;
ports = isp1760_hcd_n_ports(hcd);
/*
* FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
* HCS_INDICATOR may say we can change LEDs to off/amber/green.
* (track current state ourselves) ... blink for diagnostics,
* power, "this is the one", etc. EHCI spec supports this.
*/
spin_lock_irqsave(&priv->lock, flags);
switch (typeReq) {
case ClearHubFeature:
switch (wValue) {
case C_HUB_LOCAL_POWER:
case C_HUB_OVER_CURRENT:
/* no hub-wide feature/status flags */
break;
default:
goto error;
}
break;
case ClearPortFeature:
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
		/*
		 * Even if OWNER is set (i.e. the port is owned by the
		 * companion controller), hub_wq needs to be able to clear
		 * the port-change status bits (especially
		 * USB_PORT_STAT_C_CONNECTION).
		 */
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
isp1760_hcd_clear(hcd, PORT_PE);
break;
case USB_PORT_FEAT_C_ENABLE:
/* XXX error? */
break;
case USB_PORT_FEAT_SUSPEND:
if (isp1760_hcd_is_set(hcd, PORT_RESET))
goto error;
if (isp1760_hcd_is_set(hcd, PORT_SUSPEND)) {
if (!isp1760_hcd_is_set(hcd, PORT_PE))
goto error;
/* resume signaling for 20 msec */
isp1760_hcd_clear(hcd, PORT_CSC);
isp1760_hcd_set(hcd, PORT_RESUME);
priv->reset_done = jiffies +
msecs_to_jiffies(USB_RESUME_TIMEOUT);
}
break;
case USB_PORT_FEAT_C_SUSPEND:
/* we auto-clear this feature */
break;
case USB_PORT_FEAT_POWER:
if (isp1760_hcd_ppc_is_set(hcd))
isp1760_hcd_clear(hcd, PORT_POWER);
break;
case USB_PORT_FEAT_C_CONNECTION:
isp1760_hcd_set(hcd, PORT_CSC);
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
			/* XXX error? */
break;
case USB_PORT_FEAT_C_RESET:
/* GetPortStatus clears reset */
break;
default:
goto error;
}
isp1760_hcd_read(hcd, CMD_RUN);
break;
case GetHubDescriptor:
isp1760_hub_descriptor(priv, (struct usb_hub_descriptor *)
buf);
break;
case GetHubStatus:
/* no hub-wide feature/status flags */
memset(buf, 0, 4);
break;
case GetPortStatus:
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
status = 0;
/* wPortChange bits */
if (isp1760_hcd_is_set(hcd, PORT_CSC))
status |= USB_PORT_STAT_C_CONNECTION << 16;
/* whoever resumes must GetPortStatus to complete it!! */
if (isp1760_hcd_is_set(hcd, PORT_RESUME)) {
dev_err(hcd->self.controller, "Port resume should be skipped.\n");
/* Remote Wakeup received? */
if (!priv->reset_done) {
/* resume signaling for 20 msec */
priv->reset_done = jiffies
+ msecs_to_jiffies(20);
/* check the port again */
mod_timer(&hcd->rh_timer, priv->reset_done);
}
/* resume completed? */
else if (time_after_eq(jiffies,
priv->reset_done)) {
status |= USB_PORT_STAT_C_SUSPEND << 16;
priv->reset_done = 0;
/* stop resume signaling */
isp1760_hcd_clear(hcd, PORT_CSC);
retval = isp1760_hcd_clear_and_wait(hcd,
PORT_RESUME, 2000);
if (retval != 0) {
dev_err(hcd->self.controller,
"port %d resume error %d\n",
wIndex + 1, retval);
goto error;
}
}
}
/* whoever resets must GetPortStatus to complete it!! */
if (isp1760_hcd_is_set(hcd, PORT_RESET) &&
time_after_eq(jiffies, priv->reset_done)) {
status |= USB_PORT_STAT_C_RESET << 16;
priv->reset_done = 0;
/* force reset to complete */
/* REVISIT: some hardware needs 550+ usec to clear
* this bit; seems too long to spin routinely...
*/
retval = isp1760_hcd_clear_and_wait(hcd, PORT_RESET,
750);
if (retval != 0) {
dev_err(hcd->self.controller, "port %d reset error %d\n",
wIndex + 1, retval);
goto error;
}
/* see what we found out */
check_reset_complete(hcd, wIndex);
}
/*
* Even if OWNER is set, there's no harm letting hub_wq
* see the wPortStatus values (they should all be 0 except
* for PORT_POWER anyway).
*/
if (isp1760_hcd_is_set(hcd, PORT_OWNER))
dev_err(hcd->self.controller, "PORT_OWNER is set\n");
if (isp1760_hcd_is_set(hcd, PORT_CONNECT)) {
status |= USB_PORT_STAT_CONNECTION;
/* status may be from integrated TT */
status |= USB_PORT_STAT_HIGH_SPEED;
}
if (isp1760_hcd_is_set(hcd, PORT_PE))
status |= USB_PORT_STAT_ENABLE;
if (isp1760_hcd_is_set(hcd, PORT_SUSPEND) &&
isp1760_hcd_is_set(hcd, PORT_RESUME))
status |= USB_PORT_STAT_SUSPEND;
if (isp1760_hcd_is_set(hcd, PORT_RESET))
status |= USB_PORT_STAT_RESET;
if (isp1760_hcd_is_set(hcd, PORT_POWER))
status |= USB_PORT_STAT_POWER;
put_unaligned(cpu_to_le32(status), (__le32 *) buf);
break;
case SetHubFeature:
switch (wValue) {
case C_HUB_LOCAL_POWER:
case C_HUB_OVER_CURRENT:
/* no hub-wide feature/status flags */
break;
default:
goto error;
}
break;
case SetPortFeature:
wIndex &= 0xff;
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
if (isp1760_hcd_is_set(hcd, PORT_OWNER))
break;
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
isp1760_hcd_set(hcd, PORT_PE);
break;
case USB_PORT_FEAT_SUSPEND:
if (!isp1760_hcd_is_set(hcd, PORT_PE) ||
isp1760_hcd_is_set(hcd, PORT_RESET))
goto error;
isp1760_hcd_set(hcd, PORT_SUSPEND);
break;
case USB_PORT_FEAT_POWER:
if (isp1760_hcd_ppc_is_set(hcd))
isp1760_hcd_set(hcd, PORT_POWER);
break;
case USB_PORT_FEAT_RESET:
if (isp1760_hcd_is_set(hcd, PORT_RESUME))
goto error;
/* line status bits may report this as low speed,
* which can be fine if this root hub has a
* transaction translator built in.
*/
if ((isp1760_hcd_is_set(hcd, PORT_CONNECT) &&
!isp1760_hcd_is_set(hcd, PORT_PE)) &&
(isp1760_hcd_read(hcd, PORT_LSTATUS) == 1)) {
isp1760_hcd_set(hcd, PORT_OWNER);
} else {
isp1760_hcd_set(hcd, PORT_RESET);
isp1760_hcd_clear(hcd, PORT_PE);
/*
* caller must wait, then call GetPortStatus
* usb 2.0 spec says 50 ms resets on root
*/
priv->reset_done = jiffies +
msecs_to_jiffies(50);
}
break;
default:
goto error;
}
break;
default:
error:
/* "stall" on error */
retval = -EPIPE;
}
spin_unlock_irqrestore(&priv->lock, flags);
return retval;
}
static int isp1760_get_frame(struct usb_hcd *hcd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
u32 fr;
fr = isp1760_hcd_read(hcd, HC_FRINDEX);
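	/*
	 * As in EHCI, FRINDEX counts 125 us micro-frames; shifting right by
	 * 3 yields the 1 ms frame number.
	 */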
return (fr >> 3) % priv->periodic_size;
}
static void isp1760_stop(struct usb_hcd *hcd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
del_timer(&errata2_timer);
isp1760_hub_control(hcd, ClearPortFeature, USB_PORT_FEAT_POWER, 1,
NULL, 0);
msleep(20);
spin_lock_irq(&priv->lock);
ehci_reset(hcd);
/* Disable IRQ */
isp1760_hcd_clear(hcd, HW_GLOBAL_INTR_EN);
spin_unlock_irq(&priv->lock);
isp1760_hcd_clear(hcd, FLAG_CF);
}
static void isp1760_shutdown(struct usb_hcd *hcd)
{
isp1760_stop(hcd);
isp1760_hcd_clear(hcd, HW_GLOBAL_INTR_EN);
isp1760_hcd_clear(hcd, CMD_RUN);
}
static void isp1760_clear_tt_buffer_complete(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
struct isp1760_qh *qh = ep->hcpriv;
unsigned long spinflags;
if (!qh)
return;
spin_lock_irqsave(&priv->lock, spinflags);
qh->tt_buffer_dirty = 0;
schedule_ptds(hcd);
spin_unlock_irqrestore(&priv->lock, spinflags);
}
static const struct hc_driver isp1760_hc_driver = {
.description = "isp1760-hcd",
.product_desc = "NXP ISP1760 USB Host Controller",
.hcd_priv_size = sizeof(struct isp1760_hcd *),
.irq = isp1760_irq,
.flags = HCD_MEMORY | HCD_USB2,
.reset = isp1760_hc_setup,
.start = isp1760_run,
.stop = isp1760_stop,
.shutdown = isp1760_shutdown,
.urb_enqueue = isp1760_urb_enqueue,
.urb_dequeue = isp1760_urb_dequeue,
.endpoint_disable = isp1760_endpoint_disable,
.get_frame_number = isp1760_get_frame,
.hub_status_data = isp1760_hub_status_data,
.hub_control = isp1760_hub_control,
.clear_tt_buffer_complete = isp1760_clear_tt_buffer_complete,
};
int __init isp1760_init_kmem_once(void)
{
urb_listitem_cachep = kmem_cache_create("isp1760_urb_listitem",
sizeof(struct urb_listitem), 0, SLAB_TEMPORARY |
SLAB_MEM_SPREAD, NULL);
if (!urb_listitem_cachep)
return -ENOMEM;
qtd_cachep = kmem_cache_create("isp1760_qtd",
sizeof(struct isp1760_qtd), 0, SLAB_TEMPORARY |
SLAB_MEM_SPREAD, NULL);
if (!qtd_cachep)
goto destroy_urb_listitem;
qh_cachep = kmem_cache_create("isp1760_qh", sizeof(struct isp1760_qh),
0, SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);
if (!qh_cachep)
goto destroy_qtd;
return 0;
destroy_qtd:
kmem_cache_destroy(qtd_cachep);
destroy_urb_listitem:
kmem_cache_destroy(urb_listitem_cachep);
return -ENOMEM;
}
void isp1760_deinit_kmem_cache(void)
{
kmem_cache_destroy(qtd_cachep);
kmem_cache_destroy(qh_cachep);
kmem_cache_destroy(urb_listitem_cachep);
}
int isp1760_hcd_register(struct isp1760_hcd *priv, struct resource *mem,
int irq, unsigned long irqflags,
struct device *dev)
{
const struct isp1760_memory_layout *mem_layout = priv->memory_layout;
struct usb_hcd *hcd;
int ret;
hcd = usb_create_hcd(&isp1760_hc_driver, dev, dev_name(dev));
if (!hcd)
return -ENOMEM;
*(struct isp1760_hcd **)hcd->hcd_priv = priv;
priv->hcd = hcd;
priv->atl_slots = kcalloc(mem_layout->slot_num,
sizeof(struct isp1760_slotinfo), GFP_KERNEL);
if (!priv->atl_slots) {
ret = -ENOMEM;
goto put_hcd;
}
priv->int_slots = kcalloc(mem_layout->slot_num,
sizeof(struct isp1760_slotinfo), GFP_KERNEL);
if (!priv->int_slots) {
ret = -ENOMEM;
goto free_atl_slots;
}
init_memory(priv);
hcd->irq = irq;
hcd->rsrc_start = mem->start;
hcd->rsrc_len = resource_size(mem);
/* This driver doesn't support wakeup requests */
hcd->cant_recv_wakeups = 1;
ret = usb_add_hcd(hcd, irq, irqflags);
if (ret)
goto free_int_slots;
device_wakeup_enable(hcd->self.controller);
return 0;
free_int_slots:
kfree(priv->int_slots);
free_atl_slots:
kfree(priv->atl_slots);
put_hcd:
usb_put_hcd(hcd);
return ret;
}
void isp1760_hcd_unregister(struct isp1760_hcd *priv)
{
if (!priv->hcd)
return;
usb_remove_hcd(priv->hcd);
usb_put_hcd(priv->hcd);
kfree(priv->atl_slots);
kfree(priv->int_slots);
}
| linux-master | drivers/usb/isp1760/isp1760-hcd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the NXP ISP1760 chip
*
* Copyright 2021 Linaro, Rui Miguel Silva
* Copyright 2014 Laurent Pinchart
* Copyright 2007 Sebastian Siewior
*
* Contacts:
* Sebastian Siewior <[email protected]>
* Laurent Pinchart <[email protected]>
* Rui Miguel Silva <[email protected]>
*/
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include "isp1760-core.h"
#include "isp1760-hcd.h"
#include "isp1760-regs.h"
#include "isp1760-udc.h"
static int isp1760_init_core(struct isp1760_device *isp)
{
struct isp1760_hcd *hcd = &isp->hcd;
struct isp1760_udc *udc = &isp->udc;
u32 otg_ctrl;
/* Low-level chip reset */
if (isp->rst_gpio) {
gpiod_set_value_cansleep(isp->rst_gpio, 1);
msleep(50);
gpiod_set_value_cansleep(isp->rst_gpio, 0);
}
/*
* Reset the host controller, including the CPU interface
* configuration.
*/
isp1760_field_set(hcd->fields, SW_RESET_RESET_ALL);
msleep(100);
	/*
	 * Set up HW Mode Control. This assumes a level-triggered, active-low
	 * interrupt.
	 */
if ((isp->devflags & ISP1760_FLAG_ANALOG_OC) && hcd->is_isp1763) {
dev_err(isp->dev, "isp1763 analog overcurrent not available\n");
return -EINVAL;
}
if (isp->devflags & ISP1760_FLAG_BUS_WIDTH_16)
isp1760_field_clear(hcd->fields, HW_DATA_BUS_WIDTH);
if (isp->devflags & ISP1760_FLAG_BUS_WIDTH_8)
isp1760_field_set(hcd->fields, HW_DATA_BUS_WIDTH);
if (isp->devflags & ISP1760_FLAG_ANALOG_OC)
isp1760_field_set(hcd->fields, HW_ANA_DIGI_OC);
if (isp->devflags & ISP1760_FLAG_DACK_POL_HIGH)
isp1760_field_set(hcd->fields, HW_DACK_POL_HIGH);
if (isp->devflags & ISP1760_FLAG_DREQ_POL_HIGH)
isp1760_field_set(hcd->fields, HW_DREQ_POL_HIGH);
if (isp->devflags & ISP1760_FLAG_INTR_POL_HIGH)
isp1760_field_set(hcd->fields, HW_INTR_HIGH_ACT);
if (isp->devflags & ISP1760_FLAG_INTR_EDGE_TRIG)
isp1760_field_set(hcd->fields, HW_INTR_EDGE_TRIG);
/*
* The ISP1761 has a dedicated DC IRQ line but supports sharing the HC
* IRQ line for both the host and device controllers. Hardcode IRQ
* sharing for now and disable the DC interrupts globally to avoid
* spurious interrupts during HCD registration.
*/
if (isp->devflags & ISP1760_FLAG_ISP1761) {
isp1760_reg_write(udc->regs, ISP176x_DC_MODE, 0);
isp1760_field_set(hcd->fields, HW_COMN_IRQ);
}
/*
* PORT 1 Control register of the ISP1760 is the OTG control register
* on ISP1761.
*
* TODO: Really support OTG. For now we configure port 1 in device mode
*/
if (isp->devflags & ISP1760_FLAG_ISP1761) {
if (isp->devflags & ISP1760_FLAG_PERIPHERAL_EN) {
otg_ctrl = (ISP176x_HW_DM_PULLDOWN_CLEAR |
ISP176x_HW_DP_PULLDOWN_CLEAR |
ISP176x_HW_OTG_DISABLE);
} else {
otg_ctrl = (ISP176x_HW_SW_SEL_HC_DC_CLEAR |
ISP176x_HW_VBUS_DRV |
ISP176x_HW_SEL_CP_EXT);
}
isp1760_reg_write(hcd->regs, ISP176x_HC_OTG_CTRL, otg_ctrl);
}
dev_info(isp->dev, "%s bus width: %u, oc: %s\n",
hcd->is_isp1763 ? "isp1763" : "isp1760",
isp->devflags & ISP1760_FLAG_BUS_WIDTH_8 ? 8 :
isp->devflags & ISP1760_FLAG_BUS_WIDTH_16 ? 16 : 32,
hcd->is_isp1763 ? "not available" :
isp->devflags & ISP1760_FLAG_ANALOG_OC ? "analog" : "digital");
return 0;
}
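/*
 * The D+ pull-up is controlled through a set/clear field pair (see the
 * OTG_CTRL field tables below) rather than a read-modify-write bit, which
 * is why disabling the pull-up sets HW_DP_PULLUP_CLEAR instead of clearing
 * HW_DP_PULLUP.
 */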
void isp1760_set_pullup(struct isp1760_device *isp, bool enable)
{
struct isp1760_udc *udc = &isp->udc;
if (enable)
isp1760_field_set(udc->fields, HW_DP_PULLUP);
else
isp1760_field_set(udc->fields, HW_DP_PULLUP_CLEAR);
}
/*
* ISP1760/61:
*
 * 60 KiB of payload memory, divided into:
* - 32 blocks @ 256 bytes
* - 20 blocks @ 1024 bytes
* - 4 blocks @ 8192 bytes
*/
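/*
 * Sanity check of the layout below:
 * 32 * 256 + 20 * 1024 + 4 * 8192 = 61440 bytes = 0xf000, matching
 * payload_area_size.
 */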
static const struct isp1760_memory_layout isp176x_memory_conf = {
.blocks[0] = 32,
.blocks_size[0] = 256,
.blocks[1] = 20,
.blocks_size[1] = 1024,
.blocks[2] = 4,
.blocks_size[2] = 8192,
.slot_num = 32,
.payload_blocks = 32 + 20 + 4,
.payload_area_size = 0xf000,
};
/*
* ISP1763:
*
 * 20 KiB of payload memory, divided into:
* - 8 blocks @ 256 bytes
* - 2 blocks @ 1024 bytes
* - 4 blocks @ 4096 bytes
*/
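/*
 * Sanity check: 8 * 256 + 2 * 1024 + 4 * 4096 = 20480 bytes = 0x5000,
 * matching payload_area_size.
 */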
static const struct isp1760_memory_layout isp1763_memory_conf = {
.blocks[0] = 8,
.blocks_size[0] = 256,
.blocks[1] = 2,
.blocks_size[1] = 1024,
.blocks[2] = 4,
.blocks_size[2] = 4096,
.slot_num = 16,
.payload_blocks = 8 + 2 + 4,
.payload_area_size = 0x5000,
};
static const struct regmap_range isp176x_hc_volatile_ranges[] = {
regmap_reg_range(ISP176x_HC_USBCMD, ISP176x_HC_ATL_PTD_LASTPTD),
regmap_reg_range(ISP176x_HC_BUFFER_STATUS, ISP176x_HC_MEMORY),
regmap_reg_range(ISP176x_HC_INTERRUPT, ISP176x_HC_OTG_CTRL_CLEAR),
};
static const struct regmap_access_table isp176x_hc_volatile_table = {
.yes_ranges = isp176x_hc_volatile_ranges,
.n_yes_ranges = ARRAY_SIZE(isp176x_hc_volatile_ranges),
};
static const struct regmap_config isp1760_hc_regmap_conf = {
.name = "isp1760-hc",
.reg_bits = 16,
.reg_stride = 4,
.val_bits = 32,
.fast_io = true,
.max_register = ISP176x_HC_OTG_CTRL_CLEAR,
.volatile_table = &isp176x_hc_volatile_table,
};
static const struct reg_field isp1760_hc_reg_fields[] = {
[HCS_PPC] = REG_FIELD(ISP176x_HC_HCSPARAMS, 4, 4),
[HCS_N_PORTS] = REG_FIELD(ISP176x_HC_HCSPARAMS, 0, 3),
[HCC_ISOC_CACHE] = REG_FIELD(ISP176x_HC_HCCPARAMS, 7, 7),
[HCC_ISOC_THRES] = REG_FIELD(ISP176x_HC_HCCPARAMS, 4, 6),
[CMD_LRESET] = REG_FIELD(ISP176x_HC_USBCMD, 7, 7),
[CMD_RESET] = REG_FIELD(ISP176x_HC_USBCMD, 1, 1),
[CMD_RUN] = REG_FIELD(ISP176x_HC_USBCMD, 0, 0),
[STS_PCD] = REG_FIELD(ISP176x_HC_USBSTS, 2, 2),
[HC_FRINDEX] = REG_FIELD(ISP176x_HC_FRINDEX, 0, 13),
[FLAG_CF] = REG_FIELD(ISP176x_HC_CONFIGFLAG, 0, 0),
[HC_ISO_PTD_DONEMAP] = REG_FIELD(ISP176x_HC_ISO_PTD_DONEMAP, 0, 31),
[HC_ISO_PTD_SKIPMAP] = REG_FIELD(ISP176x_HC_ISO_PTD_SKIPMAP, 0, 31),
[HC_ISO_PTD_LASTPTD] = REG_FIELD(ISP176x_HC_ISO_PTD_LASTPTD, 0, 31),
[HC_INT_PTD_DONEMAP] = REG_FIELD(ISP176x_HC_INT_PTD_DONEMAP, 0, 31),
[HC_INT_PTD_SKIPMAP] = REG_FIELD(ISP176x_HC_INT_PTD_SKIPMAP, 0, 31),
[HC_INT_PTD_LASTPTD] = REG_FIELD(ISP176x_HC_INT_PTD_LASTPTD, 0, 31),
[HC_ATL_PTD_DONEMAP] = REG_FIELD(ISP176x_HC_ATL_PTD_DONEMAP, 0, 31),
[HC_ATL_PTD_SKIPMAP] = REG_FIELD(ISP176x_HC_ATL_PTD_SKIPMAP, 0, 31),
[HC_ATL_PTD_LASTPTD] = REG_FIELD(ISP176x_HC_ATL_PTD_LASTPTD, 0, 31),
[PORT_OWNER] = REG_FIELD(ISP176x_HC_PORTSC1, 13, 13),
[PORT_POWER] = REG_FIELD(ISP176x_HC_PORTSC1, 12, 12),
[PORT_LSTATUS] = REG_FIELD(ISP176x_HC_PORTSC1, 10, 11),
[PORT_RESET] = REG_FIELD(ISP176x_HC_PORTSC1, 8, 8),
[PORT_SUSPEND] = REG_FIELD(ISP176x_HC_PORTSC1, 7, 7),
[PORT_RESUME] = REG_FIELD(ISP176x_HC_PORTSC1, 6, 6),
[PORT_PE] = REG_FIELD(ISP176x_HC_PORTSC1, 2, 2),
[PORT_CSC] = REG_FIELD(ISP176x_HC_PORTSC1, 1, 1),
[PORT_CONNECT] = REG_FIELD(ISP176x_HC_PORTSC1, 0, 0),
[ALL_ATX_RESET] = REG_FIELD(ISP176x_HC_HW_MODE_CTRL, 31, 31),
[HW_ANA_DIGI_OC] = REG_FIELD(ISP176x_HC_HW_MODE_CTRL, 15, 15),
[HW_COMN_IRQ] = REG_FIELD(ISP176x_HC_HW_MODE_CTRL, 10, 10),
[HW_DATA_BUS_WIDTH] = REG_FIELD(ISP176x_HC_HW_MODE_CTRL, 8, 8),
[HW_DACK_POL_HIGH] = REG_FIELD(ISP176x_HC_HW_MODE_CTRL, 6, 6),
[HW_DREQ_POL_HIGH] = REG_FIELD(ISP176x_HC_HW_MODE_CTRL, 5, 5),
[HW_INTR_HIGH_ACT] = REG_FIELD(ISP176x_HC_HW_MODE_CTRL, 2, 2),
[HW_INTR_EDGE_TRIG] = REG_FIELD(ISP176x_HC_HW_MODE_CTRL, 1, 1),
[HW_GLOBAL_INTR_EN] = REG_FIELD(ISP176x_HC_HW_MODE_CTRL, 0, 0),
[HC_CHIP_REV] = REG_FIELD(ISP176x_HC_CHIP_ID, 16, 31),
[HC_CHIP_ID_HIGH] = REG_FIELD(ISP176x_HC_CHIP_ID, 8, 15),
[HC_CHIP_ID_LOW] = REG_FIELD(ISP176x_HC_CHIP_ID, 0, 7),
[HC_SCRATCH] = REG_FIELD(ISP176x_HC_SCRATCH, 0, 31),
[SW_RESET_RESET_ALL] = REG_FIELD(ISP176x_HC_RESET, 0, 0),
[ISO_BUF_FILL] = REG_FIELD(ISP176x_HC_BUFFER_STATUS, 2, 2),
[INT_BUF_FILL] = REG_FIELD(ISP176x_HC_BUFFER_STATUS, 1, 1),
[ATL_BUF_FILL] = REG_FIELD(ISP176x_HC_BUFFER_STATUS, 0, 0),
[MEM_BANK_SEL] = REG_FIELD(ISP176x_HC_MEMORY, 16, 17),
[MEM_START_ADDR] = REG_FIELD(ISP176x_HC_MEMORY, 0, 15),
[HC_INTERRUPT] = REG_FIELD(ISP176x_HC_INTERRUPT, 0, 9),
[HC_ATL_IRQ_ENABLE] = REG_FIELD(ISP176x_HC_INTERRUPT_ENABLE, 8, 8),
[HC_INT_IRQ_ENABLE] = REG_FIELD(ISP176x_HC_INTERRUPT_ENABLE, 7, 7),
[HC_ISO_IRQ_MASK_OR] = REG_FIELD(ISP176x_HC_ISO_IRQ_MASK_OR, 0, 31),
[HC_INT_IRQ_MASK_OR] = REG_FIELD(ISP176x_HC_INT_IRQ_MASK_OR, 0, 31),
[HC_ATL_IRQ_MASK_OR] = REG_FIELD(ISP176x_HC_ATL_IRQ_MASK_OR, 0, 31),
[HC_ISO_IRQ_MASK_AND] = REG_FIELD(ISP176x_HC_ISO_IRQ_MASK_AND, 0, 31),
[HC_INT_IRQ_MASK_AND] = REG_FIELD(ISP176x_HC_INT_IRQ_MASK_AND, 0, 31),
[HC_ATL_IRQ_MASK_AND] = REG_FIELD(ISP176x_HC_ATL_IRQ_MASK_AND, 0, 31),
[HW_OTG_DISABLE_CLEAR] = REG_FIELD(ISP176x_HC_OTG_CTRL, 26, 26),
[HW_SW_SEL_HC_DC_CLEAR] = REG_FIELD(ISP176x_HC_OTG_CTRL, 23, 23),
[HW_VBUS_DRV_CLEAR] = REG_FIELD(ISP176x_HC_OTG_CTRL, 20, 20),
[HW_SEL_CP_EXT_CLEAR] = REG_FIELD(ISP176x_HC_OTG_CTRL, 19, 19),
[HW_DM_PULLDOWN_CLEAR] = REG_FIELD(ISP176x_HC_OTG_CTRL, 18, 18),
[HW_DP_PULLDOWN_CLEAR] = REG_FIELD(ISP176x_HC_OTG_CTRL, 17, 17),
[HW_DP_PULLUP_CLEAR] = REG_FIELD(ISP176x_HC_OTG_CTRL, 16, 16),
[HW_OTG_DISABLE] = REG_FIELD(ISP176x_HC_OTG_CTRL, 10, 10),
[HW_SW_SEL_HC_DC] = REG_FIELD(ISP176x_HC_OTG_CTRL, 7, 7),
[HW_VBUS_DRV] = REG_FIELD(ISP176x_HC_OTG_CTRL, 4, 4),
[HW_SEL_CP_EXT] = REG_FIELD(ISP176x_HC_OTG_CTRL, 3, 3),
[HW_DM_PULLDOWN] = REG_FIELD(ISP176x_HC_OTG_CTRL, 2, 2),
[HW_DP_PULLDOWN] = REG_FIELD(ISP176x_HC_OTG_CTRL, 1, 1),
[HW_DP_PULLUP] = REG_FIELD(ISP176x_HC_OTG_CTRL, 0, 0),
/* Make sure the array is sized properly during compilation */
[HC_FIELD_MAX] = {},
};
static const struct reg_field isp1763_hc_reg_fields[] = {
[CMD_LRESET] = REG_FIELD(ISP1763_HC_USBCMD, 7, 7),
[CMD_RESET] = REG_FIELD(ISP1763_HC_USBCMD, 1, 1),
[CMD_RUN] = REG_FIELD(ISP1763_HC_USBCMD, 0, 0),
[STS_PCD] = REG_FIELD(ISP1763_HC_USBSTS, 2, 2),
[HC_FRINDEX] = REG_FIELD(ISP1763_HC_FRINDEX, 0, 13),
[FLAG_CF] = REG_FIELD(ISP1763_HC_CONFIGFLAG, 0, 0),
[HC_ISO_PTD_DONEMAP] = REG_FIELD(ISP1763_HC_ISO_PTD_DONEMAP, 0, 15),
[HC_ISO_PTD_SKIPMAP] = REG_FIELD(ISP1763_HC_ISO_PTD_SKIPMAP, 0, 15),
[HC_ISO_PTD_LASTPTD] = REG_FIELD(ISP1763_HC_ISO_PTD_LASTPTD, 0, 15),
[HC_INT_PTD_DONEMAP] = REG_FIELD(ISP1763_HC_INT_PTD_DONEMAP, 0, 15),
[HC_INT_PTD_SKIPMAP] = REG_FIELD(ISP1763_HC_INT_PTD_SKIPMAP, 0, 15),
[HC_INT_PTD_LASTPTD] = REG_FIELD(ISP1763_HC_INT_PTD_LASTPTD, 0, 15),
[HC_ATL_PTD_DONEMAP] = REG_FIELD(ISP1763_HC_ATL_PTD_DONEMAP, 0, 15),
[HC_ATL_PTD_SKIPMAP] = REG_FIELD(ISP1763_HC_ATL_PTD_SKIPMAP, 0, 15),
[HC_ATL_PTD_LASTPTD] = REG_FIELD(ISP1763_HC_ATL_PTD_LASTPTD, 0, 15),
[PORT_OWNER] = REG_FIELD(ISP1763_HC_PORTSC1, 13, 13),
[PORT_POWER] = REG_FIELD(ISP1763_HC_PORTSC1, 12, 12),
[PORT_LSTATUS] = REG_FIELD(ISP1763_HC_PORTSC1, 10, 11),
[PORT_RESET] = REG_FIELD(ISP1763_HC_PORTSC1, 8, 8),
[PORT_SUSPEND] = REG_FIELD(ISP1763_HC_PORTSC1, 7, 7),
[PORT_RESUME] = REG_FIELD(ISP1763_HC_PORTSC1, 6, 6),
[PORT_PE] = REG_FIELD(ISP1763_HC_PORTSC1, 2, 2),
[PORT_CSC] = REG_FIELD(ISP1763_HC_PORTSC1, 1, 1),
[PORT_CONNECT] = REG_FIELD(ISP1763_HC_PORTSC1, 0, 0),
[HW_DATA_BUS_WIDTH] = REG_FIELD(ISP1763_HC_HW_MODE_CTRL, 4, 4),
[HW_DACK_POL_HIGH] = REG_FIELD(ISP1763_HC_HW_MODE_CTRL, 6, 6),
[HW_DREQ_POL_HIGH] = REG_FIELD(ISP1763_HC_HW_MODE_CTRL, 5, 5),
[HW_INTF_LOCK] = REG_FIELD(ISP1763_HC_HW_MODE_CTRL, 3, 3),
[HW_INTR_HIGH_ACT] = REG_FIELD(ISP1763_HC_HW_MODE_CTRL, 2, 2),
[HW_INTR_EDGE_TRIG] = REG_FIELD(ISP1763_HC_HW_MODE_CTRL, 1, 1),
[HW_GLOBAL_INTR_EN] = REG_FIELD(ISP1763_HC_HW_MODE_CTRL, 0, 0),
[SW_RESET_RESET_ATX] = REG_FIELD(ISP1763_HC_RESET, 3, 3),
[SW_RESET_RESET_ALL] = REG_FIELD(ISP1763_HC_RESET, 0, 0),
[HC_CHIP_ID_HIGH] = REG_FIELD(ISP1763_HC_CHIP_ID, 0, 15),
[HC_CHIP_ID_LOW] = REG_FIELD(ISP1763_HC_CHIP_REV, 8, 15),
[HC_CHIP_REV] = REG_FIELD(ISP1763_HC_CHIP_REV, 0, 7),
[HC_SCRATCH] = REG_FIELD(ISP1763_HC_SCRATCH, 0, 15),
[ISO_BUF_FILL] = REG_FIELD(ISP1763_HC_BUFFER_STATUS, 2, 2),
[INT_BUF_FILL] = REG_FIELD(ISP1763_HC_BUFFER_STATUS, 1, 1),
[ATL_BUF_FILL] = REG_FIELD(ISP1763_HC_BUFFER_STATUS, 0, 0),
[MEM_START_ADDR] = REG_FIELD(ISP1763_HC_MEMORY, 0, 15),
[HC_DATA] = REG_FIELD(ISP1763_HC_DATA, 0, 15),
[HC_INTERRUPT] = REG_FIELD(ISP1763_HC_INTERRUPT, 0, 10),
[HC_ATL_IRQ_ENABLE] = REG_FIELD(ISP1763_HC_INTERRUPT_ENABLE, 8, 8),
[HC_INT_IRQ_ENABLE] = REG_FIELD(ISP1763_HC_INTERRUPT_ENABLE, 7, 7),
[HC_ISO_IRQ_MASK_OR] = REG_FIELD(ISP1763_HC_ISO_IRQ_MASK_OR, 0, 15),
[HC_INT_IRQ_MASK_OR] = REG_FIELD(ISP1763_HC_INT_IRQ_MASK_OR, 0, 15),
[HC_ATL_IRQ_MASK_OR] = REG_FIELD(ISP1763_HC_ATL_IRQ_MASK_OR, 0, 15),
[HC_ISO_IRQ_MASK_AND] = REG_FIELD(ISP1763_HC_ISO_IRQ_MASK_AND, 0, 15),
[HC_INT_IRQ_MASK_AND] = REG_FIELD(ISP1763_HC_INT_IRQ_MASK_AND, 0, 15),
[HC_ATL_IRQ_MASK_AND] = REG_FIELD(ISP1763_HC_ATL_IRQ_MASK_AND, 0, 15),
[HW_HC_2_DIS] = REG_FIELD(ISP1763_HC_OTG_CTRL_SET, 15, 15),
[HW_OTG_DISABLE] = REG_FIELD(ISP1763_HC_OTG_CTRL_SET, 10, 10),
[HW_SW_SEL_HC_DC] = REG_FIELD(ISP1763_HC_OTG_CTRL_SET, 7, 7),
[HW_VBUS_DRV] = REG_FIELD(ISP1763_HC_OTG_CTRL_SET, 4, 4),
[HW_SEL_CP_EXT] = REG_FIELD(ISP1763_HC_OTG_CTRL_SET, 3, 3),
[HW_DM_PULLDOWN] = REG_FIELD(ISP1763_HC_OTG_CTRL_SET, 2, 2),
[HW_DP_PULLDOWN] = REG_FIELD(ISP1763_HC_OTG_CTRL_SET, 1, 1),
[HW_DP_PULLUP] = REG_FIELD(ISP1763_HC_OTG_CTRL_SET, 0, 0),
[HW_HC_2_DIS_CLEAR] = REG_FIELD(ISP1763_HC_OTG_CTRL_CLEAR, 15, 15),
[HW_OTG_DISABLE_CLEAR] = REG_FIELD(ISP1763_HC_OTG_CTRL_CLEAR, 10, 10),
[HW_SW_SEL_HC_DC_CLEAR] = REG_FIELD(ISP1763_HC_OTG_CTRL_CLEAR, 7, 7),
[HW_VBUS_DRV_CLEAR] = REG_FIELD(ISP1763_HC_OTG_CTRL_CLEAR, 4, 4),
[HW_SEL_CP_EXT_CLEAR] = REG_FIELD(ISP1763_HC_OTG_CTRL_CLEAR, 3, 3),
[HW_DM_PULLDOWN_CLEAR] = REG_FIELD(ISP1763_HC_OTG_CTRL_CLEAR, 2, 2),
[HW_DP_PULLDOWN_CLEAR] = REG_FIELD(ISP1763_HC_OTG_CTRL_CLEAR, 1, 1),
[HW_DP_PULLUP_CLEAR] = REG_FIELD(ISP1763_HC_OTG_CTRL_CLEAR, 0, 0),
/* Make sure the array is sized properly during compilation */
[HC_FIELD_MAX] = {},
};
static const struct regmap_range isp1763_hc_volatile_ranges[] = {
regmap_reg_range(ISP1763_HC_USBCMD, ISP1763_HC_ATL_PTD_LASTPTD),
regmap_reg_range(ISP1763_HC_BUFFER_STATUS, ISP1763_HC_DATA),
regmap_reg_range(ISP1763_HC_INTERRUPT, ISP1763_HC_OTG_CTRL_CLEAR),
};
static const struct regmap_access_table isp1763_hc_volatile_table = {
.yes_ranges = isp1763_hc_volatile_ranges,
.n_yes_ranges = ARRAY_SIZE(isp1763_hc_volatile_ranges),
};
static const struct regmap_config isp1763_hc_regmap_conf = {
.name = "isp1763-hc",
.reg_bits = 8,
.reg_stride = 2,
.val_bits = 16,
.fast_io = true,
.max_register = ISP1763_HC_OTG_CTRL_CLEAR,
.volatile_table = &isp1763_hc_volatile_table,
};
static const struct regmap_range isp176x_dc_volatile_ranges[] = {
regmap_reg_range(ISP176x_DC_EPMAXPKTSZ, ISP176x_DC_EPTYPE),
regmap_reg_range(ISP176x_DC_BUFLEN, ISP176x_DC_EPINDEX),
};
static const struct regmap_access_table isp176x_dc_volatile_table = {
.yes_ranges = isp176x_dc_volatile_ranges,
.n_yes_ranges = ARRAY_SIZE(isp176x_dc_volatile_ranges),
};
static const struct regmap_config isp1761_dc_regmap_conf = {
.name = "isp1761-dc",
.reg_bits = 16,
.reg_stride = 4,
.val_bits = 32,
.fast_io = true,
.max_register = ISP176x_DC_TESTMODE,
.volatile_table = &isp176x_dc_volatile_table,
};
static const struct reg_field isp1761_dc_reg_fields[] = {
[DC_DEVEN] = REG_FIELD(ISP176x_DC_ADDRESS, 7, 7),
[DC_DEVADDR] = REG_FIELD(ISP176x_DC_ADDRESS, 0, 6),
[DC_VBUSSTAT] = REG_FIELD(ISP176x_DC_MODE, 8, 8),
[DC_SFRESET] = REG_FIELD(ISP176x_DC_MODE, 4, 4),
[DC_GLINTENA] = REG_FIELD(ISP176x_DC_MODE, 3, 3),
[DC_CDBGMOD_ACK] = REG_FIELD(ISP176x_DC_INTCONF, 6, 6),
[DC_DDBGMODIN_ACK] = REG_FIELD(ISP176x_DC_INTCONF, 4, 4),
[DC_DDBGMODOUT_ACK] = REG_FIELD(ISP176x_DC_INTCONF, 2, 2),
[DC_INTPOL] = REG_FIELD(ISP176x_DC_INTCONF, 0, 0),
[DC_IEPRXTX_7] = REG_FIELD(ISP176x_DC_INTENABLE, 25, 25),
[DC_IEPRXTX_6] = REG_FIELD(ISP176x_DC_INTENABLE, 23, 23),
[DC_IEPRXTX_5] = REG_FIELD(ISP176x_DC_INTENABLE, 21, 21),
[DC_IEPRXTX_4] = REG_FIELD(ISP176x_DC_INTENABLE, 19, 19),
[DC_IEPRXTX_3] = REG_FIELD(ISP176x_DC_INTENABLE, 17, 17),
[DC_IEPRXTX_2] = REG_FIELD(ISP176x_DC_INTENABLE, 15, 15),
[DC_IEPRXTX_1] = REG_FIELD(ISP176x_DC_INTENABLE, 13, 13),
[DC_IEPRXTX_0] = REG_FIELD(ISP176x_DC_INTENABLE, 11, 11),
[DC_IEP0SETUP] = REG_FIELD(ISP176x_DC_INTENABLE, 8, 8),
[DC_IEVBUS] = REG_FIELD(ISP176x_DC_INTENABLE, 7, 7),
[DC_IEHS_STA] = REG_FIELD(ISP176x_DC_INTENABLE, 5, 5),
[DC_IERESM] = REG_FIELD(ISP176x_DC_INTENABLE, 4, 4),
[DC_IESUSP] = REG_FIELD(ISP176x_DC_INTENABLE, 3, 3),
[DC_IEBRST] = REG_FIELD(ISP176x_DC_INTENABLE, 0, 0),
[DC_EP0SETUP] = REG_FIELD(ISP176x_DC_EPINDEX, 5, 5),
[DC_ENDPIDX] = REG_FIELD(ISP176x_DC_EPINDEX, 1, 4),
[DC_EPDIR] = REG_FIELD(ISP176x_DC_EPINDEX, 0, 0),
[DC_CLBUF] = REG_FIELD(ISP176x_DC_CTRLFUNC, 4, 4),
[DC_VENDP] = REG_FIELD(ISP176x_DC_CTRLFUNC, 3, 3),
[DC_DSEN] = REG_FIELD(ISP176x_DC_CTRLFUNC, 2, 2),
[DC_STATUS] = REG_FIELD(ISP176x_DC_CTRLFUNC, 1, 1),
[DC_STALL] = REG_FIELD(ISP176x_DC_CTRLFUNC, 0, 0),
[DC_BUFLEN] = REG_FIELD(ISP176x_DC_BUFLEN, 0, 15),
[DC_FFOSZ] = REG_FIELD(ISP176x_DC_EPMAXPKTSZ, 0, 10),
[DC_EPENABLE] = REG_FIELD(ISP176x_DC_EPTYPE, 3, 3),
[DC_ENDPTYP] = REG_FIELD(ISP176x_DC_EPTYPE, 0, 1),
[DC_UFRAMENUM] = REG_FIELD(ISP176x_DC_FRAMENUM, 11, 13),
[DC_FRAMENUM] = REG_FIELD(ISP176x_DC_FRAMENUM, 0, 10),
[DC_CHIP_ID_HIGH] = REG_FIELD(ISP176x_DC_CHIPID, 16, 31),
[DC_CHIP_ID_LOW] = REG_FIELD(ISP176x_DC_CHIPID, 0, 15),
[DC_SCRATCH] = REG_FIELD(ISP176x_DC_SCRATCH, 0, 15),
/* Make sure the array is sized properly during compilation */
[DC_FIELD_MAX] = {},
};
static const struct regmap_range isp1763_dc_volatile_ranges[] = {
regmap_reg_range(ISP1763_DC_EPMAXPKTSZ, ISP1763_DC_EPTYPE),
regmap_reg_range(ISP1763_DC_BUFLEN, ISP1763_DC_EPINDEX),
};
static const struct regmap_access_table isp1763_dc_volatile_table = {
.yes_ranges = isp1763_dc_volatile_ranges,
.n_yes_ranges = ARRAY_SIZE(isp1763_dc_volatile_ranges),
};
static const struct reg_field isp1763_dc_reg_fields[] = {
[DC_DEVEN] = REG_FIELD(ISP1763_DC_ADDRESS, 7, 7),
[DC_DEVADDR] = REG_FIELD(ISP1763_DC_ADDRESS, 0, 6),
[DC_VBUSSTAT] = REG_FIELD(ISP1763_DC_MODE, 8, 8),
[DC_SFRESET] = REG_FIELD(ISP1763_DC_MODE, 4, 4),
[DC_GLINTENA] = REG_FIELD(ISP1763_DC_MODE, 3, 3),
[DC_CDBGMOD_ACK] = REG_FIELD(ISP1763_DC_INTCONF, 6, 6),
[DC_DDBGMODIN_ACK] = REG_FIELD(ISP1763_DC_INTCONF, 4, 4),
[DC_DDBGMODOUT_ACK] = REG_FIELD(ISP1763_DC_INTCONF, 2, 2),
[DC_INTPOL] = REG_FIELD(ISP1763_DC_INTCONF, 0, 0),
[DC_IEPRXTX_7] = REG_FIELD(ISP1763_DC_INTENABLE, 25, 25),
[DC_IEPRXTX_6] = REG_FIELD(ISP1763_DC_INTENABLE, 23, 23),
[DC_IEPRXTX_5] = REG_FIELD(ISP1763_DC_INTENABLE, 21, 21),
[DC_IEPRXTX_4] = REG_FIELD(ISP1763_DC_INTENABLE, 19, 19),
[DC_IEPRXTX_3] = REG_FIELD(ISP1763_DC_INTENABLE, 17, 17),
[DC_IEPRXTX_2] = REG_FIELD(ISP1763_DC_INTENABLE, 15, 15),
[DC_IEPRXTX_1] = REG_FIELD(ISP1763_DC_INTENABLE, 13, 13),
[DC_IEPRXTX_0] = REG_FIELD(ISP1763_DC_INTENABLE, 11, 11),
[DC_IEP0SETUP] = REG_FIELD(ISP1763_DC_INTENABLE, 8, 8),
[DC_IEVBUS] = REG_FIELD(ISP1763_DC_INTENABLE, 7, 7),
[DC_IEHS_STA] = REG_FIELD(ISP1763_DC_INTENABLE, 5, 5),
[DC_IERESM] = REG_FIELD(ISP1763_DC_INTENABLE, 4, 4),
[DC_IESUSP] = REG_FIELD(ISP1763_DC_INTENABLE, 3, 3),
[DC_IEBRST] = REG_FIELD(ISP1763_DC_INTENABLE, 0, 0),
[DC_EP0SETUP] = REG_FIELD(ISP1763_DC_EPINDEX, 5, 5),
[DC_ENDPIDX] = REG_FIELD(ISP1763_DC_EPINDEX, 1, 4),
[DC_EPDIR] = REG_FIELD(ISP1763_DC_EPINDEX, 0, 0),
[DC_CLBUF] = REG_FIELD(ISP1763_DC_CTRLFUNC, 4, 4),
[DC_VENDP] = REG_FIELD(ISP1763_DC_CTRLFUNC, 3, 3),
[DC_DSEN] = REG_FIELD(ISP1763_DC_CTRLFUNC, 2, 2),
[DC_STATUS] = REG_FIELD(ISP1763_DC_CTRLFUNC, 1, 1),
[DC_STALL] = REG_FIELD(ISP1763_DC_CTRLFUNC, 0, 0),
[DC_BUFLEN] = REG_FIELD(ISP1763_DC_BUFLEN, 0, 15),
[DC_FFOSZ] = REG_FIELD(ISP1763_DC_EPMAXPKTSZ, 0, 10),
[DC_EPENABLE] = REG_FIELD(ISP1763_DC_EPTYPE, 3, 3),
[DC_ENDPTYP] = REG_FIELD(ISP1763_DC_EPTYPE, 0, 1),
[DC_UFRAMENUM] = REG_FIELD(ISP1763_DC_FRAMENUM, 11, 13),
[DC_FRAMENUM] = REG_FIELD(ISP1763_DC_FRAMENUM, 0, 10),
[DC_CHIP_ID_HIGH] = REG_FIELD(ISP1763_DC_CHIPID_HIGH, 0, 15),
[DC_CHIP_ID_LOW] = REG_FIELD(ISP1763_DC_CHIPID_LOW, 0, 15),
[DC_SCRATCH] = REG_FIELD(ISP1763_DC_SCRATCH, 0, 15),
/* Make sure the array is sized properly during compilation */
[DC_FIELD_MAX] = {},
};
static const struct regmap_config isp1763_dc_regmap_conf = {
.name = "isp1763-dc",
.reg_bits = 8,
.reg_stride = 2,
.val_bits = 16,
.fast_io = true,
.max_register = ISP1763_DC_TESTMODE,
.volatile_table = &isp1763_dc_volatile_table,
};
int isp1760_register(struct resource *mem, int irq, unsigned long irqflags,
struct device *dev, unsigned int devflags)
{
const struct regmap_config *hc_regmap;
const struct reg_field *hc_reg_fields;
const struct regmap_config *dc_regmap;
const struct reg_field *dc_reg_fields;
struct isp1760_device *isp;
struct isp1760_hcd *hcd;
struct isp1760_udc *udc;
struct regmap_field *f;
bool udc_enabled;
int ret;
int i;
	/*
	 * If neither the HCD nor the UDC is enabled, return an error, as no
	 * device would be registered.
	 */
udc_enabled = ((devflags & ISP1760_FLAG_ISP1763) ||
(devflags & ISP1760_FLAG_ISP1761));
if ((!IS_ENABLED(CONFIG_USB_ISP1760_HCD) || usb_disabled()) &&
(!udc_enabled || !IS_ENABLED(CONFIG_USB_ISP1761_UDC)))
return -ENODEV;
isp = devm_kzalloc(dev, sizeof(*isp), GFP_KERNEL);
if (!isp)
return -ENOMEM;
isp->dev = dev;
isp->devflags = devflags;
hcd = &isp->hcd;
udc = &isp->udc;
hcd->is_isp1763 = !!(devflags & ISP1760_FLAG_ISP1763);
udc->is_isp1763 = !!(devflags & ISP1760_FLAG_ISP1763);
if (!hcd->is_isp1763 && (devflags & ISP1760_FLAG_BUS_WIDTH_8)) {
dev_err(dev, "isp1760/61 do not support data width 8\n");
return -EINVAL;
}
if (hcd->is_isp1763) {
hc_regmap = &isp1763_hc_regmap_conf;
hc_reg_fields = &isp1763_hc_reg_fields[0];
dc_regmap = &isp1763_dc_regmap_conf;
dc_reg_fields = &isp1763_dc_reg_fields[0];
} else {
hc_regmap = &isp1760_hc_regmap_conf;
hc_reg_fields = &isp1760_hc_reg_fields[0];
dc_regmap = &isp1761_dc_regmap_conf;
dc_reg_fields = &isp1761_dc_reg_fields[0];
}
isp->rst_gpio = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH);
if (IS_ERR(isp->rst_gpio))
return PTR_ERR(isp->rst_gpio);
hcd->base = devm_ioremap_resource(dev, mem);
if (IS_ERR(hcd->base))
return PTR_ERR(hcd->base);
hcd->regs = devm_regmap_init_mmio(dev, hcd->base, hc_regmap);
if (IS_ERR(hcd->regs))
return PTR_ERR(hcd->regs);
for (i = 0; i < HC_FIELD_MAX; i++) {
f = devm_regmap_field_alloc(dev, hcd->regs, hc_reg_fields[i]);
if (IS_ERR(f))
return PTR_ERR(f);
hcd->fields[i] = f;
}
udc->regs = devm_regmap_init_mmio(dev, hcd->base, dc_regmap);
if (IS_ERR(udc->regs))
return PTR_ERR(udc->regs);
for (i = 0; i < DC_FIELD_MAX; i++) {
f = devm_regmap_field_alloc(dev, udc->regs, dc_reg_fields[i]);
if (IS_ERR(f))
return PTR_ERR(f);
udc->fields[i] = f;
}
if (hcd->is_isp1763)
hcd->memory_layout = &isp1763_memory_conf;
else
hcd->memory_layout = &isp176x_memory_conf;
ret = isp1760_init_core(isp);
if (ret < 0)
return ret;
if (IS_ENABLED(CONFIG_USB_ISP1760_HCD) && !usb_disabled()) {
ret = isp1760_hcd_register(hcd, mem, irq,
irqflags | IRQF_SHARED, dev);
if (ret < 0)
return ret;
}
if (udc_enabled && IS_ENABLED(CONFIG_USB_ISP1761_UDC)) {
ret = isp1760_udc_register(isp, irq, irqflags);
if (ret < 0) {
isp1760_hcd_unregister(hcd);
return ret;
}
}
dev_set_drvdata(dev, isp);
return 0;
}
void isp1760_unregister(struct device *dev)
{
struct isp1760_device *isp = dev_get_drvdata(dev);
isp1760_udc_unregister(isp);
isp1760_hcd_unregister(&isp->hcd);
}
MODULE_DESCRIPTION("Driver for the ISP1760 USB-controller from NXP");
MODULE_AUTHOR("Sebastian Siewior <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/isp1760/isp1760-core.c |
// SPDX-License-Identifier: GPL-2.0+
/*****************************************************************************/
/*
* devio.c -- User space communication with USB devices.
*
* Copyright (C) 1999-2000 Thomas Sailer ([email protected])
*
* This file implements the usbfs/x/y files, where
* x is the bus number and y the device number.
*
* It allows user space programs/"drivers" to communicate directly
 * with USB devices without an intervening kernel driver.
*
* Revision history
* 22.12.1999 0.1 Initial release (split from proc_usb.c)
* 04.01.2000 0.2 Turned into its own filesystem
* 30.09.2005 0.3 Fix user-triggerable oops in async URB delivery
* (CAN-2005-3055)
*/
/*****************************************************************************/
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/usb.h>
#include <linux/usbdevice_fs.h>
#include <linux/usb/hcd.h> /* for usbcore internals */
#include <linux/usb/quirks.h>
#include <linux/cdev.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/user_namespace.h>
#include <linux/scatterlist.h>
#include <linux/uaccess.h>
#include <linux/dma-mapping.h>
#include <asm/byteorder.h>
#include <linux/moduleparam.h>
#include "usb.h"
#ifdef CONFIG_PM
#define MAYBE_CAP_SUSPEND USBDEVFS_CAP_SUSPEND
#else
#define MAYBE_CAP_SUSPEND 0
#endif
#define USB_MAXBUS 64
#define USB_DEVICE_MAX (USB_MAXBUS * 128)
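/* 64 buses * 128 device addresses per bus = 8192 possible device nodes */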
#define USB_SG_SIZE 16384 /* split-size for large txs */
/* Mutual exclusion for ps->list in resume vs. release and remove */
static DEFINE_MUTEX(usbfs_mutex);
struct usb_dev_state {
struct list_head list; /* state list */
struct usb_device *dev;
struct file *file;
spinlock_t lock; /* protects the async urb lists */
struct list_head async_pending;
struct list_head async_completed;
struct list_head memory_list;
wait_queue_head_t wait; /* wake up if a request completed */
wait_queue_head_t wait_for_resume; /* wake up upon runtime resume */
unsigned int discsignr;
struct pid *disc_pid;
const struct cred *cred;
sigval_t disccontext;
unsigned long ifclaimed;
u32 disabled_bulk_eps;
unsigned long interface_allowed_mask;
int not_yet_resumed;
bool suspend_allowed;
bool privileges_dropped;
};
struct usb_memory {
struct list_head memlist;
int vma_use_count;
int urb_use_count;
u32 size;
void *mem;
dma_addr_t dma_handle;
unsigned long vm_start;
struct usb_dev_state *ps;
};
struct async {
struct list_head asynclist;
struct usb_dev_state *ps;
struct pid *pid;
const struct cred *cred;
unsigned int signr;
unsigned int ifnum;
void __user *userbuffer;
void __user *userurb;
sigval_t userurb_sigval;
struct urb *urb;
struct usb_memory *usbm;
unsigned int mem_usage;
int status;
u8 bulk_addr;
u8 bulk_status;
};
static bool usbfs_snoop;
module_param(usbfs_snoop, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(usbfs_snoop, "true to log all usbfs traffic");
static unsigned usbfs_snoop_max = 65536;
module_param(usbfs_snoop_max, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(usbfs_snoop_max,
"maximum number of bytes to print while snooping");
#define snoop(dev, format, arg...) \
do { \
if (usbfs_snoop) \
dev_info(dev, format, ## arg); \
} while (0)
enum snoop_when {
SUBMIT, COMPLETE
};
#define USB_DEVICE_DEV MKDEV(USB_DEVICE_MAJOR, 0)
/* Limit on the total amount of memory we can allocate for transfers */
static u32 usbfs_memory_mb = 16;
module_param(usbfs_memory_mb, uint, 0644);
MODULE_PARM_DESC(usbfs_memory_mb,
"maximum MB allowed for usbfs buffers (0 = no limit)");
/* Hard limit, necessary to avoid arithmetic overflow */
#define USBFS_XFER_MAX (UINT_MAX / 2 - 1000000)
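/*
 * UINT_MAX / 2 - 1000000 is roughly 2 GiB minus some headroom, so that
 * 32-bit length arithmetic on a transfer (padding, rounding up to packet
 * boundaries, etc.) cannot overflow.
 */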
static DEFINE_SPINLOCK(usbfs_memory_usage_lock);
static u64 usbfs_memory_usage; /* Total memory currently allocated */
/* Check whether it's okay to allocate more memory for a transfer */
static int usbfs_increase_memory_usage(u64 amount)
{
u64 lim, total_mem;
unsigned long flags;
int ret;
lim = READ_ONCE(usbfs_memory_mb);
lim <<= 20;
ret = 0;
spin_lock_irqsave(&usbfs_memory_usage_lock, flags);
total_mem = usbfs_memory_usage + amount;
if (lim > 0 && total_mem > lim)
ret = -ENOMEM;
else
usbfs_memory_usage = total_mem;
spin_unlock_irqrestore(&usbfs_memory_usage_lock, flags);
return ret;
}
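/*
 * Callers pair this with usbfs_decrease_memory_usage() when the buffer is
 * freed; see for example usbdev_mmap() below, which accounts for
 * size + sizeof(struct usb_memory) and releases the same amount in
 * dec_usb_memory_use_count().
 */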
/* Memory for a transfer is being deallocated */
static void usbfs_decrease_memory_usage(u64 amount)
{
unsigned long flags;
spin_lock_irqsave(&usbfs_memory_usage_lock, flags);
if (amount > usbfs_memory_usage)
usbfs_memory_usage = 0;
else
usbfs_memory_usage -= amount;
spin_unlock_irqrestore(&usbfs_memory_usage_lock, flags);
}
static int connected(struct usb_dev_state *ps)
{
return (!list_empty(&ps->list) &&
ps->dev->state != USB_STATE_NOTATTACHED);
}
static void dec_usb_memory_use_count(struct usb_memory *usbm, int *count)
{
struct usb_dev_state *ps = usbm->ps;
struct usb_hcd *hcd = bus_to_hcd(ps->dev->bus);
unsigned long flags;
spin_lock_irqsave(&ps->lock, flags);
--*count;
if (usbm->urb_use_count == 0 && usbm->vma_use_count == 0) {
list_del(&usbm->memlist);
spin_unlock_irqrestore(&ps->lock, flags);
hcd_buffer_free_pages(hcd, usbm->size,
usbm->mem, usbm->dma_handle);
usbfs_decrease_memory_usage(
usbm->size + sizeof(struct usb_memory));
kfree(usbm);
} else {
spin_unlock_irqrestore(&ps->lock, flags);
}
}
static void usbdev_vm_open(struct vm_area_struct *vma)
{
struct usb_memory *usbm = vma->vm_private_data;
unsigned long flags;
spin_lock_irqsave(&usbm->ps->lock, flags);
++usbm->vma_use_count;
spin_unlock_irqrestore(&usbm->ps->lock, flags);
}
static void usbdev_vm_close(struct vm_area_struct *vma)
{
struct usb_memory *usbm = vma->vm_private_data;
dec_usb_memory_use_count(usbm, &usbm->vma_use_count);
}
static const struct vm_operations_struct usbdev_vm_ops = {
.open = usbdev_vm_open,
.close = usbdev_vm_close
};
static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
{
struct usb_memory *usbm = NULL;
struct usb_dev_state *ps = file->private_data;
struct usb_hcd *hcd = bus_to_hcd(ps->dev->bus);
size_t size = vma->vm_end - vma->vm_start;
void *mem;
unsigned long flags;
dma_addr_t dma_handle = DMA_MAPPING_ERROR;
int ret;
ret = usbfs_increase_memory_usage(size + sizeof(struct usb_memory));
if (ret)
goto error;
usbm = kzalloc(sizeof(struct usb_memory), GFP_KERNEL);
if (!usbm) {
ret = -ENOMEM;
goto error_decrease_mem;
}
mem = hcd_buffer_alloc_pages(hcd,
size, GFP_USER | __GFP_NOWARN, &dma_handle);
if (!mem) {
ret = -ENOMEM;
goto error_free_usbm;
}
memset(mem, 0, size);
usbm->mem = mem;
usbm->dma_handle = dma_handle;
usbm->size = size;
usbm->ps = ps;
usbm->vm_start = vma->vm_start;
usbm->vma_use_count = 1;
INIT_LIST_HEAD(&usbm->memlist);
/*
* In DMA-unavailable cases, hcd_buffer_alloc_pages allocates
* normal pages and assigns DMA_MAPPING_ERROR to dma_handle. Check
* whether we are in such cases, and then use remap_pfn_range (or
* dma_mmap_coherent) to map normal (or DMA) pages into the user
* space, respectively.
*/
if (dma_handle == DMA_MAPPING_ERROR) {
if (remap_pfn_range(vma, vma->vm_start,
virt_to_phys(usbm->mem) >> PAGE_SHIFT,
size, vma->vm_page_prot) < 0) {
dec_usb_memory_use_count(usbm, &usbm->vma_use_count);
return -EAGAIN;
}
} else {
if (dma_mmap_coherent(hcd->self.sysdev, vma, mem, dma_handle,
size)) {
dec_usb_memory_use_count(usbm, &usbm->vma_use_count);
return -EAGAIN;
}
}
vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &usbdev_vm_ops;
vma->vm_private_data = usbm;
spin_lock_irqsave(&ps->lock, flags);
list_add_tail(&usbm->memlist, &ps->memory_list);
spin_unlock_irqrestore(&ps->lock, flags);
return 0;
error_free_usbm:
kfree(usbm);
error_decrease_mem:
usbfs_decrease_memory_usage(size + sizeof(struct usb_memory));
error:
return ret;
}
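/*
 * Illustrative userspace pairing for the mmap support above (a sketch, not
 * from this file; the device path is hypothetical):
 *
 *	int fd = open("/dev/bus/usb/001/004", O_RDWR);
 *	void *p = mmap(NULL, 16384, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       fd, 0);
 *
 * Pointing a subsequently submitted URB's buffer into [p, p + 16384) lets
 * find_memory_area() match it to this mapping, so the transfer reuses the
 * preallocated (possibly DMA-coherent) pages instead of a bounce buffer.
 */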
static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
loff_t *ppos)
{
struct usb_dev_state *ps = file->private_data;
struct usb_device *dev = ps->dev;
ssize_t ret = 0;
unsigned len;
loff_t pos;
int i;
pos = *ppos;
usb_lock_device(dev);
if (!connected(ps)) {
ret = -ENODEV;
goto err;
} else if (pos < 0) {
ret = -EINVAL;
goto err;
}
if (pos < sizeof(struct usb_device_descriptor)) {
/* 18 bytes - fits on the stack */
struct usb_device_descriptor temp_desc;
memcpy(&temp_desc, &dev->descriptor, sizeof(dev->descriptor));
le16_to_cpus(&temp_desc.bcdUSB);
le16_to_cpus(&temp_desc.idVendor);
le16_to_cpus(&temp_desc.idProduct);
le16_to_cpus(&temp_desc.bcdDevice);
len = sizeof(struct usb_device_descriptor) - pos;
if (len > nbytes)
len = nbytes;
if (copy_to_user(buf, ((char *)&temp_desc) + pos, len)) {
ret = -EFAULT;
goto err;
}
*ppos += len;
buf += len;
nbytes -= len;
ret += len;
}
pos = sizeof(struct usb_device_descriptor);
for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
struct usb_config_descriptor *config =
(struct usb_config_descriptor *)dev->rawdescriptors[i];
unsigned int length = le16_to_cpu(config->wTotalLength);
if (*ppos < pos + length) {
/* The descriptor may claim to be longer than it
* really is. Here is the actual allocated length. */
unsigned alloclen =
le16_to_cpu(dev->config[i].desc.wTotalLength);
len = length - (*ppos - pos);
if (len > nbytes)
len = nbytes;
/* Simply don't write (skip over) unallocated parts */
if (alloclen > (*ppos - pos)) {
alloclen -= (*ppos - pos);
if (copy_to_user(buf,
dev->rawdescriptors[i] + (*ppos - pos),
min(len, alloclen))) {
ret = -EFAULT;
goto err;
}
}
*ppos += len;
buf += len;
nbytes -= len;
ret += len;
}
pos += length;
}
err:
usb_unlock_device(dev);
return ret;
}
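/*
 * Note on the layout read() exposes: the first 18 bytes are the device
 * descriptor, with its multi-byte fields converted to host byte order
 * above; everything after that is the raw configuration descriptors,
 * concatenated in little-endian bus order, each wTotalLength bytes long.
 */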
/*
* async list handling
*/
static struct async *alloc_async(unsigned int numisoframes)
{
struct async *as;
as = kzalloc(sizeof(struct async), GFP_KERNEL);
if (!as)
return NULL;
as->urb = usb_alloc_urb(numisoframes, GFP_KERNEL);
if (!as->urb) {
kfree(as);
return NULL;
}
return as;
}
static void free_async(struct async *as)
{
int i;
put_pid(as->pid);
if (as->cred)
put_cred(as->cred);
for (i = 0; i < as->urb->num_sgs; i++) {
if (sg_page(&as->urb->sg[i]))
kfree(sg_virt(&as->urb->sg[i]));
}
kfree(as->urb->sg);
if (as->usbm == NULL)
kfree(as->urb->transfer_buffer);
else
dec_usb_memory_use_count(as->usbm, &as->usbm->urb_use_count);
kfree(as->urb->setup_packet);
usb_free_urb(as->urb);
usbfs_decrease_memory_usage(as->mem_usage);
kfree(as);
}
static void async_newpending(struct async *as)
{
struct usb_dev_state *ps = as->ps;
unsigned long flags;
spin_lock_irqsave(&ps->lock, flags);
list_add_tail(&as->asynclist, &ps->async_pending);
spin_unlock_irqrestore(&ps->lock, flags);
}
static void async_removepending(struct async *as)
{
struct usb_dev_state *ps = as->ps;
unsigned long flags;
spin_lock_irqsave(&ps->lock, flags);
list_del_init(&as->asynclist);
spin_unlock_irqrestore(&ps->lock, flags);
}
static struct async *async_getcompleted(struct usb_dev_state *ps)
{
unsigned long flags;
struct async *as = NULL;
spin_lock_irqsave(&ps->lock, flags);
if (!list_empty(&ps->async_completed)) {
as = list_entry(ps->async_completed.next, struct async,
asynclist);
list_del_init(&as->asynclist);
}
spin_unlock_irqrestore(&ps->lock, flags);
return as;
}
static struct async *async_getpending(struct usb_dev_state *ps,
void __user *userurb)
{
struct async *as;
list_for_each_entry(as, &ps->async_pending, asynclist)
if (as->userurb == userurb) {
list_del_init(&as->asynclist);
return as;
}
return NULL;
}
static void snoop_urb(struct usb_device *udev,
void __user *userurb, int pipe, unsigned length,
int timeout_or_status, enum snoop_when when,
unsigned char *data, unsigned data_len)
{
static const char *types[] = {"isoc", "int", "ctrl", "bulk"};
static const char *dirs[] = {"out", "in"};
int ep;
const char *t, *d;
if (!usbfs_snoop)
return;
ep = usb_pipeendpoint(pipe);
t = types[usb_pipetype(pipe)];
d = dirs[!!usb_pipein(pipe)];
if (userurb) { /* Async */
if (when == SUBMIT)
dev_info(&udev->dev, "userurb %px, ep%d %s-%s, "
"length %u\n",
userurb, ep, t, d, length);
else
dev_info(&udev->dev, "userurb %px, ep%d %s-%s, "
"actual_length %u status %d\n",
userurb, ep, t, d, length,
timeout_or_status);
} else {
if (when == SUBMIT)
dev_info(&udev->dev, "ep%d %s-%s, length %u, "
"timeout %d\n",
ep, t, d, length, timeout_or_status);
else
dev_info(&udev->dev, "ep%d %s-%s, actual_length %u, "
"status %d\n",
ep, t, d, length, timeout_or_status);
}
data_len = min(data_len, usbfs_snoop_max);
if (data && data_len > 0) {
print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, 32, 1,
data, data_len, 1);
}
}
static void snoop_urb_data(struct urb *urb, unsigned len)
{
int i, size;
len = min(len, usbfs_snoop_max);
if (!usbfs_snoop || len == 0)
return;
if (urb->num_sgs == 0) {
print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, 32, 1,
urb->transfer_buffer, len, 1);
return;
}
for (i = 0; i < urb->num_sgs && len; i++) {
size = (len > USB_SG_SIZE) ? USB_SG_SIZE : len;
print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, 32, 1,
sg_virt(&urb->sg[i]), size, 1);
len -= size;
}
}
static int copy_urb_data_to_user(u8 __user *userbuffer, struct urb *urb)
{
unsigned i, len, size;
if (urb->number_of_packets > 0) /* Isochronous */
len = urb->transfer_buffer_length;
else /* Non-Isoc */
len = urb->actual_length;
if (urb->num_sgs == 0) {
if (copy_to_user(userbuffer, urb->transfer_buffer, len))
return -EFAULT;
return 0;
}
for (i = 0; i < urb->num_sgs && len; i++) {
size = (len > USB_SG_SIZE) ? USB_SG_SIZE : len;
if (copy_to_user(userbuffer, sg_virt(&urb->sg[i]), size))
return -EFAULT;
userbuffer += size;
len -= size;
}
return 0;
}
#define AS_CONTINUATION 1
#define AS_UNLINK 2
static void cancel_bulk_urbs(struct usb_dev_state *ps, unsigned bulk_addr)
__releases(ps->lock)
__acquires(ps->lock)
{
struct urb *urb;
struct async *as;
/* Mark all the pending URBs that match bulk_addr, up to but not
* including the first one without AS_CONTINUATION. If such an
* URB is encountered then a new transfer has already started so
* the endpoint doesn't need to be disabled; otherwise it does.
*/
list_for_each_entry(as, &ps->async_pending, asynclist) {
if (as->bulk_addr == bulk_addr) {
if (as->bulk_status != AS_CONTINUATION)
goto rescan;
as->bulk_status = AS_UNLINK;
as->bulk_addr = 0;
}
}
ps->disabled_bulk_eps |= (1 << bulk_addr);
/* Now carefully unlink all the marked pending URBs */
rescan:
list_for_each_entry_reverse(as, &ps->async_pending, asynclist) {
if (as->bulk_status == AS_UNLINK) {
as->bulk_status = 0; /* Only once */
urb = as->urb;
usb_get_urb(urb);
spin_unlock(&ps->lock); /* Allow completions */
usb_unlink_urb(urb);
usb_put_urb(urb);
spin_lock(&ps->lock);
goto rescan;
}
}
}
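/*
 * In short: when a bulk URB fails, any queued continuation URBs for the
 * same endpoint are unlinked here, and the endpoint is recorded in
 * disabled_bulk_eps so that later continuations are rejected (-EREMOTEIO
 * in proc_do_submiturb()) until a fresh non-continuation URB re-enables it.
 */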
static void async_completed(struct urb *urb)
{
struct async *as = urb->context;
struct usb_dev_state *ps = as->ps;
struct pid *pid = NULL;
const struct cred *cred = NULL;
unsigned long flags;
sigval_t addr;
int signr, errno;
spin_lock_irqsave(&ps->lock, flags);
list_move_tail(&as->asynclist, &ps->async_completed);
as->status = urb->status;
signr = as->signr;
if (signr) {
errno = as->status;
addr = as->userurb_sigval;
pid = get_pid(as->pid);
cred = get_cred(as->cred);
}
snoop(&urb->dev->dev, "urb complete\n");
snoop_urb(urb->dev, as->userurb, urb->pipe, urb->actual_length,
as->status, COMPLETE, NULL, 0);
if (usb_urb_dir_in(urb))
snoop_urb_data(urb, urb->actual_length);
if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET &&
as->status != -ENOENT)
cancel_bulk_urbs(ps, as->bulk_addr);
wake_up(&ps->wait);
spin_unlock_irqrestore(&ps->lock, flags);
if (signr) {
kill_pid_usb_asyncio(signr, errno, addr, pid, cred);
put_pid(pid);
put_cred(cred);
}
}
static void destroy_async(struct usb_dev_state *ps, struct list_head *list)
{
struct urb *urb;
struct async *as;
unsigned long flags;
spin_lock_irqsave(&ps->lock, flags);
while (!list_empty(list)) {
as = list_last_entry(list, struct async, asynclist);
list_del_init(&as->asynclist);
urb = as->urb;
usb_get_urb(urb);
/* drop the spinlock so the completion handler can run */
spin_unlock_irqrestore(&ps->lock, flags);
usb_kill_urb(urb);
usb_put_urb(urb);
spin_lock_irqsave(&ps->lock, flags);
}
spin_unlock_irqrestore(&ps->lock, flags);
}
static void destroy_async_on_interface(struct usb_dev_state *ps,
unsigned int ifnum)
{
struct list_head *p, *q, hitlist;
unsigned long flags;
INIT_LIST_HEAD(&hitlist);
spin_lock_irqsave(&ps->lock, flags);
list_for_each_safe(p, q, &ps->async_pending)
if (ifnum == list_entry(p, struct async, asynclist)->ifnum)
list_move_tail(p, &hitlist);
spin_unlock_irqrestore(&ps->lock, flags);
destroy_async(ps, &hitlist);
}
static void destroy_all_async(struct usb_dev_state *ps)
{
destroy_async(ps, &ps->async_pending);
}
/*
* interface claims are made only at the request of user level code,
* which can also release them (explicitly or by closing files).
* they're also undone when devices disconnect.
*/
static int driver_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return -ENODEV;
}
static void driver_disconnect(struct usb_interface *intf)
{
struct usb_dev_state *ps = usb_get_intfdata(intf);
unsigned int ifnum = intf->altsetting->desc.bInterfaceNumber;
if (!ps)
return;
/* NOTE: this relies on usbcore having already canceled and
* completed all pending I/O requests for this interface.
*/
if (likely(ifnum < 8*sizeof(ps->ifclaimed)))
clear_bit(ifnum, &ps->ifclaimed);
else
dev_warn(&intf->dev, "interface number %u out of range\n",
ifnum);
usb_set_intfdata(intf, NULL);
/* force async requests to complete */
destroy_async_on_interface(ps, ifnum);
}
/* We don't care about suspend/resume of claimed interfaces */
static int driver_suspend(struct usb_interface *intf, pm_message_t msg)
{
return 0;
}
static int driver_resume(struct usb_interface *intf)
{
return 0;
}
#ifdef CONFIG_PM
/* The following routines apply to the entire device, not interfaces */
void usbfs_notify_suspend(struct usb_device *udev)
{
/* We don't need to handle this */
}
void usbfs_notify_resume(struct usb_device *udev)
{
struct usb_dev_state *ps;
/* Protect against simultaneous remove or release */
mutex_lock(&usbfs_mutex);
list_for_each_entry(ps, &udev->filelist, list) {
WRITE_ONCE(ps->not_yet_resumed, 0);
wake_up_all(&ps->wait_for_resume);
}
mutex_unlock(&usbfs_mutex);
}
#endif
struct usb_driver usbfs_driver = {
.name = "usbfs",
.probe = driver_probe,
.disconnect = driver_disconnect,
.suspend = driver_suspend,
.resume = driver_resume,
.supports_autosuspend = 1,
};
static int claimintf(struct usb_dev_state *ps, unsigned int ifnum)
{
struct usb_device *dev = ps->dev;
struct usb_interface *intf;
int err;
if (ifnum >= 8*sizeof(ps->ifclaimed))
return -EINVAL;
/* already claimed */
if (test_bit(ifnum, &ps->ifclaimed))
return 0;
if (ps->privileges_dropped &&
!test_bit(ifnum, &ps->interface_allowed_mask))
return -EACCES;
intf = usb_ifnum_to_if(dev, ifnum);
if (!intf)
err = -ENOENT;
else {
unsigned int old_suppress;
/* suppress uevents while claiming interface */
old_suppress = dev_get_uevent_suppress(&intf->dev);
dev_set_uevent_suppress(&intf->dev, 1);
err = usb_driver_claim_interface(&usbfs_driver, intf, ps);
dev_set_uevent_suppress(&intf->dev, old_suppress);
}
if (err == 0)
set_bit(ifnum, &ps->ifclaimed);
return err;
}
static int releaseintf(struct usb_dev_state *ps, unsigned int ifnum)
{
struct usb_device *dev;
struct usb_interface *intf;
int err;
err = -EINVAL;
if (ifnum >= 8*sizeof(ps->ifclaimed))
return err;
dev = ps->dev;
intf = usb_ifnum_to_if(dev, ifnum);
if (!intf)
err = -ENOENT;
else if (test_and_clear_bit(ifnum, &ps->ifclaimed)) {
unsigned int old_suppress;
/* suppress uevents while releasing interface */
old_suppress = dev_get_uevent_suppress(&intf->dev);
dev_set_uevent_suppress(&intf->dev, 1);
usb_driver_release_interface(&usbfs_driver, intf);
dev_set_uevent_suppress(&intf->dev, old_suppress);
err = 0;
}
return err;
}
static int checkintf(struct usb_dev_state *ps, unsigned int ifnum)
{
if (ps->dev->state != USB_STATE_CONFIGURED)
return -EHOSTUNREACH;
if (ifnum >= 8*sizeof(ps->ifclaimed))
return -EINVAL;
if (test_bit(ifnum, &ps->ifclaimed))
return 0;
/* if not yet claimed, claim it for the driver */
dev_warn(&ps->dev->dev, "usbfs: process %d (%s) did not claim "
"interface %u before use\n", task_pid_nr(current),
current->comm, ifnum);
return claimintf(ps, ifnum);
}
static int findintfep(struct usb_device *dev, unsigned int ep)
{
unsigned int i, j, e;
struct usb_interface *intf;
struct usb_host_interface *alts;
struct usb_endpoint_descriptor *endpt;
if (ep & ~(USB_DIR_IN|0xf))
return -EINVAL;
if (!dev->actconfig)
return -ESRCH;
for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) {
intf = dev->actconfig->interface[i];
for (j = 0; j < intf->num_altsetting; j++) {
alts = &intf->altsetting[j];
for (e = 0; e < alts->desc.bNumEndpoints; e++) {
endpt = &alts->endpoint[e].desc;
if (endpt->bEndpointAddress == ep)
return alts->desc.bInterfaceNumber;
}
}
}
return -ENOENT;
}
static int check_ctrlrecip(struct usb_dev_state *ps, unsigned int requesttype,
unsigned int request, unsigned int index)
{
int ret = 0;
struct usb_host_interface *alt_setting;
if (ps->dev->state != USB_STATE_UNAUTHENTICATED
&& ps->dev->state != USB_STATE_ADDRESS
&& ps->dev->state != USB_STATE_CONFIGURED)
return -EHOSTUNREACH;
if (USB_TYPE_VENDOR == (USB_TYPE_MASK & requesttype))
return 0;
/*
* check for the special corner case 'get_device_id' in the printer
* class specification, which we always want to allow as it is used
* to query things like ink level, etc.
*/
if (requesttype == 0xa1 && request == 0) {
alt_setting = usb_find_alt_setting(ps->dev->actconfig,
index >> 8, index & 0xff);
if (alt_setting
&& alt_setting->desc.bInterfaceClass == USB_CLASS_PRINTER)
return 0;
}
index &= 0xff;
switch (requesttype & USB_RECIP_MASK) {
case USB_RECIP_ENDPOINT:
if ((index & ~USB_DIR_IN) == 0)
return 0;
ret = findintfep(ps->dev, index);
if (ret < 0) {
/*
* Some not fully compliant Windows apps get the index
* wrong and pass the endpoint number here rather than
* the endpoint address (with the correct direction
* bit). Windows lets this through, so we don't reject
* it either, to keep such apps working under KVM; we
* just warn.
*/
ret = findintfep(ps->dev, index ^ 0x80);
if (ret >= 0)
dev_info(&ps->dev->dev,
"%s: process %i (%s) requesting ep %02x but needs %02x\n",
__func__, task_pid_nr(current),
current->comm, index, index ^ 0x80);
}
if (ret >= 0)
ret = checkintf(ps, ret);
break;
case USB_RECIP_INTERFACE:
ret = checkintf(ps, index);
break;
}
return ret;
}
static struct usb_host_endpoint *ep_to_host_endpoint(struct usb_device *dev,
unsigned char ep)
{
if (ep & USB_ENDPOINT_DIR_MASK)
return dev->ep_in[ep & USB_ENDPOINT_NUMBER_MASK];
else
return dev->ep_out[ep & USB_ENDPOINT_NUMBER_MASK];
}
static int parse_usbdevfs_streams(struct usb_dev_state *ps,
struct usbdevfs_streams __user *streams,
unsigned int *num_streams_ret,
unsigned int *num_eps_ret,
struct usb_host_endpoint ***eps_ret,
struct usb_interface **intf_ret)
{
unsigned int i, num_streams, num_eps;
struct usb_host_endpoint **eps;
struct usb_interface *intf = NULL;
unsigned char ep;
int ifnum, ret;
if (get_user(num_streams, &streams->num_streams) ||
get_user(num_eps, &streams->num_eps))
return -EFAULT;
if (num_eps < 1 || num_eps > USB_MAXENDPOINTS)
return -EINVAL;
/* The XHCI controller allows max 2 ^ 16 streams */
if (num_streams_ret && (num_streams < 2 || num_streams > 65536))
return -EINVAL;
eps = kmalloc_array(num_eps, sizeof(*eps), GFP_KERNEL);
if (!eps)
return -ENOMEM;
for (i = 0; i < num_eps; i++) {
if (get_user(ep, &streams->eps[i])) {
ret = -EFAULT;
goto error;
}
eps[i] = ep_to_host_endpoint(ps->dev, ep);
if (!eps[i]) {
ret = -EINVAL;
goto error;
}
/* usb_alloc/free_streams operate on a usb_interface */
ifnum = findintfep(ps->dev, ep);
if (ifnum < 0) {
ret = ifnum;
goto error;
}
if (i == 0) {
ret = checkintf(ps, ifnum);
if (ret < 0)
goto error;
intf = usb_ifnum_to_if(ps->dev, ifnum);
} else {
/* Verify all eps belong to the same interface */
if (ifnum != intf->altsetting->desc.bInterfaceNumber) {
ret = -EINVAL;
goto error;
}
}
}
if (num_streams_ret)
*num_streams_ret = num_streams;
*num_eps_ret = num_eps;
*eps_ret = eps;
*intf_ret = intf;
return 0;
error:
kfree(eps);
return ret;
}
static struct usb_device *usbdev_lookup_by_devt(dev_t devt)
{
struct device *dev;
dev = bus_find_device_by_devt(&usb_bus_type, devt);
if (!dev)
return NULL;
return to_usb_device(dev);
}
/*
* file operations
*/
static int usbdev_open(struct inode *inode, struct file *file)
{
struct usb_device *dev = NULL;
struct usb_dev_state *ps;
int ret;
ret = -ENOMEM;
ps = kzalloc(sizeof(struct usb_dev_state), GFP_KERNEL);
if (!ps)
goto out_free_ps;
ret = -ENODEV;
/* usbdev device-node */
if (imajor(inode) == USB_DEVICE_MAJOR)
dev = usbdev_lookup_by_devt(inode->i_rdev);
if (!dev)
goto out_free_ps;
usb_lock_device(dev);
if (dev->state == USB_STATE_NOTATTACHED)
goto out_unlock_device;
ret = usb_autoresume_device(dev);
if (ret)
goto out_unlock_device;
ps->dev = dev;
ps->file = file;
ps->interface_allowed_mask = 0xFFFFFFFF; /* 32 bits */
spin_lock_init(&ps->lock);
INIT_LIST_HEAD(&ps->list);
INIT_LIST_HEAD(&ps->async_pending);
INIT_LIST_HEAD(&ps->async_completed);
INIT_LIST_HEAD(&ps->memory_list);
init_waitqueue_head(&ps->wait);
init_waitqueue_head(&ps->wait_for_resume);
ps->disc_pid = get_pid(task_pid(current));
ps->cred = get_current_cred();
smp_wmb();
/* Can't race with resume; the device is already active */
list_add_tail(&ps->list, &dev->filelist);
file->private_data = ps;
usb_unlock_device(dev);
snoop(&dev->dev, "opened by process %d: %s\n", task_pid_nr(current),
current->comm);
return ret;
out_unlock_device:
usb_unlock_device(dev);
usb_put_dev(dev);
out_free_ps:
kfree(ps);
return ret;
}
static int usbdev_release(struct inode *inode, struct file *file)
{
struct usb_dev_state *ps = file->private_data;
struct usb_device *dev = ps->dev;
unsigned int ifnum;
struct async *as;
usb_lock_device(dev);
usb_hub_release_all_ports(dev, ps);
/* Protect against simultaneous resume */
mutex_lock(&usbfs_mutex);
list_del_init(&ps->list);
mutex_unlock(&usbfs_mutex);
for (ifnum = 0; ps->ifclaimed && ifnum < 8*sizeof(ps->ifclaimed);
ifnum++) {
if (test_bit(ifnum, &ps->ifclaimed))
releaseintf(ps, ifnum);
}
destroy_all_async(ps);
if (!ps->suspend_allowed)
usb_autosuspend_device(dev);
usb_unlock_device(dev);
usb_put_dev(dev);
put_pid(ps->disc_pid);
put_cred(ps->cred);
as = async_getcompleted(ps);
while (as) {
free_async(as);
as = async_getcompleted(ps);
}
kfree(ps);
return 0;
}
static void usbfs_blocking_completion(struct urb *urb)
{
complete((struct completion *) urb->context);
}
/*
* Much like usb_start_wait_urb, but returns status separately from
* actual_length and uses a killable wait.
*/
static int usbfs_start_wait_urb(struct urb *urb, int timeout,
unsigned int *actlen)
{
DECLARE_COMPLETION_ONSTACK(ctx);
unsigned long expire;
int rc;
urb->context = &ctx;
urb->complete = usbfs_blocking_completion;
*actlen = 0;
rc = usb_submit_urb(urb, GFP_KERNEL);
if (unlikely(rc))
return rc;
expire = (timeout ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT);
rc = wait_for_completion_killable_timeout(&ctx, expire);
if (rc <= 0) {
usb_kill_urb(urb);
*actlen = urb->actual_length;
if (urb->status != -ENOENT)
; /* Completed before it was killed */
else if (rc < 0)
return -EINTR;
else
return -ETIMEDOUT;
}
*actlen = urb->actual_length;
return urb->status;
}
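/*
 * Semantics worth spelling out: if the killable wait is interrupted or
 * times out, the URB is killed first and *actlen still reports whatever
 * partial data arrived.  A completion that raced with the kill (status
 * other than -ENOENT) is reported as a normal completion rather than
 * as -EINTR/-ETIMEDOUT.
 */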
static int do_proc_control(struct usb_dev_state *ps,
struct usbdevfs_ctrltransfer *ctrl)
{
struct usb_device *dev = ps->dev;
unsigned int tmo;
unsigned char *tbuf;
unsigned int wLength, actlen;
int i, pipe, ret;
struct urb *urb = NULL;
struct usb_ctrlrequest *dr = NULL;
ret = check_ctrlrecip(ps, ctrl->bRequestType, ctrl->bRequest,
ctrl->wIndex);
if (ret)
return ret;
wLength = ctrl->wLength; /* To suppress 64k PAGE_SIZE warning */
if (wLength > PAGE_SIZE)
return -EINVAL;
ret = usbfs_increase_memory_usage(PAGE_SIZE + sizeof(struct urb) +
sizeof(struct usb_ctrlrequest));
if (ret)
return ret;
ret = -ENOMEM;
tbuf = (unsigned char *)__get_free_page(GFP_KERNEL);
if (!tbuf)
goto done;
urb = usb_alloc_urb(0, GFP_NOIO);
if (!urb)
goto done;
dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
if (!dr)
goto done;
dr->bRequestType = ctrl->bRequestType;
dr->bRequest = ctrl->bRequest;
dr->wValue = cpu_to_le16(ctrl->wValue);
dr->wIndex = cpu_to_le16(ctrl->wIndex);
dr->wLength = cpu_to_le16(ctrl->wLength);
tmo = ctrl->timeout;
snoop(&dev->dev, "control urb: bRequestType=%02x "
"bRequest=%02x wValue=%04x "
"wIndex=%04x wLength=%04x\n",
ctrl->bRequestType, ctrl->bRequest, ctrl->wValue,
ctrl->wIndex, ctrl->wLength);
if ((ctrl->bRequestType & USB_DIR_IN) && wLength) {
pipe = usb_rcvctrlpipe(dev, 0);
usb_fill_control_urb(urb, dev, pipe, (unsigned char *) dr, tbuf,
wLength, NULL, NULL);
snoop_urb(dev, NULL, pipe, wLength, tmo, SUBMIT, NULL, 0);
usb_unlock_device(dev);
i = usbfs_start_wait_urb(urb, tmo, &actlen);
/* Linger a bit before the next control message. */
if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG)
msleep(200);
usb_lock_device(dev);
snoop_urb(dev, NULL, pipe, actlen, i, COMPLETE, tbuf, actlen);
if (!i && actlen) {
if (copy_to_user(ctrl->data, tbuf, actlen)) {
ret = -EFAULT;
goto done;
}
}
} else {
if (wLength) {
if (copy_from_user(tbuf, ctrl->data, wLength)) {
ret = -EFAULT;
goto done;
}
}
pipe = usb_sndctrlpipe(dev, 0);
usb_fill_control_urb(urb, dev, pipe, (unsigned char *) dr, tbuf,
wLength, NULL, NULL);
snoop_urb(dev, NULL, pipe, wLength, tmo, SUBMIT, tbuf, wLength);
usb_unlock_device(dev);
i = usbfs_start_wait_urb(urb, tmo, &actlen);
/* Linger a bit before the next control message. */
if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG)
msleep(200);
usb_lock_device(dev);
snoop_urb(dev, NULL, pipe, actlen, i, COMPLETE, NULL, 0);
}
if (i < 0 && i != -EPIPE) {
dev_printk(KERN_DEBUG, &dev->dev, "usbfs: USBDEVFS_CONTROL "
"failed cmd %s rqt %u rq %u len %u ret %d\n",
current->comm, ctrl->bRequestType, ctrl->bRequest,
ctrl->wLength, i);
}
ret = (i < 0 ? i : actlen);
done:
kfree(dr);
usb_free_urb(urb);
free_page((unsigned long) tbuf);
usbfs_decrease_memory_usage(PAGE_SIZE + sizeof(struct urb) +
sizeof(struct usb_ctrlrequest));
return ret;
}
static int proc_control(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_ctrltransfer ctrl;
if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
return -EFAULT;
return do_proc_control(ps, &ctrl);
}
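/*
 * Illustrative userspace invocation of USBDEVFS_CONTROL (a sketch, not
 * from this file): fetch the 18-byte device descriptor over ep0.
 *
 *	unsigned char buf[18];
 *	struct usbdevfs_ctrltransfer ct = {
 *		.bRequestType = 0x80,	// IN | standard | device
 *		.bRequest     = 0x06,	// GET_DESCRIPTOR
 *		.wValue       = 0x0100,	// descriptor type 1, index 0
 *		.wIndex       = 0,
 *		.wLength      = sizeof(buf),
 *		.timeout      = 1000,	// milliseconds
 *		.data         = buf,
 *	};
 *	int n = ioctl(fd, USBDEVFS_CONTROL, &ct);  // n >= 0: bytes moved
 */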
static int do_proc_bulk(struct usb_dev_state *ps,
struct usbdevfs_bulktransfer *bulk)
{
struct usb_device *dev = ps->dev;
unsigned int tmo, len1, len2, pipe;
unsigned char *tbuf;
int i, ret;
struct urb *urb = NULL;
struct usb_host_endpoint *ep;
ret = findintfep(ps->dev, bulk->ep);
if (ret < 0)
return ret;
ret = checkintf(ps, ret);
if (ret)
return ret;
len1 = bulk->len;
/* len1 is unsigned, so only the upper bound needs checking */
if (len1 >= (INT_MAX - sizeof(struct urb)))
return -EINVAL;
if (bulk->ep & USB_DIR_IN)
pipe = usb_rcvbulkpipe(dev, bulk->ep & 0x7f);
else
pipe = usb_sndbulkpipe(dev, bulk->ep & 0x7f);
ep = usb_pipe_endpoint(dev, pipe);
if (!ep || !usb_endpoint_maxp(&ep->desc))
return -EINVAL;
ret = usbfs_increase_memory_usage(len1 + sizeof(struct urb));
if (ret)
return ret;
/*
* len1 can be almost arbitrarily large. Don't WARN if it's
* too big, just fail the request.
*/
ret = -ENOMEM;
tbuf = kmalloc(len1, GFP_KERNEL | __GFP_NOWARN);
if (!tbuf)
goto done;
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb)
goto done;
if ((ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
USB_ENDPOINT_XFER_INT) {
pipe = (pipe & ~(3 << 30)) | (PIPE_INTERRUPT << 30);
usb_fill_int_urb(urb, dev, pipe, tbuf, len1,
NULL, NULL, ep->desc.bInterval);
} else {
usb_fill_bulk_urb(urb, dev, pipe, tbuf, len1, NULL, NULL);
}
tmo = bulk->timeout;
if (bulk->ep & 0x80) {
snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, NULL, 0);
usb_unlock_device(dev);
i = usbfs_start_wait_urb(urb, tmo, &len2);
usb_lock_device(dev);
snoop_urb(dev, NULL, pipe, len2, i, COMPLETE, tbuf, len2);
if (!i && len2) {
if (copy_to_user(bulk->data, tbuf, len2)) {
ret = -EFAULT;
goto done;
}
}
} else {
if (len1) {
if (copy_from_user(tbuf, bulk->data, len1)) {
ret = -EFAULT;
goto done;
}
}
snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, tbuf, len1);
usb_unlock_device(dev);
i = usbfs_start_wait_urb(urb, tmo, &len2);
usb_lock_device(dev);
snoop_urb(dev, NULL, pipe, len2, i, COMPLETE, NULL, 0);
}
ret = (i < 0 ? i : len2);
done:
usb_free_urb(urb);
kfree(tbuf);
usbfs_decrease_memory_usage(len1 + sizeof(struct urb));
return ret;
}
static int proc_bulk(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_bulktransfer bulk;
if (copy_from_user(&bulk, arg, sizeof(bulk)))
return -EFAULT;
return do_proc_bulk(ps, &bulk);
}
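/*
 * Illustrative userspace invocation of USBDEVFS_BULK (a sketch, not from
 * this file; the endpoint address is hypothetical).  The interface owning
 * the endpoint should be claimed first, or checkintf() will auto-claim it
 * and log a warning.
 *
 *	unsigned char buf[512];
 *	struct usbdevfs_bulktransfer bt = {
 *		.ep      = 0x81,	// IN endpoint 1
 *		.len     = sizeof(buf),
 *		.timeout = 1000,	// ms; 0 waits indefinitely
 *		.data    = buf,
 *	};
 *	int n = ioctl(fd, USBDEVFS_BULK, &bt);	// n >= 0: bytes transferred
 */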
static void check_reset_of_active_ep(struct usb_device *udev,
unsigned int epnum, char *ioctl_name)
{
struct usb_host_endpoint **eps;
struct usb_host_endpoint *ep;
eps = (epnum & USB_DIR_IN) ? udev->ep_in : udev->ep_out;
ep = eps[epnum & 0x0f];
if (ep && !list_empty(&ep->urb_list))
dev_warn(&udev->dev, "Process %d (%s) called USBDEVFS_%s for active endpoint 0x%02x\n",
task_pid_nr(current), current->comm,
ioctl_name, epnum);
}
static int proc_resetep(struct usb_dev_state *ps, void __user *arg)
{
unsigned int ep;
int ret;
if (get_user(ep, (unsigned int __user *)arg))
return -EFAULT;
ret = findintfep(ps->dev, ep);
if (ret < 0)
return ret;
ret = checkintf(ps, ret);
if (ret)
return ret;
check_reset_of_active_ep(ps->dev, ep, "RESETEP");
usb_reset_endpoint(ps->dev, ep);
return 0;
}
static int proc_clearhalt(struct usb_dev_state *ps, void __user *arg)
{
unsigned int ep;
int pipe;
int ret;
if (get_user(ep, (unsigned int __user *)arg))
return -EFAULT;
ret = findintfep(ps->dev, ep);
if (ret < 0)
return ret;
ret = checkintf(ps, ret);
if (ret)
return ret;
check_reset_of_active_ep(ps->dev, ep, "CLEAR_HALT");
if (ep & USB_DIR_IN)
pipe = usb_rcvbulkpipe(ps->dev, ep & 0x7f);
else
pipe = usb_sndbulkpipe(ps->dev, ep & 0x7f);
return usb_clear_halt(ps->dev, pipe);
}
static int proc_getdriver(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_getdriver gd;
struct usb_interface *intf;
int ret;
if (copy_from_user(&gd, arg, sizeof(gd)))
return -EFAULT;
intf = usb_ifnum_to_if(ps->dev, gd.interface);
if (!intf || !intf->dev.driver)
ret = -ENODATA;
else {
strscpy(gd.driver, intf->dev.driver->name,
sizeof(gd.driver));
ret = (copy_to_user(arg, &gd, sizeof(gd)) ? -EFAULT : 0);
}
return ret;
}
static int proc_connectinfo(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_connectinfo ci;
memset(&ci, 0, sizeof(ci));
ci.devnum = ps->dev->devnum;
ci.slow = ps->dev->speed == USB_SPEED_LOW;
if (copy_to_user(arg, &ci, sizeof(ci)))
return -EFAULT;
return 0;
}
static int proc_conninfo_ex(struct usb_dev_state *ps,
void __user *arg, size_t size)
{
struct usbdevfs_conninfo_ex ci;
struct usb_device *udev = ps->dev;
if (size < sizeof(ci.size))
return -EINVAL;
memset(&ci, 0, sizeof(ci));
ci.size = sizeof(ci);
ci.busnum = udev->bus->busnum;
ci.devnum = udev->devnum;
ci.speed = udev->speed;
while (udev && udev->portnum != 0) {
if (++ci.num_ports <= ARRAY_SIZE(ci.ports))
ci.ports[ARRAY_SIZE(ci.ports) - ci.num_ports] =
udev->portnum;
udev = udev->parent;
}
if (ci.num_ports < ARRAY_SIZE(ci.ports))
memmove(&ci.ports[0],
&ci.ports[ARRAY_SIZE(ci.ports) - ci.num_ports],
ci.num_ports);
if (copy_to_user(arg, &ci, min(sizeof(ci), size)))
return -EFAULT;
return 0;
}
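/*
 * The ports[] array built above describes the topology path: after the
 * memmove it holds the port numbers from the root hub down to the device,
 * and num_ports may exceed ARRAY_SIZE(ci.ports) if the chain is deeper
 * than the structure can record.
 */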
static int proc_resetdevice(struct usb_dev_state *ps)
{
struct usb_host_config *actconfig = ps->dev->actconfig;
struct usb_interface *interface;
int i, number;
/* Don't allow a device reset if the process has dropped the
* privilege to do such things and any of the interfaces are
* currently claimed.
*/
if (ps->privileges_dropped && actconfig) {
for (i = 0; i < actconfig->desc.bNumInterfaces; ++i) {
interface = actconfig->interface[i];
number = interface->cur_altsetting->desc.bInterfaceNumber;
if (usb_interface_claimed(interface) &&
!test_bit(number, &ps->ifclaimed)) {
dev_warn(&ps->dev->dev,
"usbfs: interface %d claimed by %s while '%s' resets device\n",
number, interface->dev.driver->name, current->comm);
return -EACCES;
}
}
}
return usb_reset_device(ps->dev);
}
static int proc_setintf(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_setinterface setintf;
int ret;
if (copy_from_user(&setintf, arg, sizeof(setintf)))
return -EFAULT;
ret = checkintf(ps, setintf.interface);
if (ret)
return ret;
destroy_async_on_interface(ps, setintf.interface);
return usb_set_interface(ps->dev, setintf.interface,
setintf.altsetting);
}
static int proc_setconfig(struct usb_dev_state *ps, void __user *arg)
{
int u;
int status = 0;
struct usb_host_config *actconfig;
if (get_user(u, (int __user *)arg))
return -EFAULT;
actconfig = ps->dev->actconfig;
/* Don't touch the device if any interfaces are claimed.
* It could interfere with other drivers' operations, and if
* an interface is claimed by usbfs it could easily deadlock.
*/
if (actconfig) {
int i;
for (i = 0; i < actconfig->desc.bNumInterfaces; ++i) {
if (usb_interface_claimed(actconfig->interface[i])) {
dev_warn(&ps->dev->dev,
"usbfs: interface %d claimed by %s "
"while '%s' sets config #%d\n",
actconfig->interface[i]
->cur_altsetting
->desc.bInterfaceNumber,
actconfig->interface[i]
->dev.driver->name,
current->comm, u);
status = -EBUSY;
break;
}
}
}
/* SET_CONFIGURATION is often abused as a "cheap" driver reset,
* so avoid usb_set_configuration()'s kick to sysfs
*/
if (status == 0) {
if (actconfig && actconfig->desc.bConfigurationValue == u)
status = usb_reset_configuration(ps->dev);
else
status = usb_set_configuration(ps->dev, u);
}
return status;
}
static struct usb_memory *
find_memory_area(struct usb_dev_state *ps, const struct usbdevfs_urb *uurb)
{
struct usb_memory *usbm = NULL, *iter;
unsigned long flags;
unsigned long uurb_start = (unsigned long)uurb->buffer;
spin_lock_irqsave(&ps->lock, flags);
list_for_each_entry(iter, &ps->memory_list, memlist) {
if (uurb_start >= iter->vm_start &&
uurb_start < iter->vm_start + iter->size) {
if (uurb->buffer_length > iter->vm_start + iter->size -
uurb_start) {
usbm = ERR_PTR(-EINVAL);
} else {
usbm = iter;
usbm->urb_use_count++;
}
break;
}
}
spin_unlock_irqrestore(&ps->lock, flags);
return usbm;
}
static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb,
struct usbdevfs_iso_packet_desc __user *iso_frame_desc,
void __user *arg, sigval_t userurb_sigval)
{
struct usbdevfs_iso_packet_desc *isopkt = NULL;
struct usb_host_endpoint *ep;
struct async *as = NULL;
struct usb_ctrlrequest *dr = NULL;
unsigned int u, totlen, isofrmlen;
int i, ret, num_sgs = 0, ifnum = -1;
int number_of_packets = 0;
unsigned int stream_id = 0;
void *buf;
bool is_in;
bool allow_short = false;
bool allow_zero = false;
unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK |
USBDEVFS_URB_BULK_CONTINUATION |
USBDEVFS_URB_NO_FSBR |
USBDEVFS_URB_ZERO_PACKET |
USBDEVFS_URB_NO_INTERRUPT;
/* USBDEVFS_URB_ISO_ASAP is a special case */
if (uurb->type == USBDEVFS_URB_TYPE_ISO)
mask |= USBDEVFS_URB_ISO_ASAP;
if (uurb->flags & ~mask)
return -EINVAL;
if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX)
return -EINVAL;
if (uurb->buffer_length > 0 && !uurb->buffer)
return -EINVAL;
if (!(uurb->type == USBDEVFS_URB_TYPE_CONTROL &&
(uurb->endpoint & ~USB_ENDPOINT_DIR_MASK) == 0)) {
ifnum = findintfep(ps->dev, uurb->endpoint);
if (ifnum < 0)
return ifnum;
ret = checkintf(ps, ifnum);
if (ret)
return ret;
}
ep = ep_to_host_endpoint(ps->dev, uurb->endpoint);
if (!ep)
return -ENOENT;
is_in = (uurb->endpoint & USB_ENDPOINT_DIR_MASK) != 0;
u = 0;
switch (uurb->type) {
case USBDEVFS_URB_TYPE_CONTROL:
if (!usb_endpoint_xfer_control(&ep->desc))
return -EINVAL;
/* min 8 byte setup packet */
if (uurb->buffer_length < 8)
return -EINVAL;
dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
if (!dr)
return -ENOMEM;
if (copy_from_user(dr, uurb->buffer, 8)) {
ret = -EFAULT;
goto error;
}
if (uurb->buffer_length < (le16_to_cpu(dr->wLength) + 8)) {
ret = -EINVAL;
goto error;
}
ret = check_ctrlrecip(ps, dr->bRequestType, dr->bRequest,
le16_to_cpu(dr->wIndex));
if (ret)
goto error;
uurb->buffer_length = le16_to_cpu(dr->wLength);
uurb->buffer += 8;
if ((dr->bRequestType & USB_DIR_IN) && uurb->buffer_length) {
is_in = true;
uurb->endpoint |= USB_DIR_IN;
} else {
is_in = false;
uurb->endpoint &= ~USB_DIR_IN;
}
if (is_in)
allow_short = true;
snoop(&ps->dev->dev, "control urb: bRequestType=%02x "
"bRequest=%02x wValue=%04x "
"wIndex=%04x wLength=%04x\n",
dr->bRequestType, dr->bRequest,
__le16_to_cpu(dr->wValue),
__le16_to_cpu(dr->wIndex),
__le16_to_cpu(dr->wLength));
u = sizeof(struct usb_ctrlrequest);
break;
case USBDEVFS_URB_TYPE_BULK:
if (!is_in)
allow_zero = true;
else
allow_short = true;
switch (usb_endpoint_type(&ep->desc)) {
case USB_ENDPOINT_XFER_CONTROL:
case USB_ENDPOINT_XFER_ISOC:
return -EINVAL;
case USB_ENDPOINT_XFER_INT:
/* allow single-shot interrupt transfers */
uurb->type = USBDEVFS_URB_TYPE_INTERRUPT;
goto interrupt_urb;
}
num_sgs = DIV_ROUND_UP(uurb->buffer_length, USB_SG_SIZE);
if (num_sgs == 1 || num_sgs > ps->dev->bus->sg_tablesize)
num_sgs = 0;
if (ep->streams)
stream_id = uurb->stream_id;
break;
case USBDEVFS_URB_TYPE_INTERRUPT:
if (!usb_endpoint_xfer_int(&ep->desc))
return -EINVAL;
interrupt_urb:
if (!is_in)
allow_zero = true;
else
allow_short = true;
break;
case USBDEVFS_URB_TYPE_ISO:
/* arbitrary limit */
if (uurb->number_of_packets < 1 ||
uurb->number_of_packets > 128)
return -EINVAL;
if (!usb_endpoint_xfer_isoc(&ep->desc))
return -EINVAL;
number_of_packets = uurb->number_of_packets;
isofrmlen = sizeof(struct usbdevfs_iso_packet_desc) *
number_of_packets;
isopkt = memdup_user(iso_frame_desc, isofrmlen);
if (IS_ERR(isopkt)) {
ret = PTR_ERR(isopkt);
isopkt = NULL;
goto error;
}
for (totlen = u = 0; u < number_of_packets; u++) {
/*
* Arbitrary limit needed for USB 3.1 Gen2: at
* SuperSpeedPlus, at most 96 DPs of 1024 bytes each
* fit in a service interval, so the maximum packet
* size is 96 * 1024 = 98304 bytes.
*/
if (isopkt[u].length > 98304) {
ret = -EINVAL;
goto error;
}
totlen += isopkt[u].length;
}
u *= sizeof(struct usb_iso_packet_descriptor);
uurb->buffer_length = totlen;
break;
default:
return -EINVAL;
}
if (uurb->buffer_length > 0 &&
!access_ok(uurb->buffer, uurb->buffer_length)) {
ret = -EFAULT;
goto error;
}
as = alloc_async(number_of_packets);
if (!as) {
ret = -ENOMEM;
goto error;
}
as->usbm = find_memory_area(ps, uurb);
if (IS_ERR(as->usbm)) {
ret = PTR_ERR(as->usbm);
as->usbm = NULL;
goto error;
}
/* do not use SG buffers when memory mapped segments
* are in use
*/
if (as->usbm)
num_sgs = 0;
u += sizeof(struct async) + sizeof(struct urb) +
(as->usbm ? 0 : uurb->buffer_length) +
num_sgs * sizeof(struct scatterlist);
ret = usbfs_increase_memory_usage(u);
if (ret)
goto error;
as->mem_usage = u;
if (num_sgs) {
as->urb->sg = kmalloc_array(num_sgs,
sizeof(struct scatterlist),
GFP_KERNEL | __GFP_NOWARN);
if (!as->urb->sg) {
ret = -ENOMEM;
goto error;
}
as->urb->num_sgs = num_sgs;
sg_init_table(as->urb->sg, as->urb->num_sgs);
totlen = uurb->buffer_length;
for (i = 0; i < as->urb->num_sgs; i++) {
u = (totlen > USB_SG_SIZE) ? USB_SG_SIZE : totlen;
buf = kmalloc(u, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
goto error;
}
sg_set_buf(&as->urb->sg[i], buf, u);
if (!is_in) {
if (copy_from_user(buf, uurb->buffer, u)) {
ret = -EFAULT;
goto error;
}
uurb->buffer += u;
}
totlen -= u;
}
} else if (uurb->buffer_length > 0) {
if (as->usbm) {
unsigned long uurb_start = (unsigned long)uurb->buffer;
as->urb->transfer_buffer = as->usbm->mem +
(uurb_start - as->usbm->vm_start);
} else {
as->urb->transfer_buffer = kmalloc(uurb->buffer_length,
GFP_KERNEL | __GFP_NOWARN);
if (!as->urb->transfer_buffer) {
ret = -ENOMEM;
goto error;
}
if (!is_in) {
if (copy_from_user(as->urb->transfer_buffer,
uurb->buffer,
uurb->buffer_length)) {
ret = -EFAULT;
goto error;
}
} else if (uurb->type == USBDEVFS_URB_TYPE_ISO) {
/*
* Isochronous input data may end up being
* discontiguous if some of the packets are
* short. Clear the buffer so that the gaps
* don't leak kernel data to userspace.
*/
memset(as->urb->transfer_buffer, 0,
uurb->buffer_length);
}
}
}
as->urb->dev = ps->dev;
as->urb->pipe = (uurb->type << 30) |
__create_pipe(ps->dev, uurb->endpoint & 0xf) |
(uurb->endpoint & USB_DIR_IN);
/* This tedious sequence is necessary because the URB_* flags
* are internal to the kernel and subject to change, whereas
* the USBDEVFS_URB_* flags are a user API and must not be changed.
*/
u = (is_in ? URB_DIR_IN : URB_DIR_OUT);
if (uurb->flags & USBDEVFS_URB_ISO_ASAP)
u |= URB_ISO_ASAP;
if (allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
u |= URB_SHORT_NOT_OK;
if (allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET)
u |= URB_ZERO_PACKET;
if (uurb->flags & USBDEVFS_URB_NO_INTERRUPT)
u |= URB_NO_INTERRUPT;
as->urb->transfer_flags = u;
if (!allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_SHORT_NOT_OK.\n");
if (!allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET)
dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_ZERO_PACKET.\n");
as->urb->transfer_buffer_length = uurb->buffer_length;
as->urb->setup_packet = (unsigned char *)dr;
dr = NULL;
as->urb->start_frame = uurb->start_frame;
as->urb->number_of_packets = number_of_packets;
as->urb->stream_id = stream_id;
if (ep->desc.bInterval) {
if (uurb->type == USBDEVFS_URB_TYPE_ISO ||
ps->dev->speed == USB_SPEED_HIGH ||
ps->dev->speed >= USB_SPEED_SUPER)
as->urb->interval = 1 <<
min(15, ep->desc.bInterval - 1);
else
as->urb->interval = ep->desc.bInterval;
}
as->urb->context = as;
as->urb->complete = async_completed;
for (totlen = u = 0; u < number_of_packets; u++) {
as->urb->iso_frame_desc[u].offset = totlen;
as->urb->iso_frame_desc[u].length = isopkt[u].length;
totlen += isopkt[u].length;
}
kfree(isopkt);
isopkt = NULL;
as->ps = ps;
as->userurb = arg;
as->userurb_sigval = userurb_sigval;
if (as->usbm) {
unsigned long uurb_start = (unsigned long)uurb->buffer;
as->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
as->urb->transfer_dma = as->usbm->dma_handle +
(uurb_start - as->usbm->vm_start);
} else if (is_in && uurb->buffer_length > 0)
as->userbuffer = uurb->buffer;
as->signr = uurb->signr;
as->ifnum = ifnum;
as->pid = get_pid(task_pid(current));
as->cred = get_current_cred();
snoop_urb(ps->dev, as->userurb, as->urb->pipe,
as->urb->transfer_buffer_length, 0, SUBMIT,
NULL, 0);
if (!is_in)
snoop_urb_data(as->urb, as->urb->transfer_buffer_length);
async_newpending(as);
if (usb_endpoint_xfer_bulk(&ep->desc)) {
spin_lock_irq(&ps->lock);
/* Not exactly the endpoint address; the direction bit is
* shifted to the 0x10 position so that the value will be
* between 0 and 31.
*/
as->bulk_addr = usb_endpoint_num(&ep->desc) |
((ep->desc.bEndpointAddress & USB_ENDPOINT_DIR_MASK)
>> 3);
/* If this bulk URB is the start of a new transfer, re-enable
* the endpoint. Otherwise mark it as a continuation URB.
*/
if (uurb->flags & USBDEVFS_URB_BULK_CONTINUATION)
as->bulk_status = AS_CONTINUATION;
else
ps->disabled_bulk_eps &= ~(1 << as->bulk_addr);
/* Don't accept continuation URBs if the endpoint is
* disabled because of an earlier error.
*/
if (ps->disabled_bulk_eps & (1 << as->bulk_addr))
ret = -EREMOTEIO;
else
ret = usb_submit_urb(as->urb, GFP_ATOMIC);
spin_unlock_irq(&ps->lock);
} else {
ret = usb_submit_urb(as->urb, GFP_KERNEL);
}
if (ret) {
dev_printk(KERN_DEBUG, &ps->dev->dev,
"usbfs: usb_submit_urb returned %d\n", ret);
snoop_urb(ps->dev, as->userurb, as->urb->pipe,
0, ret, COMPLETE, NULL, 0);
async_removepending(as);
goto error;
}
return 0;
error:
kfree(isopkt);
kfree(dr);
if (as)
free_async(as);
return ret;
}
static int proc_submiturb(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_urb uurb;
sigval_t userurb_sigval;
if (copy_from_user(&uurb, arg, sizeof(uurb)))
return -EFAULT;
memset(&userurb_sigval, 0, sizeof(userurb_sigval));
userurb_sigval.sival_ptr = arg;
return proc_do_submiturb(ps, &uurb,
(((struct usbdevfs_urb __user *)arg)->iso_frame_desc),
arg, userurb_sigval);
}
static int proc_unlinkurb(struct usb_dev_state *ps, void __user *arg)
{
struct urb *urb;
struct async *as;
unsigned long flags;
spin_lock_irqsave(&ps->lock, flags);
as = async_getpending(ps, arg);
if (!as) {
spin_unlock_irqrestore(&ps->lock, flags);
return -EINVAL;
}
urb = as->urb;
usb_get_urb(urb);
spin_unlock_irqrestore(&ps->lock, flags);
usb_kill_urb(urb);
usb_put_urb(urb);
return 0;
}
static void compute_isochronous_actual_length(struct urb *urb)
{
unsigned int i;
if (urb->number_of_packets > 0) {
urb->actual_length = 0;
for (i = 0; i < urb->number_of_packets; i++)
urb->actual_length +=
urb->iso_frame_desc[i].actual_length;
}
}
static int processcompl(struct async *as, void __user * __user *arg)
{
struct urb *urb = as->urb;
struct usbdevfs_urb __user *userurb = as->userurb;
void __user *addr = as->userurb;
unsigned int i;
compute_isochronous_actual_length(urb);
if (as->userbuffer && urb->actual_length) {
if (copy_urb_data_to_user(as->userbuffer, urb))
goto err_out;
}
if (put_user(as->status, &userurb->status))
goto err_out;
if (put_user(urb->actual_length, &userurb->actual_length))
goto err_out;
if (put_user(urb->error_count, &userurb->error_count))
goto err_out;
if (usb_endpoint_xfer_isoc(&urb->ep->desc)) {
for (i = 0; i < urb->number_of_packets; i++) {
if (put_user(urb->iso_frame_desc[i].actual_length,
&userurb->iso_frame_desc[i].actual_length))
goto err_out;
if (put_user(urb->iso_frame_desc[i].status,
&userurb->iso_frame_desc[i].status))
goto err_out;
}
}
if (put_user(addr, (void __user * __user *)arg))
return -EFAULT;
return 0;
err_out:
return -EFAULT;
}
static struct async *reap_as(struct usb_dev_state *ps)
{
DECLARE_WAITQUEUE(wait, current);
struct async *as = NULL;
struct usb_device *dev = ps->dev;
add_wait_queue(&ps->wait, &wait);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
as = async_getcompleted(ps);
if (as || !connected(ps))
break;
if (signal_pending(current))
break;
usb_unlock_device(dev);
schedule();
usb_lock_device(dev);
}
remove_wait_queue(&ps->wait, &wait);
set_current_state(TASK_RUNNING);
return as;
}
static int proc_reapurb(struct usb_dev_state *ps, void __user *arg)
{
struct async *as = reap_as(ps);
if (as) {
int retval;
snoop(&ps->dev->dev, "reap %px\n", as->userurb);
retval = processcompl(as, (void __user * __user *)arg);
free_async(as);
return retval;
}
if (signal_pending(current))
return -EINTR;
return -ENODEV;
}
static int proc_reapurbnonblock(struct usb_dev_state *ps, void __user *arg)
{
int retval;
struct async *as;
as = async_getcompleted(ps);
if (as) {
snoop(&ps->dev->dev, "reap %px\n", as->userurb);
retval = processcompl(as, (void __user * __user *)arg);
free_async(as);
} else {
retval = (connected(ps) ? -EAGAIN : -ENODEV);
}
return retval;
}
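/*
 * Illustrative async round trip from userspace (a sketch, not from this
 * file; the endpoint is hypothetical):
 *
 *	struct usbdevfs_urb urb = {
 *		.type          = USBDEVFS_URB_TYPE_BULK,
 *		.endpoint      = 0x81,
 *		.buffer        = buf,
 *		.buffer_length = sizeof(buf),
 *	};
 *	ioctl(fd, USBDEVFS_SUBMITURB, &urb);	// returns immediately
 *
 *	struct usbdevfs_urb *done;
 *	ioctl(fd, USBDEVFS_REAPURB, &done);	// blocks until completion
 *	// done == &urb; done->status and done->actual_length are filled in
 */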
#ifdef CONFIG_COMPAT
static int proc_control_compat(struct usb_dev_state *ps,
struct usbdevfs_ctrltransfer32 __user *p32)
{
struct usbdevfs_ctrltransfer ctrl;
u32 udata;
if (copy_from_user(&ctrl, p32, sizeof(*p32) - sizeof(compat_caddr_t)) ||
get_user(udata, &p32->data))
return -EFAULT;
ctrl.data = compat_ptr(udata);
return do_proc_control(ps, &ctrl);
}
static int proc_bulk_compat(struct usb_dev_state *ps,
struct usbdevfs_bulktransfer32 __user *p32)
{
struct usbdevfs_bulktransfer bulk;
compat_caddr_t addr;
if (get_user(bulk.ep, &p32->ep) ||
get_user(bulk.len, &p32->len) ||
get_user(bulk.timeout, &p32->timeout) ||
get_user(addr, &p32->data))
return -EFAULT;
bulk.data = compat_ptr(addr);
return do_proc_bulk(ps, &bulk);
}
static int proc_disconnectsignal_compat(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_disconnectsignal32 ds;
if (copy_from_user(&ds, arg, sizeof(ds)))
return -EFAULT;
ps->discsignr = ds.signr;
ps->disccontext.sival_int = ds.context;
return 0;
}
static int get_urb32(struct usbdevfs_urb *kurb,
struct usbdevfs_urb32 __user *uurb)
{
struct usbdevfs_urb32 urb32;
if (copy_from_user(&urb32, uurb, sizeof(*uurb)))
return -EFAULT;
kurb->type = urb32.type;
kurb->endpoint = urb32.endpoint;
kurb->status = urb32.status;
kurb->flags = urb32.flags;
kurb->buffer = compat_ptr(urb32.buffer);
kurb->buffer_length = urb32.buffer_length;
kurb->actual_length = urb32.actual_length;
kurb->start_frame = urb32.start_frame;
kurb->number_of_packets = urb32.number_of_packets;
kurb->error_count = urb32.error_count;
kurb->signr = urb32.signr;
kurb->usercontext = compat_ptr(urb32.usercontext);
return 0;
}
static int proc_submiturb_compat(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_urb uurb;
sigval_t userurb_sigval;
if (get_urb32(&uurb, (struct usbdevfs_urb32 __user *)arg))
return -EFAULT;
memset(&userurb_sigval, 0, sizeof(userurb_sigval));
userurb_sigval.sival_int = ptr_to_compat(arg);
return proc_do_submiturb(ps, &uurb,
((struct usbdevfs_urb32 __user *)arg)->iso_frame_desc,
arg, userurb_sigval);
}
static int processcompl_compat(struct async *as, void __user * __user *arg)
{
struct urb *urb = as->urb;
struct usbdevfs_urb32 __user *userurb = as->userurb;
void __user *addr = as->userurb;
unsigned int i;
compute_isochronous_actual_length(urb);
if (as->userbuffer && urb->actual_length) {
if (copy_urb_data_to_user(as->userbuffer, urb))
return -EFAULT;
}
if (put_user(as->status, &userurb->status))
return -EFAULT;
if (put_user(urb->actual_length, &userurb->actual_length))
return -EFAULT;
if (put_user(urb->error_count, &userurb->error_count))
return -EFAULT;
if (usb_endpoint_xfer_isoc(&urb->ep->desc)) {
for (i = 0; i < urb->number_of_packets; i++) {
if (put_user(urb->iso_frame_desc[i].actual_length,
&userurb->iso_frame_desc[i].actual_length))
return -EFAULT;
if (put_user(urb->iso_frame_desc[i].status,
&userurb->iso_frame_desc[i].status))
return -EFAULT;
}
}
if (put_user(ptr_to_compat(addr), (u32 __user *)arg))
return -EFAULT;
return 0;
}
static int proc_reapurb_compat(struct usb_dev_state *ps, void __user *arg)
{
struct async *as = reap_as(ps);
if (as) {
int retval;
snoop(&ps->dev->dev, "reap %px\n", as->userurb);
retval = processcompl_compat(as, (void __user * __user *)arg);
free_async(as);
return retval;
}
if (signal_pending(current))
return -EINTR;
return -ENODEV;
}
static int proc_reapurbnonblock_compat(struct usb_dev_state *ps, void __user *arg)
{
int retval;
struct async *as;
as = async_getcompleted(ps);
if (as) {
snoop(&ps->dev->dev, "reap %px\n", as->userurb);
retval = processcompl_compat(as, (void __user * __user *)arg);
free_async(as);
} else {
retval = (connected(ps) ? -EAGAIN : -ENODEV);
}
return retval;
}
#endif
static int proc_disconnectsignal(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_disconnectsignal ds;
if (copy_from_user(&ds, arg, sizeof(ds)))
return -EFAULT;
ps->discsignr = ds.signr;
ps->disccontext.sival_ptr = ds.context;
return 0;
}
static int proc_claiminterface(struct usb_dev_state *ps, void __user *arg)
{
unsigned int ifnum;
if (get_user(ifnum, (unsigned int __user *)arg))
return -EFAULT;
return claimintf(ps, ifnum);
}
static int proc_releaseinterface(struct usb_dev_state *ps, void __user *arg)
{
unsigned int ifnum;
int ret;
if (get_user(ifnum, (unsigned int __user *)arg))
return -EFAULT;
ret = releaseintf(ps, ifnum);
if (ret < 0)
return ret;
destroy_async_on_interface(ps, ifnum);
return 0;
}
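/*
 * Illustrative claim/release pairing from userspace (a sketch, not from
 * this file):
 *
 *	unsigned int ifnum = 0;
 *	ioctl(fd, USBDEVFS_CLAIMINTERFACE, &ifnum);
 *	... submit transfers on that interface's endpoints ...
 *	ioctl(fd, USBDEVFS_RELEASEINTERFACE, &ifnum);
 */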
static int proc_ioctl(struct usb_dev_state *ps, struct usbdevfs_ioctl *ctl)
{
int size;
void *buf = NULL;
int retval = 0;
struct usb_interface *intf = NULL;
struct usb_driver *driver = NULL;
if (ps->privileges_dropped)
return -EACCES;
if (!connected(ps))
return -ENODEV;
/* alloc buffer */
size = _IOC_SIZE(ctl->ioctl_code);
if (size > 0) {
buf = kmalloc(size, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
if ((_IOC_DIR(ctl->ioctl_code) & _IOC_WRITE)) {
if (copy_from_user(buf, ctl->data, size)) {
kfree(buf);
return -EFAULT;
}
} else {
memset(buf, 0, size);
}
}
if (ps->dev->state != USB_STATE_CONFIGURED)
retval = -EHOSTUNREACH;
else if (!(intf = usb_ifnum_to_if(ps->dev, ctl->ifno)))
retval = -EINVAL;
else switch (ctl->ioctl_code) {
/* disconnect kernel driver from interface */
case USBDEVFS_DISCONNECT:
if (intf->dev.driver) {
driver = to_usb_driver(intf->dev.driver);
dev_dbg(&intf->dev, "disconnect by usbfs\n");
usb_driver_release_interface(driver, intf);
} else
retval = -ENODATA;
break;
/* let kernel drivers try to (re)bind to the interface */
case USBDEVFS_CONNECT:
if (!intf->dev.driver)
retval = device_attach(&intf->dev);
else
retval = -EBUSY;
break;
/* talk directly to the interface's driver */
default:
if (intf->dev.driver)
driver = to_usb_driver(intf->dev.driver);
if (driver == NULL || driver->unlocked_ioctl == NULL) {
retval = -ENOTTY;
} else {
retval = driver->unlocked_ioctl(intf, ctl->ioctl_code, buf);
if (retval == -ENOIOCTLCMD)
retval = -ENOTTY;
}
}
/* cleanup and return */
if (retval >= 0
&& (_IOC_DIR(ctl->ioctl_code) & _IOC_READ) != 0
&& size > 0
&& copy_to_user(ctl->data, buf, size) != 0)
retval = -EFAULT;
kfree(buf);
return retval;
}
static int proc_ioctl_default(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_ioctl ctrl;
if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
return -EFAULT;
return proc_ioctl(ps, &ctrl);
}
#ifdef CONFIG_COMPAT
static int proc_ioctl_compat(struct usb_dev_state *ps, compat_uptr_t arg)
{
struct usbdevfs_ioctl32 ioc32;
struct usbdevfs_ioctl ctrl;
if (copy_from_user(&ioc32, compat_ptr(arg), sizeof(ioc32)))
return -EFAULT;
ctrl.ifno = ioc32.ifno;
ctrl.ioctl_code = ioc32.ioctl_code;
ctrl.data = compat_ptr(ioc32.data);
return proc_ioctl(ps, &ctrl);
}
#endif
static int proc_claim_port(struct usb_dev_state *ps, void __user *arg)
{
unsigned portnum;
int rc;
if (get_user(portnum, (unsigned __user *) arg))
return -EFAULT;
rc = usb_hub_claim_port(ps->dev, portnum, ps);
if (rc == 0)
snoop(&ps->dev->dev, "port %d claimed by process %d: %s\n",
portnum, task_pid_nr(current), current->comm);
return rc;
}
static int proc_release_port(struct usb_dev_state *ps, void __user *arg)
{
unsigned portnum;
if (get_user(portnum, (unsigned __user *) arg))
return -EFAULT;
return usb_hub_release_port(ps->dev, portnum, ps);
}
static int proc_get_capabilities(struct usb_dev_state *ps, void __user *arg)
{
__u32 caps;
caps = USBDEVFS_CAP_ZERO_PACKET | USBDEVFS_CAP_NO_PACKET_SIZE_LIM |
USBDEVFS_CAP_REAP_AFTER_DISCONNECT | USBDEVFS_CAP_MMAP |
USBDEVFS_CAP_DROP_PRIVILEGES |
USBDEVFS_CAP_CONNINFO_EX | MAYBE_CAP_SUSPEND;
if (!ps->dev->bus->no_stop_on_short)
caps |= USBDEVFS_CAP_BULK_CONTINUATION;
if (ps->dev->bus->sg_tablesize)
caps |= USBDEVFS_CAP_BULK_SCATTER_GATHER;
if (put_user(caps, (__u32 __user *)arg))
return -EFAULT;
return 0;
}
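/*
 * Illustrative capability probe from userspace (a sketch, not from this
 * file):
 *
 *	__u32 caps = 0;
 *	if (ioctl(fd, USBDEVFS_GET_CAPABILITIES, &caps) == 0 &&
 *	    (caps & USBDEVFS_CAP_MMAP))
 *		;  // safe to mmap() this fd for zero-copy transfers
 */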
static int proc_disconnect_claim(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_disconnect_claim dc;
struct usb_interface *intf;
if (copy_from_user(&dc, arg, sizeof(dc)))
return -EFAULT;
intf = usb_ifnum_to_if(ps->dev, dc.interface);
if (!intf)
return -EINVAL;
if (intf->dev.driver) {
struct usb_driver *driver = to_usb_driver(intf->dev.driver);
if (ps->privileges_dropped)
return -EACCES;
if ((dc.flags & USBDEVFS_DISCONNECT_CLAIM_IF_DRIVER) &&
strncmp(dc.driver, intf->dev.driver->name,
sizeof(dc.driver)) != 0)
return -EBUSY;
if ((dc.flags & USBDEVFS_DISCONNECT_CLAIM_EXCEPT_DRIVER) &&
strncmp(dc.driver, intf->dev.driver->name,
sizeof(dc.driver)) == 0)
return -EBUSY;
dev_dbg(&intf->dev, "disconnect by usbfs\n");
usb_driver_release_interface(driver, intf);
}
return claimintf(ps, dc.interface);
}
static int proc_alloc_streams(struct usb_dev_state *ps, void __user *arg)
{
unsigned num_streams, num_eps;
struct usb_host_endpoint **eps;
struct usb_interface *intf;
int r;
r = parse_usbdevfs_streams(ps, arg, &num_streams, &num_eps,
&eps, &intf);
if (r)
return r;
destroy_async_on_interface(ps,
intf->altsetting[0].desc.bInterfaceNumber);
r = usb_alloc_streams(intf, eps, num_eps, num_streams, GFP_KERNEL);
kfree(eps);
return r;
}
static int proc_free_streams(struct usb_dev_state *ps, void __user *arg)
{
unsigned num_eps;
struct usb_host_endpoint **eps;
struct usb_interface *intf;
int r;
r = parse_usbdevfs_streams(ps, arg, NULL, &num_eps, &eps, &intf);
if (r)
return r;
destroy_async_on_interface(ps,
intf->altsetting[0].desc.bInterfaceNumber);
r = usb_free_streams(intf, eps, num_eps, GFP_KERNEL);
kfree(eps);
return r;
}
static int proc_drop_privileges(struct usb_dev_state *ps, void __user *arg)
{
u32 data;
if (copy_from_user(&data, arg, sizeof(data)))
return -EFAULT;
/* This is a one-way operation.  Once privileges are dropped,
* you cannot regain them.  You may, however, reissue this
* ioctl to shrink the allowed-interfaces mask further.
*/
ps->interface_allowed_mask &= data;
ps->privileges_dropped = true;
return 0;
}
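/*
 * Illustrative use from userspace (a sketch, not from this file): restrict
 * an fd before handing it to a sandboxed helper, so that it can only ever
 * claim interface 0:
 *
 *	__u32 mask = 1 << 0;
 *	ioctl(fd, USBDEVFS_DROP_PRIVILEGES, &mask);
 */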
static int proc_forbid_suspend(struct usb_dev_state *ps)
{
int ret = 0;
if (ps->suspend_allowed) {
ret = usb_autoresume_device(ps->dev);
if (ret == 0)
ps->suspend_allowed = false;
else if (ret != -ENODEV)
ret = -EIO;
}
return ret;
}
static int proc_allow_suspend(struct usb_dev_state *ps)
{
if (!connected(ps))
return -ENODEV;
WRITE_ONCE(ps->not_yet_resumed, 1);
if (!ps->suspend_allowed) {
usb_autosuspend_device(ps->dev);
ps->suspend_allowed = true;
}
return 0;
}
static int proc_wait_for_resume(struct usb_dev_state *ps)
{
int ret;
usb_unlock_device(ps->dev);
ret = wait_event_interruptible(ps->wait_for_resume,
READ_ONCE(ps->not_yet_resumed) == 0);
usb_lock_device(ps->dev);
if (ret != 0)
return -EINTR;
return proc_forbid_suspend(ps);
}
/*
* NOTE: All requests here that have interface numbers as parameters
* are assuming that somehow the configuration has been prevented from
* changing. But there's no mechanism to ensure that...
*/
static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
void __user *p)
{
struct usb_dev_state *ps = file->private_data;
struct inode *inode = file_inode(file);
struct usb_device *dev = ps->dev;
int ret = -ENOTTY;
if (!(file->f_mode & FMODE_WRITE))
return -EPERM;
usb_lock_device(dev);
/* Reap operations are allowed even after disconnection */
switch (cmd) {
case USBDEVFS_REAPURB:
snoop(&dev->dev, "%s: REAPURB\n", __func__);
ret = proc_reapurb(ps, p);
goto done;
case USBDEVFS_REAPURBNDELAY:
snoop(&dev->dev, "%s: REAPURBNDELAY\n", __func__);
ret = proc_reapurbnonblock(ps, p);
goto done;
#ifdef CONFIG_COMPAT
case USBDEVFS_REAPURB32:
snoop(&dev->dev, "%s: REAPURB32\n", __func__);
ret = proc_reapurb_compat(ps, p);
goto done;
case USBDEVFS_REAPURBNDELAY32:
snoop(&dev->dev, "%s: REAPURBNDELAY32\n", __func__);
ret = proc_reapurbnonblock_compat(ps, p);
goto done;
#endif
}
if (!connected(ps)) {
usb_unlock_device(dev);
return -ENODEV;
}
switch (cmd) {
case USBDEVFS_CONTROL:
snoop(&dev->dev, "%s: CONTROL\n", __func__);
ret = proc_control(ps, p);
if (ret >= 0)
inode->i_mtime = inode_set_ctime_current(inode);
break;
case USBDEVFS_BULK:
snoop(&dev->dev, "%s: BULK\n", __func__);
ret = proc_bulk(ps, p);
if (ret >= 0)
inode->i_mtime = inode_set_ctime_current(inode);
break;
case USBDEVFS_RESETEP:
snoop(&dev->dev, "%s: RESETEP\n", __func__);
ret = proc_resetep(ps, p);
if (ret >= 0)
inode->i_mtime = inode_set_ctime_current(inode);
break;
case USBDEVFS_RESET:
snoop(&dev->dev, "%s: RESET\n", __func__);
ret = proc_resetdevice(ps);
break;
case USBDEVFS_CLEAR_HALT:
snoop(&dev->dev, "%s: CLEAR_HALT\n", __func__);
ret = proc_clearhalt(ps, p);
if (ret >= 0)
inode->i_mtime = inode_set_ctime_current(inode);
break;
case USBDEVFS_GETDRIVER:
snoop(&dev->dev, "%s: GETDRIVER\n", __func__);
ret = proc_getdriver(ps, p);
break;
case USBDEVFS_CONNECTINFO:
snoop(&dev->dev, "%s: CONNECTINFO\n", __func__);
ret = proc_connectinfo(ps, p);
break;
case USBDEVFS_SETINTERFACE:
snoop(&dev->dev, "%s: SETINTERFACE\n", __func__);
ret = proc_setintf(ps, p);
break;
case USBDEVFS_SETCONFIGURATION:
snoop(&dev->dev, "%s: SETCONFIGURATION\n", __func__);
ret = proc_setconfig(ps, p);
break;
case USBDEVFS_SUBMITURB:
snoop(&dev->dev, "%s: SUBMITURB\n", __func__);
ret = proc_submiturb(ps, p);
if (ret >= 0)
inode->i_mtime = inode_set_ctime_current(inode);
break;
#ifdef CONFIG_COMPAT
case USBDEVFS_CONTROL32:
snoop(&dev->dev, "%s: CONTROL32\n", __func__);
ret = proc_control_compat(ps, p);
if (ret >= 0)
inode->i_mtime = inode_set_ctime_current(inode);
break;
case USBDEVFS_BULK32:
snoop(&dev->dev, "%s: BULK32\n", __func__);
ret = proc_bulk_compat(ps, p);
if (ret >= 0)
inode->i_mtime = inode_set_ctime_current(inode);
break;
case USBDEVFS_DISCSIGNAL32:
snoop(&dev->dev, "%s: DISCSIGNAL32\n", __func__);
ret = proc_disconnectsignal_compat(ps, p);
break;
case USBDEVFS_SUBMITURB32:
snoop(&dev->dev, "%s: SUBMITURB32\n", __func__);
ret = proc_submiturb_compat(ps, p);
if (ret >= 0)
inode->i_mtime = inode_set_ctime_current(inode);
break;
case USBDEVFS_IOCTL32:
snoop(&dev->dev, "%s: IOCTL32\n", __func__);
ret = proc_ioctl_compat(ps, ptr_to_compat(p));
break;
#endif
case USBDEVFS_DISCARDURB:
snoop(&dev->dev, "%s: DISCARDURB %px\n", __func__, p);
ret = proc_unlinkurb(ps, p);
break;
case USBDEVFS_DISCSIGNAL:
snoop(&dev->dev, "%s: DISCSIGNAL\n", __func__);
ret = proc_disconnectsignal(ps, p);
break;
case USBDEVFS_CLAIMINTERFACE:
snoop(&dev->dev, "%s: CLAIMINTERFACE\n", __func__);
ret = proc_claiminterface(ps, p);
break;
case USBDEVFS_RELEASEINTERFACE:
snoop(&dev->dev, "%s: RELEASEINTERFACE\n", __func__);
ret = proc_releaseinterface(ps, p);
break;
case USBDEVFS_IOCTL:
snoop(&dev->dev, "%s: IOCTL\n", __func__);
ret = proc_ioctl_default(ps, p);
break;
case USBDEVFS_CLAIM_PORT:
snoop(&dev->dev, "%s: CLAIM_PORT\n", __func__);
ret = proc_claim_port(ps, p);
break;
case USBDEVFS_RELEASE_PORT:
snoop(&dev->dev, "%s: RELEASE_PORT\n", __func__);
ret = proc_release_port(ps, p);
break;
case USBDEVFS_GET_CAPABILITIES:
ret = proc_get_capabilities(ps, p);
break;
case USBDEVFS_DISCONNECT_CLAIM:
ret = proc_disconnect_claim(ps, p);
break;
case USBDEVFS_ALLOC_STREAMS:
ret = proc_alloc_streams(ps, p);
break;
case USBDEVFS_FREE_STREAMS:
ret = proc_free_streams(ps, p);
break;
case USBDEVFS_DROP_PRIVILEGES:
ret = proc_drop_privileges(ps, p);
break;
case USBDEVFS_GET_SPEED:
ret = ps->dev->speed;
break;
case USBDEVFS_FORBID_SUSPEND:
ret = proc_forbid_suspend(ps);
break;
case USBDEVFS_ALLOW_SUSPEND:
ret = proc_allow_suspend(ps);
break;
case USBDEVFS_WAIT_FOR_RESUME:
ret = proc_wait_for_resume(ps);
break;
}
/* Handle variable-length commands */
switch (cmd & ~IOCSIZE_MASK) {
case USBDEVFS_CONNINFO_EX(0):
ret = proc_conninfo_ex(ps, p, _IOC_SIZE(cmd));
break;
}
done:
usb_unlock_device(dev);
if (ret >= 0)
inode->i_atime = current_time(inode);
return ret;
}
static long usbdev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
int ret;
ret = usbdev_do_ioctl(file, cmd, (void __user *)arg);
return ret;
}
/* No kernel lock - fine */
static __poll_t usbdev_poll(struct file *file,
struct poll_table_struct *wait)
{
struct usb_dev_state *ps = file->private_data;
__poll_t mask = 0;
poll_wait(file, &ps->wait, wait);
if (file->f_mode & FMODE_WRITE && !list_empty(&ps->async_completed))
mask |= EPOLLOUT | EPOLLWRNORM;
if (!connected(ps))
mask |= EPOLLHUP;
if (list_empty(&ps->list))
mask |= EPOLLERR;
return mask;
}
const struct file_operations usbdev_file_operations = {
.owner = THIS_MODULE,
.llseek = no_seek_end_llseek,
.read = usbdev_read,
.poll = usbdev_poll,
.unlocked_ioctl = usbdev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.mmap = usbdev_mmap,
.open = usbdev_open,
.release = usbdev_release,
};
static void usbdev_remove(struct usb_device *udev)
{
struct usb_dev_state *ps;
/* Protect against simultaneous resume */
mutex_lock(&usbfs_mutex);
while (!list_empty(&udev->filelist)) {
ps = list_entry(udev->filelist.next, struct usb_dev_state, list);
destroy_all_async(ps);
wake_up_all(&ps->wait);
WRITE_ONCE(ps->not_yet_resumed, 0);
wake_up_all(&ps->wait_for_resume);
list_del_init(&ps->list);
if (ps->discsignr)
kill_pid_usb_asyncio(ps->discsignr, EPIPE, ps->disccontext,
ps->disc_pid, ps->cred);
}
mutex_unlock(&usbfs_mutex);
}
static int usbdev_notify(struct notifier_block *self,
unsigned long action, void *dev)
{
switch (action) {
case USB_DEVICE_ADD:
break;
case USB_DEVICE_REMOVE:
usbdev_remove(dev);
break;
}
return NOTIFY_OK;
}
static struct notifier_block usbdev_nb = {
.notifier_call = usbdev_notify,
};
static struct cdev usb_device_cdev;
int __init usb_devio_init(void)
{
int retval;
retval = register_chrdev_region(USB_DEVICE_DEV, USB_DEVICE_MAX,
"usb_device");
if (retval) {
printk(KERN_ERR "Unable to register minors for usb_device\n");
goto out;
}
cdev_init(&usb_device_cdev, &usbdev_file_operations);
retval = cdev_add(&usb_device_cdev, USB_DEVICE_DEV, USB_DEVICE_MAX);
if (retval) {
printk(KERN_ERR "Unable to get usb_device major %d\n",
USB_DEVICE_MAJOR);
goto error_cdev;
}
usb_register_notify(&usbdev_nb);
out:
return retval;
error_cdev:
unregister_chrdev_region(USB_DEVICE_DEV, USB_DEVICE_MAX);
goto out;
}
void usb_devio_cleanup(void)
{
usb_unregister_notify(&usbdev_nb);
cdev_del(&usb_device_cdev);
unregister_chrdev_region(USB_DEVICE_DEV, USB_DEVICE_MAX);
}
| linux-master | drivers/usb/core/devio.c |
// SPDX-License-Identifier: GPL-2.0
/*
* DMA memory management for framework level HCD code (hc_driver)
*
* This implementation plugs in through generic "usb_bus" level methods,
* and should work with all USB controllers, regardless of bus type.
*
* Released under the GPLv2 only.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/genalloc.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
/*
* DMA-Coherent Buffers
*/
/* FIXME tune these based on pool statistics ... */
static size_t pool_max[HCD_BUFFER_POOLS] = {
32, 128, 512, 2048,
};
void __init usb_init_pool_max(void)
{
/*
* The pool_max values must never be smaller than
* ARCH_DMA_MINALIGN.
*/
if (ARCH_DMA_MINALIGN <= 32)
; /* Original value is okay */
else if (ARCH_DMA_MINALIGN <= 64)
pool_max[0] = 64;
else if (ARCH_DMA_MINALIGN <= 128)
pool_max[0] = 0; /* Don't use this pool */
else
BUILD_BUG(); /* We don't allow this */
}
/* SETUP primitives */
/**
* hcd_buffer_create - initialize buffer pools
* @hcd: the bus whose buffer pools are to be initialized
*
* Context: task context, might sleep
*
* Call this as part of initializing a host controller that uses the dma
* memory allocators. It initializes some pools of dma-coherent memory that
* will be shared by all drivers using that controller.
*
* Call hcd_buffer_destroy() to clean up after using those pools.
*
* Return: 0 if successful. A negative errno value otherwise.
*/
int hcd_buffer_create(struct usb_hcd *hcd)
{
char name[16];
int i, size;
if (hcd->localmem_pool || !hcd_uses_dma(hcd))
return 0;
for (i = 0; i < HCD_BUFFER_POOLS; i++) {
size = pool_max[i];
if (!size)
continue;
snprintf(name, sizeof(name), "buffer-%d", size);
hcd->pool[i] = dma_pool_create(name, hcd->self.sysdev,
size, size, 0);
if (!hcd->pool[i]) {
hcd_buffer_destroy(hcd);
return -ENOMEM;
}
}
return 0;
}
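/*
 * Illustrative sketch only: how creation and teardown pair up.  The usb
 * core itself does this while registering an HCD (usb_add_hcd()); a
 * minimal version of that pairing looks like:
 *
 *	int ret = hcd_buffer_create(hcd);
 *
 *	if (ret)
 *		return ret;
 *	// ... use the pools via hcd_buffer_alloc()/hcd_buffer_free() ...
 *	hcd_buffer_destroy(hcd);
 */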
/**
* hcd_buffer_destroy - deallocate buffer pools
* @hcd: the bus whose buffer pools are to be destroyed
*
* Context: task context, might sleep
*
* This frees the buffer pools created by hcd_buffer_create().
*/
void hcd_buffer_destroy(struct usb_hcd *hcd)
{
int i;
if (!IS_ENABLED(CONFIG_HAS_DMA))
return;
for (i = 0; i < HCD_BUFFER_POOLS; i++) {
dma_pool_destroy(hcd->pool[i]);
hcd->pool[i] = NULL;
}
}
/* sometimes alloc/free could use kmalloc with GFP_DMA, for
* better sharing and to leverage mm/slab.c intelligence.
*/
void *hcd_buffer_alloc(
struct usb_bus *bus,
size_t size,
gfp_t mem_flags,
dma_addr_t *dma
)
{
struct usb_hcd *hcd = bus_to_hcd(bus);
int i;
if (size == 0)
return NULL;
if (hcd->localmem_pool)
return gen_pool_dma_alloc(hcd->localmem_pool, size, dma);
/* some USB hosts just use PIO */
if (!hcd_uses_dma(hcd)) {
*dma = ~(dma_addr_t) 0;
return kmalloc(size, mem_flags);
}
for (i = 0; i < HCD_BUFFER_POOLS; i++) {
if (size <= pool_max[i])
return dma_pool_alloc(hcd->pool[i], mem_flags, dma);
}
return dma_alloc_coherent(hcd->self.sysdev, size, dma, mem_flags);
}
void hcd_buffer_free(
struct usb_bus *bus,
size_t size,
void *addr,
dma_addr_t dma
)
{
struct usb_hcd *hcd = bus_to_hcd(bus);
int i;
if (!addr)
return;
if (hcd->localmem_pool) {
gen_pool_free(hcd->localmem_pool, (unsigned long)addr, size);
return;
}
if (!hcd_uses_dma(hcd)) {
kfree(addr);
return;
}
for (i = 0; i < HCD_BUFFER_POOLS; i++) {
if (size <= pool_max[i]) {
dma_pool_free(hcd->pool[i], addr, dma);
return;
}
}
dma_free_coherent(hcd->self.sysdev, size, addr, dma);
}
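/*
 * Illustrative sketch only: a small transfer buffer round-trip.  Sizes
 * up to pool_max[] are served from a dma_pool; larger requests fall
 * through to dma_alloc_coherent().  'bus' is assumed to be a valid
 * usb_bus pointer here.
 *
 *	dma_addr_t dma;
 *	void *buf = hcd_buffer_alloc(bus, 64, GFP_KERNEL, &dma);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... fill buf, hand dma to the controller ...
 *	hcd_buffer_free(bus, 64, buf, dma);
 */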
void *hcd_buffer_alloc_pages(struct usb_hcd *hcd,
size_t size, gfp_t mem_flags, dma_addr_t *dma)
{
if (size == 0)
return NULL;
if (hcd->localmem_pool)
return gen_pool_dma_alloc_align(hcd->localmem_pool,
size, dma, PAGE_SIZE);
/* some USB hosts just use PIO */
if (!hcd_uses_dma(hcd)) {
*dma = DMA_MAPPING_ERROR;
return (void *)__get_free_pages(mem_flags,
get_order(size));
}
return dma_alloc_coherent(hcd->self.sysdev,
size, dma, mem_flags);
}
void hcd_buffer_free_pages(struct usb_hcd *hcd,
size_t size, void *addr, dma_addr_t dma)
{
if (!addr)
return;
if (hcd->localmem_pool) {
gen_pool_free(hcd->localmem_pool,
(unsigned long)addr, size);
return;
}
if (!hcd_uses_dma(hcd)) {
free_pages((unsigned long)addr, get_order(size));
return;
}
dma_free_coherent(hcd->self.sysdev, size, addr, dma);
}
| linux-master | drivers/usb/core/buffer.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Released under the GPLv2 only.
*/
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/kmsan.h>
#include <linux/usb.h>
#include <linux/wait.h>
#include <linux/usb/hcd.h>
#include <linux/scatterlist.h>
#define to_urb(d) container_of(d, struct urb, kref)
static void urb_destroy(struct kref *kref)
{
struct urb *urb = to_urb(kref);
if (urb->transfer_flags & URB_FREE_BUFFER)
kfree(urb->transfer_buffer);
kfree(urb);
}
/**
* usb_init_urb - initializes a urb so that it can be used by a USB driver
* @urb: pointer to the urb to initialize
*
* Initializes a urb so that the USB subsystem can use it properly.
*
* If a urb is created with a call to usb_alloc_urb() it is not
* necessary to call this function. Only use this if you allocate the
* space for a struct urb on your own. If you call this function, be
* careful when freeing the memory for your urb that it is no longer in
* use by the USB core.
*
* Only use this function if you _really_ understand what you are doing.
*/
void usb_init_urb(struct urb *urb)
{
if (urb) {
memset(urb, 0, sizeof(*urb));
kref_init(&urb->kref);
INIT_LIST_HEAD(&urb->urb_list);
INIT_LIST_HEAD(&urb->anchor_list);
}
}
EXPORT_SYMBOL_GPL(usb_init_urb);
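/*
 * Illustrative sketch only: a hypothetical driver that embeds the urb
 * in its own state instead of using usb_alloc_urb() must call
 * usb_init_urb() itself before first use.
 *
 *	struct my_dev {
 *		struct urb urb;		// statically embedded, not kfree'd by urb_destroy()
 *	};
 *
 *	static void my_dev_init(struct my_dev *md)
 *	{
 *		usb_init_urb(&md->urb);
 *	}
 */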
/**
* usb_alloc_urb - creates a new urb for a USB driver to use
* @iso_packets: number of iso packets for this urb
* @mem_flags: the type of memory to allocate, see kmalloc() for a list of
* valid options for this.
*
* Creates an urb for the USB driver to use, initializes a few internal
* structures, increments the usage counter, and returns a pointer to it.
*
 * If the driver wants to use this urb for interrupt, control, or bulk
* endpoints, pass '0' as the number of iso packets.
*
* The driver must call usb_free_urb() when it is finished with the urb.
*
* Return: A pointer to the new urb, or %NULL if no memory is available.
*/
struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags)
{
struct urb *urb;
urb = kmalloc(struct_size(urb, iso_frame_desc, iso_packets),
mem_flags);
if (!urb)
return NULL;
usb_init_urb(urb);
return urb;
}
EXPORT_SYMBOL_GPL(usb_alloc_urb);
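/*
 * Illustrative sketch only: one urb for a bulk endpoint (zero iso
 * packets) and one for an isochronous endpoint with 8 packets per urb.
 *
 *	struct urb *bulk_urb = usb_alloc_urb(0, GFP_KERNEL);
 *	struct urb *iso_urb = usb_alloc_urb(8, GFP_KERNEL);
 *
 *	if (!bulk_urb || !iso_urb)
 *		goto err_free;	// usb_free_urb() tolerates NULL pointers
 */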
/**
* usb_free_urb - frees the memory used by a urb when all users of it are finished
* @urb: pointer to the urb to free, may be NULL
*
* Must be called when a user of a urb is finished with it. When the last user
* of the urb calls this function, the memory of the urb is freed.
*
* Note: The transfer buffer associated with the urb is not freed unless the
* URB_FREE_BUFFER transfer flag is set.
*/
void usb_free_urb(struct urb *urb)
{
if (urb)
kref_put(&urb->kref, urb_destroy);
}
EXPORT_SYMBOL_GPL(usb_free_urb);
/**
* usb_get_urb - increments the reference count of the urb
* @urb: pointer to the urb to modify, may be NULL
*
* This must be called whenever a urb is transferred from a device driver to a
* host controller driver. This allows proper reference counting to happen
* for urbs.
*
* Return: A pointer to the urb with the incremented reference counter.
*/
struct urb *usb_get_urb(struct urb *urb)
{
if (urb)
kref_get(&urb->kref);
return urb;
}
EXPORT_SYMBOL_GPL(usb_get_urb);
/**
* usb_anchor_urb - anchors an URB while it is processed
* @urb: pointer to the urb to anchor
* @anchor: pointer to the anchor
*
 * This can be called to keep access to URBs which are to be executed
 * without having to track them individually
*/
void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
unsigned long flags;
spin_lock_irqsave(&anchor->lock, flags);
usb_get_urb(urb);
list_add_tail(&urb->anchor_list, &anchor->urb_list);
urb->anchor = anchor;
if (unlikely(anchor->poisoned))
atomic_inc(&urb->reject);
spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_anchor_urb);
static int usb_anchor_check_wakeup(struct usb_anchor *anchor)
{
return atomic_read(&anchor->suspend_wakeups) == 0 &&
list_empty(&anchor->urb_list);
}
/* Callers must hold anchor->lock */
static void __usb_unanchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
urb->anchor = NULL;
list_del(&urb->anchor_list);
usb_put_urb(urb);
if (usb_anchor_check_wakeup(anchor))
wake_up(&anchor->wait);
}
/**
* usb_unanchor_urb - unanchors an URB
 * @urb: pointer to the urb to unanchor
 *
 * Call this to stop the system from keeping track of this URB
*/
void usb_unanchor_urb(struct urb *urb)
{
unsigned long flags;
struct usb_anchor *anchor;
if (!urb)
return;
anchor = urb->anchor;
if (!anchor)
return;
spin_lock_irqsave(&anchor->lock, flags);
/*
* At this point, we could be competing with another thread which
* has the same intention. To protect the urb from being unanchored
* twice, only the winner of the race gets the job.
*/
if (likely(anchor == urb->anchor))
__usb_unanchor_urb(urb, anchor);
spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unanchor_urb);
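/*
 * Illustrative sketch only: the usual anchor pattern.  'my->anchor' is
 * hypothetical driver state set up earlier with init_usb_anchor().
 * Anchoring before submission lets disconnect() kill everything in one
 * call without tracking individual urbs.
 *
 *	usb_anchor_urb(urb, &my->anchor);
 *	ret = usb_submit_urb(urb, GFP_KERNEL);
 *	if (ret)
 *		usb_unanchor_urb(urb);	// submission failed, drop it again
 *
 *	// later, e.g. in disconnect():
 *	usb_kill_anchored_urbs(&my->anchor);
 */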
/*-------------------------------------------------------------------*/
static const int pipetypes[4] = {
PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
};
/**
* usb_pipe_type_check - sanity check of a specific pipe for a usb device
* @dev: struct usb_device to be checked
* @pipe: pipe to check
*
* This performs a light-weight sanity check for the endpoint in the
* given usb device. It returns 0 if the pipe is valid for the specific usb
* device, otherwise a negative error code.
*/
int usb_pipe_type_check(struct usb_device *dev, unsigned int pipe)
{
const struct usb_host_endpoint *ep;
ep = usb_pipe_endpoint(dev, pipe);
if (!ep)
return -EINVAL;
if (usb_pipetype(pipe) != pipetypes[usb_endpoint_type(&ep->desc)])
return -EINVAL;
return 0;
}
EXPORT_SYMBOL_GPL(usb_pipe_type_check);
/**
* usb_urb_ep_type_check - sanity check of endpoint in the given urb
* @urb: urb to be checked
*
* This performs a light-weight sanity check for the endpoint in the
* given urb. It returns 0 if the urb contains a valid endpoint, otherwise
* a negative error code.
*/
int usb_urb_ep_type_check(const struct urb *urb)
{
return usb_pipe_type_check(urb->dev, urb->pipe);
}
EXPORT_SYMBOL_GPL(usb_urb_ep_type_check);
/**
* usb_submit_urb - issue an asynchronous transfer request for an endpoint
* @urb: pointer to the urb describing the request
* @mem_flags: the type of memory to allocate, see kmalloc() for a list
* of valid options for this.
*
* This submits a transfer request, and transfers control of the URB
* describing that request to the USB subsystem. Request completion will
* be indicated later, asynchronously, by calling the completion handler.
* The three types of completion are success, error, and unlink
* (a software-induced fault, also called "request cancellation").
*
* URBs may be submitted in interrupt context.
*
* The caller must have correctly initialized the URB before submitting
* it. Functions such as usb_fill_bulk_urb() and usb_fill_control_urb() are
* available to ensure that most fields are correctly initialized, for
* the particular kind of transfer, although they will not initialize
* any transfer flags.
*
* If the submission is successful, the complete() callback from the URB
* will be called exactly once, when the USB core and Host Controller Driver
* (HCD) are finished with the URB. When the completion function is called,
* control of the URB is returned to the device driver which issued the
* request. The completion handler may then immediately free or reuse that
* URB.
*
* With few exceptions, USB device drivers should never access URB fields
* provided by usbcore or the HCD until its complete() is called.
* The exceptions relate to periodic transfer scheduling. For both
* interrupt and isochronous urbs, as part of successful URB submission
* urb->interval is modified to reflect the actual transfer period used
* (normally some power of two units). And for isochronous urbs,
* urb->start_frame is modified to reflect when the URB's transfers were
* scheduled to start.
*
* Not all isochronous transfer scheduling policies will work, but most
* host controller drivers should easily handle ISO queues going from now
* until 10-200 msec into the future. Drivers should try to keep at
* least one or two msec of data in the queue; many controllers require
* that new transfers start at least 1 msec in the future when they are
* added. If the driver is unable to keep up and the queue empties out,
* the behavior for new submissions is governed by the URB_ISO_ASAP flag.
* If the flag is set, or if the queue is idle, then the URB is always
* assigned to the first available (and not yet expired) slot in the
* endpoint's schedule. If the flag is not set and the queue is active
* then the URB is always assigned to the next slot in the schedule
* following the end of the endpoint's previous URB, even if that slot is
* in the past. When a packet is assigned in this way to a slot that has
* already expired, the packet is not transmitted and the corresponding
* usb_iso_packet_descriptor's status field will return -EXDEV. If this
* would happen to all the packets in the URB, submission fails with a
* -EXDEV error code.
*
* For control endpoints, the synchronous usb_control_msg() call is
* often used (in non-interrupt context) instead of this call.
* That is often used through convenience wrappers, for the requests
* that are standardized in the USB 2.0 specification. For bulk
* endpoints, a synchronous usb_bulk_msg() call is available.
*
* Return:
* 0 on successful submissions. A negative error number otherwise.
*
* Request Queuing:
*
* URBs may be submitted to endpoints before previous ones complete, to
* minimize the impact of interrupt latencies and system overhead on data
* throughput. With that queuing policy, an endpoint's queue would never
* be empty. This is required for continuous isochronous data streams,
* and may also be required for some kinds of interrupt transfers. Such
* queuing also maximizes bandwidth utilization by letting USB controllers
* start work on later requests before driver software has finished the
* completion processing for earlier (successful) requests.
*
* As of Linux 2.6, all USB endpoint transfer queues support depths greater
* than one. This was previously a HCD-specific behavior, except for ISO
* transfers. Non-isochronous endpoint queues are inactive during cleanup
* after faults (transfer errors or cancellation).
*
* Reserved Bandwidth Transfers:
*
* Periodic transfers (interrupt or isochronous) are performed repeatedly,
* using the interval specified in the urb. Submitting the first urb to
* the endpoint reserves the bandwidth necessary to make those transfers.
* If the USB subsystem can't allocate sufficient bandwidth to perform
* the periodic request, submitting such a periodic request should fail.
*
* For devices under xHCI, the bandwidth is reserved at configuration time, or
* when the alt setting is selected. If there is not enough bus bandwidth, the
* configuration/alt setting request will fail. Therefore, submissions to
* periodic endpoints on devices under xHCI should never fail due to bandwidth
* constraints.
*
* Device drivers must explicitly request that repetition, by ensuring that
* some URB is always on the endpoint's queue (except possibly for short
* periods during completion callbacks). When there is no longer an urb
* queued, the endpoint's bandwidth reservation is canceled. This means
* drivers can use their completion handlers to ensure they keep bandwidth
* they need, by reinitializing and resubmitting the just-completed urb
 * until the driver no longer needs that periodic bandwidth.
*
* Memory Flags:
*
* The general rules for how to decide which mem_flags to use
* are the same as for kmalloc. There are four
* different possible values; GFP_KERNEL, GFP_NOFS, GFP_NOIO and
* GFP_ATOMIC.
*
 * GFP_NOFS is never used, as it has not been implemented yet.
*
* GFP_ATOMIC is used when
* (a) you are inside a completion handler, an interrupt, bottom half,
* tasklet or timer, or
* (b) you are holding a spinlock or rwlock (does not apply to
* semaphores), or
 * (c) current->state != TASK_RUNNING, which is the case only after
* you've changed it.
*
* GFP_NOIO is used in the block io path and error handling of storage
* devices.
*
* All other situations use GFP_KERNEL.
*
* Some more specific rules for mem_flags can be inferred, such as
* (1) start_xmit, timeout, and receive methods of network drivers must
* use GFP_ATOMIC (they are called with a spinlock held);
* (2) queuecommand methods of scsi drivers must use GFP_ATOMIC (also
* called with a spinlock held);
* (3) If you use a kernel thread with a network driver you must use
* GFP_NOIO, unless (b) or (c) apply;
* (4) after you have done a down() you can use GFP_KERNEL, unless (b) or (c)
 *     apply or you are in a storage driver's block io path;
* (5) USB probe and disconnect can use GFP_KERNEL unless (b) or (c) apply; and
* (6) changing firmware on a running storage or net device uses
 *     GFP_NOIO, unless (b) or (c) apply.
*
*/
int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
{
int xfertype, max;
struct usb_device *dev;
struct usb_host_endpoint *ep;
int is_out;
unsigned int allowed;
if (!urb || !urb->complete)
return -EINVAL;
if (urb->hcpriv) {
WARN_ONCE(1, "URB %pK submitted while active\n", urb);
return -EBUSY;
}
dev = urb->dev;
if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
return -ENODEV;
/* For now, get the endpoint from the pipe. Eventually drivers
* will be required to set urb->ep directly and we will eliminate
* urb->pipe.
*/
ep = usb_pipe_endpoint(dev, urb->pipe);
if (!ep)
return -ENOENT;
urb->ep = ep;
urb->status = -EINPROGRESS;
urb->actual_length = 0;
/* Lots of sanity checks, so HCDs can rely on clean data
* and don't need to duplicate tests
*/
xfertype = usb_endpoint_type(&ep->desc);
if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
struct usb_ctrlrequest *setup =
(struct usb_ctrlrequest *) urb->setup_packet;
if (!setup)
return -ENOEXEC;
is_out = !(setup->bRequestType & USB_DIR_IN) ||
!setup->wLength;
dev_WARN_ONCE(&dev->dev, (usb_pipeout(urb->pipe) != is_out),
"BOGUS control dir, pipe %x doesn't match bRequestType %x\n",
urb->pipe, setup->bRequestType);
if (le16_to_cpu(setup->wLength) != urb->transfer_buffer_length) {
dev_dbg(&dev->dev, "BOGUS control len %d doesn't match transfer length %d\n",
le16_to_cpu(setup->wLength),
urb->transfer_buffer_length);
return -EBADR;
}
} else {
is_out = usb_endpoint_dir_out(&ep->desc);
}
/* Clear the internal flags and cache the direction for later use */
urb->transfer_flags &= ~(URB_DIR_MASK | URB_DMA_MAP_SINGLE |
URB_DMA_MAP_PAGE | URB_DMA_MAP_SG | URB_MAP_LOCAL |
URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
URB_DMA_SG_COMBINED);
urb->transfer_flags |= (is_out ? URB_DIR_OUT : URB_DIR_IN);
kmsan_handle_urb(urb, is_out);
if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
dev->state < USB_STATE_CONFIGURED)
return -ENODEV;
max = usb_endpoint_maxp(&ep->desc);
if (max <= 0) {
dev_dbg(&dev->dev,
"bogus endpoint ep%d%s in %s (bad maxpacket %d)\n",
usb_endpoint_num(&ep->desc), is_out ? "out" : "in",
__func__, max);
return -EMSGSIZE;
}
/* periodic transfers limit size per frame/uframe,
* but drivers only control those sizes for ISO.
* while we're checking, initialize return status.
*/
if (xfertype == USB_ENDPOINT_XFER_ISOC) {
int n, len;
/* SuperSpeed isoc endpoints have up to 16 bursts of up to
* 3 packets each
*/
if (dev->speed >= USB_SPEED_SUPER) {
int burst = 1 + ep->ss_ep_comp.bMaxBurst;
int mult = USB_SS_MULT(ep->ss_ep_comp.bmAttributes);
max *= burst;
max *= mult;
}
if (dev->speed == USB_SPEED_SUPER_PLUS &&
USB_SS_SSP_ISOC_COMP(ep->ss_ep_comp.bmAttributes)) {
struct usb_ssp_isoc_ep_comp_descriptor *isoc_ep_comp;
isoc_ep_comp = &ep->ssp_isoc_ep_comp;
max = le32_to_cpu(isoc_ep_comp->dwBytesPerInterval);
}
/* "high bandwidth" mode, 1-3 packets/uframe? */
if (dev->speed == USB_SPEED_HIGH)
max *= usb_endpoint_maxp_mult(&ep->desc);
if (urb->number_of_packets <= 0)
return -EINVAL;
for (n = 0; n < urb->number_of_packets; n++) {
len = urb->iso_frame_desc[n].length;
if (len < 0 || len > max)
return -EMSGSIZE;
urb->iso_frame_desc[n].status = -EXDEV;
urb->iso_frame_desc[n].actual_length = 0;
}
} else if (urb->num_sgs && !urb->dev->bus->no_sg_constraint) {
struct scatterlist *sg;
int i;
for_each_sg(urb->sg, sg, urb->num_sgs - 1, i)
if (sg->length % max)
return -EINVAL;
}
/* the I/O buffer must be mapped/unmapped, except when length=0 */
if (urb->transfer_buffer_length > INT_MAX)
return -EMSGSIZE;
/*
* stuff that drivers shouldn't do, but which shouldn't
* cause problems in HCDs if they get it wrong.
*/
/* Check that the pipe's type matches the endpoint's type */
if (usb_pipe_type_check(urb->dev, urb->pipe))
dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n",
usb_pipetype(urb->pipe), pipetypes[xfertype]);
/* Check against a simple/standard policy */
allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT | URB_DIR_MASK |
URB_FREE_BUFFER);
switch (xfertype) {
case USB_ENDPOINT_XFER_BULK:
case USB_ENDPOINT_XFER_INT:
if (is_out)
allowed |= URB_ZERO_PACKET;
fallthrough;
default: /* all non-iso endpoints */
if (!is_out)
allowed |= URB_SHORT_NOT_OK;
break;
case USB_ENDPOINT_XFER_ISOC:
allowed |= URB_ISO_ASAP;
break;
}
allowed &= urb->transfer_flags;
/* warn if submitter gave bogus flags */
if (allowed != urb->transfer_flags)
dev_WARN(&dev->dev, "BOGUS urb flags, %x --> %x\n",
urb->transfer_flags, allowed);
/*
* Force periodic transfer intervals to be legal values that are
* a power of two (so HCDs don't need to).
*
* FIXME want bus->{intr,iso}_sched_horizon values here. Each HC
* supports different values... this uses EHCI/UHCI defaults (and
* EHCI can use smaller non-default values).
*/
switch (xfertype) {
case USB_ENDPOINT_XFER_ISOC:
case USB_ENDPOINT_XFER_INT:
/* too small? */
if (urb->interval <= 0)
return -EINVAL;
/* too big? */
switch (dev->speed) {
case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER: /* units are 125us */
/* Handle up to 2^(16-1) microframes */
if (urb->interval > (1 << 15))
return -EINVAL;
max = 1 << 15;
break;
case USB_SPEED_HIGH: /* units are microframes */
/* NOTE usb handles 2^15 */
if (urb->interval > (1024 * 8))
urb->interval = 1024 * 8;
max = 1024 * 8;
break;
case USB_SPEED_FULL: /* units are frames/msec */
case USB_SPEED_LOW:
if (xfertype == USB_ENDPOINT_XFER_INT) {
if (urb->interval > 255)
return -EINVAL;
/* NOTE ohci only handles up to 32 */
max = 128;
} else {
if (urb->interval > 1024)
urb->interval = 1024;
/* NOTE usb and ohci handle up to 2^15 */
max = 1024;
}
break;
default:
return -EINVAL;
}
/* Round down to a power of 2, no more than max */
urb->interval = min(max, 1 << ilog2(urb->interval));
}
return usb_hcd_submit_urb(urb, mem_flags);
}
EXPORT_SYMBOL_GPL(usb_submit_urb);
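/*
 * Illustrative sketch only (hypothetical driver code; the endpoint
 * number and names are assumptions): filling and submitting a bulk OUT
 * urb from task context.  The completion handler may run in interrupt
 * context and owns the urb again once it is called.
 *
 *	static void my_write_complete(struct urb *urb)
 *	{
 *		if (urb->status)
 *			dev_dbg(&urb->dev->dev, "tx status %d\n", urb->status);
 *	}
 *
 *	usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 2),
 *			  buf, len, my_write_complete, my_ctx);
 *	ret = usb_submit_urb(urb, GFP_KERNEL);	// GFP_ATOMIC in atomic context
 *	if (ret)
 *		dev_err(&udev->dev, "submit failed: %d\n", ret);
 */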
/*-------------------------------------------------------------------*/
/**
* usb_unlink_urb - abort/cancel a transfer request for an endpoint
* @urb: pointer to urb describing a previously submitted request,
* may be NULL
*
* This routine cancels an in-progress request. URBs complete only once
* per submission, and may be canceled only once per submission.
* Successful cancellation means termination of @urb will be expedited
* and the completion handler will be called with a status code
* indicating that the request has been canceled (rather than any other
* code).
*
* Drivers should not call this routine or related routines, such as
* usb_kill_urb() or usb_unlink_anchored_urbs(), after their disconnect
* method has returned. The disconnect function should synchronize with
 * a driver's I/O routines to ensure that all URB-related activity has
* completed before it returns.
*
* This request is asynchronous, however the HCD might call the ->complete()
* callback during unlink. Therefore when drivers call usb_unlink_urb(), they
* must not hold any locks that may be taken by the completion function.
* Success is indicated by returning -EINPROGRESS, at which time the URB will
* probably not yet have been given back to the device driver. When it is
* eventually called, the completion function will see @urb->status ==
* -ECONNRESET.
* Failure is indicated by usb_unlink_urb() returning any other value.
* Unlinking will fail when @urb is not currently "linked" (i.e., it was
* never submitted, or it was unlinked before, or the hardware is already
* finished with it), even if the completion handler has not yet run.
*
* The URB must not be deallocated while this routine is running. In
 * particular, when a driver calls this routine, it must ensure that the
* completion handler cannot deallocate the URB.
*
* Return: -EINPROGRESS on success. See description for other values on
* failure.
*
* Unlinking and Endpoint Queues:
*
* [The behaviors and guarantees described below do not apply to virtual
* root hubs but only to endpoint queues for physical USB devices.]
*
* Host Controller Drivers (HCDs) place all the URBs for a particular
* endpoint in a queue. Normally the queue advances as the controller
* hardware processes each request. But when an URB terminates with an
* error its queue generally stops (see below), at least until that URB's
* completion routine returns. It is guaranteed that a stopped queue
* will not restart until all its unlinked URBs have been fully retired,
* with their completion routines run, even if that's not until some time
* after the original completion handler returns. The same behavior and
* guarantee apply when an URB terminates because it was unlinked.
*
* Bulk and interrupt endpoint queues are guaranteed to stop whenever an
* URB terminates with any sort of error, including -ECONNRESET, -ENOENT,
* and -EREMOTEIO. Control endpoint queues behave the same way except
* that they are not guaranteed to stop for -EREMOTEIO errors. Queues
* for isochronous endpoints are treated differently, because they must
* advance at fixed rates. Such queues do not stop when an URB
* encounters an error or is unlinked. An unlinked isochronous URB may
* leave a gap in the stream of packets; it is undefined whether such
* gaps can be filled in.
*
* Note that early termination of an URB because a short packet was
* received will generate a -EREMOTEIO error if and only if the
* URB_SHORT_NOT_OK flag is set. By setting this flag, USB device
* drivers can build deep queues for large or complex bulk transfers
* and clean them up reliably after any sort of aborted transfer by
* unlinking all pending URBs at the first fault.
*
* When a control URB terminates with an error other than -EREMOTEIO, it
* is quite likely that the status stage of the transfer will not take
* place.
*/
int usb_unlink_urb(struct urb *urb)
{
if (!urb)
return -EINVAL;
if (!urb->dev)
return -ENODEV;
if (!urb->ep)
return -EIDRM;
return usb_hcd_unlink_urb(urb, -ECONNRESET);
}
EXPORT_SYMBOL_GPL(usb_unlink_urb);
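/*
 * Illustrative sketch only: asynchronous cancellation, e.g. from a
 * timer callback where usb_kill_urb() must not be used.  -EINPROGRESS
 * means the unlink was started; the completion handler will eventually
 * see urb->status == -ECONNRESET.
 *
 *	int ret = usb_unlink_urb(my->urb);
 *
 *	if (ret != -EINPROGRESS)
 *		dev_dbg(dev, "urb was not linked: %d\n", ret);
 */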
/**
* usb_kill_urb - cancel a transfer request and wait for it to finish
* @urb: pointer to URB describing a previously submitted request,
* may be NULL
*
* This routine cancels an in-progress request. It is guaranteed that
* upon return all completion handlers will have finished and the URB
* will be totally idle and available for reuse. These features make
* this an ideal way to stop I/O in a disconnect() callback or close()
* function. If the request has not already finished or been unlinked
* the completion handler will see urb->status == -ENOENT.
*
* While the routine is running, attempts to resubmit the URB will fail
* with error -EPERM. Thus even if the URB's completion handler always
* tries to resubmit, it will not succeed and the URB will become idle.
*
* The URB must not be deallocated while this routine is running. In
 * particular, when a driver calls this routine, it must ensure that the
* completion handler cannot deallocate the URB.
*
* This routine may not be used in an interrupt context (such as a bottom
* half or a completion handler), or when holding a spinlock, or in other
* situations where the caller can't schedule().
*
* This routine should not be called by a driver after its disconnect
* method has returned.
*/
void usb_kill_urb(struct urb *urb)
{
might_sleep();
if (!(urb && urb->dev && urb->ep))
return;
atomic_inc(&urb->reject);
/*
* Order the write of urb->reject above before the read
* of urb->use_count below. Pairs with the barriers in
* __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
*/
smp_mb__after_atomic();
usb_hcd_unlink_urb(urb, -ENOENT);
wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_kill_urb);
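/*
 * Illustrative sketch only: the classic disconnect() teardown.  Once
 * usb_kill_urb() returns, the completion handler has finished, so the
 * urb may be freed safely.  Task context only (this may sleep).
 * 'struct my_dev' and its urb field are hypothetical.
 *
 *	static void my_disconnect(struct usb_interface *intf)
 *	{
 *		struct my_dev *md = usb_get_intfdata(intf);
 *
 *		usb_kill_urb(md->int_urb);
 *		usb_free_urb(md->int_urb);
 *	}
 */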
/**
* usb_poison_urb - reliably kill a transfer and prevent further use of an URB
* @urb: pointer to URB describing a previously submitted request,
* may be NULL
*
* This routine cancels an in-progress request. It is guaranteed that
* upon return all completion handlers will have finished and the URB
* will be totally idle and cannot be reused. These features make
* this an ideal way to stop I/O in a disconnect() callback.
* If the request has not already finished or been unlinked
* the completion handler will see urb->status == -ENOENT.
*
* After and while the routine runs, attempts to resubmit the URB will fail
* with error -EPERM. Thus even if the URB's completion handler always
* tries to resubmit, it will not succeed and the URB will become idle.
*
* The URB must not be deallocated while this routine is running. In
 * particular, when a driver calls this routine, it must ensure that the
* completion handler cannot deallocate the URB.
*
* This routine may not be used in an interrupt context (such as a bottom
* half or a completion handler), or when holding a spinlock, or in other
* situations where the caller can't schedule().
*
* This routine should not be called by a driver after its disconnect
* method has returned.
*/
void usb_poison_urb(struct urb *urb)
{
might_sleep();
if (!urb)
return;
atomic_inc(&urb->reject);
/*
* Order the write of urb->reject above before the read
* of urb->use_count below. Pairs with the barriers in
* __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
*/
smp_mb__after_atomic();
if (!urb->dev || !urb->ep)
return;
usb_hcd_unlink_urb(urb, -ENOENT);
wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
}
EXPORT_SYMBOL_GPL(usb_poison_urb);
void usb_unpoison_urb(struct urb *urb)
{
if (!urb)
return;
atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_unpoison_urb);
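/*
 * Illustrative sketch only: poison/unpoison pairing across a device
 * reset, so no completion handler can resubmit while the reset is in
 * progress.  'struct my_dev' and its urb field are hypothetical.
 *
 *	static int my_pre_reset(struct usb_interface *intf)
 *	{
 *		struct my_dev *md = usb_get_intfdata(intf);
 *
 *		usb_poison_urb(md->urb);	// waits for completion to finish
 *		return 0;
 *	}
 *
 *	static int my_post_reset(struct usb_interface *intf)
 *	{
 *		struct my_dev *md = usb_get_intfdata(intf);
 *
 *		usb_unpoison_urb(md->urb);	// resubmission allowed again
 *		return 0;
 *	}
 */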
/**
* usb_block_urb - reliably prevent further use of an URB
* @urb: pointer to URB to be blocked, may be NULL
*
* After the routine has run, attempts to resubmit the URB will fail
* with error -EPERM. Thus even if the URB's completion handler always
* tries to resubmit, it will not succeed and the URB will become idle.
*
* The URB must not be deallocated while this routine is running. In
 * particular, when a driver calls this routine, it must ensure that the
* completion handler cannot deallocate the URB.
*/
void usb_block_urb(struct urb *urb)
{
if (!urb)
return;
atomic_inc(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_block_urb);
/**
* usb_kill_anchored_urbs - kill all URBs associated with an anchor
* @anchor: anchor the requests are bound to
*
* This kills all outstanding URBs starting from the back of the queue,
 * with the guarantee that no completion callbacks will take place from the
* anchor after this function returns.
*
* This routine should not be called by a driver after its disconnect
* method has returned.
*/
void usb_kill_anchored_urbs(struct usb_anchor *anchor)
{
struct urb *victim;
int surely_empty;
do {
spin_lock_irq(&anchor->lock);
while (!list_empty(&anchor->urb_list)) {
victim = list_entry(anchor->urb_list.prev,
struct urb, anchor_list);
/* make sure the URB isn't freed before we kill it */
usb_get_urb(victim);
spin_unlock_irq(&anchor->lock);
/* this will unanchor the URB */
usb_kill_urb(victim);
usb_put_urb(victim);
spin_lock_irq(&anchor->lock);
}
surely_empty = usb_anchor_check_wakeup(anchor);
spin_unlock_irq(&anchor->lock);
cpu_relax();
} while (!surely_empty);
}
EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
/**
* usb_poison_anchored_urbs - cease all traffic from an anchor
* @anchor: anchor the requests are bound to
*
 * This allows all outstanding URBs to be poisoned, starting
 * from the back of the queue. Newly added URBs will also be
 * poisoned.
*
* This routine should not be called by a driver after its disconnect
* method has returned.
*/
void usb_poison_anchored_urbs(struct usb_anchor *anchor)
{
struct urb *victim;
int surely_empty;
do {
spin_lock_irq(&anchor->lock);
anchor->poisoned = 1;
while (!list_empty(&anchor->urb_list)) {
victim = list_entry(anchor->urb_list.prev,
struct urb, anchor_list);
/* make sure the URB isn't freed before we kill it */
usb_get_urb(victim);
spin_unlock_irq(&anchor->lock);
/* this will unanchor the URB */
usb_poison_urb(victim);
usb_put_urb(victim);
spin_lock_irq(&anchor->lock);
}
surely_empty = usb_anchor_check_wakeup(anchor);
spin_unlock_irq(&anchor->lock);
cpu_relax();
} while (!surely_empty);
}
EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);
/**
* usb_unpoison_anchored_urbs - let an anchor be used successfully again
* @anchor: anchor the requests are bound to
*
 * Reverses the effect of usb_poison_anchored_urbs().
 * The anchor can be used normally after this function returns.
*/
void usb_unpoison_anchored_urbs(struct usb_anchor *anchor)
{
unsigned long flags;
struct urb *lazarus;
spin_lock_irqsave(&anchor->lock, flags);
list_for_each_entry(lazarus, &anchor->urb_list, anchor_list) {
usb_unpoison_urb(lazarus);
}
anchor->poisoned = 0;
spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unpoison_anchored_urbs);
/**
* usb_unlink_anchored_urbs - asynchronously cancel transfer requests en masse
* @anchor: anchor the requests are bound to
*
 * This allows all outstanding URBs to be unlinked, starting
* from the back of the queue. This function is asynchronous.
* The unlinking is just triggered. It may happen after this
* function has returned.
*
* This routine should not be called by a driver after its disconnect
* method has returned.
*/
void usb_unlink_anchored_urbs(struct usb_anchor *anchor)
{
struct urb *victim;
while ((victim = usb_get_from_anchor(anchor)) != NULL) {
usb_unlink_urb(victim);
usb_put_urb(victim);
}
}
EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs);
/**
* usb_anchor_suspend_wakeups
* @anchor: the anchor you want to suspend wakeups on
*
* Call this to stop the last urb being unanchored from waking up any
* usb_wait_anchor_empty_timeout waiters. This is used in the hcd urb give-
* back path to delay waking up until after the completion handler has run.
*/
void usb_anchor_suspend_wakeups(struct usb_anchor *anchor)
{
if (anchor)
atomic_inc(&anchor->suspend_wakeups);
}
EXPORT_SYMBOL_GPL(usb_anchor_suspend_wakeups);
/**
* usb_anchor_resume_wakeups
* @anchor: the anchor you want to resume wakeups on
*
* Allow usb_wait_anchor_empty_timeout waiters to be woken up again, and
* wake up any current waiters if the anchor is empty.
*/
void usb_anchor_resume_wakeups(struct usb_anchor *anchor)
{
if (!anchor)
return;
atomic_dec(&anchor->suspend_wakeups);
if (usb_anchor_check_wakeup(anchor))
wake_up(&anchor->wait);
}
EXPORT_SYMBOL_GPL(usb_anchor_resume_wakeups);
/**
* usb_wait_anchor_empty_timeout - wait for an anchor to be unused
* @anchor: the anchor you want to become unused
* @timeout: how long you are willing to wait in milliseconds
*
 * Call this if you want to be sure all of an anchor's
 * URBs have finished.
*
* Return: Non-zero if the anchor became unused. Zero on timeout.
*/
int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
unsigned int timeout)
{
return wait_event_timeout(anchor->wait,
usb_anchor_check_wakeup(anchor),
msecs_to_jiffies(timeout));
}
EXPORT_SYMBOL_GPL(usb_wait_anchor_empty_timeout);
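/*
 * Illustrative sketch only: graceful-then-forced shutdown of anchored
 * traffic.  Trigger asynchronous unlinks, give the hardware up to one
 * second to retire everything, then kill whatever is left.  'my->anchor'
 * is hypothetical driver state.
 *
 *	usb_unlink_anchored_urbs(&my->anchor);
 *	if (!usb_wait_anchor_empty_timeout(&my->anchor, 1000))
 *		usb_kill_anchored_urbs(&my->anchor);
 */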
/**
* usb_get_from_anchor - get an anchor's oldest urb
* @anchor: the anchor whose urb you want
*
 * This will take the oldest urb from an anchor,
 * unanchor it, and return it.
*
* Return: The oldest urb from @anchor, or %NULL if @anchor has no
* urbs associated with it.
*/
struct urb *usb_get_from_anchor(struct usb_anchor *anchor)
{
struct urb *victim;
unsigned long flags;
spin_lock_irqsave(&anchor->lock, flags);
if (!list_empty(&anchor->urb_list)) {
victim = list_entry(anchor->urb_list.next, struct urb,
anchor_list);
usb_get_urb(victim);
__usb_unanchor_urb(victim, anchor);
} else {
victim = NULL;
}
spin_unlock_irqrestore(&anchor->lock, flags);
return victim;
}
EXPORT_SYMBOL_GPL(usb_get_from_anchor);
/**
* usb_scuttle_anchored_urbs - unanchor all an anchor's urbs
* @anchor: the anchor whose urbs you want to unanchor
*
 * Use this to get rid of all of an anchor's urbs.
*/
void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
{
struct urb *victim;
unsigned long flags;
int surely_empty;
do {
spin_lock_irqsave(&anchor->lock, flags);
while (!list_empty(&anchor->urb_list)) {
victim = list_entry(anchor->urb_list.prev,
struct urb, anchor_list);
__usb_unanchor_urb(victim, anchor);
}
surely_empty = usb_anchor_check_wakeup(anchor);
spin_unlock_irqrestore(&anchor->lock, flags);
cpu_relax();
} while (!surely_empty);
}
EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);
/**
* usb_anchor_empty - is an anchor empty
* @anchor: the anchor you want to query
*
* Return: 1 if the anchor has no urbs associated with it.
*/
int usb_anchor_empty(struct usb_anchor *anchor)
{
return list_empty(&anchor->urb_list);
}
EXPORT_SYMBOL_GPL(usb_anchor_empty);
| linux-master | drivers/usb/core/urb.c |
// SPDX-License-Identifier: GPL-2.0
/*
* drivers/usb/core/sysfs.c
*
* (C) Copyright 2002 David Brownell
* (C) Copyright 2002,2004 Greg Kroah-Hartman
* (C) Copyright 2002,2004 IBM Corp.
*
* All of the sysfs file attributes for usb devices and interfaces.
*
* Released under the GPLv2 only.
*/
#include <linux/kernel.h>
#include <linux/kstrtox.h>
#include <linux/string.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/quirks.h>
#include <linux/of.h>
#include "usb.h"
/* Active configuration fields */
#define usb_actconfig_show(field, format_string) \
static ssize_t field##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct usb_device *udev; \
struct usb_host_config *actconfig; \
ssize_t rc; \
\
udev = to_usb_device(dev); \
rc = usb_lock_device_interruptible(udev); \
if (rc < 0) \
return -EINTR; \
actconfig = udev->actconfig; \
if (actconfig) \
rc = sysfs_emit(buf, format_string, \
actconfig->desc.field); \
usb_unlock_device(udev); \
return rc; \
}
#define usb_actconfig_attr(field, format_string) \
usb_actconfig_show(field, format_string) \
static DEVICE_ATTR_RO(field)
usb_actconfig_attr(bNumInterfaces, "%2d\n");
usb_actconfig_attr(bmAttributes, "%2x\n");
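/*
 * For reference, a sketch of what the macros above generate (not an
 * extra definition): usb_actconfig_attr(bNumInterfaces, "%2d\n")
 * produces a show routine plus a read-only attribute, roughly:
 *
 *	static ssize_t bNumInterfaces_show(struct device *dev,
 *			struct device_attribute *attr, char *buf)
 *	{
 *		// lock the device, look up udev->actconfig, then:
 *		return sysfs_emit(buf, "%2d\n",
 *				  actconfig->desc.bNumInterfaces);
 *	}
 *	static DEVICE_ATTR_RO(bNumInterfaces);
 */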
static ssize_t bMaxPower_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_device *udev;
struct usb_host_config *actconfig;
ssize_t rc;
udev = to_usb_device(dev);
rc = usb_lock_device_interruptible(udev);
if (rc < 0)
return -EINTR;
actconfig = udev->actconfig;
if (actconfig)
rc = sysfs_emit(buf, "%dmA\n", usb_get_max_power(udev, actconfig));
usb_unlock_device(udev);
return rc;
}
static DEVICE_ATTR_RO(bMaxPower);
static ssize_t configuration_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_device *udev;
struct usb_host_config *actconfig;
ssize_t rc;
udev = to_usb_device(dev);
rc = usb_lock_device_interruptible(udev);
if (rc < 0)
return -EINTR;
actconfig = udev->actconfig;
if (actconfig && actconfig->string)
rc = sysfs_emit(buf, "%s\n", actconfig->string);
usb_unlock_device(udev);
return rc;
}
static DEVICE_ATTR_RO(configuration);
/* configuration value is always present, and r/w */
usb_actconfig_show(bConfigurationValue, "%u\n");
static ssize_t bConfigurationValue_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
int config, value, rc;
if (sscanf(buf, "%d", &config) != 1 || config < -1 || config > 255)
return -EINVAL;
rc = usb_lock_device_interruptible(udev);
if (rc < 0)
return -EINTR;
value = usb_set_configuration(udev, config);
usb_unlock_device(udev);
return (value < 0) ? value : count;
}
static DEVICE_ATTR_IGNORE_LOCKDEP(bConfigurationValue, S_IRUGO | S_IWUSR,
bConfigurationValue_show, bConfigurationValue_store);
#ifdef CONFIG_OF
static ssize_t devspec_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct device_node *of_node = dev->of_node;
return sysfs_emit(buf, "%pOF\n", of_node);
}
static DEVICE_ATTR_RO(devspec);
#endif
/* String fields */
#define usb_string_attr(name) \
static ssize_t name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct usb_device *udev; \
int retval; \
\
udev = to_usb_device(dev); \
retval = usb_lock_device_interruptible(udev); \
if (retval < 0) \
return -EINTR; \
retval = sysfs_emit(buf, "%s\n", udev->name); \
usb_unlock_device(udev); \
return retval; \
} \
static DEVICE_ATTR_RO(name)
usb_string_attr(product);
usb_string_attr(manufacturer);
usb_string_attr(serial);
static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_device *udev;
char *speed;
udev = to_usb_device(dev);
switch (udev->speed) {
case USB_SPEED_LOW:
speed = "1.5";
break;
case USB_SPEED_UNKNOWN:
case USB_SPEED_FULL:
speed = "12";
break;
case USB_SPEED_HIGH:
speed = "480";
break;
case USB_SPEED_SUPER:
speed = "5000";
break;
case USB_SPEED_SUPER_PLUS:
if (udev->ssp_rate == USB_SSP_GEN_2x2)
speed = "20000";
else
speed = "10000";
break;
default:
speed = "unknown";
}
return sysfs_emit(buf, "%s\n", speed);
}
static DEVICE_ATTR_RO(speed);
static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_device *udev;
udev = to_usb_device(dev);
return sysfs_emit(buf, "%d\n", udev->rx_lanes);
}
static DEVICE_ATTR_RO(rx_lanes);
static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_device *udev;
udev = to_usb_device(dev);
return sysfs_emit(buf, "%d\n", udev->tx_lanes);
}
static DEVICE_ATTR_RO(tx_lanes);
static ssize_t busnum_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_device *udev;
udev = to_usb_device(dev);
return sysfs_emit(buf, "%d\n", udev->bus->busnum);
}
static DEVICE_ATTR_RO(busnum);
static ssize_t devnum_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_device *udev;
udev = to_usb_device(dev);
return sysfs_emit(buf, "%d\n", udev->devnum);
}
static DEVICE_ATTR_RO(devnum);
static ssize_t devpath_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_device *udev;
udev = to_usb_device(dev);
return sysfs_emit(buf, "%s\n", udev->devpath);
}
static DEVICE_ATTR_RO(devpath);
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_device *udev;
u16 bcdUSB;
udev = to_usb_device(dev);
bcdUSB = le16_to_cpu(udev->descriptor.bcdUSB);
return sysfs_emit(buf, "%2x.%02x\n", bcdUSB >> 8, bcdUSB & 0xff);
}
static DEVICE_ATTR_RO(version);
static ssize_t maxchild_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_device *udev;
udev = to_usb_device(dev);
return sysfs_emit(buf, "%d\n", udev->maxchild);
}
static DEVICE_ATTR_RO(maxchild);
static ssize_t quirks_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_device *udev;
udev = to_usb_device(dev);
return sysfs_emit(buf, "0x%x\n", udev->quirks);
}
static DEVICE_ATTR_RO(quirks);
static ssize_t avoid_reset_quirk_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_device *udev;
udev = to_usb_device(dev);
return sysfs_emit(buf, "%d\n", !!(udev->quirks & USB_QUIRK_RESET));
}
static ssize_t avoid_reset_quirk_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
int val, rc;
if (sscanf(buf, "%d", &val) != 1 || val < 0 || val > 1)
return -EINVAL;
rc = usb_lock_device_interruptible(udev);
if (rc < 0)
return -EINTR;
if (val)
udev->quirks |= USB_QUIRK_RESET;
else
udev->quirks &= ~USB_QUIRK_RESET;
usb_unlock_device(udev);
return count;
}
static DEVICE_ATTR_RW(avoid_reset_quirk);
static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_device *udev;
udev = to_usb_device(dev);
return sysfs_emit(buf, "%d\n", atomic_read(&udev->urbnum));
}
static DEVICE_ATTR_RO(urbnum);
static ssize_t ltm_capable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
if (usb_device_supports_ltm(to_usb_device(dev)))
return sysfs_emit(buf, "%s\n", "yes");
return sysfs_emit(buf, "%s\n", "no");
}
static DEVICE_ATTR_RO(ltm_capable);
#ifdef CONFIG_PM
static ssize_t persist_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_device *udev = to_usb_device(dev);
return sysfs_emit(buf, "%d\n", udev->persist_enabled);
}
static ssize_t persist_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
int value, rc;
/* Hubs are always enabled for USB_PERSIST */
if (udev->descriptor.bDeviceClass == USB_CLASS_HUB)
return -EPERM;
if (sscanf(buf, "%d", &value) != 1)
return -EINVAL;
rc = usb_lock_device_interruptible(udev);
if (rc < 0)
return -EINTR;
udev->persist_enabled = !!value;
usb_unlock_device(udev);
return count;
}
static DEVICE_ATTR_RW(persist);
static int add_persist_attributes(struct device *dev)
{
int rc = 0;
if (is_usb_device(dev)) {
struct usb_device *udev = to_usb_device(dev);
/* Hubs are automatically enabled for USB_PERSIST,
* no point in creating the attribute file.
*/
if (udev->descriptor.bDeviceClass != USB_CLASS_HUB)
rc = sysfs_add_file_to_group(&dev->kobj,
&dev_attr_persist.attr,
power_group_name);
}
return rc;
}
static void remove_persist_attributes(struct device *dev)
{
sysfs_remove_file_from_group(&dev->kobj,
&dev_attr_persist.attr,
power_group_name);
}
static ssize_t connected_duration_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_device *udev = to_usb_device(dev);
return sysfs_emit(buf, "%u\n",
jiffies_to_msecs(jiffies - udev->connect_time));
}
static DEVICE_ATTR_RO(connected_duration);
/*
* If the device is resumed, the last time the device was suspended has
* been pre-subtracted from active_duration. We add the current time to
* get the duration that the device was actually active.
*
* If the device is suspended, the active_duration is up-to-date.
*/
static ssize_t active_duration_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_device *udev = to_usb_device(dev);
int duration;
if (udev->state != USB_STATE_SUSPENDED)
duration = jiffies_to_msecs(jiffies + udev->active_duration);
else
duration = jiffies_to_msecs(udev->active_duration);
return sysfs_emit(buf, "%u\n", duration);
}
static DEVICE_ATTR_RO(active_duration);
static ssize_t autosuspend_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%d\n", dev->power.autosuspend_delay / 1000);
}
static ssize_t autosuspend_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
int value;
if (sscanf(buf, "%d", &value) != 1 || value >= INT_MAX/1000 ||
value <= -INT_MAX/1000)
return -EINVAL;
pm_runtime_set_autosuspend_delay(dev, value * 1000);
return count;
}
static DEVICE_ATTR_RW(autosuspend);
static const char on_string[] = "on";
static const char auto_string[] = "auto";
static void warn_level(void)
{
static int level_warned;
if (!level_warned) {
level_warned = 1;
printk(KERN_WARNING "WARNING! power/level is deprecated; "
"use power/control instead\n");
}
}
static ssize_t level_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_device *udev = to_usb_device(dev);
const char *p = auto_string;
warn_level();
if (udev->state != USB_STATE_SUSPENDED && !udev->dev.power.runtime_auto)
p = on_string;
return sysfs_emit(buf, "%s\n", p);
}
static ssize_t level_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
int len = count;
char *cp;
int rc = count;
int rv;
warn_level();
cp = memchr(buf, '\n', count);
if (cp)
len = cp - buf;
rv = usb_lock_device_interruptible(udev);
if (rv < 0)
return -EINTR;
if (len == sizeof on_string - 1 &&
strncmp(buf, on_string, len) == 0)
usb_disable_autosuspend(udev);
else if (len == sizeof auto_string - 1 &&
strncmp(buf, auto_string, len) == 0)
usb_enable_autosuspend(udev);
else
rc = -EINVAL;
usb_unlock_device(udev);
return rc;
}
static DEVICE_ATTR_RW(level);
static ssize_t usb2_hardware_lpm_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_device *udev = to_usb_device(dev);
const char *p;
if (udev->usb2_hw_lpm_allowed == 1)
p = "enabled";
else
p = "disabled";
return sysfs_emit(buf, "%s\n", p);
}
static ssize_t usb2_hardware_lpm_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
bool value;
int ret;
ret = usb_lock_device_interruptible(udev);
if (ret < 0)
return -EINTR;
ret = kstrtobool(buf, &value);
if (!ret) {
udev->usb2_hw_lpm_allowed = value;
if (value)
ret = usb_enable_usb2_hardware_lpm(udev);
else
ret = usb_disable_usb2_hardware_lpm(udev);
}
usb_unlock_device(udev);
if (!ret)
return count;
return ret;
}
static DEVICE_ATTR_RW(usb2_hardware_lpm);
static ssize_t usb2_lpm_l1_timeout_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct usb_device *udev = to_usb_device(dev);
return sysfs_emit(buf, "%d\n", udev->l1_params.timeout);
}
static ssize_t usb2_lpm_l1_timeout_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
u16 timeout;
if (kstrtou16(buf, 0, &timeout))
return -EINVAL;
udev->l1_params.timeout = timeout;
return count;
}
static DEVICE_ATTR_RW(usb2_lpm_l1_timeout);
static ssize_t usb2_lpm_besl_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_device *udev = to_usb_device(dev);
return sysfs_emit(buf, "%d\n", udev->l1_params.besl);
}
static ssize_t usb2_lpm_besl_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
u8 besl;
if (kstrtou8(buf, 0, &besl) || besl > 15)
return -EINVAL;
udev->l1_params.besl = besl;
return count;
}
static DEVICE_ATTR_RW(usb2_lpm_besl);
static ssize_t usb3_hardware_lpm_u1_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_device *udev = to_usb_device(dev);
const char *p;
int rc;
rc = usb_lock_device_interruptible(udev);
if (rc < 0)
return -EINTR;
if (udev->usb3_lpm_u1_enabled)
p = "enabled";
else
p = "disabled";
usb_unlock_device(udev);
return sysfs_emit(buf, "%s\n", p);
}
static DEVICE_ATTR_RO(usb3_hardware_lpm_u1);
static ssize_t usb3_hardware_lpm_u2_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_device *udev = to_usb_device(dev);
const char *p;
int rc;
rc = usb_lock_device_interruptible(udev);
if (rc < 0)
return -EINTR;
if (udev->usb3_lpm_u2_enabled)
p = "enabled";
else
p = "disabled";
usb_unlock_device(udev);
return sysfs_emit(buf, "%s\n", p);
}
static DEVICE_ATTR_RO(usb3_hardware_lpm_u2);
static struct attribute *usb2_hardware_lpm_attr[] = {
&dev_attr_usb2_hardware_lpm.attr,
&dev_attr_usb2_lpm_l1_timeout.attr,
&dev_attr_usb2_lpm_besl.attr,
NULL,
};
static const struct attribute_group usb2_hardware_lpm_attr_group = {
.name = power_group_name,
.attrs = usb2_hardware_lpm_attr,
};
static struct attribute *usb3_hardware_lpm_attr[] = {
&dev_attr_usb3_hardware_lpm_u1.attr,
&dev_attr_usb3_hardware_lpm_u2.attr,
NULL,
};
static const struct attribute_group usb3_hardware_lpm_attr_group = {
.name = power_group_name,
.attrs = usb3_hardware_lpm_attr,
};
static struct attribute *power_attrs[] = {
&dev_attr_autosuspend.attr,
&dev_attr_level.attr,
&dev_attr_connected_duration.attr,
&dev_attr_active_duration.attr,
NULL,
};
static const struct attribute_group power_attr_group = {
.name = power_group_name,
.attrs = power_attrs,
};
static int add_power_attributes(struct device *dev)
{
int rc = 0;
if (is_usb_device(dev)) {
struct usb_device *udev = to_usb_device(dev);
rc = sysfs_merge_group(&dev->kobj, &power_attr_group);
if (udev->usb2_hw_lpm_capable == 1)
rc = sysfs_merge_group(&dev->kobj,
&usb2_hardware_lpm_attr_group);
if ((udev->speed == USB_SPEED_SUPER ||
udev->speed == USB_SPEED_SUPER_PLUS) &&
udev->lpm_capable == 1)
rc = sysfs_merge_group(&dev->kobj,
&usb3_hardware_lpm_attr_group);
}
return rc;
}
static void remove_power_attributes(struct device *dev)
{
sysfs_unmerge_group(&dev->kobj, &usb2_hardware_lpm_attr_group);
sysfs_unmerge_group(&dev->kobj, &power_attr_group);
}
#else
#define add_persist_attributes(dev) 0
#define remove_persist_attributes(dev) do {} while (0)
#define add_power_attributes(dev) 0
#define remove_power_attributes(dev) do {} while (0)
#endif /* CONFIG_PM */
/* Descriptor fields */
#define usb_descriptor_attr_le16(field, format_string) \
static ssize_t \
field##_show(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct usb_device *udev; \
\
udev = to_usb_device(dev); \
return sysfs_emit(buf, format_string, \
le16_to_cpu(udev->descriptor.field)); \
} \
static DEVICE_ATTR_RO(field)
usb_descriptor_attr_le16(idVendor, "%04x\n");
usb_descriptor_attr_le16(idProduct, "%04x\n");
usb_descriptor_attr_le16(bcdDevice, "%04x\n");
#define usb_descriptor_attr(field, format_string) \
static ssize_t \
field##_show(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct usb_device *udev; \
\
udev = to_usb_device(dev); \
return sysfs_emit(buf, format_string, udev->descriptor.field); \
} \
static DEVICE_ATTR_RO(field)
usb_descriptor_attr(bDeviceClass, "%02x\n");
usb_descriptor_attr(bDeviceSubClass, "%02x\n");
usb_descriptor_attr(bDeviceProtocol, "%02x\n");
usb_descriptor_attr(bNumConfigurations, "%d\n");
usb_descriptor_attr(bMaxPacketSize0, "%d\n");
/* show if the device is authorized (1) or not (0) */
static ssize_t authorized_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_device *usb_dev = to_usb_device(dev);
return sysfs_emit(buf, "%u\n", usb_dev->authorized);
}
/*
* Authorize a device to be used in the system
*
* Writing a 0 deauthorizes the device, writing a 1 authorizes it.
*/
static ssize_t authorized_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t size)
{
ssize_t result;
struct usb_device *usb_dev = to_usb_device(dev);
unsigned val;
result = sscanf(buf, "%u\n", &val);
if (result != 1)
result = -EINVAL;
else if (val == 0)
result = usb_deauthorize_device(usb_dev);
else
result = usb_authorize_device(usb_dev);
return result < 0 ? result : size;
}
static DEVICE_ATTR_IGNORE_LOCKDEP(authorized, S_IRUGO | S_IWUSR,
authorized_show, authorized_store);
/* "Safely remove a device" */
static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
int rc = 0;
usb_lock_device(udev);
if (udev->state != USB_STATE_NOTATTACHED) {
/* To avoid races, first unconfigure and then remove */
usb_set_configuration(udev, -1);
rc = usb_remove_device(udev);
}
if (rc == 0)
rc = count;
usb_unlock_device(udev);
return rc;
}
static DEVICE_ATTR_IGNORE_LOCKDEP(remove, S_IWUSR, NULL, remove_store);
static struct attribute *dev_attrs[] = {
/* current configuration's attributes */
&dev_attr_configuration.attr,
&dev_attr_bNumInterfaces.attr,
&dev_attr_bConfigurationValue.attr,
&dev_attr_bmAttributes.attr,
&dev_attr_bMaxPower.attr,
/* device attributes */
&dev_attr_urbnum.attr,
&dev_attr_idVendor.attr,
&dev_attr_idProduct.attr,
&dev_attr_bcdDevice.attr,
&dev_attr_bDeviceClass.attr,
&dev_attr_bDeviceSubClass.attr,
&dev_attr_bDeviceProtocol.attr,
&dev_attr_bNumConfigurations.attr,
&dev_attr_bMaxPacketSize0.attr,
&dev_attr_speed.attr,
&dev_attr_rx_lanes.attr,
&dev_attr_tx_lanes.attr,
&dev_attr_busnum.attr,
&dev_attr_devnum.attr,
&dev_attr_devpath.attr,
&dev_attr_version.attr,
&dev_attr_maxchild.attr,
&dev_attr_quirks.attr,
&dev_attr_avoid_reset_quirk.attr,
&dev_attr_authorized.attr,
&dev_attr_remove.attr,
&dev_attr_ltm_capable.attr,
#ifdef CONFIG_OF
&dev_attr_devspec.attr,
#endif
NULL,
};
static const struct attribute_group dev_attr_grp = {
.attrs = dev_attrs,
};
/* When modifying this list, be sure to modify dev_string_attrs_are_visible()
* accordingly.
*/
static struct attribute *dev_string_attrs[] = {
&dev_attr_manufacturer.attr,
&dev_attr_product.attr,
&dev_attr_serial.attr,
NULL
};
static umode_t dev_string_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = kobj_to_dev(kobj);
struct usb_device *udev = to_usb_device(dev);
if (a == &dev_attr_manufacturer.attr) {
if (udev->manufacturer == NULL)
return 0;
} else if (a == &dev_attr_product.attr) {
if (udev->product == NULL)
return 0;
} else if (a == &dev_attr_serial.attr) {
if (udev->serial == NULL)
return 0;
}
return a->mode;
}
static const struct attribute_group dev_string_attr_grp = {
.attrs = dev_string_attrs,
.is_visible = dev_string_attrs_are_visible,
};
const struct attribute_group *usb_device_groups[] = {
&dev_attr_grp,
&dev_string_attr_grp,
NULL
};
/* Binary descriptors */
static ssize_t
read_descriptors(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct usb_device *udev = to_usb_device(dev);
size_t nleft = count;
size_t srclen, n;
int cfgno;
void *src;
/* The binary attribute begins with the device descriptor.
* Following that are the raw descriptor entries for all the
* configurations (config plus subsidiary descriptors).
*/
for (cfgno = -1; cfgno < udev->descriptor.bNumConfigurations &&
nleft > 0; ++cfgno) {
if (cfgno < 0) {
src = &udev->descriptor;
srclen = sizeof(struct usb_device_descriptor);
} else {
src = udev->rawdescriptors[cfgno];
srclen = __le16_to_cpu(udev->config[cfgno].desc.
wTotalLength);
}
if (off < srclen) {
n = min(nleft, srclen - (size_t) off);
memcpy(buf, src + off, n);
nleft -= n;
buf += n;
off = 0;
} else {
off -= srclen;
}
}
return count - nleft;
}
static struct bin_attribute dev_bin_attr_descriptors = {
.attr = {.name = "descriptors", .mode = 0444},
.read = read_descriptors,
.size = 18 + 65535, /* dev descr + max-size raw descriptor */
};
/*
* Show & store the current value of authorized_default
*/
static ssize_t authorized_default_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_device *rh_usb_dev = to_usb_device(dev);
struct usb_bus *usb_bus = rh_usb_dev->bus;
struct usb_hcd *hcd;
hcd = bus_to_hcd(usb_bus);
return sysfs_emit(buf, "%u\n", hcd->dev_policy);
}
static ssize_t authorized_default_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
ssize_t result;
unsigned int val;
struct usb_device *rh_usb_dev = to_usb_device(dev);
struct usb_bus *usb_bus = rh_usb_dev->bus;
struct usb_hcd *hcd;
hcd = bus_to_hcd(usb_bus);
result = sscanf(buf, "%u\n", &val);
if (result == 1) {
hcd->dev_policy = val <= USB_DEVICE_AUTHORIZE_INTERNAL ?
val : USB_DEVICE_AUTHORIZE_ALL;
result = size;
} else {
result = -EINVAL;
}
return result;
}
static DEVICE_ATTR_RW(authorized_default);
/*
* interface_authorized_default_show - show default authorization status
* for USB interfaces
*
* note: interface_authorized_default is the default value
* for initializing the authorized attribute of interfaces
*/
static ssize_t interface_authorized_default_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_device *usb_dev = to_usb_device(dev);
struct usb_hcd *hcd = bus_to_hcd(usb_dev->bus);
return sysfs_emit(buf, "%u\n", !!HCD_INTF_AUTHORIZED(hcd));
}
/*
* interface_authorized_default_store - store default authorization status
* for USB interfaces
*
* note: interface_authorized_default is the default value
* for initializing the authorized attribute of interfaces
*/
static ssize_t interface_authorized_default_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct usb_device *usb_dev = to_usb_device(dev);
struct usb_hcd *hcd = bus_to_hcd(usb_dev->bus);
int rc = count;
bool val;
if (kstrtobool(buf, &val) != 0)
return -EINVAL;
if (val)
set_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags);
else
clear_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags);
return rc;
}
static DEVICE_ATTR_RW(interface_authorized_default);
/* Group all the USB bus attributes */
static struct attribute *usb_bus_attrs[] = {
&dev_attr_authorized_default.attr,
&dev_attr_interface_authorized_default.attr,
NULL,
};
static const struct attribute_group usb_bus_attr_group = {
.name = NULL, /* we want them in the same directory */
.attrs = usb_bus_attrs,
};
static int add_default_authorized_attributes(struct device *dev)
{
int rc = 0;
if (is_usb_device(dev))
rc = sysfs_create_group(&dev->kobj, &usb_bus_attr_group);
return rc;
}
static void remove_default_authorized_attributes(struct device *dev)
{
if (is_usb_device(dev)) {
sysfs_remove_group(&dev->kobj, &usb_bus_attr_group);
}
}
int usb_create_sysfs_dev_files(struct usb_device *udev)
{
struct device *dev = &udev->dev;
int retval;
retval = device_create_bin_file(dev, &dev_bin_attr_descriptors);
if (retval)
goto error;
retval = add_persist_attributes(dev);
if (retval)
goto error;
retval = add_power_attributes(dev);
if (retval)
goto error;
if (is_root_hub(udev)) {
retval = add_default_authorized_attributes(dev);
if (retval)
goto error;
}
return retval;
error:
usb_remove_sysfs_dev_files(udev);
return retval;
}
void usb_remove_sysfs_dev_files(struct usb_device *udev)
{
struct device *dev = &udev->dev;
if (is_root_hub(udev))
remove_default_authorized_attributes(dev);
remove_power_attributes(dev);
remove_persist_attributes(dev);
device_remove_bin_file(dev, &dev_bin_attr_descriptors);
}
/* Interface Association Descriptor fields */
#define usb_intf_assoc_attr(field, format_string) \
static ssize_t \
iad_##field##_show(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct usb_interface *intf = to_usb_interface(dev); \
\
return sysfs_emit(buf, format_string, \
intf->intf_assoc->field); \
} \
static DEVICE_ATTR_RO(iad_##field)
usb_intf_assoc_attr(bFirstInterface, "%02x\n");
usb_intf_assoc_attr(bInterfaceCount, "%02d\n");
usb_intf_assoc_attr(bFunctionClass, "%02x\n");
usb_intf_assoc_attr(bFunctionSubClass, "%02x\n");
usb_intf_assoc_attr(bFunctionProtocol, "%02x\n");
/* Interface fields */
#define usb_intf_attr(field, format_string) \
static ssize_t \
field##_show(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct usb_interface *intf = to_usb_interface(dev); \
\
return sysfs_emit(buf, format_string, \
intf->cur_altsetting->desc.field); \
} \
static DEVICE_ATTR_RO(field)
usb_intf_attr(bInterfaceNumber, "%02x\n");
usb_intf_attr(bAlternateSetting, "%2d\n");
usb_intf_attr(bNumEndpoints, "%02x\n");
usb_intf_attr(bInterfaceClass, "%02x\n");
usb_intf_attr(bInterfaceSubClass, "%02x\n");
usb_intf_attr(bInterfaceProtocol, "%02x\n");
static ssize_t interface_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_interface *intf;
char *string;
intf = to_usb_interface(dev);
string = READ_ONCE(intf->cur_altsetting->string);
if (!string)
return 0;
return sysfs_emit(buf, "%s\n", string);
}
static DEVICE_ATTR_RO(interface);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_interface *intf;
struct usb_device *udev;
struct usb_host_interface *alt;
intf = to_usb_interface(dev);
udev = interface_to_usbdev(intf);
alt = READ_ONCE(intf->cur_altsetting);
return sysfs_emit(buf,
"usb:v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02X"
"ic%02Xisc%02Xip%02Xin%02X\n",
le16_to_cpu(udev->descriptor.idVendor),
le16_to_cpu(udev->descriptor.idProduct),
le16_to_cpu(udev->descriptor.bcdDevice),
udev->descriptor.bDeviceClass,
udev->descriptor.bDeviceSubClass,
udev->descriptor.bDeviceProtocol,
alt->desc.bInterfaceClass,
alt->desc.bInterfaceSubClass,
alt->desc.bInterfaceProtocol,
alt->desc.bInterfaceNumber);
}
static DEVICE_ATTR_RO(modalias);
static ssize_t supports_autosuspend_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
int s;
s = device_lock_interruptible(dev);
if (s < 0)
return -EINTR;
/* Devices will be autosuspended even when an interface isn't claimed */
s = (!dev->driver || to_usb_driver(dev->driver)->supports_autosuspend);
device_unlock(dev);
return sysfs_emit(buf, "%u\n", s);
}
static DEVICE_ATTR_RO(supports_autosuspend);
/*
* interface_authorized_show - show authorization status of a USB interface
* 1 is authorized, 0 is deauthorized
*/
static ssize_t interface_authorized_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(dev);
return sysfs_emit(buf, "%u\n", intf->authorized);
}
/*
* interface_authorized_store - authorize or deauthorize a USB interface
*/
static ssize_t interface_authorized_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct usb_interface *intf = to_usb_interface(dev);
bool val;
if (kstrtobool(buf, &val) != 0)
return -EINVAL;
if (val)
usb_authorize_interface(intf);
else
usb_deauthorize_interface(intf);
return count;
}
static struct device_attribute dev_attr_interface_authorized =
__ATTR(authorized, S_IRUGO | S_IWUSR,
interface_authorized_show, interface_authorized_store);
static struct attribute *intf_attrs[] = {
&dev_attr_bInterfaceNumber.attr,
&dev_attr_bAlternateSetting.attr,
&dev_attr_bNumEndpoints.attr,
&dev_attr_bInterfaceClass.attr,
&dev_attr_bInterfaceSubClass.attr,
&dev_attr_bInterfaceProtocol.attr,
&dev_attr_modalias.attr,
&dev_attr_supports_autosuspend.attr,
&dev_attr_interface_authorized.attr,
NULL,
};
static const struct attribute_group intf_attr_grp = {
.attrs = intf_attrs,
};
static struct attribute *intf_assoc_attrs[] = {
&dev_attr_iad_bFirstInterface.attr,
&dev_attr_iad_bInterfaceCount.attr,
&dev_attr_iad_bFunctionClass.attr,
&dev_attr_iad_bFunctionSubClass.attr,
&dev_attr_iad_bFunctionProtocol.attr,
NULL,
};
static umode_t intf_assoc_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = kobj_to_dev(kobj);
struct usb_interface *intf = to_usb_interface(dev);
if (intf->intf_assoc == NULL)
return 0;
return a->mode;
}
static const struct attribute_group intf_assoc_attr_grp = {
.attrs = intf_assoc_attrs,
.is_visible = intf_assoc_attrs_are_visible,
};
static ssize_t wireless_status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_interface *intf;
intf = to_usb_interface(dev);
if (intf->wireless_status == USB_WIRELESS_STATUS_DISCONNECTED)
return sysfs_emit(buf, "%s\n", "disconnected");
return sysfs_emit(buf, "%s\n", "connected");
}
static DEVICE_ATTR_RO(wireless_status);
static struct attribute *intf_wireless_status_attrs[] = {
&dev_attr_wireless_status.attr,
NULL
};
static umode_t intf_wireless_status_attr_is_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = kobj_to_dev(kobj);
struct usb_interface *intf = to_usb_interface(dev);
if (a != &dev_attr_wireless_status.attr ||
intf->wireless_status != USB_WIRELESS_STATUS_NA)
return a->mode;
return 0;
}
static const struct attribute_group intf_wireless_status_attr_grp = {
.attrs = intf_wireless_status_attrs,
.is_visible = intf_wireless_status_attr_is_visible,
};
int usb_update_wireless_status_attr(struct usb_interface *intf)
{
struct device *dev = &intf->dev;
int ret;
ret = sysfs_update_group(&dev->kobj, &intf_wireless_status_attr_grp);
if (ret < 0)
return ret;
sysfs_notify(&dev->kobj, NULL, "wireless_status");
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
return 0;
}
const struct attribute_group *usb_interface_groups[] = {
&intf_attr_grp,
&intf_assoc_attr_grp,
&intf_wireless_status_attr_grp,
NULL
};
void usb_create_sysfs_intf_files(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
struct usb_host_interface *alt = intf->cur_altsetting;
if (intf->sysfs_files_created || intf->unregistering)
return;
if (!alt->string && !(udev->quirks & USB_QUIRK_CONFIG_INTF_STRINGS))
alt->string = usb_cache_string(udev, alt->desc.iInterface);
if (alt->string && device_create_file(&intf->dev, &dev_attr_interface)) {
/* This is not a serious error */
dev_dbg(&intf->dev, "interface string descriptor file not created\n");
}
intf->sysfs_files_created = 1;
}
void usb_remove_sysfs_intf_files(struct usb_interface *intf)
{
if (!intf->sysfs_files_created)
return;
device_remove_file(&intf->dev, &dev_attr_interface);
intf->sysfs_files_created = 0;
}
| linux-master | drivers/usb/core/sysfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* drivers/usb/core/usb.c
*
* (C) Copyright Linus Torvalds 1999
* (C) Copyright Johannes Erdfelt 1999-2001
* (C) Copyright Andreas Gal 1999
* (C) Copyright Gregory P. Smith 1999
* (C) Copyright Deti Fliegl 1999 (new USB architecture)
* (C) Copyright Randy Dunlap 2000
* (C) Copyright David Brownell 2000-2004
* (C) Copyright Yggdrasil Computing, Inc. 2000
* (usb_device_id matching changes by Adam J. Richter)
* (C) Copyright Greg Kroah-Hartman 2002-2003
*
* Released under the GPLv2 only.
*
* NOTE! This is not actually a driver at all, rather this is
* just a collection of helper routines that implement the
* generic USB things that the real drivers can use..
*
* Think of this as a "USB library" rather than anything else,
* with no callbacks. Callbacks are evil.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/usb/of.h>
#include <asm/io.h>
#include <linux/scatterlist.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include "hub.h"
const char *usbcore_name = "usbcore";
static bool nousb; /* Disable USB when built into kernel image */
module_param(nousb, bool, 0444);
/*
* for external read access to <nousb>
*/
int usb_disabled(void)
{
return nousb;
}
EXPORT_SYMBOL_GPL(usb_disabled);
#ifdef CONFIG_PM
/* Default delay value, in seconds */
static int usb_autosuspend_delay = CONFIG_USB_AUTOSUSPEND_DELAY;
module_param_named(autosuspend, usb_autosuspend_delay, int, 0644);
MODULE_PARM_DESC(autosuspend, "default autosuspend delay");
#else
#define usb_autosuspend_delay 0
#endif
static bool match_endpoint(struct usb_endpoint_descriptor *epd,
struct usb_endpoint_descriptor **bulk_in,
struct usb_endpoint_descriptor **bulk_out,
struct usb_endpoint_descriptor **int_in,
struct usb_endpoint_descriptor **int_out)
{
switch (usb_endpoint_type(epd)) {
case USB_ENDPOINT_XFER_BULK:
if (usb_endpoint_dir_in(epd)) {
if (bulk_in && !*bulk_in) {
*bulk_in = epd;
break;
}
} else {
if (bulk_out && !*bulk_out) {
*bulk_out = epd;
break;
}
}
return false;
case USB_ENDPOINT_XFER_INT:
if (usb_endpoint_dir_in(epd)) {
if (int_in && !*int_in) {
*int_in = epd;
break;
}
} else {
if (int_out && !*int_out) {
*int_out = epd;
break;
}
}
return false;
default:
return false;
}
return (!bulk_in || *bulk_in) && (!bulk_out || *bulk_out) &&
(!int_in || *int_in) && (!int_out || *int_out);
}
/**
* usb_find_common_endpoints() -- look up common endpoint descriptors
* @alt: alternate setting to search
* @bulk_in: pointer to descriptor pointer, or NULL
* @bulk_out: pointer to descriptor pointer, or NULL
* @int_in: pointer to descriptor pointer, or NULL
* @int_out: pointer to descriptor pointer, or NULL
*
* Search the alternate setting's endpoint descriptors for the first bulk-in,
* bulk-out, interrupt-in and interrupt-out endpoints and return them in the
* provided pointers (unless they are NULL).
*
* If a requested endpoint is not found, the corresponding pointer is set to
* NULL.
*
* Return: Zero if all requested descriptors were found, or -ENXIO otherwise.
*/
int usb_find_common_endpoints(struct usb_host_interface *alt,
struct usb_endpoint_descriptor **bulk_in,
struct usb_endpoint_descriptor **bulk_out,
struct usb_endpoint_descriptor **int_in,
struct usb_endpoint_descriptor **int_out)
{
struct usb_endpoint_descriptor *epd;
int i;
if (bulk_in)
*bulk_in = NULL;
if (bulk_out)
*bulk_out = NULL;
if (int_in)
*int_in = NULL;
if (int_out)
*int_out = NULL;
for (i = 0; i < alt->desc.bNumEndpoints; ++i) {
epd = &alt->endpoint[i].desc;
if (match_endpoint(epd, bulk_in, bulk_out, int_in, int_out))
return 0;
}
return -ENXIO;
}
EXPORT_SYMBOL_GPL(usb_find_common_endpoints);
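/*
 * Usage sketch (illustrative, not from the original source): a driver's
 * probe() locating its bulk endpoints; example_probe is hypothetical.
 *
 *	static int example_probe(struct usb_interface *intf,
 *				 const struct usb_device_id *id)
 *	{
 *		struct usb_endpoint_descriptor *bulk_in, *bulk_out;
 *		int ret;
 *
 *		ret = usb_find_common_endpoints(intf->cur_altsetting,
 *						&bulk_in, &bulk_out,
 *						NULL, NULL);
 *		if (ret)
 *			return ret;
 *		return 0;
 *	}
 */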
/**
* usb_find_common_endpoints_reverse() -- look up common endpoint descriptors
* @alt: alternate setting to search
* @bulk_in: pointer to descriptor pointer, or NULL
* @bulk_out: pointer to descriptor pointer, or NULL
* @int_in: pointer to descriptor pointer, or NULL
* @int_out: pointer to descriptor pointer, or NULL
*
* Search the alternate setting's endpoint descriptors for the last bulk-in,
* bulk-out, interrupt-in and interrupt-out endpoints and return them in the
* provided pointers (unless they are NULL).
*
* If a requested endpoint is not found, the corresponding pointer is set to
* NULL.
*
* Return: Zero if all requested descriptors were found, or -ENXIO otherwise.
*/
int usb_find_common_endpoints_reverse(struct usb_host_interface *alt,
struct usb_endpoint_descriptor **bulk_in,
struct usb_endpoint_descriptor **bulk_out,
struct usb_endpoint_descriptor **int_in,
struct usb_endpoint_descriptor **int_out)
{
struct usb_endpoint_descriptor *epd;
int i;
if (bulk_in)
*bulk_in = NULL;
if (bulk_out)
*bulk_out = NULL;
if (int_in)
*int_in = NULL;
if (int_out)
*int_out = NULL;
for (i = alt->desc.bNumEndpoints - 1; i >= 0; --i) {
epd = &alt->endpoint[i].desc;
if (match_endpoint(epd, bulk_in, bulk_out, int_in, int_out))
return 0;
}
return -ENXIO;
}
EXPORT_SYMBOL_GPL(usb_find_common_endpoints_reverse);
/**
* usb_find_endpoint() - Given an endpoint address, search for the endpoint's
* usb_host_endpoint structure in an interface's current altsetting.
* @intf: the interface whose current altsetting should be searched
* @ep_addr: the endpoint address (number and direction) to find
*
* Search the altsetting's list of endpoints for one with the specified address.
*
* Return: Pointer to the usb_host_endpoint if found, %NULL otherwise.
*/
static const struct usb_host_endpoint *usb_find_endpoint(
const struct usb_interface *intf, unsigned int ep_addr)
{
int n;
const struct usb_host_endpoint *ep;
n = intf->cur_altsetting->desc.bNumEndpoints;
ep = intf->cur_altsetting->endpoint;
for (; n > 0; (--n, ++ep)) {
if (ep->desc.bEndpointAddress == ep_addr)
return ep;
}
return NULL;
}
/**
* usb_check_bulk_endpoints - Check whether an interface's current altsetting
* contains a set of bulk endpoints with the given addresses.
* @intf: the interface whose current altsetting should be searched
* @ep_addrs: 0-terminated array of the endpoint addresses (number and
* direction) to look for
*
* Search for endpoints with the specified addresses and check their types.
*
* Return: %true if all the endpoints are found and are bulk, %false otherwise.
*/
bool usb_check_bulk_endpoints(
const struct usb_interface *intf, const u8 *ep_addrs)
{
const struct usb_host_endpoint *ep;
for (; *ep_addrs; ++ep_addrs) {
ep = usb_find_endpoint(intf, *ep_addrs);
if (!ep || !usb_endpoint_xfer_bulk(&ep->desc))
return false;
}
return true;
}
EXPORT_SYMBOL_GPL(usb_check_bulk_endpoints);
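/*
 * Usage sketch (illustrative): verifying that a claimed interface really
 * provides bulk endpoints 0x01 (OUT) and 0x81 (IN) before using them.
 * The addresses are hypothetical.
 *
 *	static const u8 wanted_eps[] = { 0x01, 0x81, 0 };
 *
 *	if (!usb_check_bulk_endpoints(intf, wanted_eps))
 *		return -ENODEV;
 */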
/**
* usb_check_int_endpoints - Check whether an interface's current altsetting
* contains a set of interrupt endpoints with the given addresses.
* @intf: the interface whose current altsetting should be searched
* @ep_addrs: 0-terminated array of the endpoint addresses (number and
* direction) to look for
*
* Search for endpoints with the specified addresses and check their types.
*
* Return: %true if all the endpoints are found and are interrupt,
* %false otherwise.
*/
bool usb_check_int_endpoints(
const struct usb_interface *intf, const u8 *ep_addrs)
{
const struct usb_host_endpoint *ep;
for (; *ep_addrs; ++ep_addrs) {
ep = usb_find_endpoint(intf, *ep_addrs);
if (!ep || !usb_endpoint_xfer_int(&ep->desc))
return false;
}
return true;
}
EXPORT_SYMBOL_GPL(usb_check_int_endpoints);
/**
* usb_find_alt_setting() - Given a configuration, find the alternate setting
* for the given interface.
* @config: the configuration to search (not necessarily the current config).
* @iface_num: interface number to search in
* @alt_num: alternate interface setting number to search for.
*
* Search the configuration's interface cache for the given alt setting.
*
* Return: The alternate setting, if found. %NULL otherwise.
*/
struct usb_host_interface *usb_find_alt_setting(
struct usb_host_config *config,
unsigned int iface_num,
unsigned int alt_num)
{
struct usb_interface_cache *intf_cache = NULL;
int i;
if (!config)
return NULL;
for (i = 0; i < config->desc.bNumInterfaces; i++) {
if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber
== iface_num) {
intf_cache = config->intf_cache[i];
break;
}
}
if (!intf_cache)
return NULL;
for (i = 0; i < intf_cache->num_altsetting; i++)
if (intf_cache->altsetting[i].desc.bAlternateSetting == alt_num)
return &intf_cache->altsetting[i];
printk(KERN_DEBUG "Did not find alt setting %u for intf %u, "
"config %u\n", alt_num, iface_num,
config->desc.bConfigurationValue);
return NULL;
}
EXPORT_SYMBOL_GPL(usb_find_alt_setting);
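/*
 * Usage sketch (illustrative): looking up altsetting 1 of interface 0 in
 * the active configuration before switching to it.
 *
 *	struct usb_host_interface *alt;
 *
 *	alt = usb_find_alt_setting(udev->actconfig, 0, 1);
 *	if (alt)
 *		usb_set_interface(udev, 0, 1);
 */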
/**
* usb_ifnum_to_if - get the interface object with a given interface number
* @dev: the device whose current configuration is considered
* @ifnum: the desired interface
*
* This walks the device descriptor for the currently active configuration
* to find the interface object with the particular interface number.
*
* Note that configuration descriptors are not required to assign interface
* numbers sequentially, so that it would be incorrect to assume that
* the first interface in that descriptor corresponds to interface zero.
* This routine helps device drivers avoid such mistakes.
* However, you should make sure that you do the right thing with any
* alternate settings available for this interface.
*
* Don't call this function unless you are bound to one of the interfaces
* on this device or you have locked the device!
*
* Return: A pointer to the interface that has @ifnum as interface number,
* if found. %NULL otherwise.
*/
struct usb_interface *usb_ifnum_to_if(const struct usb_device *dev,
unsigned ifnum)
{
struct usb_host_config *config = dev->actconfig;
int i;
if (!config)
return NULL;
for (i = 0; i < config->desc.bNumInterfaces; i++)
if (config->interface[i]->altsetting[0]
.desc.bInterfaceNumber == ifnum)
return config->interface[i];
return NULL;
}
EXPORT_SYMBOL_GPL(usb_ifnum_to_if);
/**
* usb_altnum_to_altsetting - get the altsetting structure with a given alternate setting number.
* @intf: the interface containing the altsetting in question
* @altnum: the desired alternate setting number
*
* This searches the altsetting array of the specified interface for
* an entry with the correct bAlternateSetting value.
*
* Note that altsettings need not be stored sequentially by number, so
* it would be incorrect to assume that the first altsetting entry in
* the array corresponds to altsetting zero. This routine helps device
* drivers avoid such mistakes.
*
* Don't call this function unless you are bound to the intf interface
* or you have locked the device!
*
* Return: A pointer to the entry of the altsetting array of @intf that
* has @altnum as the alternate setting number. %NULL if not found.
*/
struct usb_host_interface *usb_altnum_to_altsetting(
const struct usb_interface *intf,
unsigned int altnum)
{
int i;
for (i = 0; i < intf->num_altsetting; i++) {
if (intf->altsetting[i].desc.bAlternateSetting == altnum)
return &intf->altsetting[i];
}
return NULL;
}
EXPORT_SYMBOL_GPL(usb_altnum_to_altsetting);
struct find_interface_arg {
int minor;
struct device_driver *drv;
};
static int __find_interface(struct device *dev, const void *data)
{
const struct find_interface_arg *arg = data;
struct usb_interface *intf;
if (!is_usb_interface(dev))
return 0;
if (dev->driver != arg->drv)
return 0;
intf = to_usb_interface(dev);
return intf->minor == arg->minor;
}
/**
* usb_find_interface - find usb_interface pointer for driver and device
* @drv: the driver whose current configuration is considered
* @minor: the minor number of the desired device
*
* This walks the bus device list and returns a pointer to the interface
* with the matching minor and driver. Note, this only works for devices
* that share the USB major number.
*
* Return: A pointer to the interface with the matching driver and @minor,
* or %NULL if not found.
*/
struct usb_interface *usb_find_interface(struct usb_driver *drv, int minor)
{
struct find_interface_arg argb;
struct device *dev;
argb.minor = minor;
argb.drv = &drv->drvwrap.driver;
dev = bus_find_device(&usb_bus_type, NULL, &argb, __find_interface);
/* Drop reference count from bus_find_device */
put_device(dev);
return dev ? to_usb_interface(dev) : NULL;
}
EXPORT_SYMBOL_GPL(usb_find_interface);
struct each_dev_arg {
void *data;
int (*fn)(struct usb_device *, void *);
};
static int __each_dev(struct device *dev, void *data)
{
struct each_dev_arg *arg = (struct each_dev_arg *)data;
/* There are struct usb_interface devices on the same bus; filter them out */
if (!is_usb_device(dev))
return 0;
return arg->fn(to_usb_device(dev), arg->data);
}
/**
* usb_for_each_dev - iterate over all USB devices in the system
* @data: data pointer that will be handed to the callback function
* @fn: callback function to be called for each USB device
*
* Iterate over all USB devices and call @fn for each, passing it @data. If it
* returns anything other than 0, we break the iteration prematurely and return
* that value.
*/
int usb_for_each_dev(void *data, int (*fn)(struct usb_device *, void *))
{
struct each_dev_arg arg = {data, fn};
return bus_for_each_dev(&usb_bus_type, NULL, &arg, __each_dev);
}
EXPORT_SYMBOL_GPL(usb_for_each_dev);
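/*
 * Usage sketch (illustrative): counting every USB device on the system;
 * count_dev is a hypothetical callback.
 *
 *	static int count_dev(struct usb_device *udev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *	usb_for_each_dev(&count, count_dev);
 *
 * Returning a non-zero value from the callback stops the iteration and
 * makes usb_for_each_dev() return that value.
 */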
/**
* usb_release_dev - free a usb device structure when all users of it are finished.
* @dev: device that's been disconnected
*
* Will be called only by the device core when all users of this usb device are
* done.
*/
static void usb_release_dev(struct device *dev)
{
struct usb_device *udev;
struct usb_hcd *hcd;
udev = to_usb_device(dev);
hcd = bus_to_hcd(udev->bus);
usb_destroy_configuration(udev);
usb_release_bos_descriptor(udev);
of_node_put(dev->of_node);
usb_put_hcd(hcd);
kfree(udev->product);
kfree(udev->manufacturer);
kfree(udev->serial);
kfree(udev);
}
static int usb_dev_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct usb_device *usb_dev;
usb_dev = to_usb_device(dev);
if (add_uevent_var(env, "BUSNUM=%03d", usb_dev->bus->busnum))
return -ENOMEM;
if (add_uevent_var(env, "DEVNUM=%03d", usb_dev->devnum))
return -ENOMEM;
return 0;
}
#ifdef CONFIG_PM
/* USB device Power-Management thunks.
* There's no need to distinguish here between quiescing a USB device
* and powering it down; the generic_suspend() routine takes care of
* it by skipping the usb_port_suspend() call for a quiesce. And for
* USB interfaces there's no difference at all.
*/
static int usb_dev_prepare(struct device *dev)
{
return 0; /* Implement eventually? */
}
static void usb_dev_complete(struct device *dev)
{
/* Currently used only for rebinding interfaces */
usb_resume_complete(dev);
}
static int usb_dev_suspend(struct device *dev)
{
return usb_suspend(dev, PMSG_SUSPEND);
}
static int usb_dev_resume(struct device *dev)
{
return usb_resume(dev, PMSG_RESUME);
}
static int usb_dev_freeze(struct device *dev)
{
return usb_suspend(dev, PMSG_FREEZE);
}
static int usb_dev_thaw(struct device *dev)
{
return usb_resume(dev, PMSG_THAW);
}
static int usb_dev_poweroff(struct device *dev)
{
return usb_suspend(dev, PMSG_HIBERNATE);
}
static int usb_dev_restore(struct device *dev)
{
return usb_resume(dev, PMSG_RESTORE);
}
static const struct dev_pm_ops usb_device_pm_ops = {
.prepare = usb_dev_prepare,
.complete = usb_dev_complete,
.suspend = usb_dev_suspend,
.resume = usb_dev_resume,
.freeze = usb_dev_freeze,
.thaw = usb_dev_thaw,
.poweroff = usb_dev_poweroff,
.restore = usb_dev_restore,
.runtime_suspend = usb_runtime_suspend,
.runtime_resume = usb_runtime_resume,
.runtime_idle = usb_runtime_idle,
};
#endif /* CONFIG_PM */
static char *usb_devnode(const struct device *dev,
umode_t *mode, kuid_t *uid, kgid_t *gid)
{
const struct usb_device *usb_dev;
usb_dev = to_usb_device(dev);
return kasprintf(GFP_KERNEL, "bus/usb/%03d/%03d",
usb_dev->bus->busnum, usb_dev->devnum);
}
struct device_type usb_device_type = {
.name = "usb_device",
.release = usb_release_dev,
.uevent = usb_dev_uevent,
.devnode = usb_devnode,
#ifdef CONFIG_PM
.pm = &usb_device_pm_ops,
#endif
};
static bool usb_dev_authorized(struct usb_device *dev, struct usb_hcd *hcd)
{
struct usb_hub *hub;
if (!dev->parent)
return true; /* Root hub always ok [and always wired] */
switch (hcd->dev_policy) {
case USB_DEVICE_AUTHORIZE_NONE:
default:
return false;
case USB_DEVICE_AUTHORIZE_ALL:
return true;
case USB_DEVICE_AUTHORIZE_INTERNAL:
hub = usb_hub_to_struct_hub(dev->parent);
return hub->ports[dev->portnum - 1]->connect_type ==
USB_PORT_CONNECT_TYPE_HARD_WIRED;
}
}
/**
* usb_alloc_dev - usb device constructor (usbcore-internal)
* @parent: hub to which device is connected; null to allocate a root hub
* @bus: bus used to access the device
* @port1: one-based index of port; ignored for root hubs
*
* Context: task context, might sleep.
*
* Only hub drivers (including virtual root hub drivers for host
* controllers) should ever call this.
*
* This call may not be used in a non-sleeping context.
*
* Return: On success, a pointer to the allocated usb device. %NULL on
* failure.
*/
struct usb_device *usb_alloc_dev(struct usb_device *parent,
struct usb_bus *bus, unsigned port1)
{
struct usb_device *dev;
struct usb_hcd *usb_hcd = bus_to_hcd(bus);
unsigned raw_port = port1;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return NULL;
if (!usb_get_hcd(usb_hcd)) {
kfree(dev);
return NULL;
}
/* Root hubs aren't true devices, so don't allocate HCD resources */
if (usb_hcd->driver->alloc_dev && parent &&
!usb_hcd->driver->alloc_dev(usb_hcd, dev)) {
usb_put_hcd(bus_to_hcd(bus));
kfree(dev);
return NULL;
}
device_initialize(&dev->dev);
dev->dev.bus = &usb_bus_type;
dev->dev.type = &usb_device_type;
dev->dev.groups = usb_device_groups;
set_dev_node(&dev->dev, dev_to_node(bus->sysdev));
dev->state = USB_STATE_ATTACHED;
dev->lpm_disable_count = 1;
atomic_set(&dev->urbnum, 0);
INIT_LIST_HEAD(&dev->ep0.urb_list);
dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
dev->ep0.desc.bDescriptorType = USB_DT_ENDPOINT;
/* ep0 maxpacket comes later, from device descriptor */
usb_enable_endpoint(dev, &dev->ep0, false);
dev->can_submit = 1;
/* Save readable and stable topology id, distinguishing devices
* by location for diagnostics, tools, driver model, etc. The
* string is a path along hub ports, from the root. Each device's
* dev->devpath will be stable until USB is re-cabled, and hubs
* are often labeled with these port numbers. The name isn't
* as stable: bus->busnum changes easily from modprobe order,
* cardbus or pci hotplugging, and so on.
*/
if (unlikely(!parent)) {
dev->devpath[0] = '0';
dev->route = 0;
dev->dev.parent = bus->controller;
device_set_of_node_from_dev(&dev->dev, bus->sysdev);
dev_set_name(&dev->dev, "usb%d", bus->busnum);
} else {
/* match any labeling on the hubs; it's one-based */
if (parent->devpath[0] == '0') {
snprintf(dev->devpath, sizeof dev->devpath,
"%d", port1);
/* Root ports are not counted in route string */
dev->route = 0;
} else {
snprintf(dev->devpath, sizeof dev->devpath,
"%s.%d", parent->devpath, port1);
/* Route string assumes hubs have fewer than 16 ports */
if (port1 < 15)
dev->route = parent->route +
(port1 << ((parent->level - 1)*4));
else
dev->route = parent->route +
(15 << ((parent->level - 1)*4));
}
dev->dev.parent = &parent->dev;
dev_set_name(&dev->dev, "%d-%s", bus->busnum, dev->devpath);
if (!parent->parent) {
/* device under root hub's port */
raw_port = usb_hcd_find_raw_port_number(usb_hcd,
port1);
}
dev->dev.of_node = usb_of_get_device_node(parent, raw_port);
/* hub driver sets up TT records */
}
dev->portnum = port1;
dev->bus = bus;
dev->parent = parent;
INIT_LIST_HEAD(&dev->filelist);
#ifdef CONFIG_PM
pm_runtime_set_autosuspend_delay(&dev->dev,
usb_autosuspend_delay * 1000);
dev->connect_time = jiffies;
dev->active_duration = -jiffies;
#endif
dev->authorized = usb_dev_authorized(dev, usb_hcd);
return dev;
}
EXPORT_SYMBOL_GPL(usb_alloc_dev);
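/*
 * Worked example (illustrative) of the devpath/route encoding above: a
 * hub plugged into root-hub port 2 gets devpath "2" and route 0, since
 * root ports are not counted in the route string. A device on that
 * hub's port 3 gets devpath "2.3" and route 0x3 (3 << 0, the hub being
 * at level 1); a device on port 4 of a second-tier hub below it would
 * get route 0x43 (0x3 + (4 << 4)).
 */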
/**
* usb_get_dev - increments the reference count of the usb device structure
* @dev: the device being referenced
*
* Each live reference to a device should be refcounted.
*
* Drivers for USB interfaces should normally record such references in
* their probe() methods, when they bind to an interface, and release
* them by calling usb_put_dev(), in their disconnect() methods.
* However, if a driver does not access the usb_device structure after
* its disconnect() method returns then refcounting is not necessary,
* because the USB core guarantees that a usb_device will not be
* deallocated until after all of its interface drivers have been unbound.
*
* Return: A pointer to the device with the incremented reference counter.
*/
struct usb_device *usb_get_dev(struct usb_device *dev)
{
if (dev)
get_device(&dev->dev);
return dev;
}
EXPORT_SYMBOL_GPL(usb_get_dev);
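/*
 * Usage sketch (illustrative): the probe()/disconnect() pairing
 * described above; priv is a hypothetical driver-private structure.
 *
 *	probe:		priv->udev = usb_get_dev(interface_to_usbdev(intf));
 *	disconnect:	usb_put_dev(priv->udev);
 */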
/**
* usb_put_dev - release a use of the usb device structure
* @dev: device that's been disconnected
*
* Must be called when a user of a device is finished with it. When the last
* user of the device calls this function, the memory of the device is freed.
*/
void usb_put_dev(struct usb_device *dev)
{
if (dev)
put_device(&dev->dev);
}
EXPORT_SYMBOL_GPL(usb_put_dev);
/**
* usb_get_intf - increments the reference count of the usb interface structure
* @intf: the interface being referenced
*
* Each live reference to an interface must be refcounted.
*
* Drivers for USB interfaces should normally record such references in
* their probe() methods, when they bind to an interface, and release
* them by calling usb_put_intf(), in their disconnect() methods.
* However, if a driver does not access the usb_interface structure after
* its disconnect() method returns then refcounting is not necessary,
* because the USB core guarantees that a usb_interface will not be
* deallocated until after its driver has been unbound.
*
* Return: A pointer to the interface with the incremented reference counter.
*/
struct usb_interface *usb_get_intf(struct usb_interface *intf)
{
if (intf)
get_device(&intf->dev);
return intf;
}
EXPORT_SYMBOL_GPL(usb_get_intf);
/**
* usb_put_intf - release a use of the usb interface structure
* @intf: interface that's been decremented
*
* Must be called when a user of an interface is finished with it. When the
* last user of the interface calls this function, the memory of the interface
* is freed.
*/
void usb_put_intf(struct usb_interface *intf)
{
if (intf)
put_device(&intf->dev);
}
EXPORT_SYMBOL_GPL(usb_put_intf);
/**
* usb_intf_get_dma_device - acquire a reference on the usb interface's DMA endpoint
* @intf: the usb interface
*
* While a USB device cannot perform DMA operations by itself, many USB
* controllers can. A call to usb_intf_get_dma_device() returns the DMA endpoint
* for the given USB interface, if any. The returned device structure must be
* released with put_device().
*
* See also usb_get_dma_device().
*
* Returns: A reference to the usb interface's DMA endpoint; or NULL if none
* exists.
*/
struct device *usb_intf_get_dma_device(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
struct device *dmadev;
if (!udev->bus)
return NULL;
dmadev = get_device(udev->bus->sysdev);
if (!dmadev || !dmadev->dma_mask) {
put_device(dmadev);
return NULL;
}
return dmadev;
}
EXPORT_SYMBOL_GPL(usb_intf_get_dma_device);
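/*
 * Usage sketch (illustrative): streaming-DMA mapping against the
 * interface's DMA device; buf and len are hypothetical.
 *
 *	struct device *dmadev = usb_intf_get_dma_device(intf);
 *	dma_addr_t dma;
 *
 *	if (!dmadev)
 *		return -ENODEV;
 *	dma = dma_map_single(dmadev, buf, len, DMA_TO_DEVICE);
 *	...
 *	dma_unmap_single(dmadev, dma, len, DMA_TO_DEVICE);
 *	put_device(dmadev);
 */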
/* USB device locking
*
* USB devices and interfaces are locked using the semaphore in their
* embedded struct device. The hub driver guarantees that whenever a
* device is connected or disconnected, drivers are called with the
* USB device locked as well as their particular interface.
*
* Complications arise when several devices are to be locked at the same
* time. Only hub-aware drivers that are part of usbcore ever have to
* do this; nobody else needs to worry about it. The rule for locking
* is simple:
*
* When locking both a device and its parent, always lock the
* parent first.
*/
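/*
 * Example (illustrative) of the rule above, for hub-aware code that
 * must hold both locks at once:
 *
 *	usb_lock_device(parent_udev);
 *	usb_lock_device(child_udev);
 *	...
 *	usb_unlock_device(child_udev);
 *	usb_unlock_device(parent_udev);
 */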
/**
* usb_lock_device_for_reset - cautiously acquire the lock for a usb device structure
* @udev: device that's being locked
* @iface: interface bound to the driver making the request (optional)
*
* Attempts to acquire the device lock, but fails if the device is
* NOTATTACHED or SUSPENDED, or if iface is specified and the interface
* is neither BINDING nor BOUND. Rather than sleeping to wait for the
* lock, the routine polls repeatedly. This is to prevent deadlock with
* disconnect; in some drivers (such as usb-storage) the disconnect()
* or suspend() method will block waiting for a device reset to complete.
*
* Return: A negative error code for failure, otherwise 0.
*/
int usb_lock_device_for_reset(struct usb_device *udev,
const struct usb_interface *iface)
{
unsigned long jiffies_expire = jiffies + HZ;
if (udev->state == USB_STATE_NOTATTACHED)
return -ENODEV;
if (udev->state == USB_STATE_SUSPENDED)
return -EHOSTUNREACH;
if (iface && (iface->condition == USB_INTERFACE_UNBINDING ||
iface->condition == USB_INTERFACE_UNBOUND))
return -EINTR;
while (!usb_trylock_device(udev)) {
/* If we can't acquire the lock after waiting one second,
* we're probably deadlocked */
if (time_after(jiffies, jiffies_expire))
return -EBUSY;
msleep(15);
if (udev->state == USB_STATE_NOTATTACHED)
return -ENODEV;
if (udev->state == USB_STATE_SUSPENDED)
return -EHOSTUNREACH;
if (iface && (iface->condition == USB_INTERFACE_UNBINDING ||
iface->condition == USB_INTERFACE_UNBOUND))
return -EINTR;
}
return 0;
}
EXPORT_SYMBOL_GPL(usb_lock_device_for_reset);
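/*
 * Usage sketch (illustrative): resetting a device from a context where
 * the device lock is not already held.
 *
 *	ret = usb_lock_device_for_reset(udev, intf);
 *	if (ret == 0) {
 *		ret = usb_reset_device(udev);
 *		usb_unlock_device(udev);
 *	}
 */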
/**
* usb_get_current_frame_number - return current bus frame number
* @dev: the device whose bus is being queried
*
* Return: The current frame number for the USB host controller used
* with the given USB device. This can be used when scheduling
* isochronous requests.
*
* Note: Different kinds of host controller have different "scheduling
* horizons". While one type might support scheduling only 32 frames
* into the future, others could support scheduling up to 1024 frames
* into the future.
*
*/
int usb_get_current_frame_number(struct usb_device *dev)
{
return usb_hcd_get_frame_number(dev);
}
EXPORT_SYMBOL_GPL(usb_get_current_frame_number);
/*-------------------------------------------------------------------*/
/*
* __usb_get_extra_descriptor() finds a descriptor of specific type in the
* extra field of the interface and endpoint descriptor structs.
*/
int __usb_get_extra_descriptor(char *buffer, unsigned size,
unsigned char type, void **ptr, size_t minsize)
{
struct usb_descriptor_header *header;
while (size >= sizeof(struct usb_descriptor_header)) {
header = (struct usb_descriptor_header *)buffer;
if (header->bLength < 2 || header->bLength > size) {
printk(KERN_ERR
"%s: bogus descriptor, type %d length %d\n",
usbcore_name,
header->bDescriptorType,
header->bLength);
return -1;
}
if (header->bDescriptorType == type && header->bLength >= minsize) {
*ptr = header;
return 0;
}
buffer += header->bLength;
size -= header->bLength;
}
return -1;
}
EXPORT_SYMBOL_GPL(__usb_get_extra_descriptor);
/**
* usb_alloc_coherent - allocate dma-consistent buffer for URB_NO_xxx_DMA_MAP
* @dev: device the buffer will be used with
* @size: requested buffer size
* @mem_flags: affect whether allocation may block
* @dma: used to return DMA address of buffer
*
* Return: Either null (indicating no buffer could be allocated), or the
* cpu-space pointer to a buffer that may be used to perform DMA to the
* specified device. Such cpu-space buffers are returned along with the DMA
* address (through the pointer provided).
*
* Note:
* These buffers are used with URB_NO_xxx_DMA_MAP set in urb->transfer_flags
* to avoid behaviors like using "DMA bounce buffers", or thrashing IOMMU
* hardware during URB completion/resubmit. The implementation varies between
* platforms, depending on details of how DMA will work to this device.
* Using these buffers also eliminates cacheline sharing problems on
* architectures where CPU caches are not DMA-coherent. On systems without
* bus-snooping caches, these buffers are uncached.
*
* When the buffer is no longer used, free it with usb_free_coherent().
*/
void *usb_alloc_coherent(struct usb_device *dev, size_t size, gfp_t mem_flags,
dma_addr_t *dma)
{
if (!dev || !dev->bus)
return NULL;
return hcd_buffer_alloc(dev->bus, size, mem_flags, dma);
}
EXPORT_SYMBOL_GPL(usb_alloc_coherent);
/**
* usb_free_coherent - free memory allocated with usb_alloc_coherent()
* @dev: device the buffer was used with
* @size: requested buffer size
* @addr: CPU address of buffer
* @dma: DMA address of buffer
*
* This reclaims an I/O buffer, letting it be reused. The memory must have
* been allocated using usb_alloc_coherent(), and the parameters must match
* those provided in that allocation request.
*/
void usb_free_coherent(struct usb_device *dev, size_t size, void *addr,
dma_addr_t dma)
{
if (!dev || !dev->bus)
return;
if (!addr)
return;
hcd_buffer_free(dev->bus, size, addr, dma);
}
EXPORT_SYMBOL_GPL(usb_free_coherent);
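/*
 * Usage sketch (illustrative): pairing usb_alloc_coherent() with an URB
 * that sets URB_NO_TRANSFER_DMA_MAP; urb and size are hypothetical.
 *
 *	buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &urb->transfer_dma);
 *	if (!buf)
 *		return -ENOMEM;
 *	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 *	...
 *	usb_free_coherent(udev, size, buf, urb->transfer_dma);
 */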
/*
* Notifications of device and interface registration
*/
static int usb_bus_notify(struct notifier_block *nb, unsigned long action,
void *data)
{
struct device *dev = data;
switch (action) {
case BUS_NOTIFY_ADD_DEVICE:
if (dev->type == &usb_device_type)
(void) usb_create_sysfs_dev_files(to_usb_device(dev));
else if (dev->type == &usb_if_device_type)
usb_create_sysfs_intf_files(to_usb_interface(dev));
break;
case BUS_NOTIFY_DEL_DEVICE:
if (dev->type == &usb_device_type)
usb_remove_sysfs_dev_files(to_usb_device(dev));
else if (dev->type == &usb_if_device_type)
usb_remove_sysfs_intf_files(to_usb_interface(dev));
break;
}
return 0;
}
static struct notifier_block usb_bus_nb = {
.notifier_call = usb_bus_notify,
};
static void usb_debugfs_init(void)
{
debugfs_create_file("devices", 0444, usb_debug_root, NULL,
&usbfs_devices_fops);
}
static void usb_debugfs_cleanup(void)
{
debugfs_lookup_and_remove("devices", usb_debug_root);
}
/*
* Init
*/
static int __init usb_init(void)
{
int retval;
if (usb_disabled()) {
pr_info("%s: USB support disabled\n", usbcore_name);
return 0;
}
usb_init_pool_max();
usb_debugfs_init();
usb_acpi_register();
retval = bus_register(&usb_bus_type);
if (retval)
goto bus_register_failed;
retval = bus_register_notifier(&usb_bus_type, &usb_bus_nb);
if (retval)
goto bus_notifier_failed;
retval = usb_major_init();
if (retval)
goto major_init_failed;
retval = class_register(&usbmisc_class);
if (retval)
goto class_register_failed;
retval = usb_register(&usbfs_driver);
if (retval)
goto driver_register_failed;
retval = usb_devio_init();
if (retval)
goto usb_devio_init_failed;
retval = usb_hub_init();
if (retval)
goto hub_init_failed;
retval = usb_register_device_driver(&usb_generic_driver, THIS_MODULE);
if (!retval)
goto out;
usb_hub_cleanup();
hub_init_failed:
usb_devio_cleanup();
usb_devio_init_failed:
usb_deregister(&usbfs_driver);
driver_register_failed:
class_unregister(&usbmisc_class);
class_register_failed:
usb_major_cleanup();
major_init_failed:
bus_unregister_notifier(&usb_bus_type, &usb_bus_nb);
bus_notifier_failed:
bus_unregister(&usb_bus_type);
bus_register_failed:
usb_acpi_unregister();
usb_debugfs_cleanup();
out:
return retval;
}
/*
* Cleanup
*/
static void __exit usb_exit(void)
{
/* This will matter if shutdown/reboot does exitcalls. */
if (usb_disabled())
return;
usb_release_quirk_list();
usb_deregister_device_driver(&usb_generic_driver);
usb_major_cleanup();
usb_deregister(&usbfs_driver);
usb_devio_cleanup();
usb_hub_cleanup();
class_unregister(&usbmisc_class);
bus_unregister_notifier(&usb_bus_type, &usb_bus_nb);
bus_unregister(&usb_bus_type);
usb_acpi_unregister();
usb_debugfs_cleanup();
idr_destroy(&usb_bus_idr);
}
subsys_initcall(usb_init);
module_exit(usb_exit);
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/core/usb.c |
// SPDX-License-Identifier: GPL-2.0
/*
* USB-ACPI glue code
*
* Copyright 2012 Red Hat <[email protected]>
*/
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/usb/hcd.h>
#include "hub.h"
/**
* usb_acpi_power_manageable - check whether a usb port has an
* acpi power resource.
* @hdev: USB device belonging to the usb hub
* @index: zero-based port index
*
* Return true if the port has an acpi power resource, false otherwise.
*/
bool usb_acpi_power_manageable(struct usb_device *hdev, int index)
{
acpi_handle port_handle;
int port1 = index + 1;
port_handle = usb_get_hub_port_acpi_handle(hdev,
port1);
if (port_handle)
return acpi_bus_power_manageable(port_handle);
else
return false;
}
EXPORT_SYMBOL_GPL(usb_acpi_power_manageable);
#define UUID_USB_CONTROLLER_DSM "ce2ee385-00e6-48cb-9f05-2edb927c4899"
#define USB_DSM_DISABLE_U1_U2_FOR_PORT 5
/**
* usb_acpi_port_lpm_incapable - check if lpm should be disabled for a port.
* @hdev: USB device belonging to the usb hub
* @index: zero based port index
*
* Some USB3 ports may not support USB3 link power management U1/U2 states
* due to different retimer setup. ACPI provides _DSM method which returns 0x01
* if U1 and U2 states should be disabled. Evaluate _DSM with:
* Arg0: UUID = ce2ee385-00e6-48cb-9f05-2edb927c4899
* Arg1: Revision ID = 0
* Arg2: Function Index = 5
* Arg3: (empty)
*
* Return 1 if USB3 port is LPM incapable, negative on error, otherwise 0
*/
int usb_acpi_port_lpm_incapable(struct usb_device *hdev, int index)
{
union acpi_object *obj;
acpi_handle port_handle;
int port1 = index + 1;
guid_t guid;
int ret;
ret = guid_parse(UUID_USB_CONTROLLER_DSM, &guid);
if (ret)
return ret;
port_handle = usb_get_hub_port_acpi_handle(hdev, port1);
if (!port_handle) {
dev_dbg(&hdev->dev, "port-%d no acpi handle\n", port1);
return -ENODEV;
}
if (!acpi_check_dsm(port_handle, &guid, 0,
BIT(USB_DSM_DISABLE_U1_U2_FOR_PORT))) {
dev_dbg(&hdev->dev, "port-%d no _DSM function %d\n",
port1, USB_DSM_DISABLE_U1_U2_FOR_PORT);
return -ENODEV;
}
obj = acpi_evaluate_dsm_typed(port_handle, &guid, 0,
USB_DSM_DISABLE_U1_U2_FOR_PORT, NULL,
ACPI_TYPE_INTEGER);
if (!obj) {
dev_dbg(&hdev->dev, "evaluate port-%d _DSM failed\n", port1);
return -EINVAL;
}
if (obj->integer.value == 0x01)
ret = 1;
ACPI_FREE(obj);
return ret;
}
EXPORT_SYMBOL_GPL(usb_acpi_port_lpm_incapable);
/**
* usb_acpi_set_power_state - control usb port's power via acpi power
* resource
* @hdev: USB device belonging to the usb hub
* @index: zero-based port index
* @enable: power state expected to be set
*
* Note: use usb_acpi_power_manageable() to check whether the usb port
* has an acpi power resource before invoking this function.
*
* Returns 0 on success, else negative errno.
*/
int usb_acpi_set_power_state(struct usb_device *hdev, int index, bool enable)
{
struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
struct usb_port *port_dev;
acpi_handle port_handle;
unsigned char state;
int port1 = index + 1;
int error = -EINVAL;
if (!hub)
return -ENODEV;
port_dev = hub->ports[port1 - 1];
port_handle = (acpi_handle) usb_get_hub_port_acpi_handle(hdev, port1);
if (!port_handle)
return error;
if (enable)
state = ACPI_STATE_D0;
else
state = ACPI_STATE_D3_COLD;
error = acpi_bus_set_power(port_handle, state);
if (!error)
dev_dbg(&port_dev->dev, "acpi: power was set to %d\n", enable);
else
dev_dbg(&port_dev->dev, "acpi: power failed to be set\n");
return error;
}
EXPORT_SYMBOL_GPL(usb_acpi_set_power_state);
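/*
 * Usage sketch (illustrative): the check-then-set pattern required by
 * the comment above, powering a port off.
 *
 *	if (usb_acpi_power_manageable(hdev, index))
 *		usb_acpi_set_power_state(hdev, index, false);
 */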
static enum usb_port_connect_type usb_acpi_get_connect_type(acpi_handle handle,
struct acpi_pld_info *pld)
{
enum usb_port_connect_type connect_type = USB_PORT_CONNECT_TYPE_UNKNOWN;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *upc = NULL;
acpi_status status;
/*
* According to section 9.14 of the ACPI 6.2 spec, _PLD indicates
* whether a usb port is user visible and _UPC indicates whether it is
* connectable. If the port is both visible and connectable, USB
* devices can be freely connected and disconnected. If it is not
* visible but connectable, a usb device is directly hard-wired to the
* port. If it is neither visible nor connectable, the port is not used.
*/
status = acpi_evaluate_object(handle, "_UPC", NULL, &buffer);
if (ACPI_FAILURE(status))
goto out;
upc = buffer.pointer;
if (!upc || (upc->type != ACPI_TYPE_PACKAGE) || upc->package.count != 4)
goto out;
if (upc->package.elements[0].integer.value)
if (pld->user_visible)
connect_type = USB_PORT_CONNECT_TYPE_HOT_PLUG;
else
connect_type = USB_PORT_CONNECT_TYPE_HARD_WIRED;
else if (!pld->user_visible)
connect_type = USB_PORT_NOT_USED;
out:
kfree(upc);
return connect_type;
}
/*
* Private to usb-acpi; all the core needs to know is that
* port_dev->location is non-zero when it has been set by the firmware.
*/
#define USB_ACPI_LOCATION_VALID (1 << 31)
static struct acpi_device *
usb_acpi_get_companion_for_port(struct usb_port *port_dev)
{
struct usb_device *udev;
struct acpi_device *adev;
acpi_handle *parent_handle;
int port1;
/* Get the struct usb_device pointer of the port's hub */
udev = to_usb_device(port_dev->dev.parent->parent);
/*
* The root hub ports' parent is the root hub. The non-root-hub
* ports' parent is the parent hub port to which the hub is
* connected.
*/
if (!udev->parent) {
adev = ACPI_COMPANION(&udev->dev);
port1 = usb_hcd_find_raw_port_number(bus_to_hcd(udev->bus),
port_dev->portnum);
} else {
parent_handle = usb_get_hub_port_acpi_handle(udev->parent,
udev->portnum);
if (!parent_handle)
return NULL;
adev = acpi_fetch_acpi_dev(parent_handle);
port1 = port_dev->portnum;
}
return acpi_find_child_by_adr(adev, port1);
}
static struct acpi_device *
usb_acpi_find_companion_for_port(struct usb_port *port_dev)
{
struct acpi_device *adev;
struct acpi_pld_info *pld;
acpi_handle *handle;
acpi_status status;
adev = usb_acpi_get_companion_for_port(port_dev);
if (!adev)
return NULL;
handle = adev->handle;
status = acpi_get_physical_device_location(handle, &pld);
if (ACPI_SUCCESS(status) && pld) {
port_dev->location = USB_ACPI_LOCATION_VALID
| pld->group_token << 8 | pld->group_position;
port_dev->connect_type = usb_acpi_get_connect_type(handle, pld);
ACPI_FREE(pld);
}
return adev;
}
static struct acpi_device *
usb_acpi_find_companion_for_device(struct usb_device *udev)
{
struct acpi_device *adev;
struct usb_port *port_dev;
struct usb_hub *hub;
if (!udev->parent) {
/*
* root hub is only child (_ADR=0) under its parent, the HC.
* sysdev pointer is the HC as seen from firmware.
*/
adev = ACPI_COMPANION(udev->bus->sysdev);
return acpi_find_child_device(adev, 0, false);
}
hub = usb_hub_to_struct_hub(udev->parent);
if (!hub)
return NULL;
/*
* This is an embedded USB device connected to a port and such
* devices share the port's ACPI companion.
*/
port_dev = hub->ports[udev->portnum - 1];
return usb_acpi_get_companion_for_port(port_dev);
}
static struct acpi_device *usb_acpi_find_companion(struct device *dev)
{
/*
* The USB hierarchy looks like the following:
*
* Device (EHC1)
* Device (HUBN)
* Device (PR01)
* Device (PR11)
* Device (PR12)
* Device (FN12)
* Device (FN13)
* Device (PR13)
* ...
* where HUBN is the root hub, and PRNN are USB ports and devices
* connected to them, and FNNN are individual functions for
* connected composite USB devices. PRNN and FNNN may contain
* _CRS and other methods describing sideband resources for
* the connected device.
*
* On the kernel side both root hub and embedded USB devices are
* represented as instances of usb_device structure, and ports
* are represented as usb_port structures, so the whole process
* is split into 2 parts: finding companions for devices and
* finding companions for ports.
*
* Note that we do not handle individual functions of composite
* devices yet, for that we would need to assign companions to
* devices corresponding to USB interfaces.
*/
if (is_usb_device(dev))
return usb_acpi_find_companion_for_device(to_usb_device(dev));
else if (is_usb_port(dev))
return usb_acpi_find_companion_for_port(to_usb_port(dev));
return NULL;
}
static bool usb_acpi_bus_match(struct device *dev)
{
return is_usb_device(dev) || is_usb_port(dev);
}
static struct acpi_bus_type usb_acpi_bus = {
.name = "USB",
.match = usb_acpi_bus_match,
.find_companion = usb_acpi_find_companion,
};
int usb_acpi_register(void)
{
return register_acpi_bus_type(&usb_acpi_bus);
}
void usb_acpi_unregister(void)
{
unregister_acpi_bus_type(&usb_acpi_bus);
}
| linux-master | drivers/usb/core/usb-acpi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* USB hub driver.
*
* (C) Copyright 1999 Linus Torvalds
* (C) Copyright 1999 Johannes Erdfelt
* (C) Copyright 1999 Gregory P. Smith
* (C) Copyright 2001 Brad Hards ([email protected])
*
* Released under the GPLv2 only.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/completion.h>
#include <linux/sched/mm.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/kcov.h>
#include <linux/ioctl.h>
#include <linux/usb.h>
#include <linux/usbdevice_fs.h>
#include <linux/usb/hcd.h>
#include <linux/usb/onboard_hub.h>
#include <linux/usb/otg.h>
#include <linux/usb/quirks.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/pm_qos.h>
#include <linux/kobject.h>
#include <linux/bitfield.h>
#include <linux/uaccess.h>
#include <asm/byteorder.h>
#include "hub.h"
#include "otg_productlist.h"
#define USB_VENDOR_GENESYS_LOGIC 0x05e3
#define USB_VENDOR_SMSC 0x0424
#define USB_PRODUCT_USB5534B 0x5534
#define USB_VENDOR_CYPRESS 0x04b4
#define USB_PRODUCT_CY7C65632 0x6570
#define USB_VENDOR_TEXAS_INSTRUMENTS 0x0451
#define USB_PRODUCT_TUSB8041_USB3 0x8140
#define USB_PRODUCT_TUSB8041_USB2 0x8142
#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
#define HUB_QUIRK_DISABLE_AUTOSUSPEND 0x02
#define USB_TP_TRANSMISSION_DELAY 40 /* ns */
#define USB_TP_TRANSMISSION_DELAY_MAX 65535 /* ns */
#define USB_PING_RESPONSE_TIME 400 /* ns */
/* Protect struct usb_device->state and ->children members
* Note: Both are also protected by ->dev.sem, except that ->state can
* change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */
static DEFINE_SPINLOCK(device_state_lock);
/* workqueue to process hub events */
static struct workqueue_struct *hub_wq;
static void hub_event(struct work_struct *work);
/* synchronize hub-port add/remove and peering operations */
DEFINE_MUTEX(usb_port_peer_mutex);
/* cycle leds on hubs that aren't blinking for attention */
static bool blinkenlights;
module_param(blinkenlights, bool, S_IRUGO);
MODULE_PARM_DESC(blinkenlights, "true to cycle leds on hubs");
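/*
 * Illustrative usage: since this file is part of the usbcore module,
 * booting with usbcore.blinkenlights=1 enables the LED cycling on hubs
 * that support port indicators.
 */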
/*
* Device SATA8000 FW1.0 from DATAST0R Technology Corp requires about
* 10 seconds to send reply for the initial 64-byte descriptor request.
*/
/* define initial 64-byte descriptor request timeout in milliseconds */
static int initial_descriptor_timeout = USB_CTRL_GET_TIMEOUT;
module_param(initial_descriptor_timeout, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(initial_descriptor_timeout,
"initial 64-byte descriptor request timeout in milliseconds "
"(default 5000 - 5.0 seconds)");
/*
* As of 2.6.10 we introduce a new USB device initialization scheme which
* closely resembles the way Windows works. Hopefully it will be compatible
* with a wider range of devices than the old scheme. However some previously
* working devices may start giving rise to "device not accepting address"
* errors; if that happens the user can try the old scheme by adjusting the
* following module parameters.
*
* For maximum flexibility there are two boolean parameters to control the
* hub driver's behavior. On the first initialization attempt, if the
* "old_scheme_first" parameter is set then the old scheme will be used,
* otherwise the new scheme is used. If that fails and "use_both_schemes"
* is set, then the driver will make another attempt, using the other scheme.
*/
static bool old_scheme_first;
module_param(old_scheme_first, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(old_scheme_first,
"start with the old device initialization scheme");
static bool use_both_schemes = true;
module_param(use_both_schemes, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_both_schemes,
"try the other device initialization scheme if the "
"first one fails");
/* Mutual exclusion for EHCI CF initialization. This interferes with
* port reset on some companion controllers.
*/
DECLARE_RWSEM(ehci_cf_port_reset_rwsem);
EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
#define HUB_DEBOUNCE_TIMEOUT 2000
#define HUB_DEBOUNCE_STEP 25
#define HUB_DEBOUNCE_STABLE 100
static void hub_release(struct kref *kref);
static int usb_reset_and_verify_device(struct usb_device *udev);
static int hub_port_disable(struct usb_hub *hub, int port1, int set_state);
static bool hub_port_warm_reset_required(struct usb_hub *hub, int port1,
u16 portstatus);
static inline char *portspeed(struct usb_hub *hub, int portstatus)
{
if (hub_is_superspeedplus(hub->hdev))
return "10.0 Gb/s";
if (hub_is_superspeed(hub->hdev))
return "5.0 Gb/s";
if (portstatus & USB_PORT_STAT_HIGH_SPEED)
return "480 Mb/s";
else if (portstatus & USB_PORT_STAT_LOW_SPEED)
return "1.5 Mb/s";
else
return "12 Mb/s";
}
/* Note that hdev or one of its children must be locked! */
struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
{
if (!hdev || !hdev->actconfig || !hdev->maxchild)
return NULL;
return usb_get_intfdata(hdev->actconfig->interface[0]);
}
int usb_device_supports_lpm(struct usb_device *udev)
{
/* Some devices have trouble with LPM */
if (udev->quirks & USB_QUIRK_NO_LPM)
return 0;
/* USB 2.1 (and greater) devices indicate LPM support through
* their USB 2.0 Extended Capabilities BOS descriptor.
*/
if (udev->speed == USB_SPEED_HIGH || udev->speed == USB_SPEED_FULL) {
if (udev->bos->ext_cap &&
(USB_LPM_SUPPORT &
le32_to_cpu(udev->bos->ext_cap->bmAttributes)))
return 1;
return 0;
}
/*
* According to the USB 3.0 spec, all USB 3.0 devices must support LPM.
* However, there are some that don't, and they set the U1/U2 exit
* latencies to zero.
*/
if (!udev->bos->ss_cap) {
dev_info(&udev->dev, "No LPM exit latency info found, disabling LPM.\n");
return 0;
}
if (udev->bos->ss_cap->bU1devExitLat == 0 &&
udev->bos->ss_cap->bU2DevExitLat == 0) {
if (udev->parent)
dev_info(&udev->dev, "LPM exit latency is zeroed, disabling LPM.\n");
else
dev_info(&udev->dev, "We don't know the algorithms for LPM for this host, disabling LPM.\n");
return 0;
}
if (!udev->parent || udev->parent->lpm_capable)
return 1;
return 0;
}
/*
* Set the Maximum Exit Latency (MEL) for the host to wake up the path from
* U1/U2, send a PING to the device and receive a PING_RESPONSE.
* See USB 3.1 section C.1.5.2
*/
static void usb_set_lpm_mel(struct usb_device *udev,
struct usb3_lpm_parameters *udev_lpm_params,
unsigned int udev_exit_latency,
struct usb_hub *hub,
struct usb3_lpm_parameters *hub_lpm_params,
unsigned int hub_exit_latency)
{
unsigned int total_mel;
/*
* tMEL1. time to transition path from host to device into U0.
* MEL for parent already contains the delay up to parent, so only add
* the exit latency for the last link (pick the slower exit latency),
* and the hub header decode latency. See USB 3.1 section C 2.2.1
* Store MEL in nanoseconds
*/
total_mel = hub_lpm_params->mel +
max(udev_exit_latency, hub_exit_latency) * 1000 +
hub->descriptor->u.ss.bHubHdrDecLat * 100;
/*
* tMEL2. Time to submit PING packet. Sum of tTPTransmissionDelay for
* each link + wHubDelay for each hub. Add only for last link.
* tMEL4, the time for PING_RESPONSE to traverse upstream is similar.
* Multiply by 2 to include it as well.
*/
total_mel += (__le16_to_cpu(hub->descriptor->u.ss.wHubDelay) +
USB_TP_TRANSMISSION_DELAY) * 2;
/*
* tMEL3, tPingResponse. Time taken by device to generate PING_RESPONSE
* after receiving PING. Also add 2100ns as stated in USB 3.1 C 1.5.2.4
* to cover the delay if the PING_RESPONSE is queued behind a Max Packet
* Size DP.
* Note these delays should be added only once for the entire path, so
* add them to the MEL of the device connected to the roothub.
*/
if (!hub->hdev->parent)
total_mel += USB_PING_RESPONSE_TIME + 2100;
udev_lpm_params->mel = total_mel;
}
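/*
 * Worked example with illustrative numbers (not taken from the spec):
 * for a device plugged directly into a root hub port (so
 * hub->hdev->parent is NULL), with hub MEL = 0 ns, udev_exit_latency =
 * 1 us, hub_exit_latency = 0, bHubHdrDecLat = 1 (0.1 us) and
 * wHubDelay = 0:
 *
 *	tMEL1 = 0 + max(1, 0) * 1000 + 1 * 100 = 1100 ns
 *	tMEL2 + tMEL4 = (0 + 40) * 2           =   80 ns
 *	tMEL3 = 400 + 2100                     = 2500 ns
 *	total mel                              = 3680 ns
 */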
/*
* Set the maximum Device to Host Exit Latency (PEL) for the device to initiate
* a transition from either U1 or U2.
*/
static void usb_set_lpm_pel(struct usb_device *udev,
struct usb3_lpm_parameters *udev_lpm_params,
unsigned int udev_exit_latency,
struct usb_hub *hub,
struct usb3_lpm_parameters *hub_lpm_params,
unsigned int hub_exit_latency,
unsigned int port_to_port_exit_latency)
{
unsigned int first_link_pel;
unsigned int hub_pel;
/*
* First, the device sends an LFPS to transition the link between the
* device and the parent hub into U0. The exit latency is the bigger of
* the device exit latency or the hub exit latency.
*/
if (udev_exit_latency > hub_exit_latency)
first_link_pel = udev_exit_latency * 1000;
else
first_link_pel = hub_exit_latency * 1000;
/*
* When the hub starts to receive the LFPS, there is a slight delay for
* it to figure out that one of the ports is sending an LFPS. Then it
* will forward the LFPS to its upstream link. The exit latency is the
* delay, plus the PEL that we calculated for this hub.
*/
hub_pel = port_to_port_exit_latency * 1000 + hub_lpm_params->pel;
/*
* According to figure C-7 in the USB 3.0 spec, the PEL for this device
* is the greater of the two exit latencies.
*/
if (first_link_pel > hub_pel)
udev_lpm_params->pel = first_link_pel;
else
udev_lpm_params->pel = hub_pel;
}
/*
* Set the System Exit Latency (SEL) to indicate the total worst-case time from
* when a device initiates a transition to U0, until when it will receive the
* first packet from the host controller.
*
* Section C.1.5.1 describes the four components to this:
* - t1: device PEL
* - t2: time for the ERDY to make it from the device to the host.
* - t3: a host-specific delay to process the ERDY.
* - t4: time for the packet to make it from the host to the device.
*
* t3 is specific to both the xHCI host and the platform the host is integrated
* into. The Intel HW folks have said it's negligible, FIXME if a different
* vendor says otherwise.
*/
static void usb_set_lpm_sel(struct usb_device *udev,
struct usb3_lpm_parameters *udev_lpm_params)
{
struct usb_device *parent;
unsigned int num_hubs;
unsigned int total_sel;
/* t1 = device PEL */
total_sel = udev_lpm_params->pel;
/* How many external hubs are in between the device & the root port. */
for (parent = udev->parent, num_hubs = 0; parent->parent;
parent = parent->parent)
num_hubs++;
/* t2 = 2.1us + 250ns * (num_hubs - 1) */
if (num_hubs > 0)
total_sel += 2100 + 250 * (num_hubs - 1);
/* t4 = 250ns * num_hubs */
total_sel += 250 * num_hubs;
udev_lpm_params->sel = total_sel;
}
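/*
 * Worked example (illustrative numbers): a device behind two external
 * hubs (num_hubs = 2) with pel = 4000 ns:
 *
 *	t2 = 2100 + 250 * (2 - 1) = 2350 ns
 *	t4 = 250 * 2              =  500 ns
 *	sel = 4000 + 2350 + 500   = 6850 ns
 */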
static void usb_set_lpm_parameters(struct usb_device *udev)
{
struct usb_hub *hub;
unsigned int port_to_port_delay;
unsigned int udev_u1_del;
unsigned int udev_u2_del;
unsigned int hub_u1_del;
unsigned int hub_u2_del;
if (!udev->lpm_capable || udev->speed < USB_SPEED_SUPER)
return;
hub = usb_hub_to_struct_hub(udev->parent);
/* It doesn't take time to transition the roothub into U0, since it
* doesn't have an upstream link.
*/
if (!hub)
return;
udev_u1_del = udev->bos->ss_cap->bU1devExitLat;
udev_u2_del = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat);
hub_u1_del = udev->parent->bos->ss_cap->bU1devExitLat;
hub_u2_del = le16_to_cpu(udev->parent->bos->ss_cap->bU2DevExitLat);
usb_set_lpm_mel(udev, &udev->u1_params, udev_u1_del,
hub, &udev->parent->u1_params, hub_u1_del);
usb_set_lpm_mel(udev, &udev->u2_params, udev_u2_del,
hub, &udev->parent->u2_params, hub_u2_del);
/*
* Appendix C, section C.2.2.2, says that there is a slight delay from
* when the parent hub notices the downstream port is trying to
* transition to U0 to when the hub initiates a U0 transition on its
* upstream port. The section says the delays are tPort2PortU1EL and
* tPort2PortU2EL, but it doesn't define what they are.
*
* The hub chapter, sections 10.4.2.4 and 10.4.2.5 seem to be talking
* about the same delays. Use the maximum delay calculations from those
* sections. For U1, it's tHubPort2PortExitLat, which is 1us max. For
* U2, it's tHubPort2PortExitLat + U2DevExitLat - U1DevExitLat. I
* assume the device exit latencies they are talking about are the hub
* exit latencies.
*
* What do we do if the U2 exit latency is less than the U1 exit
* latency? It's possible, although not likely...
*/
port_to_port_delay = 1;
usb_set_lpm_pel(udev, &udev->u1_params, udev_u1_del,
hub, &udev->parent->u1_params, hub_u1_del,
port_to_port_delay);
if (hub_u2_del > hub_u1_del)
port_to_port_delay = 1 + hub_u2_del - hub_u1_del;
else
port_to_port_delay = 1 + hub_u1_del;
usb_set_lpm_pel(udev, &udev->u2_params, udev_u2_del,
hub, &udev->parent->u2_params, hub_u2_del,
port_to_port_delay);
/* Now that we've got PEL, calculate SEL. */
usb_set_lpm_sel(udev, &udev->u1_params);
usb_set_lpm_sel(udev, &udev->u2_params);
}
/* USB 2.0 spec Section 11.24.4.5 */
static int get_hub_descriptor(struct usb_device *hdev,
struct usb_hub_descriptor *desc)
{
int i, ret, size;
unsigned dtype;
if (hub_is_superspeed(hdev)) {
dtype = USB_DT_SS_HUB;
size = USB_DT_SS_HUB_SIZE;
} else {
dtype = USB_DT_HUB;
size = sizeof(struct usb_hub_descriptor);
}
for (i = 0; i < 3; i++) {
ret = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
USB_REQ_GET_DESCRIPTOR, USB_DIR_IN | USB_RT_HUB,
dtype << 8, 0, desc, size,
USB_CTRL_GET_TIMEOUT);
if (hub_is_superspeed(hdev)) {
if (ret == size)
return ret;
} else if (ret >= USB_DT_HUB_NONVAR_SIZE + 2) {
/* Make sure we have the DeviceRemovable field. */
size = USB_DT_HUB_NONVAR_SIZE + desc->bNbrPorts / 8 + 1;
if (ret < size)
return -EMSGSIZE;
return ret;
}
}
return -EINVAL;
}
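/*
 * Illustrative size check (assuming USB_DT_HUB_NONVAR_SIZE is 7): for a
 * 16-port high-speed hub, the DeviceRemovable bitmap makes the minimum
 * acceptable descriptor 7 + 16/8 + 1 = 10 bytes, so a 9-byte reply is
 * rejected with -EMSGSIZE.
 */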
/*
* USB 2.0 spec Section 11.24.2.1
*/
static int clear_hub_feature(struct usb_device *hdev, int feature)
{
return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
USB_REQ_CLEAR_FEATURE, USB_RT_HUB, feature, 0, NULL, 0, 1000);
}
/*
* USB 2.0 spec Section 11.24.2.2
*/
int usb_clear_port_feature(struct usb_device *hdev, int port1, int feature)
{
return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
USB_REQ_CLEAR_FEATURE, USB_RT_PORT, feature, port1,
NULL, 0, 1000);
}
/*
* USB 2.0 spec Section 11.24.2.13
*/
static int set_port_feature(struct usb_device *hdev, int port1, int feature)
{
return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
USB_REQ_SET_FEATURE, USB_RT_PORT, feature, port1,
NULL, 0, 1000);
}
static char *to_led_name(int selector)
{
switch (selector) {
case HUB_LED_AMBER:
return "amber";
case HUB_LED_GREEN:
return "green";
case HUB_LED_OFF:
return "off";
case HUB_LED_AUTO:
return "auto";
default:
return "??";
}
}
/*
* USB 2.0 spec Section 11.24.2.7.1.10 and table 11-7
* for info about using port indicators
*/
static void set_port_led(struct usb_hub *hub, int port1, int selector)
{
struct usb_port *port_dev = hub->ports[port1 - 1];
int status;
status = set_port_feature(hub->hdev, (selector << 8) | port1,
USB_PORT_FEAT_INDICATOR);
dev_dbg(&port_dev->dev, "indicator %s status %d\n",
to_led_name(selector), status);
}
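/*
 * Illustrative encoding: turning port 2's indicator green sends
 * SetPortFeature(PORT_INDICATOR) with wIndex = (HUB_LED_GREEN << 8) | 2,
 * i.e. the selector in the high byte and the port number in the low byte.
 */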
#define LED_CYCLE_PERIOD ((2*HZ)/3)
static void led_work(struct work_struct *work)
{
struct usb_hub *hub =
container_of(work, struct usb_hub, leds.work);
struct usb_device *hdev = hub->hdev;
unsigned i;
unsigned changed = 0;
int cursor = -1;
if (hdev->state != USB_STATE_CONFIGURED || hub->quiescing)
return;
for (i = 0; i < hdev->maxchild; i++) {
unsigned selector, mode;
/* 30%-50% duty cycle */
switch (hub->indicator[i]) {
/* cycle marker */
case INDICATOR_CYCLE:
cursor = i;
selector = HUB_LED_AUTO;
mode = INDICATOR_AUTO;
break;
/* blinking green = sw attention */
case INDICATOR_GREEN_BLINK:
selector = HUB_LED_GREEN;
mode = INDICATOR_GREEN_BLINK_OFF;
break;
case INDICATOR_GREEN_BLINK_OFF:
selector = HUB_LED_OFF;
mode = INDICATOR_GREEN_BLINK;
break;
/* blinking amber = hw attention */
case INDICATOR_AMBER_BLINK:
selector = HUB_LED_AMBER;
mode = INDICATOR_AMBER_BLINK_OFF;
break;
case INDICATOR_AMBER_BLINK_OFF:
selector = HUB_LED_OFF;
mode = INDICATOR_AMBER_BLINK;
break;
/* blink green/amber = reserved */
case INDICATOR_ALT_BLINK:
selector = HUB_LED_GREEN;
mode = INDICATOR_ALT_BLINK_OFF;
break;
case INDICATOR_ALT_BLINK_OFF:
selector = HUB_LED_AMBER;
mode = INDICATOR_ALT_BLINK;
break;
default:
continue;
}
if (selector != HUB_LED_AUTO)
changed = 1;
set_port_led(hub, i + 1, selector);
hub->indicator[i] = mode;
}
if (!changed && blinkenlights) {
cursor++;
cursor %= hdev->maxchild;
set_port_led(hub, cursor + 1, HUB_LED_GREEN);
hub->indicator[cursor] = INDICATOR_CYCLE;
changed++;
}
if (changed)
queue_delayed_work(system_power_efficient_wq,
&hub->leds, LED_CYCLE_PERIOD);
}
/* use a short timeout for hub/port status fetches */
#define USB_STS_TIMEOUT 1000
#define USB_STS_RETRIES 5
/*
* USB 2.0 spec Section 11.24.2.6
*/
static int get_hub_status(struct usb_device *hdev,
struct usb_hub_status *data)
{
int i, status = -ETIMEDOUT;
for (i = 0; i < USB_STS_RETRIES &&
(status == -ETIMEDOUT || status == -EPIPE); i++) {
status = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_HUB, 0, 0,
data, sizeof(*data), USB_STS_TIMEOUT);
}
return status;
}
/*
* USB 2.0 spec Section 11.24.2.7
* USB 3.1 takes into use the wValue and wLength fields, spec Section 10.16.2.6
*/
static int get_port_status(struct usb_device *hdev, int port1,
void *data, u16 value, u16 length)
{
int i, status = -ETIMEDOUT;
for (i = 0; i < USB_STS_RETRIES &&
(status == -ETIMEDOUT || status == -EPIPE); i++) {
status = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_PORT, value,
port1, data, length, USB_STS_TIMEOUT);
}
return status;
}
static int hub_ext_port_status(struct usb_hub *hub, int port1, int type,
u16 *status, u16 *change, u32 *ext_status)
{
int ret;
int len = 4;
if (type != HUB_PORT_STATUS)
len = 8;
mutex_lock(&hub->status_mutex);
ret = get_port_status(hub->hdev, port1, &hub->status->port, type, len);
if (ret < len) {
if (ret != -ENODEV)
dev_err(hub->intfdev,
"%s failed (err = %d)\n", __func__, ret);
if (ret >= 0)
ret = -EIO;
} else {
*status = le16_to_cpu(hub->status->port.wPortStatus);
*change = le16_to_cpu(hub->status->port.wPortChange);
if (type != HUB_PORT_STATUS && ext_status)
*ext_status = le32_to_cpu(
hub->status->port.dwExtPortStatus);
ret = 0;
}
mutex_unlock(&hub->status_mutex);
/*
* There is no need to lock status_mutex here, because status_mutex
* protects hub->status, and the phy driver only checks the port
* status without changing the status.
*/
if (!ret) {
struct usb_device *hdev = hub->hdev;
/*
* Only roothub will be notified of port state changes,
* since the USB PHY only cares about changes at the next
* level.
*/
if (is_root_hub(hdev)) {
struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
if (hcd->usb_phy)
usb_phy_notify_port_status(hcd->usb_phy,
port1 - 1, *status, *change);
}
}
return ret;
}
int usb_hub_port_status(struct usb_hub *hub, int port1,
u16 *status, u16 *change)
{
return hub_ext_port_status(hub, port1, HUB_PORT_STATUS,
status, change, NULL);
}
static void hub_resubmit_irq_urb(struct usb_hub *hub)
{
unsigned long flags;
int status;
spin_lock_irqsave(&hub->irq_urb_lock, flags);
if (hub->quiescing) {
spin_unlock_irqrestore(&hub->irq_urb_lock, flags);
return;
}
status = usb_submit_urb(hub->urb, GFP_ATOMIC);
if (status && status != -ENODEV && status != -EPERM &&
status != -ESHUTDOWN) {
dev_err(hub->intfdev, "resubmit --> %d\n", status);
mod_timer(&hub->irq_urb_retry, jiffies + HZ);
}
spin_unlock_irqrestore(&hub->irq_urb_lock, flags);
}
static void hub_retry_irq_urb(struct timer_list *t)
{
struct usb_hub *hub = from_timer(hub, t, irq_urb_retry);
hub_resubmit_irq_urb(hub);
}
static void kick_hub_wq(struct usb_hub *hub)
{
struct usb_interface *intf;
if (hub->disconnected || work_pending(&hub->events))
return;
/*
* Suppress autosuspend until the event is processed.
*
* Be careful and make sure that the symmetric operation is
* always called. We are here only when there is no pending
* work for this hub. Therefore put the interface either when
* the new work is called or when it is canceled.
*/
intf = to_usb_interface(hub->intfdev);
usb_autopm_get_interface_no_resume(intf);
kref_get(&hub->kref);
if (queue_work(hub_wq, &hub->events))
return;
/* the work has already been scheduled */
usb_autopm_put_interface_async(intf);
kref_put(&hub->kref, hub_release);
}
void usb_kick_hub_wq(struct usb_device *hdev)
{
struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
if (hub)
kick_hub_wq(hub);
}
/*
* Let the USB core know that a USB 3.0 device has sent a Function Wake Device
* Notification, which indicates it had initiated remote wakeup.
*
* USB 3.0 hubs do not report the port link state change from U3 to U0 when the
* device initiates resume, so the USB core will not receive notice of the
* resume through the normal hub interrupt URB.
*/
void usb_wakeup_notification(struct usb_device *hdev,
unsigned int portnum)
{
struct usb_hub *hub;
struct usb_port *port_dev;
if (!hdev)
return;
hub = usb_hub_to_struct_hub(hdev);
if (hub) {
port_dev = hub->ports[portnum - 1];
if (port_dev && port_dev->child)
pm_wakeup_event(&port_dev->child->dev, 0);
set_bit(portnum, hub->wakeup_bits);
kick_hub_wq(hub);
}
}
EXPORT_SYMBOL_GPL(usb_wakeup_notification);
/* completion function, fires on port status changes and various faults */
static void hub_irq(struct urb *urb)
{
struct usb_hub *hub = urb->context;
int status = urb->status;
unsigned i;
unsigned long bits;
switch (status) {
case -ENOENT: /* synchronous unlink */
case -ECONNRESET: /* async unlink */
case -ESHUTDOWN: /* hardware going away */
return;
default: /* presumably an error */
/* Cause a hub reset after 10 consecutive errors */
dev_dbg(hub->intfdev, "transfer --> %d\n", status);
if ((++hub->nerrors < 10) || hub->error)
goto resubmit;
hub->error = status;
fallthrough;
/* let hub_wq handle things */
case 0: /* we got data: port status changed */
bits = 0;
for (i = 0; i < urb->actual_length; ++i)
bits |= ((unsigned long) ((*hub->buffer)[i]))
<< (i*8);
hub->event_bits[0] = bits;
break;
}
hub->nerrors = 0;
/* Something happened, let hub_wq figure it out */
kick_hub_wq(hub);
resubmit:
hub_resubmit_irq_urb(hub);
}
/* USB 2.0 spec Section 11.24.2.3 */
static inline int
hub_clear_tt_buffer(struct usb_device *hdev, u16 devinfo, u16 tt)
{
/* Need to clear both directions for control ep */
if (((devinfo >> 11) & USB_ENDPOINT_XFERTYPE_MASK) ==
USB_ENDPOINT_XFER_CONTROL) {
int status = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
HUB_CLEAR_TT_BUFFER, USB_RT_PORT,
devinfo ^ 0x8000, tt, NULL, 0, 1000);
if (status)
return status;
}
return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
HUB_CLEAR_TT_BUFFER, USB_RT_PORT, devinfo,
tt, NULL, 0, 1000);
}
/*
* enumeration blocks hub_wq for a long time. we use keventd instead, since
* long blocking there is the exception, not the rule. accordingly, HCDs
* talking to TTs must queue control transfers (not just bulk and iso), so
* both can talk to the same hub concurrently.
*/
static void hub_tt_work(struct work_struct *work)
{
struct usb_hub *hub =
container_of(work, struct usb_hub, tt.clear_work);
unsigned long flags;
spin_lock_irqsave(&hub->tt.lock, flags);
while (!list_empty(&hub->tt.clear_list)) {
struct list_head *next;
struct usb_tt_clear *clear;
struct usb_device *hdev = hub->hdev;
const struct hc_driver *drv;
int status;
next = hub->tt.clear_list.next;
clear = list_entry(next, struct usb_tt_clear, clear_list);
list_del(&clear->clear_list);
/* drop lock so HCD can concurrently report other TT errors */
spin_unlock_irqrestore(&hub->tt.lock, flags);
status = hub_clear_tt_buffer(hdev, clear->devinfo, clear->tt);
if (status && status != -ENODEV)
dev_err(&hdev->dev,
"clear tt %d (%04x) error %d\n",
clear->tt, clear->devinfo, status);
/* Tell the HCD, even if the operation failed */
drv = clear->hcd->driver;
if (drv->clear_tt_buffer_complete)
(drv->clear_tt_buffer_complete)(clear->hcd, clear->ep);
kfree(clear);
spin_lock_irqsave(&hub->tt.lock, flags);
}
spin_unlock_irqrestore(&hub->tt.lock, flags);
}
/**
* usb_hub_set_port_power - control hub port's power state
* @hdev: USB device belonging to the usb hub
* @hub: target hub
* @port1: port index
* @set: true to set the port's PORT_POWER feature, false to clear it
*
* Call this function to control a port's power state by setting or
* clearing the port's PORT_POWER feature.
*
* Return: 0 if successful. A negative error code otherwise.
*/
int usb_hub_set_port_power(struct usb_device *hdev, struct usb_hub *hub,
int port1, bool set)
{
int ret;
if (set)
ret = set_port_feature(hdev, port1, USB_PORT_FEAT_POWER);
else
ret = usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_POWER);
if (ret)
return ret;
if (set)
set_bit(port1, hub->power_bits);
else
clear_bit(port1, hub->power_bits);
return 0;
}
/**
* usb_hub_clear_tt_buffer - clear control/bulk TT state in high speed hub
* @urb: an URB associated with the failed or incomplete split transaction
*
* High speed HCDs use this to tell the hub driver that some split control or
* bulk transaction failed in a way that requires clearing internal state of
* a transaction translator. This is normally detected (and reported) from
* interrupt context.
*
* It may not be possible for that hub to handle additional full (or low)
* speed transactions until that state is fully cleared out.
*
* Return: 0 if successful. A negative error code otherwise.
*/
int usb_hub_clear_tt_buffer(struct urb *urb)
{
struct usb_device *udev = urb->dev;
int pipe = urb->pipe;
struct usb_tt *tt = udev->tt;
unsigned long flags;
struct usb_tt_clear *clear;
/* we've got to cope with an arbitrary number of pending TT clears,
* since each TT has "at least two" buffers that can need it (and
* there can be many TTs per hub), even if they're uncommon.
*/
clear = kmalloc(sizeof *clear, GFP_ATOMIC);
if (clear == NULL) {
dev_err(&udev->dev, "can't save CLEAR_TT_BUFFER state\n");
/* FIXME recover somehow ... RESET_TT? */
return -ENOMEM;
}
/* info that CLEAR_TT_BUFFER needs */
clear->tt = tt->multi ? udev->ttport : 1;
clear->devinfo = usb_pipeendpoint (pipe);
clear->devinfo |= ((u16)udev->devaddr) << 4;
clear->devinfo |= usb_pipecontrol(pipe)
? (USB_ENDPOINT_XFER_CONTROL << 11)
: (USB_ENDPOINT_XFER_BULK << 11);
if (usb_pipein(pipe))
clear->devinfo |= 1 << 15;
/* info for completion callback */
clear->hcd = bus_to_hcd(udev->bus);
clear->ep = urb->ep;
/* tell keventd to clear state for this TT */
spin_lock_irqsave(&tt->lock, flags);
list_add_tail(&clear->clear_list, &tt->clear_list);
schedule_work(&tt->clear_work);
spin_unlock_irqrestore(&tt->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(usb_hub_clear_tt_buffer);
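/*
 * Illustrative devinfo layout (as built above): bit 15 = IN direction,
 * bits 12:11 = endpoint transfer type, bits 10:4 = device address,
 * bits 3:0 = endpoint number. E.g. a bulk-IN urb on endpoint 1 of
 * device address 5 yields
 *
 *	0x8000 | (USB_ENDPOINT_XFER_BULK << 11) | (5 << 4) | 1 = 0x9051
 */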
static void hub_power_on(struct usb_hub *hub, bool do_delay)
{
int port1;
/* Enable power on each port. Some hubs have reserved values
* of LPSM (> 2) in their descriptors, even though they are
* USB 2.0 hubs. Some hubs do not implement port-power switching
* but only emulate it. In all cases, the ports won't work
* unless we send these messages to the hub.
*/
if (hub_is_port_power_switchable(hub))
dev_dbg(hub->intfdev, "enabling power on all ports\n");
else
dev_dbg(hub->intfdev, "trying to enable port power on "
"non-switchable hub\n");
for (port1 = 1; port1 <= hub->hdev->maxchild; port1++)
if (test_bit(port1, hub->power_bits))
set_port_feature(hub->hdev, port1, USB_PORT_FEAT_POWER);
else
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_POWER);
if (do_delay)
msleep(hub_power_on_good_delay(hub));
}
static int hub_hub_status(struct usb_hub *hub,
u16 *status, u16 *change)
{
int ret;
mutex_lock(&hub->status_mutex);
ret = get_hub_status(hub->hdev, &hub->status->hub);
if (ret < 0) {
if (ret != -ENODEV)
dev_err(hub->intfdev,
"%s failed (err = %d)\n", __func__, ret);
} else {
*status = le16_to_cpu(hub->status->hub.wHubStatus);
*change = le16_to_cpu(hub->status->hub.wHubChange);
ret = 0;
}
mutex_unlock(&hub->status_mutex);
return ret;
}
static int hub_set_port_link_state(struct usb_hub *hub, int port1,
unsigned int link_status)
{
return set_port_feature(hub->hdev,
port1 | (link_status << 3),
USB_PORT_FEAT_LINK_STATE);
}
/*
* Disable a port and mark a logical connect-change event, so that some
* time later hub_wq will disconnect() any existing usb_device on the port
* and will re-enumerate if there actually is a device attached.
*/
static void hub_port_logical_disconnect(struct usb_hub *hub, int port1)
{
dev_dbg(&hub->ports[port1 - 1]->dev, "logical disconnect\n");
hub_port_disable(hub, port1, 1);
/* FIXME let caller ask to power down the port:
* - some devices won't enumerate without a VBUS power cycle
* - SRP saves power that way
* - ... new call, TBD ...
* That's easy if this hub can switch power per-port, and
* hub_wq reactivates the port later (timer, SRP, etc).
* Powerdown must be optional, because of reset/DFU.
*/
set_bit(port1, hub->change_bits);
kick_hub_wq(hub);
}
/**
* usb_remove_device - disable a device's port on its parent hub
* @udev: device to be disabled and removed
* Context: @udev locked, must be able to sleep.
*
* After @udev's port has been disabled, hub_wq is notified and it will
* see that the device has been disconnected. When the device is
* physically unplugged and something is plugged in, the events will
* be received and processed normally.
*
* Return: 0 if successful. A negative error code otherwise.
*/
int usb_remove_device(struct usb_device *udev)
{
struct usb_hub *hub;
struct usb_interface *intf;
int ret;
if (!udev->parent) /* Can't remove a root hub */
return -EINVAL;
hub = usb_hub_to_struct_hub(udev->parent);
intf = to_usb_interface(hub->intfdev);
ret = usb_autopm_get_interface(intf);
if (ret < 0)
return ret;
set_bit(udev->portnum, hub->removed_bits);
hub_port_logical_disconnect(hub, udev->portnum);
usb_autopm_put_interface(intf);
return 0;
}
enum hub_activation_type {
HUB_INIT, HUB_INIT2, HUB_INIT3, /* INITs must come first */
HUB_POST_RESET, HUB_RESUME, HUB_RESET_RESUME,
};
static void hub_init_func2(struct work_struct *ws);
static void hub_init_func3(struct work_struct *ws);
static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
{
struct usb_device *hdev = hub->hdev;
struct usb_hcd *hcd;
int ret;
int port1;
int status;
bool need_debounce_delay = false;
unsigned delay;
/* Continue a partial initialization */
if (type == HUB_INIT2 || type == HUB_INIT3) {
device_lock(&hdev->dev);
/* Was the hub disconnected while we were waiting? */
if (hub->disconnected)
goto disconnected;
if (type == HUB_INIT2)
goto init2;
goto init3;
}
kref_get(&hub->kref);
/* A superspeed hub (other than the root hub) has to use its Hub
* Depth value as an offset into the route string to locate the bits
* it uses to determine the downstream port number. So the hub driver
* must send a SET_HUB_DEPTH request to a superspeed hub after the
* hub's configuration has been set during the initialization or
* reset procedure.
*
* After a resume, port power should still be on.
* For any other type of activation, turn it on.
*/
if (type != HUB_RESUME) {
if (hdev->parent && hub_is_superspeed(hdev)) {
ret = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
HUB_SET_DEPTH, USB_RT_HUB,
hdev->level - 1, 0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
if (ret < 0)
dev_err(hub->intfdev,
"set hub depth failed\n");
}
/* Speed up system boot by using a delayed_work for the
* hub's initial power-up delays. This is pretty awkward
* and the implementation looks like a home-brewed sort of
* setjmp/longjmp, but it saves at least 100 ms for each
* root hub (assuming usbcore is compiled into the kernel
* rather than as a module). It adds up.
*
* This can't be done for HUB_RESUME or HUB_RESET_RESUME
* because for those activation types the ports have to be
* operational when we return. In theory this could be done
* for HUB_POST_RESET, but it's easier not to.
*/
if (type == HUB_INIT) {
delay = hub_power_on_good_delay(hub);
hub_power_on(hub, false);
INIT_DELAYED_WORK(&hub->init_work, hub_init_func2);
queue_delayed_work(system_power_efficient_wq,
&hub->init_work,
msecs_to_jiffies(delay));
/* Suppress autosuspend until init is done */
usb_autopm_get_interface_no_resume(
to_usb_interface(hub->intfdev));
return; /* Continues at init2: below */
} else if (type == HUB_RESET_RESUME) {
/* The internal host controller state for the hub device
* may be gone after a host power loss on system resume.
* Update the device's info so the HW knows it's a hub.
*/
hcd = bus_to_hcd(hdev->bus);
if (hcd->driver->update_hub_device) {
ret = hcd->driver->update_hub_device(hcd, hdev,
&hub->tt, GFP_NOIO);
if (ret < 0) {
dev_err(hub->intfdev,
"Host not accepting hub info update\n");
dev_err(hub->intfdev,
"LS/FS devices and hubs may not work under this hub\n");
}
}
hub_power_on(hub, true);
} else {
hub_power_on(hub, true);
}
/* On remote wakeup, give the links some time to transition to U0 */
} else if (hub_is_superspeed(hub->hdev))
msleep(20);
init2:
/*
* Check each port and set hub->change_bits to let hub_wq know
* which ports need attention.
*/
for (port1 = 1; port1 <= hdev->maxchild; ++port1) {
struct usb_port *port_dev = hub->ports[port1 - 1];
struct usb_device *udev = port_dev->child;
u16 portstatus, portchange;
portstatus = portchange = 0;
status = usb_hub_port_status(hub, port1, &portstatus, &portchange);
if (status)
goto abort;
if (udev || (portstatus & USB_PORT_STAT_CONNECTION))
dev_dbg(&port_dev->dev, "status %04x change %04x\n",
portstatus, portchange);
/*
* After anything other than HUB_RESUME (i.e., initialization
* or any sort of reset), every port should be disabled.
* Unconnected ports should likewise be disabled (paranoia),
* and so should ports for which we have no usb_device.
*/
if ((portstatus & USB_PORT_STAT_ENABLE) && (
type != HUB_RESUME ||
!(portstatus & USB_PORT_STAT_CONNECTION) ||
!udev ||
udev->state == USB_STATE_NOTATTACHED)) {
/*
* USB3 protocol ports automatically transition to the
* Enabled state when they detect a USB 3.0 device attach.
* Do not disable USB3 protocol ports; just pretend
* power was lost.
*/
portstatus &= ~USB_PORT_STAT_ENABLE;
if (!hub_is_superspeed(hdev))
usb_clear_port_feature(hdev, port1,
USB_PORT_FEAT_ENABLE);
}
/* Make sure a warm-reset request is handled by port_event */
if (type == HUB_RESUME &&
hub_port_warm_reset_required(hub, port1, portstatus))
set_bit(port1, hub->event_bits);
/*
* Add debounce if USB3 link is in polling/link training state.
* Link will automatically transition to Enabled state after
* link training completes.
*/
if (hub_is_superspeed(hdev) &&
((portstatus & USB_PORT_STAT_LINK_STATE) ==
USB_SS_PORT_LS_POLLING))
need_debounce_delay = true;
/* Clear status-change flags; we'll debounce later */
if (portchange & USB_PORT_STAT_C_CONNECTION) {
need_debounce_delay = true;
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_CONNECTION);
}
if (portchange & USB_PORT_STAT_C_ENABLE) {
need_debounce_delay = true;
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_ENABLE);
}
if (portchange & USB_PORT_STAT_C_RESET) {
need_debounce_delay = true;
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_RESET);
}
if ((portchange & USB_PORT_STAT_C_BH_RESET) &&
hub_is_superspeed(hub->hdev)) {
need_debounce_delay = true;
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_BH_PORT_RESET);
}
/* We can forget about a "removed" device when there's a
* physical disconnect or the connect status changes.
*/
if (!(portstatus & USB_PORT_STAT_CONNECTION) ||
(portchange & USB_PORT_STAT_C_CONNECTION))
clear_bit(port1, hub->removed_bits);
if (!udev || udev->state == USB_STATE_NOTATTACHED) {
/* Tell hub_wq to disconnect the device or
* check for a new connection or over current condition.
* Based on USB2.0 Spec Section 11.12.5,
* C_PORT_OVER_CURRENT could be set while
* PORT_OVER_CURRENT is not. So check for any of them.
*/
if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
(portchange & USB_PORT_STAT_C_CONNECTION) ||
(portstatus & USB_PORT_STAT_OVERCURRENT) ||
(portchange & USB_PORT_STAT_C_OVERCURRENT))
set_bit(port1, hub->change_bits);
} else if (portstatus & USB_PORT_STAT_ENABLE) {
bool port_resumed = (portstatus &
USB_PORT_STAT_LINK_STATE) ==
USB_SS_PORT_LS_U0;
/* The power session apparently survived the resume.
* If there was an overcurrent or suspend change
* (i.e., remote wakeup request), have hub_wq
* take care of it. Look at the port link state
* for USB 3.0 hubs, since they don't have a suspend
* change bit, and they don't set the port link change
* bit on device-initiated resume.
*/
if (portchange || (hub_is_superspeed(hub->hdev) &&
port_resumed))
set_bit(port1, hub->event_bits);
} else if (udev->persist_enabled) {
#ifdef CONFIG_PM
udev->reset_resume = 1;
#endif
/* Don't set the change_bits when the device
* was powered off.
*/
if (test_bit(port1, hub->power_bits))
set_bit(port1, hub->change_bits);
} else {
/* The power session is gone; tell hub_wq */
usb_set_device_state(udev, USB_STATE_NOTATTACHED);
set_bit(port1, hub->change_bits);
}
}
/* If no port-status-change flags were set, we don't need any
* debouncing. If flags were set we can try to debounce the
* ports all at once right now, instead of letting hub_wq do them
* one at a time later on.
*
* If any port-status changes do occur during this delay, hub_wq
* will see them later and handle them normally.
*/
if (need_debounce_delay) {
delay = HUB_DEBOUNCE_STABLE;
/* Don't do a long sleep inside a workqueue routine */
if (type == HUB_INIT2) {
INIT_DELAYED_WORK(&hub->init_work, hub_init_func3);
queue_delayed_work(system_power_efficient_wq,
&hub->init_work,
msecs_to_jiffies(delay));
device_unlock(&hdev->dev);
return; /* Continues at init3: below */
} else {
msleep(delay);
}
}
init3:
hub->quiescing = 0;
status = usb_submit_urb(hub->urb, GFP_NOIO);
if (status < 0)
dev_err(hub->intfdev, "activate --> %d\n", status);
if (hub->has_indicators && blinkenlights)
queue_delayed_work(system_power_efficient_wq,
&hub->leds, LED_CYCLE_PERIOD);
/* Scan all ports that need attention */
kick_hub_wq(hub);
abort:
if (type == HUB_INIT2 || type == HUB_INIT3) {
/* Allow autosuspend if it was suppressed */
disconnected:
usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
device_unlock(&hdev->dev);
}
kref_put(&hub->kref, hub_release);
}
/* Implement the continuations for the delays above */
static void hub_init_func2(struct work_struct *ws)
{
struct usb_hub *hub = container_of(ws, struct usb_hub, init_work.work);
hub_activate(hub, HUB_INIT2);
}
static void hub_init_func3(struct work_struct *ws)
{
struct usb_hub *hub = container_of(ws, struct usb_hub, init_work.work);
hub_activate(hub, HUB_INIT3);
}
enum hub_quiescing_type {
HUB_DISCONNECT, HUB_PRE_RESET, HUB_SUSPEND
};
static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type)
{
struct usb_device *hdev = hub->hdev;
unsigned long flags;
int i;
/* hub_wq and related activity won't re-trigger */
spin_lock_irqsave(&hub->irq_urb_lock, flags);
hub->quiescing = 1;
spin_unlock_irqrestore(&hub->irq_urb_lock, flags);
if (type != HUB_SUSPEND) {
/* Disconnect all the children */
for (i = 0; i < hdev->maxchild; ++i) {
if (hub->ports[i]->child)
usb_disconnect(&hub->ports[i]->child);
}
}
/* Stop hub_wq and related activity */
del_timer_sync(&hub->irq_urb_retry);
usb_kill_urb(hub->urb);
if (hub->has_indicators)
cancel_delayed_work_sync(&hub->leds);
if (hub->tt.hub)
flush_work(&hub->tt.clear_work);
}
static void hub_pm_barrier_for_all_ports(struct usb_hub *hub)
{
int i;
for (i = 0; i < hub->hdev->maxchild; ++i)
pm_runtime_barrier(&hub->ports[i]->dev);
}
/* caller has locked the hub device */
static int hub_pre_reset(struct usb_interface *intf)
{
struct usb_hub *hub = usb_get_intfdata(intf);
hub_quiesce(hub, HUB_PRE_RESET);
hub->in_reset = 1;
hub_pm_barrier_for_all_ports(hub);
return 0;
}
/* caller has locked the hub device */
static int hub_post_reset(struct usb_interface *intf)
{
struct usb_hub *hub = usb_get_intfdata(intf);
hub->in_reset = 0;
hub_pm_barrier_for_all_ports(hub);
hub_activate(hub, HUB_POST_RESET);
return 0;
}
static int hub_configure(struct usb_hub *hub,
struct usb_endpoint_descriptor *endpoint)
{
struct usb_hcd *hcd;
struct usb_device *hdev = hub->hdev;
struct device *hub_dev = hub->intfdev;
u16 hubstatus, hubchange;
u16 wHubCharacteristics;
unsigned int pipe;
int maxp, ret, i;
char *message = "out of memory";
unsigned unit_load;
unsigned full_load;
unsigned maxchild;
hub->buffer = kmalloc(sizeof(*hub->buffer), GFP_KERNEL);
if (!hub->buffer) {
ret = -ENOMEM;
goto fail;
}
hub->status = kmalloc(sizeof(*hub->status), GFP_KERNEL);
if (!hub->status) {
ret = -ENOMEM;
goto fail;
}
mutex_init(&hub->status_mutex);
hub->descriptor = kzalloc(sizeof(*hub->descriptor), GFP_KERNEL);
if (!hub->descriptor) {
ret = -ENOMEM;
goto fail;
}
/* Request the entire hub descriptor.
* hub->descriptor can handle USB_MAXCHILDREN ports,
* but a (non-SS) hub can/will return fewer bytes here.
*/
ret = get_hub_descriptor(hdev, hub->descriptor);
if (ret < 0) {
message = "can't read hub descriptor";
goto fail;
}
maxchild = USB_MAXCHILDREN;
if (hub_is_superspeed(hdev))
maxchild = min_t(unsigned, maxchild, USB_SS_MAXPORTS);
if (hub->descriptor->bNbrPorts > maxchild) {
message = "hub has too many ports!";
ret = -ENODEV;
goto fail;
} else if (hub->descriptor->bNbrPorts == 0) {
message = "hub doesn't have any ports!";
ret = -ENODEV;
goto fail;
}
/*
* Accumulate wHubDelay + 40ns for every hub in the tree of devices.
* The resulting value will be used for SetIsochDelay() request.
*/
if (hub_is_superspeed(hdev) || hub_is_superspeedplus(hdev)) {
u32 delay = __le16_to_cpu(hub->descriptor->u.ss.wHubDelay);
if (hdev->parent)
delay += hdev->parent->hub_delay;
delay += USB_TP_TRANSMISSION_DELAY;
hdev->hub_delay = min_t(u32, delay, USB_TP_TRANSMISSION_DELAY_MAX);
}
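/*
 * Illustrative accumulation (assumed numbers): a superspeed hub with
 * wHubDelay = 400 ns whose parent contributes no extra delay yields
 * hdev->hub_delay = 400 + 40 = 440 ns, clamped at 65535 ns.
 */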
maxchild = hub->descriptor->bNbrPorts;
dev_info(hub_dev, "%d port%s detected\n", maxchild,
(maxchild == 1) ? "" : "s");
hub->ports = kcalloc(maxchild, sizeof(struct usb_port *), GFP_KERNEL);
if (!hub->ports) {
ret = -ENOMEM;
goto fail;
}
wHubCharacteristics = le16_to_cpu(hub->descriptor->wHubCharacteristics);
if (hub_is_superspeed(hdev)) {
unit_load = 150;
full_load = 900;
} else {
unit_load = 100;
full_load = 500;
}
/* FIXME for USB 3.0, skip for now */
if ((wHubCharacteristics & HUB_CHAR_COMPOUND) &&
!(hub_is_superspeed(hdev))) {
char portstr[USB_MAXCHILDREN + 1];
for (i = 0; i < maxchild; i++)
portstr[i] = hub->descriptor->u.hs.DeviceRemovable
[((i + 1) / 8)] & (1 << ((i + 1) % 8))
? 'F' : 'R';
portstr[maxchild] = 0;
dev_dbg(hub_dev, "compound device; port removable status: %s\n", portstr);
} else
dev_dbg(hub_dev, "standalone hub\n");
switch (wHubCharacteristics & HUB_CHAR_LPSM) {
case HUB_CHAR_COMMON_LPSM:
dev_dbg(hub_dev, "ganged power switching\n");
break;
case HUB_CHAR_INDV_PORT_LPSM:
dev_dbg(hub_dev, "individual port power switching\n");
break;
case HUB_CHAR_NO_LPSM:
case HUB_CHAR_LPSM:
dev_dbg(hub_dev, "no power switching (usb 1.0)\n");
break;
}
switch (wHubCharacteristics & HUB_CHAR_OCPM) {
case HUB_CHAR_COMMON_OCPM:
dev_dbg(hub_dev, "global over-current protection\n");
break;
case HUB_CHAR_INDV_PORT_OCPM:
dev_dbg(hub_dev, "individual port over-current protection\n");
break;
case HUB_CHAR_NO_OCPM:
case HUB_CHAR_OCPM:
dev_dbg(hub_dev, "no over-current protection\n");
break;
}
spin_lock_init(&hub->tt.lock);
INIT_LIST_HEAD(&hub->tt.clear_list);
INIT_WORK(&hub->tt.clear_work, hub_tt_work);
switch (hdev->descriptor.bDeviceProtocol) {
case USB_HUB_PR_FS:
break;
case USB_HUB_PR_HS_SINGLE_TT:
dev_dbg(hub_dev, "Single TT\n");
hub->tt.hub = hdev;
break;
case USB_HUB_PR_HS_MULTI_TT:
ret = usb_set_interface(hdev, 0, 1);
if (ret == 0) {
dev_dbg(hub_dev, "TT per port\n");
hub->tt.multi = 1;
} else
dev_err(hub_dev, "Using single TT (err %d)\n",
ret);
hub->tt.hub = hdev;
break;
case USB_HUB_PR_SS:
/* USB 3.0 hubs don't have a TT */
break;
default:
dev_dbg(hub_dev, "Unrecognized hub protocol %d\n",
hdev->descriptor.bDeviceProtocol);
break;
}
/* Note 8 FS bit times == (8 bits / 12000000 bps) ~= 666ns */
switch (wHubCharacteristics & HUB_CHAR_TTTT) {
case HUB_TTTT_8_BITS:
if (hdev->descriptor.bDeviceProtocol != 0) {
hub->tt.think_time = 666;
dev_dbg(hub_dev, "TT requires at most %d "
"FS bit times (%d ns)\n",
8, hub->tt.think_time);
}
break;
case HUB_TTTT_16_BITS:
hub->tt.think_time = 666 * 2;
dev_dbg(hub_dev, "TT requires at most %d "
"FS bit times (%d ns)\n",
16, hub->tt.think_time);
break;
case HUB_TTTT_24_BITS:
hub->tt.think_time = 666 * 3;
dev_dbg(hub_dev, "TT requires at most %d "
"FS bit times (%d ns)\n",
24, hub->tt.think_time);
break;
case HUB_TTTT_32_BITS:
hub->tt.think_time = 666 * 4;
dev_dbg(hub_dev, "TT requires at most %d "
"FS bit times (%d ns)\n",
32, hub->tt.think_time);
break;
}
/* probe() zeroes hub->indicator[] */
if (wHubCharacteristics & HUB_CHAR_PORTIND) {
hub->has_indicators = 1;
dev_dbg(hub_dev, "Port indicators are supported\n");
}
dev_dbg(hub_dev, "power on to power good time: %dms\n",
hub->descriptor->bPwrOn2PwrGood * 2);
/* power budgeting mostly matters with bus-powered hubs,
* and battery-powered root hubs (may provide just 8 mA).
*/
ret = usb_get_std_status(hdev, USB_RECIP_DEVICE, 0, &hubstatus);
if (ret) {
message = "can't get hub status";
goto fail;
}
hcd = bus_to_hcd(hdev->bus);
if (hdev == hdev->bus->root_hub) {
if (hcd->power_budget > 0)
hdev->bus_mA = hcd->power_budget;
else
hdev->bus_mA = full_load * maxchild;
if (hdev->bus_mA >= full_load)
hub->mA_per_port = full_load;
else {
hub->mA_per_port = hdev->bus_mA;
hub->limited_power = 1;
}
} else if ((hubstatus & (1 << USB_DEVICE_SELF_POWERED)) == 0) {
int remaining = hdev->bus_mA -
hub->descriptor->bHubContrCurrent;
dev_dbg(hub_dev, "hub controller current requirement: %dmA\n",
hub->descriptor->bHubContrCurrent);
hub->limited_power = 1;
if (remaining < maxchild * unit_load)
dev_warn(hub_dev,
"insufficient power available "
"to use all downstream ports\n");
hub->mA_per_port = unit_load; /* 7.2.1 */
} else { /* Self-powered external hub */
/* FIXME: What about battery-powered external hubs that
* provide less current per port? */
hub->mA_per_port = full_load;
}
if (hub->mA_per_port < full_load)
dev_dbg(hub_dev, "%umA bus power budget for each child\n",
hub->mA_per_port);
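/*
 * Illustrative budget (assumed numbers): a bus-powered high-speed hub
 * on a 500 mA port with bHubContrCurrent = 100 leaves 400 mA; four
 * ports at the 100 mA unit load need exactly 400 mA, so no warning is
 * printed and each child is granted the unit load.
 */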
ret = hub_hub_status(hub, &hubstatus, &hubchange);
if (ret < 0) {
message = "can't get hub status";
goto fail;
}
/* local power status reports aren't always correct */
if (hdev->actconfig->desc.bmAttributes & USB_CONFIG_ATT_SELFPOWER)
dev_dbg(hub_dev, "local power source is %s\n",
(hubstatus & HUB_STATUS_LOCAL_POWER)
? "lost (inactive)" : "good");
if ((wHubCharacteristics & HUB_CHAR_OCPM) == 0)
dev_dbg(hub_dev, "%sover-current condition exists\n",
(hubstatus & HUB_STATUS_OVERCURRENT) ? "" : "no ");
/* set up the interrupt endpoint
* We use the EP's maxpacket size instead of (PORTS+1+7)/8
* bytes as USB2.0[11.12.3] says because some hubs are known
* to send more data (and thus cause overflow). For root hubs,
* maxpktsize is defined in hcd.c's fake endpoint descriptors
* to be big enough for at least USB_MAXCHILDREN ports. */
pipe = usb_rcvintpipe(hdev, endpoint->bEndpointAddress);
maxp = usb_maxpacket(hdev, pipe);
if (maxp > sizeof(*hub->buffer))
maxp = sizeof(*hub->buffer);
hub->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!hub->urb) {
ret = -ENOMEM;
goto fail;
}
usb_fill_int_urb(hub->urb, hdev, pipe, *hub->buffer, maxp, hub_irq,
hub, endpoint->bInterval);
/* maybe cycle the hub leds */
if (hub->has_indicators && blinkenlights)
hub->indicator[0] = INDICATOR_CYCLE;
mutex_lock(&usb_port_peer_mutex);
for (i = 0; i < maxchild; i++) {
ret = usb_hub_create_port_device(hub, i + 1);
if (ret < 0) {
dev_err(hub->intfdev,
"couldn't create port%d device.\n", i + 1);
break;
}
}
hdev->maxchild = i;
for (i = 0; i < hdev->maxchild; i++) {
struct usb_port *port_dev = hub->ports[i];
pm_runtime_put(&port_dev->dev);
}
mutex_unlock(&usb_port_peer_mutex);
if (ret < 0)
goto fail;
/* Update the HCD's internal representation of this hub before hub_wq
* starts getting port status changes for devices under the hub.
*/
if (hcd->driver->update_hub_device) {
ret = hcd->driver->update_hub_device(hcd, hdev,
&hub->tt, GFP_KERNEL);
if (ret < 0) {
message = "can't update HCD hub info";
goto fail;
}
}
usb_hub_adjust_deviceremovable(hdev, hub->descriptor);
hub_activate(hub, HUB_INIT);
return 0;
fail:
dev_err(hub_dev, "config failed, %s (err %d)\n",
message, ret);
/* hub_disconnect() frees urb and descriptor */
return ret;
}
static void hub_release(struct kref *kref)
{
struct usb_hub *hub = container_of(kref, struct usb_hub, kref);
usb_put_dev(hub->hdev);
usb_put_intf(to_usb_interface(hub->intfdev));
kfree(hub);
}
static unsigned highspeed_hubs;
static void hub_disconnect(struct usb_interface *intf)
{
struct usb_hub *hub = usb_get_intfdata(intf);
struct usb_device *hdev = interface_to_usbdev(intf);
int port1;
/*
* Stop adding new hub events. We do not want to block here and thus
* will not try to remove any pending work item.
*/
hub->disconnected = 1;
/* Disconnect all children and quiesce the hub */
hub->error = 0;
hub_quiesce(hub, HUB_DISCONNECT);
mutex_lock(&usb_port_peer_mutex);
/* Avoid races with recursively_mark_NOTATTACHED() */
spin_lock_irq(&device_state_lock);
port1 = hdev->maxchild;
hdev->maxchild = 0;
usb_set_intfdata(intf, NULL);
spin_unlock_irq(&device_state_lock);
for (; port1 > 0; --port1)
usb_hub_remove_port_device(hub, port1);
mutex_unlock(&usb_port_peer_mutex);
if (hub->hdev->speed == USB_SPEED_HIGH)
highspeed_hubs--;
usb_free_urb(hub->urb);
kfree(hub->ports);
kfree(hub->descriptor);
kfree(hub->status);
kfree(hub->buffer);
pm_suspend_ignore_children(&intf->dev, false);
if (hub->quirk_disable_autosuspend)
usb_autopm_put_interface(intf);
onboard_hub_destroy_pdevs(&hub->onboard_hub_devs);
kref_put(&hub->kref, hub_release);
}
static bool hub_descriptor_is_sane(struct usb_host_interface *desc)
{
/* Some hubs have a subclass of 1, which AFAICT according to the */
/* specs is not defined, but it works */
if (desc->desc.bInterfaceSubClass != 0 &&
desc->desc.bInterfaceSubClass != 1)
return false;
/* Multiple endpoints? What kind of mutant ninja-hub is this? */
if (desc->desc.bNumEndpoints != 1)
return false;
/* If the first endpoint is not interrupt IN, we'd better punt! */
if (!usb_endpoint_is_int_in(&desc->endpoint[0].desc))
return false;
return true;
}
static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct usb_host_interface *desc;
struct usb_device *hdev;
struct usb_hub *hub;
desc = intf->cur_altsetting;
hdev = interface_to_usbdev(intf);
/*
* Set the default autosuspend delay to 0 to speed up bus suspend,
* based on the following considerations:
*
* - Unlike other drivers, the hub driver does not rely on the
* autosuspend delay to provide enough time to handle a wakeup
* event; the submitted status URB just checks for future
* changes on the hub's downstream ports, so it is safe to do it.
*
* - This may cause one or more auto suspend/resume cycles for the
* very rare devices below when they are plugged into a hub
* for the first time:
*
* devices having trouble initializing, which disconnect
* themselves from the bus and then reconnect a second
* or so later
*
* devices used just for downloading firmware, which disconnect
* themselves after completing it
*
* For these quite rare devices, their drivers may change the
* autosuspend delay of their parent hub in probe() to an
* appropriate value to avoid the subtle problem, if anyone
* cares.
*
* - This may cause one or more auto suspend/resume cycles on the
* hub while running 'lsusb', but that is probably too
* infrequent to worry about.
*
* - Changing the hub's autosuspend delay avoids an unnecessary
* autosuspend timer for the hub and may also decrease the power
* consumption of the USB bus.
*
* - If the user has indicated a wish to prevent autosuspend by
* passing usbcore.autosuspend = -1, keep autosuspend disabled.
*/
#ifdef CONFIG_PM
if (hdev->dev.power.autosuspend_delay >= 0)
pm_runtime_set_autosuspend_delay(&hdev->dev, 0);
#endif
/*
* Hubs have proper suspend/resume support, except for root hubs
* where the controller driver doesn't have bus_suspend and
* bus_resume methods.
*/
if (hdev->parent) { /* normal device */
usb_enable_autosuspend(hdev);
} else { /* root hub */
const struct hc_driver *drv = bus_to_hcd(hdev->bus)->driver;
if (drv->bus_suspend && drv->bus_resume)
usb_enable_autosuspend(hdev);
}
if (hdev->level == MAX_TOPO_LEVEL) {
dev_err(&intf->dev,
"Unsupported bus topology: hub nested too deep\n");
return -E2BIG;
}
#ifdef CONFIG_USB_OTG_DISABLE_EXTERNAL_HUB
if (hdev->parent) {
dev_warn(&intf->dev, "ignoring external hub\n");
return -ENODEV;
}
#endif
if (!hub_descriptor_is_sane(desc)) {
dev_err(&intf->dev, "bad descriptor, ignoring hub\n");
return -EIO;
}
/* We found a hub */
dev_info(&intf->dev, "USB hub found\n");
hub = kzalloc(sizeof(*hub), GFP_KERNEL);
if (!hub)
return -ENOMEM;
kref_init(&hub->kref);
hub->intfdev = &intf->dev;
hub->hdev = hdev;
INIT_DELAYED_WORK(&hub->leds, led_work);
INIT_DELAYED_WORK(&hub->init_work, NULL);
INIT_WORK(&hub->events, hub_event);
INIT_LIST_HEAD(&hub->onboard_hub_devs);
spin_lock_init(&hub->irq_urb_lock);
timer_setup(&hub->irq_urb_retry, hub_retry_irq_urb, 0);
usb_get_intf(intf);
usb_get_dev(hdev);
usb_set_intfdata(intf, hub);
intf->needs_remote_wakeup = 1;
pm_suspend_ignore_children(&intf->dev, true);
if (hdev->speed == USB_SPEED_HIGH)
highspeed_hubs++;
if (id->driver_info & HUB_QUIRK_CHECK_PORT_AUTOSUSPEND)
hub->quirk_check_port_auto_suspend = 1;
if (id->driver_info & HUB_QUIRK_DISABLE_AUTOSUSPEND) {
hub->quirk_disable_autosuspend = 1;
usb_autopm_get_interface_no_resume(intf);
}
if (hub_configure(hub, &desc->endpoint[0].desc) >= 0) {
onboard_hub_create_pdevs(hdev, &hub->onboard_hub_devs);
return 0;
}
hub_disconnect(intf);
return -ENODEV;
}
static int
hub_ioctl(struct usb_interface *intf, unsigned int code, void *user_data)
{
struct usb_device *hdev = interface_to_usbdev(intf);
struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
/* assert ifno == 0 (part of hub spec) */
switch (code) {
case USBDEVFS_HUB_PORTINFO: {
struct usbdevfs_hub_portinfo *info = user_data;
int i;
spin_lock_irq(&device_state_lock);
if (hdev->devnum <= 0)
info->nports = 0;
else {
info->nports = hdev->maxchild;
for (i = 0; i < info->nports; i++) {
if (hub->ports[i]->child == NULL)
info->port[i] = 0;
else
info->port[i] =
hub->ports[i]->child->devnum;
}
}
spin_unlock_irq(&device_state_lock);
return info->nports + 1;
}
default:
return -ENOSYS;
}
}
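/*
 * Illustrative userspace call (assuming fd is an open usbfs file for
 * the hub device):
 *
 *	struct usbdevfs_hub_portinfo info;
 *	ioctl(fd, USBDEVFS_HUB_PORTINFO, &info);
 *	// info.port[i] is the devnum of the child on port i + 1, 0 if none
 */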
/*
* Allow user programs to claim ports on a hub. When a device is attached
* to one of these "claimed" ports, the program will "own" the device.
*/
static int find_port_owner(struct usb_device *hdev, unsigned port1,
struct usb_dev_state ***ppowner)
{
struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
if (hdev->state == USB_STATE_NOTATTACHED)
return -ENODEV;
if (port1 == 0 || port1 > hdev->maxchild)
return -EINVAL;
/* Devices not managed by the hub driver
* will always have maxchild equal to 0.
*/
*ppowner = &(hub->ports[port1 - 1]->port_owner);
return 0;
}
/* In the following three functions, the caller must hold hdev's lock */
int usb_hub_claim_port(struct usb_device *hdev, unsigned port1,
struct usb_dev_state *owner)
{
int rc;
struct usb_dev_state **powner;
rc = find_port_owner(hdev, port1, &powner);
if (rc)
return rc;
if (*powner)
return -EBUSY;
*powner = owner;
return rc;
}
EXPORT_SYMBOL_GPL(usb_hub_claim_port);
int usb_hub_release_port(struct usb_device *hdev, unsigned port1,
struct usb_dev_state *owner)
{
int rc;
struct usb_dev_state **powner;
rc = find_port_owner(hdev, port1, &powner);
if (rc)
return rc;
if (*powner != owner)
return -ENOENT;
*powner = NULL;
return rc;
}
EXPORT_SYMBOL_GPL(usb_hub_release_port);
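/*
 * Illustrative claim/release pairing (the caller must hold hdev's lock,
 * as noted above):
 *
 *	if (usb_hub_claim_port(hdev, port1, owner) == 0) {
 *		... use the port ...
 *		usb_hub_release_port(hdev, port1, owner);
 *	}
 */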
void usb_hub_release_all_ports(struct usb_device *hdev, struct usb_dev_state *owner)
{
struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
int n;
for (n = 0; n < hdev->maxchild; n++) {
if (hub->ports[n]->port_owner == owner)
hub->ports[n]->port_owner = NULL;
}
}
/* The caller must hold udev's lock */
bool usb_device_is_owned(struct usb_device *udev)
{
struct usb_hub *hub;
if (udev->state == USB_STATE_NOTATTACHED || !udev->parent)
return false;
hub = usb_hub_to_struct_hub(udev->parent);
return !!hub->ports[udev->portnum - 1]->port_owner;
}
static void update_port_device_state(struct usb_device *udev)
{
struct usb_hub *hub;
struct usb_port *port_dev;
if (udev->parent) {
hub = usb_hub_to_struct_hub(udev->parent);
port_dev = hub->ports[udev->portnum - 1];
WRITE_ONCE(port_dev->state, udev->state);
sysfs_notify_dirent(port_dev->state_kn);
}
}
static void recursively_mark_NOTATTACHED(struct usb_device *udev)
{
struct usb_hub *hub = usb_hub_to_struct_hub(udev);
int i;
for (i = 0; i < udev->maxchild; ++i) {
if (hub->ports[i]->child)
recursively_mark_NOTATTACHED(hub->ports[i]->child);
}
if (udev->state == USB_STATE_SUSPENDED)
udev->active_duration -= jiffies;
udev->state = USB_STATE_NOTATTACHED;
update_port_device_state(udev);
}
/**
* usb_set_device_state - change a device's current state (usbcore, hcds)
* @udev: pointer to device whose state should be changed
* @new_state: new state value to be stored
*
* udev->state is _not_ fully protected by the device lock. Although
* most transitions are made only while holding the lock, the state can
* change to USB_STATE_NOTATTACHED at almost any time. This
* is so that devices can be marked as disconnected as soon as possible,
* without having to wait for any semaphores to be released. As a result,
* all changes to any device's state must be protected by the
* device_state_lock spinlock.
*
* Once a device has been added to the device tree, all changes to its state
* should be made using this routine. The state should _not_ be set directly.
*
* If udev->state is already USB_STATE_NOTATTACHED then no change is made.
* Otherwise udev->state is set to new_state, and if new_state is
* USB_STATE_NOTATTACHED then all of udev's descendants' states are also set
* to USB_STATE_NOTATTACHED.
*/
void usb_set_device_state(struct usb_device *udev,
enum usb_device_state new_state)
{
unsigned long flags;
int wakeup = -1;
spin_lock_irqsave(&device_state_lock, flags);
if (udev->state == USB_STATE_NOTATTACHED)
; /* do nothing */
else if (new_state != USB_STATE_NOTATTACHED) {
/* root hub wakeup capabilities are managed out-of-band
* and may involve silicon errata ... ignore them here.
*/
if (udev->parent) {
if (udev->state == USB_STATE_SUSPENDED
|| new_state == USB_STATE_SUSPENDED)
; /* No change to wakeup settings */
else if (new_state == USB_STATE_CONFIGURED)
wakeup = (udev->quirks &
USB_QUIRK_IGNORE_REMOTE_WAKEUP) ? 0 :
udev->actconfig->desc.bmAttributes &
USB_CONFIG_ATT_WAKEUP;
else
wakeup = 0;
}
if (udev->state == USB_STATE_SUSPENDED &&
new_state != USB_STATE_SUSPENDED)
udev->active_duration -= jiffies;
else if (new_state == USB_STATE_SUSPENDED &&
udev->state != USB_STATE_SUSPENDED)
udev->active_duration += jiffies;
udev->state = new_state;
update_port_device_state(udev);
} else
recursively_mark_NOTATTACHED(udev);
spin_unlock_irqrestore(&device_state_lock, flags);
if (wakeup >= 0)
device_set_wakeup_capable(&udev->dev, wakeup);
}
EXPORT_SYMBOL_GPL(usb_set_device_state);
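/*
* Illustrative sketch of typical callers (assumed sequence, not code
* from this file): an HCD or the hub driver drives transitions such as
*
*	usb_set_device_state(udev, USB_STATE_ADDRESS);     (after Set-Address)
*	usb_set_device_state(udev, USB_STATE_CONFIGURED);  (after Set-Configuration)
*	usb_set_device_state(udev, USB_STATE_NOTATTACHED); (on disconnect)
*
* where the last call also marks every descendant of udev as
* NOTATTACHED, per the kernel-doc above.
*/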
/*
* Choose a device number.
*
* Device numbers are used as filenames in usbfs. On USB-1.1 and
* USB-2.0 buses they are also used as device addresses, however on
* USB-3.0 buses the address is assigned by the controller hardware
* and it usually is not the same as the device number.
*
* Devices connected under xHCI are not as simple. The host controller
* supports virtualization, so the hardware assigns device addresses and
* the HCD must set up data structures before issuing a set address
* command to the hardware.
*/
static void choose_devnum(struct usb_device *udev)
{
int devnum;
struct usb_bus *bus = udev->bus;
/* be safe when more hub events are processed in parallel */
mutex_lock(&bus->devnum_next_mutex);
/* Try to allocate the next devnum beginning at bus->devnum_next. */
devnum = find_next_zero_bit(bus->devmap.devicemap, 128,
bus->devnum_next);
if (devnum >= 128)
devnum = find_next_zero_bit(bus->devmap.devicemap, 128, 1);
bus->devnum_next = (devnum >= 127 ? 1 : devnum + 1);
if (devnum < 128) {
set_bit(devnum, bus->devmap.devicemap);
udev->devnum = devnum;
}
mutex_unlock(&bus->devnum_next_mutex);
}
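/*
* Worked example (illustrative): if bus->devnum_next is 126 and devnums
* 126 and 127 are already taken, the first find_next_zero_bit() returns
* 128, so the search restarts from bit 1 and picks the lowest free
* devnum; devnum_next then wraps to 1 whenever the chosen devnum is
* >= 127.
*/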
static void release_devnum(struct usb_device *udev)
{
if (udev->devnum > 0) {
clear_bit(udev->devnum, udev->bus->devmap.devicemap);
udev->devnum = -1;
}
}
static void update_devnum(struct usb_device *udev, int devnum)
{
udev->devnum = devnum;
if (!udev->devaddr)
udev->devaddr = (u8)devnum;
}
static void hub_free_dev(struct usb_device *udev)
{
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
/* Root hubs aren't real devices, so don't free HCD resources */
if (hcd->driver->free_dev && udev->parent)
hcd->driver->free_dev(hcd, udev);
}
static void hub_disconnect_children(struct usb_device *udev)
{
struct usb_hub *hub = usb_hub_to_struct_hub(udev);
int i;
/* Free up all the children before we remove this device */
for (i = 0; i < udev->maxchild; i++) {
if (hub->ports[i]->child)
usb_disconnect(&hub->ports[i]->child);
}
}
/**
* usb_disconnect - disconnect a device (usbcore-internal)
* @pdev: pointer to device being disconnected
*
* Context: task context, might sleep
*
* Something got disconnected. Get rid of it and all of its children.
*
* If *pdev is a normal device then the parent hub must already be locked.
* If *pdev is a root hub then the caller must hold the usb_bus_idr_lock,
* which protects the set of root hubs as well as the list of buses.
*
* Only hub drivers (including virtual root hub drivers for host
* controllers) should ever call this.
*
* This call is synchronous, and may not be used in an interrupt context.
*/
void usb_disconnect(struct usb_device **pdev)
{
struct usb_port *port_dev = NULL;
struct usb_device *udev = *pdev;
struct usb_hub *hub = NULL;
int port1 = 1;
/* mark the device as inactive, so any further urb submissions for
* this device (and any of its children) will fail immediately.
* this quiesces everything except pending urbs.
*/
usb_set_device_state(udev, USB_STATE_NOTATTACHED);
dev_info(&udev->dev, "USB disconnect, device number %d\n",
udev->devnum);
/*
* Ensure that the pm runtime code knows that the USB device
* is in the process of being disconnected.
*/
pm_runtime_barrier(&udev->dev);
usb_lock_device(udev);
hub_disconnect_children(udev);
/* deallocate hcd/hardware state ... nuking all pending urbs and
* cleaning up all state associated with the current configuration
* so that the hardware is now fully quiesced.
*/
dev_dbg(&udev->dev, "unregistering device\n");
usb_disable_device(udev, 0);
usb_hcd_synchronize_unlinks(udev);
if (udev->parent) {
port1 = udev->portnum;
hub = usb_hub_to_struct_hub(udev->parent);
port_dev = hub->ports[port1 - 1];
sysfs_remove_link(&udev->dev.kobj, "port");
sysfs_remove_link(&port_dev->dev.kobj, "device");
/*
* As usb_port_runtime_resume() de-references udev, make
* sure no resumes occur during removal
*/
if (!test_and_set_bit(port1, hub->child_usage_bits))
pm_runtime_get_sync(&port_dev->dev);
}
usb_remove_ep_devs(&udev->ep0);
usb_unlock_device(udev);
/* Unregister the device. The device driver is responsible
* for de-configuring the device and invoking the remove-device
* notifier chain (used by usbfs and possibly others).
*/
device_del(&udev->dev);
/* Free the device number and delete the parent's children[]
* (or root_hub) pointer.
*/
release_devnum(udev);
/* Avoid races with recursively_mark_NOTATTACHED() */
spin_lock_irq(&device_state_lock);
*pdev = NULL;
spin_unlock_irq(&device_state_lock);
if (port_dev && test_and_clear_bit(port1, hub->child_usage_bits))
pm_runtime_put(&port_dev->dev);
hub_free_dev(udev);
put_device(&udev->dev);
}
#ifdef CONFIG_USB_ANNOUNCE_NEW_DEVICES
static void show_string(struct usb_device *udev, char *id, char *string)
{
if (!string)
return;
dev_info(&udev->dev, "%s: %s\n", id, string);
}
static void announce_device(struct usb_device *udev)
{
u16 bcdDevice = le16_to_cpu(udev->descriptor.bcdDevice);
dev_info(&udev->dev,
"New USB device found, idVendor=%04x, idProduct=%04x, bcdDevice=%2x.%02x\n",
le16_to_cpu(udev->descriptor.idVendor),
le16_to_cpu(udev->descriptor.idProduct),
bcdDevice >> 8, bcdDevice & 0xff);
dev_info(&udev->dev,
"New USB device strings: Mfr=%d, Product=%d, SerialNumber=%d\n",
udev->descriptor.iManufacturer,
udev->descriptor.iProduct,
udev->descriptor.iSerialNumber);
show_string(udev, "Product", udev->product);
show_string(udev, "Manufacturer", udev->manufacturer);
show_string(udev, "SerialNumber", udev->serial);
}
#else
static inline void announce_device(struct usb_device *udev) { }
#endif
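/*
* For reference, announce_device() emits dmesg lines shaped like the
* following (the IDs and strings here are made-up examples, derived
* only from the format strings above):
*
*	usb 1-2: New USB device found, idVendor=1234, idProduct=5678, bcdDevice= 1.00
*	usb 1-2: New USB device strings: Mfr=1, Product=2, SerialNumber=3
*	usb 1-2: Product: Example Device
*/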
/**
* usb_enumerate_device_otg - FIXME (usbcore-internal)
* @udev: newly addressed device (in ADDRESS state)
*
* Finish enumeration for On-The-Go devices
*
* Return: 0 if successful. A negative error code otherwise.
*/
static int usb_enumerate_device_otg(struct usb_device *udev)
{
int err = 0;
#ifdef CONFIG_USB_OTG
/*
* OTG-aware devices on OTG-capable root hubs may be able to use SRP,
* to wake us after we've powered off VBUS; and HNP, switching roles
* "host" to "peripheral". The OTG descriptor helps figure this out.
*/
if (!udev->bus->is_b_host
&& udev->config
&& udev->parent == udev->bus->root_hub) {
struct usb_otg_descriptor *desc = NULL;
struct usb_bus *bus = udev->bus;
unsigned port1 = udev->portnum;
/* descriptor may appear anywhere in config */
err = __usb_get_extra_descriptor(udev->rawdescriptors[0],
le16_to_cpu(udev->config[0].desc.wTotalLength),
USB_DT_OTG, (void **) &desc, sizeof(*desc));
if (err || !(desc->bmAttributes & USB_OTG_HNP))
return 0;
dev_info(&udev->dev, "Dual-Role OTG device on %sHNP port\n",
(port1 == bus->otg_port) ? "" : "non-");
/* enable HNP before suspend, it's simpler */
if (port1 == bus->otg_port) {
bus->b_hnp_enable = 1;
err = usb_control_msg(udev,
usb_sndctrlpipe(udev, 0),
USB_REQ_SET_FEATURE, 0,
USB_DEVICE_B_HNP_ENABLE,
0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
if (err < 0) {
/*
* OTG MESSAGE: report errors here,
* customize to match your product.
*/
dev_err(&udev->dev, "can't set HNP mode: %d\n",
err);
bus->b_hnp_enable = 0;
}
} else if (desc->bLength == sizeof
(struct usb_otg_descriptor)) {
/* Set a_alt_hnp_support for legacy otg device */
err = usb_control_msg(udev,
usb_sndctrlpipe(udev, 0),
USB_REQ_SET_FEATURE, 0,
USB_DEVICE_A_ALT_HNP_SUPPORT,
0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
if (err < 0)
dev_err(&udev->dev,
"set a_alt_hnp_support failed: %d\n",
err);
}
}
#endif
return err;
}
/**
* usb_enumerate_device - Read device configs/intfs/otg (usbcore-internal)
* @udev: newly addressed device (in ADDRESS state)
*
* This is only called by usb_new_device() -- all comments that apply there
* apply here with respect to the environment.
*
* If the device is WUSB and not authorized, we don't attempt to read
* the string descriptors, as the device will reject those requests
* until it has been authorized.
*
* Return: 0 if successful. A negative error code otherwise.
*/
static int usb_enumerate_device(struct usb_device *udev)
{
int err;
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
if (udev->config == NULL) {
err = usb_get_configuration(udev);
if (err < 0) {
if (err != -ENODEV)
dev_err(&udev->dev, "can't read configurations, error %d\n",
err);
return err;
}
}
/* read the standard strings and cache them if present */
udev->product = usb_cache_string(udev, udev->descriptor.iProduct);
udev->manufacturer = usb_cache_string(udev,
udev->descriptor.iManufacturer);
udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber);
err = usb_enumerate_device_otg(udev);
if (err < 0)
return err;
if (IS_ENABLED(CONFIG_USB_OTG_PRODUCTLIST) && hcd->tpl_support &&
!is_targeted(udev)) {
/* Maybe it can talk to us, though we can't talk to it.
* (Includes HNP test device.)
*/
if (IS_ENABLED(CONFIG_USB_OTG) && (udev->bus->b_hnp_enable
|| udev->bus->is_b_host)) {
err = usb_port_suspend(udev, PMSG_AUTO_SUSPEND);
if (err < 0)
dev_dbg(&udev->dev, "HNP fail, %d\n", err);
}
return -ENOTSUPP;
}
usb_detect_interface_quirks(udev);
return 0;
}
static void set_usb_port_removable(struct usb_device *udev)
{
struct usb_device *hdev = udev->parent;
struct usb_hub *hub;
u8 port = udev->portnum;
u16 wHubCharacteristics;
bool removable = true;
dev_set_removable(&udev->dev, DEVICE_REMOVABLE_UNKNOWN);
if (!hdev)
return;
hub = usb_hub_to_struct_hub(udev->parent);
/*
* If the platform firmware has provided information about a port,
* use that to determine whether it's removable.
*/
switch (hub->ports[udev->portnum - 1]->connect_type) {
case USB_PORT_CONNECT_TYPE_HOT_PLUG:
dev_set_removable(&udev->dev, DEVICE_REMOVABLE);
return;
case USB_PORT_CONNECT_TYPE_HARD_WIRED:
case USB_PORT_NOT_USED:
dev_set_removable(&udev->dev, DEVICE_FIXED);
return;
default:
break;
}
/*
* Otherwise, check whether the hub knows whether a port is removable
* or not
*/
wHubCharacteristics = le16_to_cpu(hub->descriptor->wHubCharacteristics);
if (!(wHubCharacteristics & HUB_CHAR_COMPOUND))
return;
if (hub_is_superspeed(hdev)) {
if (le16_to_cpu(hub->descriptor->u.ss.DeviceRemovable)
& (1 << port))
removable = false;
} else {
if (hub->descriptor->u.hs.DeviceRemovable[port / 8] & (1 << (port % 8)))
removable = false;
}
if (removable)
dev_set_removable(&udev->dev, DEVICE_REMOVABLE);
else
dev_set_removable(&udev->dev, DEVICE_FIXED);
}
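/*
* Worked example for the compound-hub test above (illustrative): on a
* high-speed hub, port 3 is checked via DeviceRemovable[3 / 8] bit
* (3 % 8), i.e. byte 0, bit 3. A set bit means non-removable, so the
* device is reported as DEVICE_FIXED; a clear bit yields
* DEVICE_REMOVABLE.
*/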
/**
* usb_new_device - perform initial device setup (usbcore-internal)
* @udev: newly addressed device (in ADDRESS state)
*
* This is called with devices which have been detected but not fully
* enumerated. The device descriptor is available, but not descriptors
* for any device configuration. The caller must have locked either
* the parent hub (if udev is a normal device) or else the
* usb_bus_idr_lock (if udev is a root hub). The parent's pointer to
* udev has already been installed, but udev is not yet visible through
* sysfs or other filesystem code.
*
* This call is synchronous, and may not be used in an interrupt context.
*
* Only the hub driver or root-hub registrar should ever call this.
*
* Return: 0 if the device was successfully registered with the driver
* core; else a negative errno value.
*
*/
int usb_new_device(struct usb_device *udev)
{
int err;
if (udev->parent) {
/* Initialize non-root-hub device wakeup to disabled;
* device (un)configuration controls wakeup capable
* sysfs power/wakeup controls wakeup enabled/disabled
*/
device_init_wakeup(&udev->dev, 0);
}
/* Tell the runtime-PM framework the device is active */
pm_runtime_set_active(&udev->dev);
pm_runtime_get_noresume(&udev->dev);
pm_runtime_use_autosuspend(&udev->dev);
pm_runtime_enable(&udev->dev);
/* By default, forbid autosuspend for all devices. It will be
* allowed for hubs during binding.
*/
usb_disable_autosuspend(udev);
err = usb_enumerate_device(udev); /* Read descriptors */
if (err < 0)
goto fail;
dev_dbg(&udev->dev, "udev %d, busnum %d, minor = %d\n",
udev->devnum, udev->bus->busnum,
(((udev->bus->busnum-1) * 128) + (udev->devnum-1)));
/* export the usbdev device-node for libusb */
udev->dev.devt = MKDEV(USB_DEVICE_MAJOR,
(((udev->bus->busnum-1) * 128) + (udev->devnum-1)));
/* Tell the world! */
announce_device(udev);
if (udev->serial)
add_device_randomness(udev->serial, strlen(udev->serial));
if (udev->product)
add_device_randomness(udev->product, strlen(udev->product));
if (udev->manufacturer)
add_device_randomness(udev->manufacturer,
strlen(udev->manufacturer));
device_enable_async_suspend(&udev->dev);
/* check whether the hub or firmware marks this port as non-removable */
set_usb_port_removable(udev);
/* Register the device. The device driver is responsible
* for configuring the device and invoking the add-device
* notifier chain (used by usbfs and possibly others).
*/
err = device_add(&udev->dev);
if (err) {
dev_err(&udev->dev, "can't device_add, error %d\n", err);
goto fail;
}
/* Create link files between child device and usb port device. */
if (udev->parent) {
struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
int port1 = udev->portnum;
struct usb_port *port_dev = hub->ports[port1 - 1];
err = sysfs_create_link(&udev->dev.kobj,
&port_dev->dev.kobj, "port");
if (err)
goto fail;
err = sysfs_create_link(&port_dev->dev.kobj,
&udev->dev.kobj, "device");
if (err) {
sysfs_remove_link(&udev->dev.kobj, "port");
goto fail;
}
if (!test_and_set_bit(port1, hub->child_usage_bits))
pm_runtime_get_sync(&port_dev->dev);
}
(void) usb_create_ep_devs(&udev->dev, &udev->ep0, udev);
usb_mark_last_busy(udev);
pm_runtime_put_sync_autosuspend(&udev->dev);
return err;
fail:
usb_set_device_state(udev, USB_STATE_NOTATTACHED);
pm_runtime_disable(&udev->dev);
pm_runtime_set_suspended(&udev->dev);
return err;
}
/**
* usb_deauthorize_device - deauthorize a device (usbcore-internal)
* @usb_dev: USB device
*
* Move the USB device to a very basic state where interfaces are disabled
* and the device is in fact unconfigured and unusable.
*
* We share a lock (that we have) with device_del(), so we need to
* defer its call.
*
* Return: 0.
*/
int usb_deauthorize_device(struct usb_device *usb_dev)
{
usb_lock_device(usb_dev);
if (usb_dev->authorized == 0)
goto out_unauthorized;
usb_dev->authorized = 0;
usb_set_configuration(usb_dev, -1);
out_unauthorized:
usb_unlock_device(usb_dev);
return 0;
}
int usb_authorize_device(struct usb_device *usb_dev)
{
int result = 0, c;
usb_lock_device(usb_dev);
if (usb_dev->authorized == 1)
goto out_authorized;
result = usb_autoresume_device(usb_dev);
if (result < 0) {
dev_err(&usb_dev->dev,
"can't autoresume for authorization: %d\n", result);
goto error_autoresume;
}
usb_dev->authorized = 1;
/* Choose and set the configuration. This registers the interfaces
* with the driver core and lets interface drivers bind to them.
*/
c = usb_choose_configuration(usb_dev);
if (c >= 0) {
result = usb_set_configuration(usb_dev, c);
if (result) {
dev_err(&usb_dev->dev,
"can't set config #%d, error %d\n", c, result);
/* This need not be fatal. The user can try to
* set other configurations. */
}
}
dev_info(&usb_dev->dev, "authorized to connect\n");
usb_autosuspend_device(usb_dev);
error_autoresume:
out_authorized:
usb_unlock_device(usb_dev); /* complements locktree */
return result;
}
/**
* get_port_ssp_rate - Match the extended port status to SSP rate
* @hdev: The hub device
* @ext_portstatus: extended port status
*
* Match the extended port status speed id to the SuperSpeed Plus sublink speed
* capability attributes. Based on the number of connected lanes and speed,
* return the corresponding enum usb_ssp_rate.
*/
static enum usb_ssp_rate get_port_ssp_rate(struct usb_device *hdev,
u32 ext_portstatus)
{
struct usb_ssp_cap_descriptor *ssp_cap = hdev->bos->ssp_cap;
u32 attr;
u8 speed_id;
u8 ssac;
u8 lanes;
int i;
if (!ssp_cap)
goto out;
speed_id = ext_portstatus & USB_EXT_PORT_STAT_RX_SPEED_ID;
lanes = USB_EXT_PORT_RX_LANES(ext_portstatus) + 1;
ssac = le32_to_cpu(ssp_cap->bmAttributes) &
USB_SSP_SUBLINK_SPEED_ATTRIBS;
for (i = 0; i <= ssac; i++) {
u8 ssid;
attr = le32_to_cpu(ssp_cap->bmSublinkSpeedAttr[i]);
ssid = FIELD_GET(USB_SSP_SUBLINK_SPEED_SSID, attr);
if (speed_id == ssid) {
u16 mantissa;
u8 lse;
u8 type;
/*
* Note: currently asymmetric lane types are only
* applicable to SSIC operating in the SuperSpeed protocol
*/
type = FIELD_GET(USB_SSP_SUBLINK_SPEED_ST, attr);
if (type == USB_SSP_SUBLINK_SPEED_ST_ASYM_RX ||
type == USB_SSP_SUBLINK_SPEED_ST_ASYM_TX)
goto out;
if (FIELD_GET(USB_SSP_SUBLINK_SPEED_LP, attr) !=
USB_SSP_SUBLINK_SPEED_LP_SSP)
goto out;
lse = FIELD_GET(USB_SSP_SUBLINK_SPEED_LSE, attr);
mantissa = FIELD_GET(USB_SSP_SUBLINK_SPEED_LSM, attr);
/* Convert to Gbps */
for (; lse < USB_SSP_SUBLINK_SPEED_LSE_GBPS; lse++)
mantissa /= 1000;
if (mantissa >= 10 && lanes == 1)
return USB_SSP_GEN_2x1;
if (mantissa >= 10 && lanes == 2)
return USB_SSP_GEN_2x2;
if (mantissa >= 5 && lanes == 2)
return USB_SSP_GEN_1x2;
goto out;
}
}
out:
return USB_SSP_GEN_UNKNOWN;
}
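/*
* Worked example (illustrative): a matching sublink speed attribute with
* its exponent already in Gbps and a mantissa of 10, seen on a port
* reporting two Rx lanes, yields USB_SSP_GEN_2x2; the same attribute on
* a single lane yields USB_SSP_GEN_2x1, and a 5 Gbps mantissa on two
* lanes yields USB_SSP_GEN_1x2.
*/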
#ifdef CONFIG_USB_FEW_INIT_RETRIES
#define PORT_RESET_TRIES 2
#define SET_ADDRESS_TRIES 1
#define GET_DESCRIPTOR_TRIES 1
#define GET_MAXPACKET0_TRIES 1
#define PORT_INIT_TRIES 4
#else
#define PORT_RESET_TRIES 5
#define SET_ADDRESS_TRIES 2
#define GET_DESCRIPTOR_TRIES 2
#define GET_MAXPACKET0_TRIES 3
#define PORT_INIT_TRIES 4
#endif /* CONFIG_USB_FEW_INIT_RETRIES */
#define DETECT_DISCONNECT_TRIES 5
#define HUB_ROOT_RESET_TIME 60 /* times are in msec */
#define HUB_SHORT_RESET_TIME 10
#define HUB_BH_RESET_TIME 50
#define HUB_LONG_RESET_TIME 200
#define HUB_RESET_TIMEOUT 800
static bool use_new_scheme(struct usb_device *udev, int retry,
struct usb_port *port_dev)
{
int old_scheme_first_port =
(port_dev->quirks & USB_PORT_QUIRK_OLD_SCHEME) ||
old_scheme_first;
/*
* "New scheme" enumeration causes an extra state transition to be
* exposed to an xhci host and causes USB3 devices to receive control
* commands in the default state. This has been seen to cause
* enumeration failures, so disable this enumeration scheme for USB3
* devices.
*/
if (udev->speed >= USB_SPEED_SUPER)
return false;
/*
* If use_both_schemes is set, use the first scheme (whichever
* it is) for the larger half of the retries, then use the other
* scheme. Otherwise, use the first scheme for all the retries.
*/
if (use_both_schemes && retry >= (PORT_INIT_TRIES + 1) / 2)
return old_scheme_first_port; /* Second half */
return !old_scheme_first_port; /* First half or all */
}
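/*
* Worked example (illustrative): with use_both_schemes set and
* PORT_INIT_TRIES == 4, (PORT_INIT_TRIES + 1) / 2 == 2, so retries 0
* and 1 return !old_scheme_first_port and retries 2 and 3 return
* old_scheme_first_port.
*/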
/* Is a USB 3.0 port in the Inactive or Compliance Mode state?
* Port warm reset is required to recover
*/
static bool hub_port_warm_reset_required(struct usb_hub *hub, int port1,
u16 portstatus)
{
u16 link_state;
if (!hub_is_superspeed(hub->hdev))
return false;
if (test_bit(port1, hub->warm_reset_bits))
return true;
link_state = portstatus & USB_PORT_STAT_LINK_STATE;
return link_state == USB_SS_PORT_LS_SS_INACTIVE
|| link_state == USB_SS_PORT_LS_COMP_MOD;
}
static int hub_port_wait_reset(struct usb_hub *hub, int port1,
struct usb_device *udev, unsigned int delay, bool warm)
{
int delay_time, ret;
u16 portstatus;
u16 portchange;
u32 ext_portstatus = 0;
for (delay_time = 0;
delay_time < HUB_RESET_TIMEOUT;
delay_time += delay) {
/* wait to give the device a chance to reset */
msleep(delay);
/* read and decode port status */
if (hub_is_superspeedplus(hub->hdev))
ret = hub_ext_port_status(hub, port1,
HUB_EXT_PORT_STATUS,
&portstatus, &portchange,
&ext_portstatus);
else
ret = usb_hub_port_status(hub, port1, &portstatus,
&portchange);
if (ret < 0)
return ret;
/*
* The port state is unknown until the reset completes.
*
* On top of that, some chips may require additional time
* to re-establish a connection after the reset is complete,
* so also wait for the connection to be re-established.
*/
if (!(portstatus & USB_PORT_STAT_RESET) &&
(portstatus & USB_PORT_STAT_CONNECTION))
break;
/* switch to the long delay after two short delay failures */
if (delay_time >= 2 * HUB_SHORT_RESET_TIME)
delay = HUB_LONG_RESET_TIME;
dev_dbg(&hub->ports[port1 - 1]->dev,
"not %sreset yet, waiting %dms\n",
warm ? "warm " : "", delay);
}
if ((portstatus & USB_PORT_STAT_RESET))
return -EBUSY;
if (hub_port_warm_reset_required(hub, port1, portstatus))
return -ENOTCONN;
/* Device went away? */
if (!(portstatus & USB_PORT_STAT_CONNECTION))
return -ENOTCONN;
/* Retry if connect change is set but status is still connected.
* A USB 3.0 connection may bounce if multiple warm resets were issued,
* but the device may have successfully re-connected. Ignore it.
*/
if (!hub_is_superspeed(hub->hdev) &&
(portchange & USB_PORT_STAT_C_CONNECTION)) {
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_CONNECTION);
return -EAGAIN;
}
if (!(portstatus & USB_PORT_STAT_ENABLE))
return -EBUSY;
if (!udev)
return 0;
if (hub_is_superspeedplus(hub->hdev)) {
/* extended portstatus Rx and Tx lane count are zero based */
udev->rx_lanes = USB_EXT_PORT_RX_LANES(ext_portstatus) + 1;
udev->tx_lanes = USB_EXT_PORT_TX_LANES(ext_portstatus) + 1;
udev->ssp_rate = get_port_ssp_rate(hub->hdev, ext_portstatus);
} else {
udev->rx_lanes = 1;
udev->tx_lanes = 1;
udev->ssp_rate = USB_SSP_GEN_UNKNOWN;
}
if (udev->ssp_rate != USB_SSP_GEN_UNKNOWN)
udev->speed = USB_SPEED_SUPER_PLUS;
else if (hub_is_superspeed(hub->hdev))
udev->speed = USB_SPEED_SUPER;
else if (portstatus & USB_PORT_STAT_HIGH_SPEED)
udev->speed = USB_SPEED_HIGH;
else if (portstatus & USB_PORT_STAT_LOW_SPEED)
udev->speed = USB_SPEED_LOW;
else
udev->speed = USB_SPEED_FULL;
return 0;
}
/* Handle port reset and port warm(BH) reset (for USB3 protocol ports) */
static int hub_port_reset(struct usb_hub *hub, int port1,
struct usb_device *udev, unsigned int delay, bool warm)
{
int i, status;
u16 portchange, portstatus;
struct usb_port *port_dev = hub->ports[port1 - 1];
int reset_recovery_time;
if (!hub_is_superspeed(hub->hdev)) {
if (warm) {
dev_err(hub->intfdev, "only USB3 hubs support warm reset\n");
return -EINVAL;
}
/* Block EHCI CF initialization during the port reset.
* Some companion controllers don't like it when they mix.
*/
down_read(&ehci_cf_port_reset_rwsem);
} else if (!warm) {
/*
* If the caller hasn't explicitly requested a warm reset,
* double check and see if one is needed.
*/
if (usb_hub_port_status(hub, port1, &portstatus,
&portchange) == 0)
if (hub_port_warm_reset_required(hub, port1,
portstatus))
warm = true;
}
clear_bit(port1, hub->warm_reset_bits);
/* Reset the port */
for (i = 0; i < PORT_RESET_TRIES; i++) {
status = set_port_feature(hub->hdev, port1, (warm ?
USB_PORT_FEAT_BH_PORT_RESET :
USB_PORT_FEAT_RESET));
if (status == -ENODEV) {
; /* The hub is gone */
} else if (status) {
dev_err(&port_dev->dev,
"cannot %sreset (err = %d)\n",
warm ? "warm " : "", status);
} else {
status = hub_port_wait_reset(hub, port1, udev, delay,
warm);
if (status && status != -ENOTCONN && status != -ENODEV)
dev_dbg(hub->intfdev,
"port_wait_reset: err = %d\n",
status);
}
/*
* Check for disconnect or reset, and bail out after several
* reset attempts to avoid warm reset loop.
*/
if (status == 0 || status == -ENOTCONN || status == -ENODEV ||
(status == -EBUSY && i == PORT_RESET_TRIES - 1)) {
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_RESET);
if (!hub_is_superspeed(hub->hdev))
goto done;
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_BH_PORT_RESET);
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_PORT_LINK_STATE);
if (udev)
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_CONNECTION);
/*
* If a USB 3.0 device migrates from reset to an error
* state, re-issue the warm reset.
*/
if (usb_hub_port_status(hub, port1,
&portstatus, &portchange) < 0)
goto done;
if (!hub_port_warm_reset_required(hub, port1,
portstatus))
goto done;
/*
* If the port is in SS.Inactive or Compliance Mode, the
* hot or warm reset failed. Try another warm reset.
*/
if (!warm) {
dev_dbg(&port_dev->dev,
"hot reset failed, warm reset\n");
warm = true;
}
}
dev_dbg(&port_dev->dev,
"not enabled, trying %sreset again...\n",
warm ? "warm " : "");
delay = HUB_LONG_RESET_TIME;
}
dev_err(&port_dev->dev, "Cannot enable. Maybe the USB cable is bad?\n");
done:
if (status == 0) {
if (port_dev->quirks & USB_PORT_QUIRK_FAST_ENUM)
usleep_range(10000, 12000);
else {
/* TRSTRCY = 10 ms; plus some extra */
reset_recovery_time = 10 + 40;
/* Hub needs extra delay after resetting its port. */
if (hub->hdev->quirks & USB_QUIRK_HUB_SLOW_RESET)
reset_recovery_time += 100;
msleep(reset_recovery_time);
}
if (udev) {
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
update_devnum(udev, 0);
/* The xHC may think the device is already reset,
* so ignore the status.
*/
if (hcd->driver->reset_device)
hcd->driver->reset_device(hcd, udev);
usb_set_device_state(udev, USB_STATE_DEFAULT);
}
} else {
if (udev)
usb_set_device_state(udev, USB_STATE_NOTATTACHED);
}
if (!hub_is_superspeed(hub->hdev))
up_read(&ehci_cf_port_reset_rwsem);
return status;
}
/*
* hub_port_stop_enumerate - stop USB enumeration or ignore port events
* @hub: target hub
* @port1: port number of the port
* @retries: number of port retries in hub_port_init()
*
* Return:
* true: ignore port actions/events or give up connection attempts.
* false: keep original behavior.
*
* Based on the retry count, this function decides whether a port marked
* with the early_stop attribute should stop enumeration or ignore events.
*
* Note:
* This function changes nothing if early_stop is not set; when early_stop
* is set and the port has already failed more than once, it prevents all
* further connection attempts.
*/
static bool hub_port_stop_enumerate(struct usb_hub *hub, int port1, int retries)
{
struct usb_port *port_dev = hub->ports[port1 - 1];
if (port_dev->early_stop) {
if (port_dev->ignore_event)
return true;
/*
* We want unsuccessful attempts to fail quickly.
* Since some devices may need one failure during
* port initialization, we allow two tries but no
* more.
*/
if (retries < 2)
return false;
port_dev->ignore_event = 1;
} else
port_dev->ignore_event = 0;
return port_dev->ignore_event;
}
/* Check if a port is powered on */
int usb_port_is_power_on(struct usb_hub *hub, unsigned int portstatus)
{
int ret = 0;
if (hub_is_superspeed(hub->hdev)) {
if (portstatus & USB_SS_PORT_STAT_POWER)
ret = 1;
} else {
if (portstatus & USB_PORT_STAT_POWER)
ret = 1;
}
return ret;
}
static void usb_lock_port(struct usb_port *port_dev)
__acquires(&port_dev->status_lock)
{
mutex_lock(&port_dev->status_lock);
__acquire(&port_dev->status_lock);
}
static void usb_unlock_port(struct usb_port *port_dev)
__releases(&port_dev->status_lock)
{
mutex_unlock(&port_dev->status_lock);
__release(&port_dev->status_lock);
}
#ifdef CONFIG_PM
/* Check if a port is suspended (USB 2.0 port) or in the U3 state (USB 3.0 port) */
static int port_is_suspended(struct usb_hub *hub, unsigned portstatus)
{
int ret = 0;
if (hub_is_superspeed(hub->hdev)) {
if ((portstatus & USB_PORT_STAT_LINK_STATE)
== USB_SS_PORT_LS_U3)
ret = 1;
} else {
if (portstatus & USB_PORT_STAT_SUSPEND)
ret = 1;
}
return ret;
}
/* Determine whether the device on a port is ready for a normal resume,
* is ready for a reset-resume, or should be disconnected.
*/
static int check_port_resume_type(struct usb_device *udev,
struct usb_hub *hub, int port1,
int status, u16 portchange, u16 portstatus)
{
struct usb_port *port_dev = hub->ports[port1 - 1];
int retries = 3;
retry:
/* Is a warm reset needed to recover the connection? */
if (status == 0 && udev->reset_resume
&& hub_port_warm_reset_required(hub, port1, portstatus)) {
/* pass */;
}
/* Is the device still present? */
else if (status || port_is_suspended(hub, portstatus) ||
!usb_port_is_power_on(hub, portstatus)) {
if (status >= 0)
status = -ENODEV;
} else if (!(portstatus & USB_PORT_STAT_CONNECTION)) {
if (retries--) {
usleep_range(200, 300);
status = usb_hub_port_status(hub, port1, &portstatus,
&portchange);
goto retry;
}
status = -ENODEV;
}
/* Can't do a normal resume if the port isn't enabled,
* so try a reset-resume instead.
*/
else if (!(portstatus & USB_PORT_STAT_ENABLE) && !udev->reset_resume) {
if (udev->persist_enabled)
udev->reset_resume = 1;
else
status = -ENODEV;
}
if (status) {
dev_dbg(&port_dev->dev, "status %04x.%04x after resume, %d\n",
portchange, portstatus, status);
} else if (udev->reset_resume) {
/* Late port handoff can set status-change bits */
if (portchange & USB_PORT_STAT_C_CONNECTION)
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_CONNECTION);
if (portchange & USB_PORT_STAT_C_ENABLE)
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_ENABLE);
/*
* Whatever made this reset-resume necessary may have
* turned on the port1 bit in hub->change_bits. But after
* a successful reset-resume we want the bit to be clear;
* if it was on it would indicate that something happened
* following the reset-resume.
*/
clear_bit(port1, hub->change_bits);
}
return status;
}
int usb_disable_ltm(struct usb_device *udev)
{
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
/* Check if the roothub and device supports LTM. */
if (!usb_device_supports_ltm(hcd->self.root_hub) ||
!usb_device_supports_ltm(udev))
return 0;
/* Clear Feature LTM Enable can only be sent if the device is
* configured.
*/
if (!udev->actconfig)
return 0;
return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE,
USB_DEVICE_LTM_ENABLE, 0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
}
EXPORT_SYMBOL_GPL(usb_disable_ltm);
void usb_enable_ltm(struct usb_device *udev)
{
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
/* Check if the roothub and device supports LTM. */
if (!usb_device_supports_ltm(hcd->self.root_hub) ||
!usb_device_supports_ltm(udev))
return;
/* Set Feature LTM Enable can only be sent if the device is
* configured.
*/
if (!udev->actconfig)
return;
usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_SET_FEATURE, USB_RECIP_DEVICE,
USB_DEVICE_LTM_ENABLE, 0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
}
EXPORT_SYMBOL_GPL(usb_enable_ltm);
/*
* usb_enable_remote_wakeup - enable remote wakeup for a device
* @udev: target device
*
* For USB-2 devices: Set the device's remote wakeup feature.
*
* For USB-3 devices: Assume there's only one function on the device and
* enable remote wake for the first interface. FIXME if the interface
* association descriptor shows there's more than one function.
*/
static int usb_enable_remote_wakeup(struct usb_device *udev)
{
if (udev->speed < USB_SPEED_SUPER)
return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_SET_FEATURE, USB_RECIP_DEVICE,
USB_DEVICE_REMOTE_WAKEUP, 0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
else
return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_SET_FEATURE, USB_RECIP_INTERFACE,
USB_INTRF_FUNC_SUSPEND,
USB_INTRF_FUNC_SUSPEND_RW |
USB_INTRF_FUNC_SUSPEND_LP,
NULL, 0, USB_CTRL_SET_TIMEOUT);
}
/*
* usb_disable_remote_wakeup - disable remote wakeup for a device
* @udev: target device
*
* For USB-2 devices: Clear the device's remote wakeup feature.
*
* For USB-3 devices: Assume there's only one function on the device and
* disable remote wake for the first interface. FIXME if the interface
* association descriptor shows there's more than one function.
*/
static int usb_disable_remote_wakeup(struct usb_device *udev)
{
if (udev->speed < USB_SPEED_SUPER)
return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE,
USB_DEVICE_REMOTE_WAKEUP, 0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
else
return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_SET_FEATURE, USB_RECIP_INTERFACE,
USB_INTRF_FUNC_SUSPEND, 0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
}
/* Count of wakeup-enabled devices at or below udev */
unsigned usb_wakeup_enabled_descendants(struct usb_device *udev)
{
struct usb_hub *hub = usb_hub_to_struct_hub(udev);
return udev->do_remote_wakeup +
(hub ? hub->wakeup_enabled_descendants : 0);
}
EXPORT_SYMBOL_GPL(usb_wakeup_enabled_descendants);
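/*
* Illustrative example: a hub whose own do_remote_wakeup is 0 but which
* has two wakeup-enabled child devices returns 2 here; for a leaf
* device, usb_hub_to_struct_hub() is NULL and the result is just the
* device's own do_remote_wakeup flag.
*/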
/*
* usb_port_suspend - suspend a usb device's upstream port
* @udev: device that's no longer in active use, not a root hub
* Context: must be able to sleep; device not locked; pm locks held
*
* Suspends a USB device that isn't in active use, conserving power.
* Devices may wake out of a suspend, if anything important happens,
* using the remote wakeup mechanism. They may also be taken out of
* suspend by the host, using usb_port_resume(). It's also routine
* to disconnect devices while they are suspended.
*
* This only affects the USB hardware for a device; its interfaces
* (and, for hubs, child devices) must already have been suspended.
*
* Selective port suspend reduces power; most suspended devices draw
* less than 500 uA. It's also used in OTG, along with remote wakeup.
* All devices below the suspended port are also suspended.
*
* Devices leave suspend state when the host wakes them up. Some devices
* also support "remote wakeup", where the device can activate the USB
* tree above them to deliver data, such as a keypress or packet. In
* some cases, this wakes the USB host.
*
* Suspending OTG devices may trigger HNP, if that's been enabled
* between a pair of dual-role devices. That will change roles, such
* as from A-Host to A-Peripheral or from B-Host back to B-Peripheral.
*
* Devices on USB hub ports have only one "suspend" state, corresponding
* to ACPI D2, "may cause the device to lose some context".
* State transitions include:
*
* - suspend, resume ... when the VBUS power link stays live
* - suspend, disconnect ... VBUS lost
*
* Once VBUS drop breaks the circuit, the port it's using has to go through
* normal re-enumeration procedures, starting with enabling VBUS power.
* Other than re-initializing the hub (plug/unplug, except for root hubs),
* Linux (2.6) currently has NO mechanisms to initiate that: no hub_wq
* timer, no SRP, no requests through sysfs.
*
* If Runtime PM isn't enabled or used, non-SuperSpeed devices may not get
* suspended until their bus goes into global suspend (i.e., the root
* hub is suspended). Nevertheless, we change @udev->state to
* USB_STATE_SUSPENDED as this is the device's "logical" state. The actual
* upstream port setting is stored in @udev->port_is_suspended.
*
* Returns 0 on success, else negative errno.
*/
int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
{
struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
struct usb_port *port_dev = hub->ports[udev->portnum - 1];
int port1 = udev->portnum;
int status;
bool really_suspend = true;
usb_lock_port(port_dev);
/* enable remote wakeup when appropriate; this lets the device
* wake up the upstream hub (including maybe the root hub).
*
* NOTE: OTG devices may issue remote wakeup (or SRP) even when
* we don't explicitly enable it here.
*/
if (udev->do_remote_wakeup) {
status = usb_enable_remote_wakeup(udev);
if (status) {
dev_dbg(&udev->dev, "won't remote wakeup, status %d\n",
status);
/* bail if autosuspend is requested */
if (PMSG_IS_AUTO(msg))
goto err_wakeup;
}
}
/* disable USB2 hardware LPM */
usb_disable_usb2_hardware_lpm(udev);
if (usb_disable_ltm(udev)) {
dev_err(&udev->dev, "Failed to disable LTM before suspend\n");
status = -ENOMEM;
if (PMSG_IS_AUTO(msg))
goto err_ltm;
}
/* see 7.1.7.6 */
if (hub_is_superspeed(hub->hdev))
status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U3);
/*
* For system suspend, we do not need to enable the suspend feature
* on individual USB-2 ports. The devices will automatically go
* into suspend a few ms after the root hub stops sending packets.
* The USB 2.0 spec calls this "global suspend".
*
* However, many USB hubs have a bug: They don't relay wakeup requests
* from a downstream port if the port's suspend feature isn't on.
* Therefore we will turn on the suspend feature if udev or any of its
* descendants is enabled for remote wakeup.
*/
else if (PMSG_IS_AUTO(msg) || usb_wakeup_enabled_descendants(udev) > 0)
status = set_port_feature(hub->hdev, port1,
USB_PORT_FEAT_SUSPEND);
else {
really_suspend = false;
status = 0;
}
if (status) {
/* On timeout, check whether the port actually reached the
* suspended state anyway, so an already-suspended port is not
* handled incorrectly.
*/
if (status == -ETIMEDOUT) {
int ret;
u16 portstatus, portchange;
portstatus = portchange = 0;
ret = usb_hub_port_status(hub, port1, &portstatus,
&portchange);
dev_dbg(&port_dev->dev,
"suspend timeout, status %04x\n", portstatus);
if (ret == 0 && port_is_suspended(hub, portstatus)) {
status = 0;
goto suspend_done;
}
}
dev_dbg(&port_dev->dev, "can't suspend, status %d\n", status);
/* Try to enable USB3 LTM again */
usb_enable_ltm(udev);
err_ltm:
/* Try to enable USB2 hardware LPM again */
usb_enable_usb2_hardware_lpm(udev);
if (udev->do_remote_wakeup)
(void) usb_disable_remote_wakeup(udev);
err_wakeup:
/* System sleep transitions should never fail */
if (!PMSG_IS_AUTO(msg))
status = 0;
} else {
suspend_done:
dev_dbg(&udev->dev, "usb %ssuspend, wakeup %d\n",
(PMSG_IS_AUTO(msg) ? "auto-" : ""),
udev->do_remote_wakeup);
if (really_suspend) {
udev->port_is_suspended = 1;
/* device has up to 10 msec to fully suspend */
msleep(10);
}
usb_set_device_state(udev, USB_STATE_SUSPENDED);
}
if (status == 0 && !udev->do_remote_wakeup && udev->persist_enabled
&& test_and_clear_bit(port1, hub->child_usage_bits))
pm_runtime_put_sync(&port_dev->dev);
usb_mark_last_busy(hub->hdev);
usb_unlock_port(port_dev);
return status;
}
/*
* If the USB "suspend" state is in use (rather than "global suspend"),
* many devices will be individually taken out of suspend state using
* special "resume" signaling. This routine kicks in shortly after
* hardware resume signaling is finished, either because of selective
* resume (by host) or remote wakeup (by device) ... now see what changed
* in the tree that's rooted at this device.
*
* If @udev->reset_resume is set then the device is reset before the
* status check is done.
*/
static int finish_port_resume(struct usb_device *udev)
{
int status = 0;
u16 devstatus = 0;
/* caller owns the udev device lock */
dev_dbg(&udev->dev, "%s\n",
udev->reset_resume ? "finish reset-resume" : "finish resume");
/* usb ch9 identifies four variants of SUSPENDED, based on what
* state the device resumes to. Linux currently won't see the
* first two on the host side; they'd be inside hub_port_init()
* during many timeouts, but hub_wq can't suspend until later.
*/
usb_set_device_state(udev, udev->actconfig
? USB_STATE_CONFIGURED
: USB_STATE_ADDRESS);
/* 10.5.4.5 says not to reset a suspended port if the attached
* device is enabled for remote wakeup. Hence the reset
* operation is carried out here, after the port has been
* resumed.
*/
if (udev->reset_resume) {
/*
* If the device morphs or switches modes when it is reset,
* we don't want to perform a reset-resume. We'll fail the
* resume, which will cause a logical disconnect, and then
* the device will be rediscovered.
*/
retry_reset_resume:
if (udev->quirks & USB_QUIRK_RESET)
status = -ENODEV;
else
status = usb_reset_and_verify_device(udev);
}
/* 10.5.4.5 says be sure devices in the tree are still there.
* For now let's assume the device didn't go crazy on resume,
* and device drivers will know about any resume quirks.
*/
if (status == 0) {
devstatus = 0;
status = usb_get_std_status(udev, USB_RECIP_DEVICE, 0, &devstatus);
/* If a normal resume failed, try doing a reset-resume */
if (status && !udev->reset_resume && udev->persist_enabled) {
dev_dbg(&udev->dev, "retry with reset-resume\n");
udev->reset_resume = 1;
goto retry_reset_resume;
}
}
if (status) {
dev_dbg(&udev->dev, "gone after usb resume? status %d\n",
status);
/*
* There are a few quirky devices which violate the standard
* by claiming to have remote wakeup enabled after a reset,
* which crash if the feature is cleared, hence check for
* udev->reset_resume
*/
} else if (udev->actconfig && !udev->reset_resume) {
if (udev->speed < USB_SPEED_SUPER) {
if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP))
status = usb_disable_remote_wakeup(udev);
} else {
status = usb_get_std_status(udev, USB_RECIP_INTERFACE, 0,
&devstatus);
if (!status && devstatus & (USB_INTRF_STAT_FUNC_RW_CAP
| USB_INTRF_STAT_FUNC_RW))
status = usb_disable_remote_wakeup(udev);
}
if (status)
dev_dbg(&udev->dev,
"disable remote wakeup, status %d\n",
status);
status = 0;
}
return status;
}
/*
* Some SuperSpeed USB devices take a long time to train the link.
* xHCI spec section 4.19.4 says that when link training succeeds, the
* port sets the CCS bit to 1, so if software reads the port status
* before link training completes it will not see the device as present.
* USB analyzer logs of such buggy devices show that in some cases the
* device switches on its RX termination only a long while after the
* host enables VBUS. In a few other cases the device fails to negotiate
* link training on the first attempt. So far, devices have been
* reported to take as long as 2000 ms to train the link after the host
* enables VBUS and termination. The following routine therefore
* implements a 2000 ms timeout for link training; if the link trains
* before the timeout, the loop exits earlier.
*
* There are also some USB 2.0 hard-drive-based devices and USB 3.0
* thumb drives that, when plugged into a 2.0-only port, take a long
* time to set CCS after VBUS is enabled.
*
* FIXME: If a device was connected before suspend but was removed
* while the system was asleep, the loop in the following routine will
* only exit at the timeout.
*
* This routine should only be called when persist is enabled.
*/
static int wait_for_connected(struct usb_device *udev,
struct usb_hub *hub, int port1,
u16 *portchange, u16 *portstatus)
{
int status = 0, delay_ms = 0;
while (delay_ms < 2000) {
if (status || *portstatus & USB_PORT_STAT_CONNECTION)
break;
if (!usb_port_is_power_on(hub, *portstatus)) {
status = -ENODEV;
break;
}
msleep(20);
delay_ms += 20;
status = usb_hub_port_status(hub, port1, portstatus, portchange);
}
dev_dbg(&udev->dev, "Waited %dms for CONNECT\n", delay_ms);
return status;
}
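/*
* The loop above polls in 20 ms steps, so a device that trains its link
* after, say, 180 ms is seen on roughly the ninth status read; only a
* device that never reports CCS (or a port that loses power) runs the
* full 2000 ms budget.
*/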
/*
* usb_port_resume - re-activate a suspended usb device's upstream port
* @udev: device to re-activate, not a root hub
* Context: must be able to sleep; device not locked; pm locks held
*
* This will re-activate the suspended device, increasing power usage
* while letting drivers communicate again with its endpoints.
* USB resume explicitly guarantees that the power session between
* the host and the device is the same as it was when the device
* suspended.
*
* If @udev->reset_resume is set then this routine won't check that the
* port is still enabled. Furthermore, finish_port_resume() above will
* reset @udev. The end result is that a broken power session can be
* recovered and @udev will appear to persist across a loss of VBUS power.
*
* For example, if a host controller doesn't maintain VBUS suspend current
* during a system sleep or is reset when the system wakes up, all the USB
* power sessions below it will be broken. This is especially troublesome
* for mass-storage devices containing mounted filesystems, since the
* device will appear to have disconnected and all the memory mappings
* to it will be lost. Using the USB_PERSIST facility, the device can be
* made to appear as if it had not disconnected.
*
* This facility can be dangerous. Although usb_reset_and_verify_device() makes
* every effort to ensure that the same device is present after the
* reset as before, it cannot provide a 100% guarantee. Furthermore it's
* quite possible for a device to remain unaltered but its media to be
* changed. If the user replaces a flash memory card while the system is
* asleep, he will have only himself to blame when the filesystem on the
* new card is corrupted and the system crashes.
*
* Returns 0 on success, else negative errno.
*/
int usb_port_resume(struct usb_device *udev, pm_message_t msg)
{
struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
struct usb_port *port_dev = hub->ports[udev->portnum - 1];
int port1 = udev->portnum;
int status;
u16 portchange, portstatus;
if (!test_and_set_bit(port1, hub->child_usage_bits)) {
status = pm_runtime_resume_and_get(&port_dev->dev);
if (status < 0) {
dev_dbg(&udev->dev, "can't resume usb port, status %d\n",
status);
return status;
}
}
usb_lock_port(port_dev);
/* Skip the initial Clear-Suspend step for a remote wakeup */
status = usb_hub_port_status(hub, port1, &portstatus, &portchange);
if (status == 0 && !port_is_suspended(hub, portstatus)) {
if (portchange & USB_PORT_STAT_C_SUSPEND)
pm_wakeup_event(&udev->dev, 0);
goto SuspendCleared;
}
/* see 7.1.7.7; affects power usage, but not budgeting */
if (hub_is_superspeed(hub->hdev))
status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U0);
else
status = usb_clear_port_feature(hub->hdev,
port1, USB_PORT_FEAT_SUSPEND);
if (status) {
dev_dbg(&port_dev->dev, "can't resume, status %d\n", status);
} else {
/* drive resume for USB_RESUME_TIMEOUT msec */
dev_dbg(&udev->dev, "usb %sresume\n",
(PMSG_IS_AUTO(msg) ? "auto-" : ""));
msleep(USB_RESUME_TIMEOUT);
/* Virtual root hubs can trigger on GET_PORT_STATUS to
* stop resume signaling. Then finish the resume
* sequence.
*/
status = usb_hub_port_status(hub, port1, &portstatus, &portchange);
}
SuspendCleared:
if (status == 0) {
udev->port_is_suspended = 0;
if (hub_is_superspeed(hub->hdev)) {
if (portchange & USB_PORT_STAT_C_LINK_STATE)
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_PORT_LINK_STATE);
} else {
if (portchange & USB_PORT_STAT_C_SUSPEND)
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_SUSPEND);
}
/* TRSMRCY = 10 msec */
msleep(10);
}
if (udev->persist_enabled)
status = wait_for_connected(udev, hub, port1, &portchange,
&portstatus);
status = check_port_resume_type(udev,
hub, port1, status, portchange, portstatus);
if (status == 0)
status = finish_port_resume(udev);
if (status < 0) {
dev_dbg(&udev->dev, "can't resume, status %d\n", status);
hub_port_logical_disconnect(hub, port1);
} else {
/* Try to enable USB2 hardware LPM */
usb_enable_usb2_hardware_lpm(udev);
/* Try to enable USB3 LTM */
usb_enable_ltm(udev);
}
usb_unlock_port(port_dev);
return status;
}
int usb_remote_wakeup(struct usb_device *udev)
{
int status = 0;
usb_lock_device(udev);
if (udev->state == USB_STATE_SUSPENDED) {
dev_dbg(&udev->dev, "usb %sresume\n", "wakeup-");
status = usb_autoresume_device(udev);
if (status == 0) {
/* Let the drivers do their thing, then... */
usb_autosuspend_device(udev);
}
}
usb_unlock_device(udev);
return status;
}
/* Returns 1 if there was a remote wakeup and a connect status change. */
static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
u16 portstatus, u16 portchange)
__must_hold(&port_dev->status_lock)
{
struct usb_port *port_dev = hub->ports[port - 1];
struct usb_device *hdev;
struct usb_device *udev;
int connect_change = 0;
u16 link_state;
int ret;
hdev = hub->hdev;
udev = port_dev->child;
if (!hub_is_superspeed(hdev)) {
if (!(portchange & USB_PORT_STAT_C_SUSPEND))
return 0;
usb_clear_port_feature(hdev, port, USB_PORT_FEAT_C_SUSPEND);
} else {
link_state = portstatus & USB_PORT_STAT_LINK_STATE;
if (!udev || udev->state != USB_STATE_SUSPENDED ||
(link_state != USB_SS_PORT_LS_U0 &&
link_state != USB_SS_PORT_LS_U1 &&
link_state != USB_SS_PORT_LS_U2))
return 0;
}
if (udev) {
/* TRSMRCY = 10 msec */
msleep(10);
usb_unlock_port(port_dev);
ret = usb_remote_wakeup(udev);
usb_lock_port(port_dev);
if (ret < 0)
connect_change = 1;
} else {
ret = -ENODEV;
hub_port_disable(hub, port, 1);
}
dev_dbg(&port_dev->dev, "resume, status %d\n", ret);
return connect_change;
}
static int check_ports_changed(struct usb_hub *hub)
{
int port1;
for (port1 = 1; port1 <= hub->hdev->maxchild; ++port1) {
u16 portstatus, portchange;
int status;
status = usb_hub_port_status(hub, port1, &portstatus, &portchange);
if (!status && portchange)
return 1;
}
return 0;
}
static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
{
struct usb_hub *hub = usb_get_intfdata(intf);
struct usb_device *hdev = hub->hdev;
unsigned port1;
/*
* Warn if children aren't already suspended.
* Also, add up the number of wakeup-enabled descendants.
*/
hub->wakeup_enabled_descendants = 0;
for (port1 = 1; port1 <= hdev->maxchild; port1++) {
struct usb_port *port_dev = hub->ports[port1 - 1];
struct usb_device *udev = port_dev->child;
if (udev && udev->can_submit) {
dev_warn(&port_dev->dev, "device %s not suspended yet\n",
dev_name(&udev->dev));
if (PMSG_IS_AUTO(msg))
return -EBUSY;
}
if (udev)
hub->wakeup_enabled_descendants +=
usb_wakeup_enabled_descendants(udev);
}
if (hdev->do_remote_wakeup && hub->quirk_check_port_auto_suspend) {
/* check if there are changes pending on hub ports */
if (check_ports_changed(hub)) {
if (PMSG_IS_AUTO(msg))
return -EBUSY;
pm_wakeup_event(&hdev->dev, 2000);
}
}
if (hub_is_superspeed(hdev) && hdev->do_remote_wakeup) {
/* Enable hub to send remote wakeup for all ports. */
for (port1 = 1; port1 <= hdev->maxchild; port1++) {
set_port_feature(hdev,
port1 |
USB_PORT_FEAT_REMOTE_WAKE_CONNECT |
USB_PORT_FEAT_REMOTE_WAKE_DISCONNECT |
USB_PORT_FEAT_REMOTE_WAKE_OVER_CURRENT,
USB_PORT_FEAT_REMOTE_WAKE_MASK);
}
}
dev_dbg(&intf->dev, "%s\n", __func__);
/* stop hub_wq and related activity */
hub_quiesce(hub, HUB_SUSPEND);
return 0;
}
/* Report wakeup requests from the ports of a resuming root hub */
static void report_wakeup_requests(struct usb_hub *hub)
{
struct usb_device *hdev = hub->hdev;
struct usb_device *udev;
struct usb_hcd *hcd;
unsigned long resuming_ports;
int i;
if (hdev->parent)
return; /* Not a root hub */
hcd = bus_to_hcd(hdev->bus);
if (hcd->driver->get_resuming_ports) {
/*
* The get_resuming_ports() method returns a bitmap (origin 0)
* of ports which have started wakeup signaling but have not
* yet finished resuming. During system resume we will
* resume all the enabled ports, regardless of any wakeup
* signals, which means the wakeup requests would be lost.
* To prevent this, report them to the PM core here.
*/
resuming_ports = hcd->driver->get_resuming_ports(hcd);
for (i = 0; i < hdev->maxchild; ++i) {
if (test_bit(i, &resuming_ports)) {
udev = hub->ports[i]->child;
if (udev)
pm_wakeup_event(&udev->dev, 0);
}
}
}
}
static int hub_resume(struct usb_interface *intf)
{
struct usb_hub *hub = usb_get_intfdata(intf);
dev_dbg(&intf->dev, "%s\n", __func__);
hub_activate(hub, HUB_RESUME);
/*
* This should be called only for system resume, not runtime resume.
* We can't tell the difference here, so some wakeup requests will be
* reported at the wrong time or more than once. This shouldn't
* matter much, so long as they do get reported.
*/
report_wakeup_requests(hub);
return 0;
}
static int hub_reset_resume(struct usb_interface *intf)
{
struct usb_hub *hub = usb_get_intfdata(intf);
dev_dbg(&intf->dev, "%s\n", __func__);
hub_activate(hub, HUB_RESET_RESUME);
return 0;
}
/**
* usb_root_hub_lost_power - called by HCD if the root hub lost Vbus power
* @rhdev: struct usb_device for the root hub
*
* The USB host controller driver calls this function when its root hub
* is resumed and Vbus power has been interrupted or the controller
* has been reset. The routine marks @rhdev as having lost power.
* When the hub driver is resumed it will take notice and carry out
* power-session recovery for all the "USB-PERSIST"-enabled child devices;
* the others will be disconnected.
*/
void usb_root_hub_lost_power(struct usb_device *rhdev)
{
dev_notice(&rhdev->dev, "root hub lost power or was reset\n");
rhdev->reset_resume = 1;
}
EXPORT_SYMBOL_GPL(usb_root_hub_lost_power);
static const char * const usb3_lpm_names[] = {
"U0",
"U1",
"U2",
"U3",
};
/*
* Send a Set SEL control transfer to the device, prior to enabling
* device-initiated U1 or U2. This lets the device know the exit latencies from
* the time the device initiates a U1 or U2 exit, to the time it will receive a
* packet from the host.
*
* This function will fail if the SEL or PEL values for udev are greater than
* the maximum allowed values for the link state to be enabled.
*/
static int usb_req_set_sel(struct usb_device *udev)
{
struct usb_set_sel_req *sel_values;
unsigned long long u1_sel;
unsigned long long u1_pel;
unsigned long long u2_sel;
unsigned long long u2_pel;
int ret;
if (!udev->parent || udev->speed < USB_SPEED_SUPER || !udev->lpm_capable)
return 0;
/* Convert SEL and PEL stored in ns to us */
u1_sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
u1_pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
u2_sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
u2_pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
/*
* Make sure that the calculated SEL and PEL values for the link
* state we're enabling aren't bigger than the max SEL/PEL
* value that will fit in the SET SEL control transfer.
* Otherwise the device would get an incorrect idea of the exit
* latency for the link state, and could start a device-initiated
* U1/U2 when the exit latencies are too high.
*/
if (u1_sel > USB3_LPM_MAX_U1_SEL_PEL ||
u1_pel > USB3_LPM_MAX_U1_SEL_PEL ||
u2_sel > USB3_LPM_MAX_U2_SEL_PEL ||
u2_pel > USB3_LPM_MAX_U2_SEL_PEL) {
dev_dbg(&udev->dev, "Device-initiated U1/U2 disabled due to long SEL or PEL\n");
return -EINVAL;
}
/*
* usb_enable_lpm() can be called as part of a failed device reset,
* which may be initiated by an error path of a mass storage driver.
* Therefore, use GFP_NOIO.
*/
sel_values = kmalloc(sizeof *(sel_values), GFP_NOIO);
if (!sel_values)
return -ENOMEM;
sel_values->u1_sel = u1_sel;
sel_values->u1_pel = u1_pel;
sel_values->u2_sel = cpu_to_le16(u2_sel);
sel_values->u2_pel = cpu_to_le16(u2_pel);
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_SET_SEL,
USB_RECIP_DEVICE,
0, 0,
sel_values, sizeof *(sel_values),
USB_CTRL_SET_TIMEOUT);
kfree(sel_values);
if (ret > 0)
udev->lpm_devinit_allow = 1;
return ret;
}
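/*
* Worked example (illustrative numbers): with udev->u1_params.sel ==
* 2500 ns, DIV_ROUND_UP(2500, 1000) produces a u1_sel of 3 us for the
* Set SEL request; any converted value above USB3_LPM_MAX_U1_SEL_PEL /
* USB3_LPM_MAX_U2_SEL_PEL instead disables device-initiated U1/U2 with
* -EINVAL.
*/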
/*
* Enable or disable device-initiated U1 or U2 transitions.
*/
static int usb_set_device_initiated_lpm(struct usb_device *udev,
enum usb3_link_state state, bool enable)
{
int ret;
int feature;
switch (state) {
case USB3_LPM_U1:
feature = USB_DEVICE_U1_ENABLE;
break;
case USB3_LPM_U2:
feature = USB_DEVICE_U2_ENABLE;
break;
default:
dev_warn(&udev->dev, "%s: Can't %s non-U1 or U2 state.\n",
__func__, enable ? "enable" : "disable");
return -EINVAL;
}
if (udev->state != USB_STATE_CONFIGURED) {
dev_dbg(&udev->dev, "%s: Can't %s %s state "
"for unconfigured device.\n",
__func__, enable ? "enable" : "disable",
usb3_lpm_names[state]);
return 0;
}
if (enable) {
/*
* Now send the control transfer to enable device-initiated LPM
* for either U1 or U2.
*/
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_SET_FEATURE,
USB_RECIP_DEVICE,
feature,
0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
} else {
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_CLEAR_FEATURE,
USB_RECIP_DEVICE,
feature,
0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
}
if (ret < 0) {
dev_warn(&udev->dev, "%s of device-initiated %s failed.\n",
enable ? "Enable" : "Disable",
usb3_lpm_names[state]);
return -EBUSY;
}
return 0;
}
static int usb_set_lpm_timeout(struct usb_device *udev,
enum usb3_link_state state, int timeout)
{
int ret;
int feature;
switch (state) {
case USB3_LPM_U1:
feature = USB_PORT_FEAT_U1_TIMEOUT;
break;
case USB3_LPM_U2:
feature = USB_PORT_FEAT_U2_TIMEOUT;
break;
default:
dev_warn(&udev->dev, "%s: Can't set timeout for non-U1 or U2 state.\n",
__func__);
return -EINVAL;
}
if (state == USB3_LPM_U1 && timeout > USB3_LPM_U1_MAX_TIMEOUT &&
timeout != USB3_LPM_DEVICE_INITIATED) {
dev_warn(&udev->dev, "Failed to set %s timeout to 0x%x, "
"which is a reserved value.\n",
usb3_lpm_names[state], timeout);
return -EINVAL;
}
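	/*
	 * The wIndex of this hub request packs the port number into the
	 * low byte and the timeout value into bits 8..15 (that is what
	 * USB_PORT_LPM_TIMEOUT() encodes).
	 */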
ret = set_port_feature(udev->parent,
USB_PORT_LPM_TIMEOUT(timeout) | udev->portnum,
feature);
if (ret < 0) {
dev_warn(&udev->dev, "Failed to set %s timeout to 0x%x,"
"error code %i\n", usb3_lpm_names[state],
timeout, ret);
return -EBUSY;
}
if (state == USB3_LPM_U1)
udev->u1_params.timeout = timeout;
else
udev->u2_params.timeout = timeout;
return 0;
}
/*
 * Don't allow device-initiated U1/U2 if the system exit latency + one bus
 * interval is greater than the minimum service interval of any active
 * periodic endpoint. See USB 3.2 section 9.4.9.
 */
static bool usb_device_may_initiate_lpm(struct usb_device *udev,
enum usb3_link_state state)
{
unsigned int sel; /* us */
int i, j;
if (!udev->lpm_devinit_allow)
return false;
if (state == USB3_LPM_U1)
sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
else if (state == USB3_LPM_U2)
sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
else
return false;
for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
struct usb_interface *intf;
struct usb_endpoint_descriptor *desc;
unsigned int interval;
intf = udev->actconfig->interface[i];
if (!intf)
continue;
for (j = 0; j < intf->cur_altsetting->desc.bNumEndpoints; j++) {
desc = &intf->cur_altsetting->endpoint[j].desc;
if (usb_endpoint_xfer_int(desc) ||
usb_endpoint_xfer_isoc(desc)) {
interval = (1 << (desc->bInterval - 1)) * 125;
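				/*
				 * e.g. bInterval == 4 gives an interval of
				 * (1 << 3) * 125 == 1000 us; with one 125 us
				 * bus interval of slack, device-initiated LPM
				 * stays allowed only while sel + 125 <= 1000.
				 */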
if (sel + 125 > interval)
return false;
}
}
}
return true;
}
/*
* Enable the hub-initiated U1/U2 idle timeouts, and enable device-initiated
* U1/U2 entry.
*
* We will attempt to enable U1 or U2, but there are no guarantees that the
* control transfers to set the hub timeout or enable device-initiated U1/U2
* will be successful.
*
* If the control transfer to enable device-initiated U1/U2 entry fails, then
* hub-initiated U1/U2 will be disabled.
*
* If we cannot set the parent hub U1/U2 timeout, we attempt to let the xHCI
* driver know about it. If that call fails, it should be harmless, and just
 * take up slightly more bus bandwidth for unnecessary U1/U2 exit latency.
*/
static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
enum usb3_link_state state)
{
int timeout;
__u8 u1_mel = udev->bos->ss_cap->bU1devExitLat;
__le16 u2_mel = udev->bos->ss_cap->bU2DevExitLat;
/* If the device says it doesn't have *any* exit latency to come out of
* U1 or U2, it's probably lying. Assume it doesn't implement that link
* state.
*/
if ((state == USB3_LPM_U1 && u1_mel == 0) ||
(state == USB3_LPM_U2 && u2_mel == 0))
return;
/* We allow the host controller to set the U1/U2 timeout internally
* first, so that it can change its schedule to account for the
* additional latency to send data to a device in a lower power
* link state.
*/
timeout = hcd->driver->enable_usb3_lpm_timeout(hcd, udev, state);
/* xHCI host controller doesn't want to enable this LPM state. */
if (timeout == 0)
return;
if (timeout < 0) {
dev_warn(&udev->dev, "Could not enable %s link state, "
"xHCI error %i.\n", usb3_lpm_names[state],
timeout);
return;
}
if (usb_set_lpm_timeout(udev, state, timeout)) {
/* If we can't set the parent hub U1/U2 timeout,
* device-initiated LPM won't be allowed either, so let the xHCI
* host know that this link state won't be enabled.
*/
hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
return;
}
/* Only a configured device will accept the Set Feature
* U1/U2_ENABLE
*/
if (udev->actconfig &&
usb_device_may_initiate_lpm(udev, state)) {
if (usb_set_device_initiated_lpm(udev, state, true)) {
/*
* Request to enable device initiated U1/U2 failed,
* better to turn off lpm in this case.
*/
usb_set_lpm_timeout(udev, state, 0);
hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
return;
}
}
if (state == USB3_LPM_U1)
udev->usb3_lpm_u1_enabled = 1;
else if (state == USB3_LPM_U2)
udev->usb3_lpm_u2_enabled = 1;
}
/*
* Disable the hub-initiated U1/U2 idle timeouts, and disable device-initiated
* U1/U2 entry.
*
* If this function returns -EBUSY, the parent hub will still allow U1/U2 entry.
* If zero is returned, the parent will not allow the link to go into U1/U2.
*
* If zero is returned, device-initiated U1/U2 entry may still be enabled, but
* it won't have an effect on the bus link state because the parent hub will
* still disallow device-initiated U1/U2 entry.
*
* If zero is returned, the xHCI host controller may still think U1/U2 entry is
* possible. The result will be slightly more bus bandwidth will be taken up
* (to account for U1/U2 exit latency), but it should be harmless.
*/
static int usb_disable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
enum usb3_link_state state)
{
switch (state) {
case USB3_LPM_U1:
case USB3_LPM_U2:
break;
default:
dev_warn(&udev->dev, "%s: Can't disable non-U1 or U2 state.\n",
__func__);
return -EINVAL;
}
if (usb_set_lpm_timeout(udev, state, 0))
return -EBUSY;
usb_set_device_initiated_lpm(udev, state, false);
if (hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state))
dev_warn(&udev->dev, "Could not disable xHCI %s timeout, "
"bus schedule bandwidth may be impacted.\n",
usb3_lpm_names[state]);
	/* As soon as usb_set_lpm_timeout(0) returns 0, hub-initiated LPM
	 * is disabled. The hub will then disallow the link from entering
	 * U1/U2 even if the device initiates LPM. Hence LPM is disabled
	 * whenever the hub LPM timeout is set to 0, regardless of whether
	 * device-initiated LPM was disabled as well.
*/
if (state == USB3_LPM_U1)
udev->usb3_lpm_u1_enabled = 0;
else if (state == USB3_LPM_U2)
udev->usb3_lpm_u2_enabled = 0;
return 0;
}
/*
* Disable hub-initiated and device-initiated U1 and U2 entry.
* Caller must own the bandwidth_mutex.
*
* This will call usb_enable_lpm() on failure, which will decrement
* lpm_disable_count, and will re-enable LPM if lpm_disable_count reaches zero.
*/
int usb_disable_lpm(struct usb_device *udev)
{
struct usb_hcd *hcd;
if (!udev || !udev->parent ||
udev->speed < USB_SPEED_SUPER ||
!udev->lpm_capable ||
udev->state < USB_STATE_CONFIGURED)
return 0;
hcd = bus_to_hcd(udev->bus);
if (!hcd || !hcd->driver->disable_usb3_lpm_timeout)
return 0;
udev->lpm_disable_count++;
	if (udev->u1_params.timeout == 0 && udev->u2_params.timeout == 0)
return 0;
/* If LPM is enabled, attempt to disable it. */
if (usb_disable_link_state(hcd, udev, USB3_LPM_U1))
goto enable_lpm;
if (usb_disable_link_state(hcd, udev, USB3_LPM_U2))
goto enable_lpm;
return 0;
enable_lpm:
usb_enable_lpm(udev);
return -EBUSY;
}
EXPORT_SYMBOL_GPL(usb_disable_lpm);
/* Grab the bandwidth_mutex before calling usb_disable_lpm() */
int usb_unlocked_disable_lpm(struct usb_device *udev)
{
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
int ret;
if (!hcd)
return -EINVAL;
mutex_lock(hcd->bandwidth_mutex);
ret = usb_disable_lpm(udev);
mutex_unlock(hcd->bandwidth_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(usb_unlocked_disable_lpm);
/*
* Attempt to enable device-initiated and hub-initiated U1 and U2 entry. The
* xHCI host policy may prevent U1 or U2 from being enabled.
*
* Other callers may have disabled link PM, so U1 and U2 entry will be disabled
* until the lpm_disable_count drops to zero. Caller must own the
* bandwidth_mutex.
*/
void usb_enable_lpm(struct usb_device *udev)
{
struct usb_hcd *hcd;
struct usb_hub *hub;
struct usb_port *port_dev;
if (!udev || !udev->parent ||
udev->speed < USB_SPEED_SUPER ||
!udev->lpm_capable ||
udev->state < USB_STATE_CONFIGURED)
return;
udev->lpm_disable_count--;
hcd = bus_to_hcd(udev->bus);
/* Double check that we can both enable and disable LPM.
* Device must be configured to accept set feature U1/U2 timeout.
*/
if (!hcd || !hcd->driver->enable_usb3_lpm_timeout ||
!hcd->driver->disable_usb3_lpm_timeout)
return;
if (udev->lpm_disable_count > 0)
return;
hub = usb_hub_to_struct_hub(udev->parent);
if (!hub)
return;
port_dev = hub->ports[udev->portnum - 1];
if (port_dev->usb3_lpm_u1_permit)
usb_enable_link_state(hcd, udev, USB3_LPM_U1);
if (port_dev->usb3_lpm_u2_permit)
usb_enable_link_state(hcd, udev, USB3_LPM_U2);
}
EXPORT_SYMBOL_GPL(usb_enable_lpm);
/* Grab the bandwidth_mutex before calling usb_enable_lpm() */
void usb_unlocked_enable_lpm(struct usb_device *udev)
{
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
if (!hcd)
return;
mutex_lock(hcd->bandwidth_mutex);
usb_enable_lpm(udev);
mutex_unlock(hcd->bandwidth_mutex);
}
EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm);
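/*
 * Sketch of the usual caller-side pairing (hypothetical driver code):
 * keep the link out of U1/U2 around a latency-sensitive operation.
 *
 *	if (usb_unlocked_disable_lpm(udev))
 *		return -EBUSY;
 *	... latency-sensitive transfers ...
 *	usb_unlocked_enable_lpm(udev);
 */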
/* usb3 devices use U3 for disabled, make sure remote wakeup is disabled */
static void hub_usb3_port_prepare_disable(struct usb_hub *hub,
struct usb_port *port_dev)
{
struct usb_device *udev = port_dev->child;
int ret;
if (udev && udev->port_is_suspended && udev->do_remote_wakeup) {
ret = hub_set_port_link_state(hub, port_dev->portnum,
USB_SS_PORT_LS_U0);
if (!ret) {
msleep(USB_RESUME_TIMEOUT);
ret = usb_disable_remote_wakeup(udev);
}
if (ret)
dev_warn(&udev->dev,
"Port disable: can't disable remote wake\n");
udev->do_remote_wakeup = 0;
}
}
#else /* CONFIG_PM */
#define hub_suspend NULL
#define hub_resume NULL
#define hub_reset_resume NULL
static inline void hub_usb3_port_prepare_disable(struct usb_hub *hub,
struct usb_port *port_dev) { }
int usb_disable_lpm(struct usb_device *udev)
{
return 0;
}
EXPORT_SYMBOL_GPL(usb_disable_lpm);
void usb_enable_lpm(struct usb_device *udev) { }
EXPORT_SYMBOL_GPL(usb_enable_lpm);
int usb_unlocked_disable_lpm(struct usb_device *udev)
{
return 0;
}
EXPORT_SYMBOL_GPL(usb_unlocked_disable_lpm);
void usb_unlocked_enable_lpm(struct usb_device *udev) { }
EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm);
int usb_disable_ltm(struct usb_device *udev)
{
return 0;
}
EXPORT_SYMBOL_GPL(usb_disable_ltm);
void usb_enable_ltm(struct usb_device *udev) { }
EXPORT_SYMBOL_GPL(usb_enable_ltm);
static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
u16 portstatus, u16 portchange)
{
return 0;
}
static int usb_req_set_sel(struct usb_device *udev)
{
return 0;
}
#endif /* CONFIG_PM */
/*
 * USB-3 has no link state that, like USB-2's disabled state, avoids
 * negotiating a connection with a plugged-in cable while still signaling
 * the host when the cable is unplugged. Instead, disable remote wakeup
 * and set the link state to U3 for USB-3 devices.
 */
static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
{
struct usb_port *port_dev = hub->ports[port1 - 1];
struct usb_device *hdev = hub->hdev;
int ret = 0;
if (!hub->error) {
if (hub_is_superspeed(hub->hdev)) {
hub_usb3_port_prepare_disable(hub, port_dev);
ret = hub_set_port_link_state(hub, port_dev->portnum,
USB_SS_PORT_LS_U3);
} else {
ret = usb_clear_port_feature(hdev, port1,
USB_PORT_FEAT_ENABLE);
}
}
if (port_dev->child && set_state)
usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED);
if (ret && ret != -ENODEV)
dev_err(&port_dev->dev, "cannot disable (err = %d)\n", ret);
return ret;
}
/*
* usb_port_disable - disable a usb device's upstream port
* @udev: device to disable
* Context: @udev locked, must be able to sleep.
*
* Disables a USB device that isn't in active use.
*/
int usb_port_disable(struct usb_device *udev)
{
struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
return hub_port_disable(hub, udev->portnum, 0);
}
/* USB 2.0 spec, 7.1.7.3 / fig 7-29:
*
* Between connect detection and reset signaling there must be a delay
* of 100ms at least for debounce and power-settling. The corresponding
* timer shall restart whenever the downstream port detects a disconnect.
*
* Apparently there are some bluetooth and irda-dongles and a number of
* low-speed devices for which this debounce period may last over a second.
* Not covered by the spec - but easy to deal with.
*
* This implementation uses a 1500ms total debounce timeout; if the
* connection isn't stable by then it returns -ETIMEDOUT. It checks
* every 25ms for transient disconnects. When the port status has been
* unchanged for 100ms it returns the port status.
*/
int hub_port_debounce(struct usb_hub *hub, int port1, bool must_be_connected)
{
int ret;
u16 portchange, portstatus;
unsigned connection = 0xffff;
int total_time, stable_time = 0;
struct usb_port *port_dev = hub->ports[port1 - 1];
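	/*
	 * Per the comment above: poll every HUB_DEBOUNCE_STEP (25 ms) and
	 * require HUB_DEBOUNCE_STABLE (100 ms) of unchanged status -- four
	 * identical consecutive polls -- before reporting the port stable.
	 */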
for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) {
ret = usb_hub_port_status(hub, port1, &portstatus, &portchange);
if (ret < 0)
return ret;
if (!(portchange & USB_PORT_STAT_C_CONNECTION) &&
(portstatus & USB_PORT_STAT_CONNECTION) == connection) {
if (!must_be_connected ||
(connection == USB_PORT_STAT_CONNECTION))
stable_time += HUB_DEBOUNCE_STEP;
if (stable_time >= HUB_DEBOUNCE_STABLE)
break;
} else {
stable_time = 0;
connection = portstatus & USB_PORT_STAT_CONNECTION;
}
if (portchange & USB_PORT_STAT_C_CONNECTION) {
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_CONNECTION);
}
if (total_time >= HUB_DEBOUNCE_TIMEOUT)
break;
msleep(HUB_DEBOUNCE_STEP);
}
dev_dbg(&port_dev->dev, "debounce total %dms stable %dms status 0x%x\n",
total_time, stable_time, portstatus);
if (stable_time < HUB_DEBOUNCE_STABLE)
return -ETIMEDOUT;
return portstatus;
}
void usb_ep0_reinit(struct usb_device *udev)
{
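	/* ep0 is bidirectional: flush both the IN and OUT halves before
	 * re-enabling it with the (possibly updated) maxpacket value.
	 */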
usb_disable_endpoint(udev, 0 + USB_DIR_IN, true);
usb_disable_endpoint(udev, 0 + USB_DIR_OUT, true);
usb_enable_endpoint(udev, &udev->ep0, true);
}
EXPORT_SYMBOL_GPL(usb_ep0_reinit);
#define usb_sndaddr0pipe() (PIPE_CONTROL << 30)
#define usb_rcvaddr0pipe() ((PIPE_CONTROL << 30) | USB_DIR_IN)
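/*
 * These pipes target endpoint 0 of a device still responding at the
 * default address: the device and endpoint bits are left zero, so only
 * the PIPE_CONTROL type (and, for reads, the IN direction) is encoded.
 */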
static int hub_set_address(struct usb_device *udev, int devnum)
{
int retval;
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
/*
* The host controller will choose the device address,
* instead of the core having chosen it earlier
*/
if (!hcd->driver->address_device && devnum <= 1)
return -EINVAL;
if (udev->state == USB_STATE_ADDRESS)
return 0;
if (udev->state != USB_STATE_DEFAULT)
return -EINVAL;
if (hcd->driver->address_device)
retval = hcd->driver->address_device(hcd, udev);
else
retval = usb_control_msg(udev, usb_sndaddr0pipe(),
USB_REQ_SET_ADDRESS, 0, devnum, 0,
NULL, 0, USB_CTRL_SET_TIMEOUT);
if (retval == 0) {
update_devnum(udev, devnum);
/* Device now using proper address. */
usb_set_device_state(udev, USB_STATE_ADDRESS);
usb_ep0_reinit(udev);
}
return retval;
}
/*
* There are reports of USB 3.0 devices that say they support USB 2.0 Link PM
* when they're plugged into a USB 2.0 port, but they don't work when LPM is
* enabled.
*
* Only enable USB 2.0 Link PM if the port is internal (hardwired), or the
* device says it supports the new USB 2.0 Link PM errata by setting the BESL
* support bit in the BOS descriptor.
*/
static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev)
{
struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
int connect_type = USB_PORT_CONNECT_TYPE_UNKNOWN;
if (!udev->usb2_hw_lpm_capable || !udev->bos)
return;
if (hub)
connect_type = hub->ports[udev->portnum - 1]->connect_type;
if ((udev->bos->ext_cap->bmAttributes & cpu_to_le32(USB_BESL_SUPPORT)) ||
connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) {
udev->usb2_hw_lpm_allowed = 1;
usb_enable_usb2_hardware_lpm(udev);
}
}
static int hub_enable_device(struct usb_device *udev)
{
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
if (!hcd->driver->enable_device)
return 0;
if (udev->state == USB_STATE_ADDRESS)
return 0;
if (udev->state != USB_STATE_DEFAULT)
return -EINVAL;
return hcd->driver->enable_device(hcd, udev);
}
/*
* Get the bMaxPacketSize0 value during initialization by reading the
* device's device descriptor. Since we don't already know this value,
* the transfer is unsafe and it ignores I/O errors, only testing for
* reasonable received values.
*
* For "old scheme" initialization, size will be 8 so we read just the
* start of the device descriptor, which should work okay regardless of
* the actual bMaxPacketSize0 value. For "new scheme" initialization,
* size will be 64 (and buf will point to a sufficiently large buffer),
* which might not be kosher according to the USB spec but it's what
* Windows does and what many devices expect.
*
* Returns: bMaxPacketSize0 or a negative error code.
*/
static int get_bMaxPacketSize0(struct usb_device *udev,
struct usb_device_descriptor *buf, int size, bool first_time)
{
int i, rc;
	/*
	 * Retry on all errors; some devices are flaky.
	 * A bMaxPacketSize0 of 255 denotes a WUSB device; the value we
	 * actually need to use for it is 512 (WUSB1.0[4.8.1]).
	 */
for (i = 0; i < GET_MAXPACKET0_TRIES; ++i) {
/* Start with invalid values in case the transfer fails */
buf->bDescriptorType = buf->bMaxPacketSize0 = 0;
rc = usb_control_msg(udev, usb_rcvaddr0pipe(),
USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
USB_DT_DEVICE << 8, 0,
buf, size,
initial_descriptor_timeout);
switch (buf->bMaxPacketSize0) {
case 8: case 16: case 32: case 64: case 9:
if (buf->bDescriptorType == USB_DT_DEVICE) {
rc = buf->bMaxPacketSize0;
break;
}
fallthrough;
default:
if (rc >= 0)
rc = -EPROTO;
break;
}
/*
* Some devices time out if they are powered on
* when already connected. They need a second
* reset, so return early. But only on the first
* attempt, lest we get into a time-out/reset loop.
*/
if (rc > 0 || (rc == -ETIMEDOUT && first_time &&
udev->speed > USB_SPEED_FULL))
break;
}
return rc;
}
#define GET_DESCRIPTOR_BUFSIZE 64
/* Reset device, (re)assign address, get device descriptor.
* Device connection must be stable, no more debouncing needed.
* Returns device in USB_STATE_ADDRESS, except on error.
*
* If this is called for an already-existing device (as part of
* usb_reset_and_verify_device), the caller must own the device lock and
* the port lock. For a newly detected device that is not accessible
* through any global pointers, it's not necessary to lock the device,
* but it is still necessary to lock the port.
*
* For a newly detected device, @dev_descr must be NULL. The device
* descriptor retrieved from the device will then be stored in
* @udev->descriptor. For an already existing device, @dev_descr
* must be non-NULL. The device descriptor will be stored there,
* not in @udev->descriptor, because descriptors for registered
* devices are meant to be immutable.
*/
static int
hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
int retry_counter, struct usb_device_descriptor *dev_descr)
{
struct usb_device *hdev = hub->hdev;
struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
struct usb_port *port_dev = hub->ports[port1 - 1];
int retries, operations, retval, i;
unsigned delay = HUB_SHORT_RESET_TIME;
enum usb_device_speed oldspeed = udev->speed;
const char *speed;
int devnum = udev->devnum;
const char *driver_name;
bool do_new_scheme;
const bool initial = !dev_descr;
int maxp0;
struct usb_device_descriptor *buf, *descr;
buf = kmalloc(GET_DESCRIPTOR_BUFSIZE, GFP_NOIO);
if (!buf)
return -ENOMEM;
/* root hub ports have a slightly longer reset period
* (from USB 2.0 spec, section 7.1.7.5)
*/
if (!hdev->parent) {
delay = HUB_ROOT_RESET_TIME;
if (port1 == hdev->bus->otg_port)
hdev->bus->b_hnp_enable = 0;
}
	/* Some low speed devices have problems with the quick delay, so
	 * be a bit pessimistic with those devices. RHbug #23670.
	 */
if (oldspeed == USB_SPEED_LOW)
delay = HUB_LONG_RESET_TIME;
/* Reset the device; full speed may morph to high speed */
/* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
retval = hub_port_reset(hub, port1, udev, delay, false);
if (retval < 0) /* error or disconnect */
goto fail;
/* success, speed is known */
retval = -ENODEV;
/* Don't allow speed changes at reset, except usb 3.0 to faster */
if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed &&
!(oldspeed == USB_SPEED_SUPER && udev->speed > oldspeed)) {
dev_dbg(&udev->dev, "device reset changed speed!\n");
goto fail;
}
oldspeed = udev->speed;
if (initial) {
/* USB 2.0 section 5.5.3 talks about ep0 maxpacket ...
* it's fixed size except for full speed devices.
*/
switch (udev->speed) {
case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512);
break;
case USB_SPEED_HIGH: /* fixed at 64 */
udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64);
break;
case USB_SPEED_FULL: /* 8, 16, 32, or 64 */
/* to determine the ep0 maxpacket size, try to read
* the device descriptor to get bMaxPacketSize0 and
* then correct our initial guess.
*/
udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64);
break;
case USB_SPEED_LOW: /* fixed at 8 */
udev->ep0.desc.wMaxPacketSize = cpu_to_le16(8);
break;
default:
goto fail;
}
}
speed = usb_speed_string(udev->speed);
	/*
	 * The controller driver may be NULL if the controller device
	 * is the middle device between the platform device and the
	 * roothub. Such a middle device may not need a device driver,
	 * because all hardware control can be handled by the platform
	 * device driver; this platform device is usually a dual-role
	 * USB controller device.
	 */
if (udev->bus->controller->driver)
driver_name = udev->bus->controller->driver->name;
else
driver_name = udev->bus->sysdev->driver->name;
if (udev->speed < USB_SPEED_SUPER)
dev_info(&udev->dev,
"%s %s USB device number %d using %s\n",
(initial ? "new" : "reset"), speed,
devnum, driver_name);
if (initial) {
/* Set up TT records, if needed */
if (hdev->tt) {
udev->tt = hdev->tt;
udev->ttport = hdev->ttport;
} else if (udev->speed != USB_SPEED_HIGH
&& hdev->speed == USB_SPEED_HIGH) {
if (!hub->tt.hub) {
dev_err(&udev->dev, "parent hub has no TT\n");
retval = -EINVAL;
goto fail;
}
udev->tt = &hub->tt;
udev->ttport = port1;
}
}
/* Why interleave GET_DESCRIPTOR and SET_ADDRESS this way?
* Because device hardware and firmware is sometimes buggy in
* this area, and this is how Linux has done it for ages.
* Change it cautiously.
*
* NOTE: If use_new_scheme() is true we will start by issuing
* a 64-byte GET_DESCRIPTOR request. This is what Windows does,
* so it may help with some non-standards-compliant devices.
* Otherwise we start with SET_ADDRESS and then try to read the
* first 8 bytes of the device descriptor to get the ep0 maxpacket
* value.
*/
do_new_scheme = use_new_scheme(udev, retry_counter, port_dev);
for (retries = 0; retries < GET_DESCRIPTOR_TRIES; (++retries, msleep(100))) {
if (hub_port_stop_enumerate(hub, port1, retries)) {
retval = -ENODEV;
break;
}
if (do_new_scheme) {
retval = hub_enable_device(udev);
if (retval < 0) {
dev_err(&udev->dev,
"hub failed to enable device, error %d\n",
retval);
goto fail;
}
maxp0 = get_bMaxPacketSize0(udev, buf,
GET_DESCRIPTOR_BUFSIZE, retries == 0);
if (maxp0 > 0 && !initial &&
maxp0 != udev->descriptor.bMaxPacketSize0) {
dev_err(&udev->dev, "device reset changed ep0 maxpacket size!\n");
retval = -ENODEV;
goto fail;
}
retval = hub_port_reset(hub, port1, udev, delay, false);
if (retval < 0) /* error or disconnect */
goto fail;
if (oldspeed != udev->speed) {
dev_dbg(&udev->dev,
"device reset changed speed!\n");
retval = -ENODEV;
goto fail;
}
if (maxp0 < 0) {
if (maxp0 != -ENODEV)
dev_err(&udev->dev, "device descriptor read/64, error %d\n",
maxp0);
retval = maxp0;
continue;
}
}
for (operations = 0; operations < SET_ADDRESS_TRIES; ++operations) {
retval = hub_set_address(udev, devnum);
if (retval >= 0)
break;
msleep(200);
}
if (retval < 0) {
if (retval != -ENODEV)
dev_err(&udev->dev, "device not accepting address %d, error %d\n",
devnum, retval);
goto fail;
}
if (udev->speed >= USB_SPEED_SUPER) {
devnum = udev->devnum;
dev_info(&udev->dev,
"%s SuperSpeed%s%s USB device number %d using %s\n",
(udev->config) ? "reset" : "new",
(udev->speed == USB_SPEED_SUPER_PLUS) ?
" Plus" : "",
(udev->ssp_rate == USB_SSP_GEN_2x2) ?
" Gen 2x2" :
(udev->ssp_rate == USB_SSP_GEN_2x1) ?
" Gen 2x1" :
(udev->ssp_rate == USB_SSP_GEN_1x2) ?
" Gen 1x2" : "",
devnum, driver_name);
}
/*
* cope with hardware quirkiness:
* - let SET_ADDRESS settle, some device hardware wants it
* - read ep0 maxpacket even for high and low speed,
*/
msleep(10);
if (do_new_scheme)
break;
maxp0 = get_bMaxPacketSize0(udev, buf, 8, retries == 0);
if (maxp0 < 0) {
retval = maxp0;
if (retval != -ENODEV)
dev_err(&udev->dev,
"device descriptor read/8, error %d\n",
retval);
} else {
u32 delay;
if (!initial && maxp0 != udev->descriptor.bMaxPacketSize0) {
dev_err(&udev->dev, "device reset changed ep0 maxpacket size!\n");
retval = -ENODEV;
goto fail;
}
delay = udev->parent->hub_delay;
udev->hub_delay = min_t(u32, delay,
USB_TP_TRANSMISSION_DELAY_MAX);
retval = usb_set_isoch_delay(udev);
if (retval) {
dev_dbg(&udev->dev,
"Failed set isoch delay, error %d\n",
retval);
retval = 0;
}
break;
}
}
if (retval)
goto fail;
/*
* Check the ep0 maxpacket guess and correct it if necessary.
* maxp0 is the value stored in the device descriptor;
* i is the value it encodes (logarithmic for SuperSpeed or greater).
*/
i = maxp0;
if (udev->speed >= USB_SPEED_SUPER) {
if (maxp0 <= 16)
i = 1 << maxp0;
else
i = 0; /* Invalid */
}
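	/*
	 * e.g. a SuperSpeed device reports bMaxPacketSize0 == 9, which
	 * decodes to i = 1 << 9 == 512 bytes.
	 */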
if (usb_endpoint_maxp(&udev->ep0.desc) == i) {
; /* Initial ep0 maxpacket guess is right */
} else if ((udev->speed == USB_SPEED_FULL ||
udev->speed == USB_SPEED_HIGH) &&
(i == 8 || i == 16 || i == 32 || i == 64)) {
/* Initial guess is wrong; use the descriptor's value */
if (udev->speed == USB_SPEED_FULL)
dev_dbg(&udev->dev, "ep0 maxpacket = %d\n", i);
else
dev_warn(&udev->dev, "Using ep0 maxpacket: %d\n", i);
udev->ep0.desc.wMaxPacketSize = cpu_to_le16(i);
usb_ep0_reinit(udev);
} else {
/* Initial guess is wrong and descriptor's value is invalid */
dev_err(&udev->dev, "Invalid ep0 maxpacket: %d\n", maxp0);
retval = -EMSGSIZE;
goto fail;
}
descr = usb_get_device_descriptor(udev);
if (IS_ERR(descr)) {
retval = PTR_ERR(descr);
if (retval != -ENODEV)
dev_err(&udev->dev, "device descriptor read/all, error %d\n",
retval);
goto fail;
}
if (initial)
udev->descriptor = *descr;
else
*dev_descr = *descr;
kfree(descr);
	/*
	 * Some superspeed devices have finished the link training process
	 * and been attached to a superspeed hub port, but the device
	 * descriptor read back from them shows they aren't superspeed
	 * devices. Warm-resetting the port such a device is attached to
	 * can fix it.
	 */
if ((udev->speed >= USB_SPEED_SUPER) &&
(le16_to_cpu(udev->descriptor.bcdUSB) < 0x0300)) {
dev_err(&udev->dev, "got a wrong device descriptor, warm reset device\n");
hub_port_reset(hub, port1, udev, HUB_BH_RESET_TIME, true);
retval = -EINVAL;
goto fail;
}
usb_detect_quirks(udev);
if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0201) {
retval = usb_get_bos_descriptor(udev);
if (!retval) {
udev->lpm_capable = usb_device_supports_lpm(udev);
udev->lpm_disable_count = 1;
usb_set_lpm_parameters(udev);
usb_req_set_sel(udev);
}
}
retval = 0;
/* notify HCD that we have a device connected and addressed */
if (hcd->driver->update_device)
hcd->driver->update_device(hcd, udev);
hub_set_initial_usb2_lpm_policy(udev);
fail:
if (retval) {
hub_port_disable(hub, port1, 0);
update_devnum(udev, devnum); /* for disconnect processing */
}
kfree(buf);
return retval;
}
static void
check_highspeed(struct usb_hub *hub, struct usb_device *udev, int port1)
{
struct usb_qualifier_descriptor *qual;
int status;
if (udev->quirks & USB_QUIRK_DEVICE_QUALIFIER)
return;
qual = kmalloc(sizeof *qual, GFP_KERNEL);
if (qual == NULL)
return;
status = usb_get_descriptor(udev, USB_DT_DEVICE_QUALIFIER, 0,
qual, sizeof *qual);
if (status == sizeof *qual) {
dev_info(&udev->dev, "not running at top speed; "
"connect to a high speed hub\n");
/* hub LEDs are probably harder to miss than syslog */
if (hub->has_indicators) {
hub->indicator[port1-1] = INDICATOR_GREEN_BLINK;
queue_delayed_work(system_power_efficient_wq,
&hub->leds, 0);
}
}
kfree(qual);
}
static unsigned
hub_power_remaining(struct usb_hub *hub)
{
struct usb_device *hdev = hub->hdev;
int remaining;
int port1;
if (!hub->limited_power)
return 0;
remaining = hdev->bus_mA - hub->descriptor->bHubContrCurrent;
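	/*
	 * Example: a hub on a 500 mA port whose controller draws 100 mA
	 * (bHubContrCurrent) has 400 mA left, i.e. four 100 mA unit loads
	 * for its children.
	 */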
for (port1 = 1; port1 <= hdev->maxchild; ++port1) {
struct usb_port *port_dev = hub->ports[port1 - 1];
struct usb_device *udev = port_dev->child;
unsigned unit_load;
int delta;
if (!udev)
continue;
if (hub_is_superspeed(udev))
unit_load = 150;
else
unit_load = 100;
/*
* Unconfigured devices may not use more than one unit load,
* or 8mA for OTG ports
*/
if (udev->actconfig)
delta = usb_get_max_power(udev, udev->actconfig);
else if (port1 != udev->bus->otg_port || hdev->parent)
delta = unit_load;
else
delta = 8;
if (delta > hub->mA_per_port)
dev_warn(&port_dev->dev, "%dmA is over %umA budget!\n",
delta, hub->mA_per_port);
remaining -= delta;
}
if (remaining < 0) {
dev_warn(hub->intfdev, "%dmA over power budget!\n",
-remaining);
remaining = 0;
}
return remaining;
}
static int descriptors_changed(struct usb_device *udev,
struct usb_device_descriptor *new_device_descriptor,
struct usb_host_bos *old_bos)
{
int changed = 0;
unsigned index;
unsigned serial_len = 0;
unsigned len;
unsigned old_length;
int length;
char *buf;
if (memcmp(&udev->descriptor, new_device_descriptor,
sizeof(*new_device_descriptor)) != 0)
return 1;
if ((old_bos && !udev->bos) || (!old_bos && udev->bos))
return 1;
if (udev->bos) {
len = le16_to_cpu(udev->bos->desc->wTotalLength);
if (len != le16_to_cpu(old_bos->desc->wTotalLength))
return 1;
if (memcmp(udev->bos->desc, old_bos->desc, len))
return 1;
}
/* Since the idVendor, idProduct, and bcdDevice values in the
* device descriptor haven't changed, we will assume the
* Manufacturer and Product strings haven't changed either.
* But the SerialNumber string could be different (e.g., a
* different flash card of the same brand).
*/
if (udev->serial)
serial_len = strlen(udev->serial) + 1;
len = serial_len;
for (index = 0; index < udev->descriptor.bNumConfigurations; index++) {
old_length = le16_to_cpu(udev->config[index].desc.wTotalLength);
len = max(len, old_length);
}
buf = kmalloc(len, GFP_NOIO);
if (!buf)
/* assume the worst */
return 1;
for (index = 0; index < udev->descriptor.bNumConfigurations; index++) {
old_length = le16_to_cpu(udev->config[index].desc.wTotalLength);
length = usb_get_descriptor(udev, USB_DT_CONFIG, index, buf,
old_length);
if (length != old_length) {
dev_dbg(&udev->dev, "config index %d, error %d\n",
index, length);
changed = 1;
break;
}
if (memcmp(buf, udev->rawdescriptors[index], old_length)
!= 0) {
dev_dbg(&udev->dev, "config index %d changed (#%d)\n",
index,
((struct usb_config_descriptor *) buf)->
bConfigurationValue);
changed = 1;
break;
}
}
if (!changed && serial_len) {
length = usb_string(udev, udev->descriptor.iSerialNumber,
buf, serial_len);
if (length + 1 != serial_len) {
dev_dbg(&udev->dev, "serial string error %d\n",
length);
changed = 1;
} else if (memcmp(buf, udev->serial, length) != 0) {
dev_dbg(&udev->dev, "serial string changed\n");
changed = 1;
}
}
kfree(buf);
return changed;
}
static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
u16 portchange)
{
int status = -ENODEV;
int i;
unsigned unit_load;
struct usb_device *hdev = hub->hdev;
struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
struct usb_port *port_dev = hub->ports[port1 - 1];
struct usb_device *udev = port_dev->child;
static int unreliable_port = -1;
bool retry_locked;
/* Disconnect any existing devices under this port */
if (udev) {
if (hcd->usb_phy && !hdev->parent)
usb_phy_notify_disconnect(hcd->usb_phy, udev->speed);
usb_disconnect(&port_dev->child);
}
/* We can forget about a "removed" device when there's a physical
* disconnect or the connect status changes.
*/
if (!(portstatus & USB_PORT_STAT_CONNECTION) ||
(portchange & USB_PORT_STAT_C_CONNECTION))
clear_bit(port1, hub->removed_bits);
if (portchange & (USB_PORT_STAT_C_CONNECTION |
USB_PORT_STAT_C_ENABLE)) {
status = hub_port_debounce_be_stable(hub, port1);
if (status < 0) {
if (status != -ENODEV &&
port1 != unreliable_port &&
printk_ratelimit())
dev_err(&port_dev->dev, "connect-debounce failed\n");
portstatus &= ~USB_PORT_STAT_CONNECTION;
unreliable_port = port1;
} else {
portstatus = status;
}
}
/* Return now if debouncing failed or nothing is connected or
* the device was "removed".
*/
if (!(portstatus & USB_PORT_STAT_CONNECTION) ||
test_bit(port1, hub->removed_bits)) {
/*
* maybe switch power back on (e.g. root hub was reset)
* but only if the port isn't owned by someone else.
*/
if (hub_is_port_power_switchable(hub)
&& !usb_port_is_power_on(hub, portstatus)
&& !port_dev->port_owner)
set_port_feature(hdev, port1, USB_PORT_FEAT_POWER);
if (portstatus & USB_PORT_STAT_ENABLE)
goto done;
return;
}
if (hub_is_superspeed(hub->hdev))
unit_load = 150;
else
unit_load = 100;
status = 0;
for (i = 0; i < PORT_INIT_TRIES; i++) {
if (hub_port_stop_enumerate(hub, port1, i)) {
status = -ENODEV;
break;
}
usb_lock_port(port_dev);
mutex_lock(hcd->address0_mutex);
retry_locked = true;
/* reallocate for each attempt, since references
* to the previous one can escape in various ways
*/
udev = usb_alloc_dev(hdev, hdev->bus, port1);
if (!udev) {
dev_err(&port_dev->dev,
"couldn't allocate usb_device\n");
mutex_unlock(hcd->address0_mutex);
usb_unlock_port(port_dev);
goto done;
}
usb_set_device_state(udev, USB_STATE_POWERED);
udev->bus_mA = hub->mA_per_port;
udev->level = hdev->level + 1;
/* Devices connected to SuperSpeed hubs are USB 3.0 or later */
if (hub_is_superspeed(hub->hdev))
udev->speed = USB_SPEED_SUPER;
else
udev->speed = USB_SPEED_UNKNOWN;
choose_devnum(udev);
if (udev->devnum <= 0) {
status = -ENOTCONN; /* Don't retry */
goto loop;
}
/* reset (non-USB 3.0 devices) and get descriptor */
status = hub_port_init(hub, udev, port1, i, NULL);
if (status < 0)
goto loop;
mutex_unlock(hcd->address0_mutex);
usb_unlock_port(port_dev);
retry_locked = false;
if (udev->quirks & USB_QUIRK_DELAY_INIT)
msleep(2000);
		/* Consecutive bus-powered hubs aren't reliable; they can
		 * violate the voltage drop budget. If the new child has
		 * a "powered" LED, users should notice we didn't enable it
		 * (without reading syslog), even without per-port LEDs
		 * on the parent.
		 */
if (udev->descriptor.bDeviceClass == USB_CLASS_HUB
&& udev->bus_mA <= unit_load) {
u16 devstat;
status = usb_get_std_status(udev, USB_RECIP_DEVICE, 0,
&devstat);
if (status) {
dev_dbg(&udev->dev, "get status %d ?\n", status);
goto loop_disable;
}
if ((devstat & (1 << USB_DEVICE_SELF_POWERED)) == 0) {
dev_err(&udev->dev,
"can't connect bus-powered hub "
"to this port\n");
if (hub->has_indicators) {
hub->indicator[port1-1] =
INDICATOR_AMBER_BLINK;
queue_delayed_work(
system_power_efficient_wq,
&hub->leds, 0);
}
status = -ENOTCONN; /* Don't retry */
goto loop_disable;
}
}
/* check for devices running slower than they could */
if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0200
&& udev->speed == USB_SPEED_FULL
&& highspeed_hubs != 0)
check_highspeed(hub, udev, port1);
/* Store the parent's children[] pointer. At this point
* udev becomes globally accessible, although presumably
* no one will look at it until hdev is unlocked.
*/
status = 0;
mutex_lock(&usb_port_peer_mutex);
/* We mustn't add new devices if the parent hub has
* been disconnected; we would race with the
* recursively_mark_NOTATTACHED() routine.
*/
spin_lock_irq(&device_state_lock);
if (hdev->state == USB_STATE_NOTATTACHED)
status = -ENOTCONN;
else
port_dev->child = udev;
spin_unlock_irq(&device_state_lock);
mutex_unlock(&usb_port_peer_mutex);
/* Run it through the hoops (find a driver, etc) */
if (!status) {
status = usb_new_device(udev);
if (status) {
mutex_lock(&usb_port_peer_mutex);
spin_lock_irq(&device_state_lock);
port_dev->child = NULL;
spin_unlock_irq(&device_state_lock);
mutex_unlock(&usb_port_peer_mutex);
} else {
if (hcd->usb_phy && !hdev->parent)
usb_phy_notify_connect(hcd->usb_phy,
udev->speed);
}
}
if (status)
goto loop_disable;
status = hub_power_remaining(hub);
if (status)
dev_dbg(hub->intfdev, "%dmA power budget left\n", status);
return;
loop_disable:
hub_port_disable(hub, port1, 1);
loop:
usb_ep0_reinit(udev);
release_devnum(udev);
hub_free_dev(udev);
if (retry_locked) {
mutex_unlock(hcd->address0_mutex);
usb_unlock_port(port_dev);
}
usb_put_dev(udev);
if ((status == -ENOTCONN) || (status == -ENOTSUPP))
break;
/* When halfway through our retry count, power-cycle the port */
if (i == (PORT_INIT_TRIES - 1) / 2) {
dev_info(&port_dev->dev, "attempt power cycle\n");
usb_hub_set_port_power(hdev, hub, port1, false);
msleep(2 * hub_power_on_good_delay(hub));
usb_hub_set_port_power(hdev, hub, port1, true);
msleep(hub_power_on_good_delay(hub));
}
}
if (hub->hdev->parent ||
!hcd->driver->port_handed_over ||
!(hcd->driver->port_handed_over)(hcd, port1)) {
if (status != -ENOTCONN && status != -ENODEV)
dev_err(&port_dev->dev,
"unable to enumerate USB device\n");
}
done:
hub_port_disable(hub, port1, 1);
if (hcd->driver->relinquish_port && !hub->hdev->parent) {
if (status != -ENOTCONN && status != -ENODEV)
hcd->driver->relinquish_port(hcd, port1);
}
}
/* Handle physical or logical connection change events.
 * This routine is called when:
 *	a port connection-change occurs;
 *	a port enable-change occurs (often caused by EMI);
 *	usb_reset_and_verify_device() encounters changed descriptors (as from
 *		a firmware download).
 * The caller has already locked the hub.
 */
static void hub_port_connect_change(struct usb_hub *hub, int port1,
u16 portstatus, u16 portchange)
__must_hold(&port_dev->status_lock)
{
struct usb_port *port_dev = hub->ports[port1 - 1];
struct usb_device *udev = port_dev->child;
struct usb_device_descriptor *descr;
int status = -ENODEV;
dev_dbg(&port_dev->dev, "status %04x, change %04x, %s\n", portstatus,
portchange, portspeed(hub, portstatus));
if (hub->has_indicators) {
set_port_led(hub, port1, HUB_LED_AUTO);
hub->indicator[port1-1] = INDICATOR_AUTO;
}
#ifdef CONFIG_USB_OTG
/* during HNP, don't repeat the debounce */
if (hub->hdev->bus->is_b_host)
portchange &= ~(USB_PORT_STAT_C_CONNECTION |
USB_PORT_STAT_C_ENABLE);
#endif
/* Try to resuscitate an existing device */
if ((portstatus & USB_PORT_STAT_CONNECTION) && udev &&
udev->state != USB_STATE_NOTATTACHED) {
if (portstatus & USB_PORT_STAT_ENABLE) {
/*
* USB-3 connections are initialized automatically by
			 * the host controller hardware. Therefore check for
* changed device descriptors before resuscitating the
* device.
*/
descr = usb_get_device_descriptor(udev);
if (IS_ERR(descr)) {
dev_dbg(&udev->dev,
"can't read device descriptor %ld\n",
PTR_ERR(descr));
} else {
if (descriptors_changed(udev, descr,
udev->bos)) {
dev_dbg(&udev->dev,
"device descriptor has changed\n");
} else {
status = 0; /* Nothing to do */
}
kfree(descr);
}
#ifdef CONFIG_PM
} else if (udev->state == USB_STATE_SUSPENDED &&
udev->persist_enabled) {
/* For a suspended device, treat this as a
* remote wakeup event.
*/
usb_unlock_port(port_dev);
status = usb_remote_wakeup(udev);
usb_lock_port(port_dev);
#endif
} else {
/* Don't resuscitate */;
}
}
clear_bit(port1, hub->change_bits);
/* successfully revalidated the connection */
if (status == 0)
return;
usb_unlock_port(port_dev);
hub_port_connect(hub, port1, portstatus, portchange);
usb_lock_port(port_dev);
}
/* Handle notifying userspace about hub over-current events */
static void port_over_current_notify(struct usb_port *port_dev)
{
char *envp[3] = { NULL, NULL, NULL };
struct device *hub_dev;
char *port_dev_path;
sysfs_notify(&port_dev->dev.kobj, NULL, "over_current_count");
hub_dev = port_dev->dev.parent;
if (!hub_dev)
return;
port_dev_path = kobject_get_path(&port_dev->dev.kobj, GFP_KERNEL);
if (!port_dev_path)
return;
envp[0] = kasprintf(GFP_KERNEL, "OVER_CURRENT_PORT=%s", port_dev_path);
if (!envp[0])
goto exit;
envp[1] = kasprintf(GFP_KERNEL, "OVER_CURRENT_COUNT=%u",
port_dev->over_current_count);
if (!envp[1])
goto exit;
kobject_uevent_env(&hub_dev->kobj, KOBJ_CHANGE, envp);
exit:
kfree(envp[1]);
kfree(envp[0]);
kfree(port_dev_path);
}
static void port_event(struct usb_hub *hub, int port1)
__must_hold(&port_dev->status_lock)
{
int connect_change;
struct usb_port *port_dev = hub->ports[port1 - 1];
struct usb_device *udev = port_dev->child;
struct usb_device *hdev = hub->hdev;
u16 portstatus, portchange;
int i = 0;
connect_change = test_bit(port1, hub->change_bits);
clear_bit(port1, hub->event_bits);
clear_bit(port1, hub->wakeup_bits);
if (usb_hub_port_status(hub, port1, &portstatus, &portchange) < 0)
return;
if (portchange & USB_PORT_STAT_C_CONNECTION) {
usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_CONNECTION);
connect_change = 1;
}
if (portchange & USB_PORT_STAT_C_ENABLE) {
if (!connect_change)
dev_dbg(&port_dev->dev, "enable change, status %08x\n",
portstatus);
usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_ENABLE);
/*
		 * EM interference sometimes causes badly shielded USB devices
		 * to be shut down by the hub; this hack enables them again.
		 * Works at least with the mouse driver.
*/
if (!(portstatus & USB_PORT_STAT_ENABLE)
&& !connect_change && udev) {
dev_err(&port_dev->dev, "disabled by hub (EMI?), re-enabling...\n");
connect_change = 1;
}
}
if (portchange & USB_PORT_STAT_C_OVERCURRENT) {
u16 status = 0, unused;
port_dev->over_current_count++;
port_over_current_notify(port_dev);
dev_dbg(&port_dev->dev, "over-current change #%u\n",
port_dev->over_current_count);
usb_clear_port_feature(hdev, port1,
USB_PORT_FEAT_C_OVER_CURRENT);
msleep(100); /* Cool down */
hub_power_on(hub, true);
usb_hub_port_status(hub, port1, &status, &unused);
if (status & USB_PORT_STAT_OVERCURRENT)
dev_err(&port_dev->dev, "over-current condition\n");
}
if (portchange & USB_PORT_STAT_C_RESET) {
dev_dbg(&port_dev->dev, "reset change\n");
usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_RESET);
}
if ((portchange & USB_PORT_STAT_C_BH_RESET)
&& hub_is_superspeed(hdev)) {
dev_dbg(&port_dev->dev, "warm reset change\n");
usb_clear_port_feature(hdev, port1,
USB_PORT_FEAT_C_BH_PORT_RESET);
}
if (portchange & USB_PORT_STAT_C_LINK_STATE) {
dev_dbg(&port_dev->dev, "link state change\n");
usb_clear_port_feature(hdev, port1,
USB_PORT_FEAT_C_PORT_LINK_STATE);
}
if (portchange & USB_PORT_STAT_C_CONFIG_ERROR) {
dev_warn(&port_dev->dev, "config error\n");
usb_clear_port_feature(hdev, port1,
USB_PORT_FEAT_C_PORT_CONFIG_ERROR);
}
/* skip port actions that require the port to be powered on */
if (!pm_runtime_active(&port_dev->dev))
return;
/* skip port actions if ignore_event and early_stop are true */
if (port_dev->ignore_event && port_dev->early_stop)
return;
if (hub_handle_remote_wakeup(hub, port1, portstatus, portchange))
connect_change = 1;
/*
* Avoid trying to recover a USB3 SS.Inactive port with a warm reset if
* the device was disconnected. A 12ms disconnect detect timer in
* SS.Inactive state transitions the port to RxDetect automatically.
* SS.Inactive link error state is common during device disconnect.
*/
while (hub_port_warm_reset_required(hub, port1, portstatus)) {
if ((i++ < DETECT_DISCONNECT_TRIES) && udev) {
u16 unused;
msleep(20);
usb_hub_port_status(hub, port1, &portstatus, &unused);
dev_dbg(&port_dev->dev, "Wait for inactive link disconnect detect\n");
continue;
} else if (!udev || !(portstatus & USB_PORT_STAT_CONNECTION)
|| udev->state == USB_STATE_NOTATTACHED) {
dev_dbg(&port_dev->dev, "do warm reset, port only\n");
if (hub_port_reset(hub, port1, NULL,
HUB_BH_RESET_TIME, true) < 0)
hub_port_disable(hub, port1, 1);
} else {
dev_dbg(&port_dev->dev, "do warm reset, full device\n");
usb_unlock_port(port_dev);
usb_lock_device(udev);
usb_reset_device(udev);
usb_unlock_device(udev);
usb_lock_port(port_dev);
connect_change = 0;
}
break;
}
if (connect_change)
hub_port_connect_change(hub, port1, portstatus, portchange);
}
static void hub_event(struct work_struct *work)
{
struct usb_device *hdev;
struct usb_interface *intf;
struct usb_hub *hub;
struct device *hub_dev;
u16 hubstatus;
u16 hubchange;
int i, ret;
hub = container_of(work, struct usb_hub, events);
hdev = hub->hdev;
hub_dev = hub->intfdev;
intf = to_usb_interface(hub_dev);
kcov_remote_start_usb((u64)hdev->bus->busnum);
dev_dbg(hub_dev, "state %d ports %d chg %04x evt %04x\n",
hdev->state, hdev->maxchild,
/* NOTE: expects max 15 ports... */
(u16) hub->change_bits[0],
(u16) hub->event_bits[0]);
/* Lock the device, then check to see if we were
* disconnected while waiting for the lock to succeed. */
usb_lock_device(hdev);
if (unlikely(hub->disconnected))
goto out_hdev_lock;
/* If the hub has died, clean up after it */
if (hdev->state == USB_STATE_NOTATTACHED) {
hub->error = -ENODEV;
hub_quiesce(hub, HUB_DISCONNECT);
goto out_hdev_lock;
}
/* Autoresume */
ret = usb_autopm_get_interface(intf);
if (ret) {
dev_dbg(hub_dev, "Can't autoresume: %d\n", ret);
goto out_hdev_lock;
}
/* If this is an inactive hub, do nothing */
if (hub->quiescing)
goto out_autopm;
if (hub->error) {
dev_dbg(hub_dev, "resetting for error %d\n", hub->error);
ret = usb_reset_device(hdev);
if (ret) {
dev_dbg(hub_dev, "error resetting hub: %d\n", ret);
goto out_autopm;
}
hub->nerrors = 0;
hub->error = 0;
}
/* deal with port status changes */
for (i = 1; i <= hdev->maxchild; i++) {
struct usb_port *port_dev = hub->ports[i - 1];
if (test_bit(i, hub->event_bits)
|| test_bit(i, hub->change_bits)
|| test_bit(i, hub->wakeup_bits)) {
/*
* The get_noresume and barrier ensure that if
* the port was in the process of resuming, we
* flush that work and keep the port active for
* the duration of the port_event(). However,
* if the port is runtime pm suspended
* (powered-off), we leave it in that state, run
* an abbreviated port_event(), and move on.
*/
pm_runtime_get_noresume(&port_dev->dev);
pm_runtime_barrier(&port_dev->dev);
usb_lock_port(port_dev);
port_event(hub, i);
usb_unlock_port(port_dev);
pm_runtime_put_sync(&port_dev->dev);
}
}
/* deal with hub status changes */
if (test_and_clear_bit(0, hub->event_bits) == 0)
; /* do nothing */
else if (hub_hub_status(hub, &hubstatus, &hubchange) < 0)
dev_err(hub_dev, "get_hub_status failed\n");
else {
if (hubchange & HUB_CHANGE_LOCAL_POWER) {
dev_dbg(hub_dev, "power change\n");
clear_hub_feature(hdev, C_HUB_LOCAL_POWER);
if (hubstatus & HUB_STATUS_LOCAL_POWER)
/* FIXME: Is this always true? */
hub->limited_power = 1;
else
hub->limited_power = 0;
}
if (hubchange & HUB_CHANGE_OVERCURRENT) {
u16 status = 0;
u16 unused;
dev_dbg(hub_dev, "over-current change\n");
clear_hub_feature(hdev, C_HUB_OVER_CURRENT);
msleep(500); /* Cool down */
hub_power_on(hub, true);
hub_hub_status(hub, &status, &unused);
if (status & HUB_STATUS_OVERCURRENT)
dev_err(hub_dev, "over-current condition\n");
}
}
out_autopm:
/* Balance the usb_autopm_get_interface() above */
usb_autopm_put_interface_no_suspend(intf);
out_hdev_lock:
usb_unlock_device(hdev);
/* Balance the stuff in kick_hub_wq() and allow autosuspend */
usb_autopm_put_interface(intf);
kref_put(&hub->kref, hub_release);
kcov_remote_stop();
}
static const struct usb_device_id hub_id_table[] = {
{ .match_flags = USB_DEVICE_ID_MATCH_VENDOR
| USB_DEVICE_ID_MATCH_PRODUCT
| USB_DEVICE_ID_MATCH_INT_CLASS,
.idVendor = USB_VENDOR_SMSC,
.idProduct = USB_PRODUCT_USB5534B,
.bInterfaceClass = USB_CLASS_HUB,
.driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
{ .match_flags = USB_DEVICE_ID_MATCH_VENDOR
| USB_DEVICE_ID_MATCH_PRODUCT,
.idVendor = USB_VENDOR_CYPRESS,
.idProduct = USB_PRODUCT_CY7C65632,
.driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
{ .match_flags = USB_DEVICE_ID_MATCH_VENDOR
| USB_DEVICE_ID_MATCH_INT_CLASS,
.idVendor = USB_VENDOR_GENESYS_LOGIC,
.bInterfaceClass = USB_CLASS_HUB,
.driver_info = HUB_QUIRK_CHECK_PORT_AUTOSUSPEND},
{ .match_flags = USB_DEVICE_ID_MATCH_VENDOR
| USB_DEVICE_ID_MATCH_PRODUCT,
.idVendor = USB_VENDOR_TEXAS_INSTRUMENTS,
.idProduct = USB_PRODUCT_TUSB8041_USB2,
.driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
{ .match_flags = USB_DEVICE_ID_MATCH_VENDOR
| USB_DEVICE_ID_MATCH_PRODUCT,
.idVendor = USB_VENDOR_TEXAS_INSTRUMENTS,
.idProduct = USB_PRODUCT_TUSB8041_USB3,
.driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
{ .match_flags = USB_DEVICE_ID_MATCH_DEV_CLASS,
.bDeviceClass = USB_CLASS_HUB},
{ .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS,
.bInterfaceClass = USB_CLASS_HUB},
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, hub_id_table);
static struct usb_driver hub_driver = {
.name = "hub",
.probe = hub_probe,
.disconnect = hub_disconnect,
.suspend = hub_suspend,
.resume = hub_resume,
.reset_resume = hub_reset_resume,
.pre_reset = hub_pre_reset,
.post_reset = hub_post_reset,
.unlocked_ioctl = hub_ioctl,
.id_table = hub_id_table,
.supports_autosuspend = 1,
};
int usb_hub_init(void)
{
if (usb_register(&hub_driver) < 0) {
printk(KERN_ERR "%s: can't register hub driver\n",
usbcore_name);
return -1;
}
/*
* The workqueue needs to be freezable to avoid interfering with
* USB-PERSIST port handover. Otherwise it might see that a full-speed
* device was gone before the EHCI controller had handed its port
* over to the companion full-speed controller.
*/
hub_wq = alloc_workqueue("usb_hub_wq", WQ_FREEZABLE, 0);
if (hub_wq)
return 0;
	/* Workqueue allocation failed; unwind the hub driver registration */
usb_deregister(&hub_driver);
pr_err("%s: can't allocate workqueue for usb hub\n", usbcore_name);
return -1;
}
void usb_hub_cleanup(void)
{
destroy_workqueue(hub_wq);
/*
* Hub resources are freed for us by usb_deregister. It calls
* usb_driver_purge on every device which in turn calls that
* devices disconnect function if it is using this driver.
* The hub_disconnect function takes care of releasing the
* individual hub resources. -greg
*/
usb_deregister(&hub_driver);
} /* usb_hub_cleanup() */
/**
* usb_reset_and_verify_device - perform a USB port reset to reinitialize a device
* @udev: device to reset (not in SUSPENDED or NOTATTACHED state)
*
* WARNING - don't use this routine to reset a composite device
* (one with multiple interfaces owned by separate drivers)!
* Use usb_reset_device() instead.
*
* Do a port reset, reassign the device's address, and establish its
* former operating configuration. If the reset fails, or the device's
* descriptors change from their values before the reset, or the original
* configuration and altsettings cannot be restored, a flag will be set
* telling hub_wq to pretend the device has been disconnected and then
* re-connected. All drivers will be unbound, and the device will be
* re-enumerated and probed all over again.
*
* Return: 0 if the reset succeeded, -ENODEV if the device has been
* flagged for logical disconnection, or some other negative error code
* if the reset wasn't even attempted.
*
* Note:
* The caller must own the device lock and the port lock, the latter is
* taken by usb_reset_device(). For example, it's safe to use
* usb_reset_device() from a driver probe() routine after downloading
* new firmware. For calls that might not occur during probe(), drivers
* should lock the device using usb_lock_device_for_reset().
*
* Locking exception: This routine may also be called from within an
* autoresume handler. Such usage won't conflict with other tasks
* holding the device lock because these tasks should always call
* usb_autopm_resume_device(), thereby preventing any unwanted
* autoresume. The autoresume handler is expected to have already
* acquired the port lock before calling this routine.
*/
static int usb_reset_and_verify_device(struct usb_device *udev)
{
struct usb_device *parent_hdev = udev->parent;
struct usb_hub *parent_hub;
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
struct usb_device_descriptor descriptor;
struct usb_host_bos *bos;
int i, j, ret = 0;
int port1 = udev->portnum;
if (udev->state == USB_STATE_NOTATTACHED ||
udev->state == USB_STATE_SUSPENDED) {
dev_dbg(&udev->dev, "device reset not allowed in state %d\n",
udev->state);
return -EINVAL;
}
if (!parent_hdev)
return -EISDIR;
parent_hub = usb_hub_to_struct_hub(parent_hdev);
/* Disable USB2 hardware LPM.
* It will be re-enabled by the enumeration process.
*/
usb_disable_usb2_hardware_lpm(udev);
bos = udev->bos;
udev->bos = NULL;
mutex_lock(hcd->address0_mutex);
for (i = 0; i < PORT_INIT_TRIES; ++i) {
if (hub_port_stop_enumerate(parent_hub, port1, i)) {
ret = -ENODEV;
break;
}
/* ep0 maxpacket size may change; let the HCD know about it.
* Other endpoints will be handled by re-enumeration. */
usb_ep0_reinit(udev);
ret = hub_port_init(parent_hub, udev, port1, i, &descriptor);
if (ret >= 0 || ret == -ENOTCONN || ret == -ENODEV)
break;
}
mutex_unlock(hcd->address0_mutex);
if (ret < 0)
goto re_enumerate;
/* Device might have changed firmware (DFU or similar) */
if (descriptors_changed(udev, &descriptor, bos)) {
dev_info(&udev->dev, "device firmware changed\n");
goto re_enumerate;
}
/* Restore the device's previous configuration */
if (!udev->actconfig)
goto done;
mutex_lock(hcd->bandwidth_mutex);
ret = usb_hcd_alloc_bandwidth(udev, udev->actconfig, NULL, NULL);
if (ret < 0) {
dev_warn(&udev->dev,
"Busted HC? Not enough HCD resources for "
"old configuration.\n");
mutex_unlock(hcd->bandwidth_mutex);
goto re_enumerate;
}
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_SET_CONFIGURATION, 0,
udev->actconfig->desc.bConfigurationValue, 0,
NULL, 0, USB_CTRL_SET_TIMEOUT);
if (ret < 0) {
dev_err(&udev->dev,
"can't restore configuration #%d (error=%d)\n",
udev->actconfig->desc.bConfigurationValue, ret);
mutex_unlock(hcd->bandwidth_mutex);
goto re_enumerate;
}
mutex_unlock(hcd->bandwidth_mutex);
usb_set_device_state(udev, USB_STATE_CONFIGURED);
/* Put interfaces back into the same altsettings as before.
* Don't bother to send the Set-Interface request for interfaces
* that were already in altsetting 0; besides being unnecessary,
* many devices can't handle it. Instead just reset the host-side
* endpoint state.
*/
for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
struct usb_host_config *config = udev->actconfig;
struct usb_interface *intf = config->interface[i];
struct usb_interface_descriptor *desc;
desc = &intf->cur_altsetting->desc;
if (desc->bAlternateSetting == 0) {
usb_disable_interface(udev, intf, true);
usb_enable_interface(udev, intf, true);
ret = 0;
} else {
/* Let the bandwidth allocation function know that this
* device has been reset, and it will have to use
* alternate setting 0 as the current alternate setting.
*/
intf->resetting_device = 1;
ret = usb_set_interface(udev, desc->bInterfaceNumber,
desc->bAlternateSetting);
intf->resetting_device = 0;
}
if (ret < 0) {
dev_err(&udev->dev, "failed to restore interface %d "
"altsetting %d (error=%d)\n",
desc->bInterfaceNumber,
desc->bAlternateSetting,
ret);
goto re_enumerate;
}
/* Resetting also frees any allocated streams */
for (j = 0; j < intf->cur_altsetting->desc.bNumEndpoints; j++)
intf->cur_altsetting->endpoint[j].streams = 0;
}
done:
/* Now that the alt settings are re-installed, enable LTM and LPM. */
usb_enable_usb2_hardware_lpm(udev);
usb_unlocked_enable_lpm(udev);
usb_enable_ltm(udev);
usb_release_bos_descriptor(udev);
udev->bos = bos;
return 0;
re_enumerate:
usb_release_bos_descriptor(udev);
udev->bos = bos;
hub_port_logical_disconnect(parent_hub, port1);
return -ENODEV;
}
/**
* usb_reset_device - warn interface drivers and perform a USB port reset
* @udev: device to reset (not in NOTATTACHED state)
*
* Warns all drivers bound to registered interfaces (using their pre_reset
* method), performs the port reset, and then lets the drivers know that
* the reset is over (using their post_reset method).
*
* Return: The same as for usb_reset_and_verify_device().
* However, if a reset is already in progress (for instance, if a
* driver doesn't have pre_reset() or post_reset() callbacks, and while
* being unbound or re-bound during the ongoing reset its disconnect()
* or probe() routine tries to perform a second, nested reset), the
* routine returns -EINPROGRESS.
*
* Note:
* The caller must own the device lock. For example, it's safe to use
* this from a driver probe() routine after downloading new firmware.
* For calls that might not occur during probe(), drivers should lock
* the device using usb_lock_device_for_reset().
*
* If an interface is currently being probed or disconnected, we assume
* its driver knows how to handle resets. For all other interfaces,
* if the driver doesn't have pre_reset and post_reset methods then
* we attempt to unbind it and rebind afterward.
*/
int usb_reset_device(struct usb_device *udev)
{
int ret;
int i;
unsigned int noio_flag;
struct usb_port *port_dev;
struct usb_host_config *config = udev->actconfig;
struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
if (udev->state == USB_STATE_NOTATTACHED) {
dev_dbg(&udev->dev, "device reset not allowed in state %d\n",
udev->state);
return -EINVAL;
}
if (!udev->parent) {
/* this requires hcd-specific logic; see ohci_restart() */
dev_dbg(&udev->dev, "%s for root hub!\n", __func__);
return -EISDIR;
}
if (udev->reset_in_progress)
return -EINPROGRESS;
udev->reset_in_progress = 1;
port_dev = hub->ports[udev->portnum - 1];
/*
* Don't allocate memory with GFP_KERNEL in current
* context to avoid possible deadlock if usb mass
	 * storage interface or usbnet interface (iSCSI case)
	 * is included in current configuration. The easiest
	 * approach is to do it for every device reset,
	 * because the device 'memalloc_noio' flag may not
	 * have been set before resetting the usb device.
*/
noio_flag = memalloc_noio_save();
/* Prevent autosuspend during the reset */
usb_autoresume_device(udev);
if (config) {
for (i = 0; i < config->desc.bNumInterfaces; ++i) {
struct usb_interface *cintf = config->interface[i];
struct usb_driver *drv;
int unbind = 0;
if (cintf->dev.driver) {
drv = to_usb_driver(cintf->dev.driver);
if (drv->pre_reset && drv->post_reset)
unbind = (drv->pre_reset)(cintf);
else if (cintf->condition ==
USB_INTERFACE_BOUND)
unbind = 1;
if (unbind)
usb_forced_unbind_intf(cintf);
}
}
}
usb_lock_port(port_dev);
ret = usb_reset_and_verify_device(udev);
usb_unlock_port(port_dev);
if (config) {
for (i = config->desc.bNumInterfaces - 1; i >= 0; --i) {
struct usb_interface *cintf = config->interface[i];
struct usb_driver *drv;
int rebind = cintf->needs_binding;
if (!rebind && cintf->dev.driver) {
drv = to_usb_driver(cintf->dev.driver);
if (drv->post_reset)
rebind = (drv->post_reset)(cintf);
else if (cintf->condition ==
USB_INTERFACE_BOUND)
rebind = 1;
if (rebind)
cintf->needs_binding = 1;
}
}
/* If the reset failed, hub_wq will unbind drivers later */
if (ret == 0)
usb_unbind_and_rebind_marked_interfaces(udev);
}
usb_autosuspend_device(udev);
memalloc_noio_restore(noio_flag);
udev->reset_in_progress = 0;
return ret;
}
EXPORT_SYMBOL_GPL(usb_reset_device);
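/*
 * Illustrative sketch (not part of this file): a hypothetical interface
 * driver resetting its device outside of probe().  example_reset() and
 * example_intf are assumed names for illustration only; the locking
 * pattern follows the kernel-doc above.
 *
 *	static int example_reset(struct usb_interface *example_intf)
 *	{
 *		struct usb_device *udev = interface_to_usbdev(example_intf);
 *		int rc;
 *
 *		rc = usb_lock_device_for_reset(udev, example_intf);
 *		if (rc < 0)
 *			return rc;
 *		rc = usb_reset_device(udev);
 *		usb_unlock_device(udev);
 *		return rc;
 *	}
 */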
/**
* usb_queue_reset_device - Reset a USB device from an atomic context
* @iface: USB interface belonging to the device to reset
*
* This function can be used to reset a USB device from an atomic
* context, where usb_reset_device() won't work (as it blocks).
*
* Doing a reset via this method is functionally equivalent to calling
* usb_reset_device(), except for the fact that it is delayed to a
* workqueue. This means that any drivers bound to other interfaces
* might be unbound, as well as users from usbfs in user space.
*
* Corner cases:
*
* - Scheduling two resets at the same time from two different drivers
* attached to two different interfaces of the same device is
* possible; depending on how the driver attached to each interface
* handles ->pre_reset(), the second reset might happen or not.
*
* - If the reset is delayed so long that the interface is unbound from
* its driver, the reset will be skipped.
*
* - This function can be called during .probe(). It can also be called
* during .disconnect(), but doing so is pointless because the reset
* will not occur. If you really want to reset the device during
* .disconnect(), call usb_reset_device() directly -- but watch out
* for nested unbinding issues!
*/
void usb_queue_reset_device(struct usb_interface *iface)
{
if (schedule_work(&iface->reset_ws))
usb_get_intf(iface);
}
EXPORT_SYMBOL_GPL(usb_queue_reset_device);
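/*
 * Illustrative sketch (assumed driver code): scheduling a reset from an
 * URB completion handler, where the blocking usb_reset_device() is not
 * allowed.  Storing the interface in urb->context is an assumption of
 * this example.
 *
 *	static void example_complete(struct urb *urb)
 *	{
 *		struct usb_interface *intf = urb->context;
 *
 *		if (urb->status == -EPROTO)
 *			usb_queue_reset_device(intf);
 *	}
 */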
/**
* usb_hub_find_child - Get the pointer of child device
* attached to the port which is specified by @port1.
* @hdev: USB device belonging to the usb hub
* @port1: port num to indicate which port the child device
* is attached to.
*
 * USB drivers call this function to get the pointer to the child
 * device attached to the given hub port.
 *
 * Return: %NULL if the input parameters are invalid, otherwise the
 * child's usb_device pointer (which may itself be %NULL if no device
 * is attached to the port).
*/
struct usb_device *usb_hub_find_child(struct usb_device *hdev,
int port1)
{
struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
if (port1 < 1 || port1 > hdev->maxchild)
return NULL;
return hub->ports[port1 - 1]->child;
}
EXPORT_SYMBOL_GPL(usb_hub_find_child);
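/*
 * Illustrative sketch: walking a hub's child devices with this helper.
 * The usb_hub_for_each_child() macro in usb.h performs essentially the
 * same loop.
 *
 *	struct usb_device *child;
 *	int port1;
 *
 *	for (port1 = 1; port1 <= hdev->maxchild; port1++) {
 *		child = usb_hub_find_child(hdev, port1);
 *		if (child)
 *			dev_info(&child->dev, "device on port %d\n", port1);
 *	}
 */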
void usb_hub_adjust_deviceremovable(struct usb_device *hdev,
struct usb_hub_descriptor *desc)
{
struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
enum usb_port_connect_type connect_type;
int i;
if (!hub)
return;
if (!hub_is_superspeed(hdev)) {
for (i = 1; i <= hdev->maxchild; i++) {
struct usb_port *port_dev = hub->ports[i - 1];
connect_type = port_dev->connect_type;
if (connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) {
u8 mask = 1 << (i%8);
if (!(desc->u.hs.DeviceRemovable[i/8] & mask)) {
dev_dbg(&port_dev->dev, "DeviceRemovable is changed to 1 according to platform information.\n");
desc->u.hs.DeviceRemovable[i/8] |= mask;
}
}
}
} else {
u16 port_removable = le16_to_cpu(desc->u.ss.DeviceRemovable);
for (i = 1; i <= hdev->maxchild; i++) {
struct usb_port *port_dev = hub->ports[i - 1];
connect_type = port_dev->connect_type;
if (connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) {
u16 mask = 1 << i;
if (!(port_removable & mask)) {
dev_dbg(&port_dev->dev, "DeviceRemovable is changed to 1 according to platform information.\n");
port_removable |= mask;
}
}
}
desc->u.ss.DeviceRemovable = cpu_to_le16(port_removable);
}
}
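/*
 * Worked example of the masking above: on a USB 2.0 hub, port 3 maps to
 * bit (3 % 8) == 3 of DeviceRemovable[3 / 8] == DeviceRemovable[0],
 * i.e. mask 0x08; on a SuperSpeed hub the same port is bit 3 of the
 * single 16-bit DeviceRemovable field.
 */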
#ifdef CONFIG_ACPI
/**
* usb_get_hub_port_acpi_handle - Get the usb port's acpi handle
* @hdev: USB device belonging to the usb hub
* @port1: port num of the port
*
* Return: Port's acpi handle if successful, %NULL if params are
* invalid.
*/
acpi_handle usb_get_hub_port_acpi_handle(struct usb_device *hdev,
int port1)
{
struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
if (!hub)
return NULL;
return ACPI_HANDLE(&hub->ports[port1 - 1]->dev);
}
#endif
| linux-master | drivers/usb/core/hub.c |
// SPDX-License-Identifier: GPL-2.0
/*
* drivers/usb/core/endpoint.c
*
* (C) Copyright 2002,2004,2006 Greg Kroah-Hartman
* (C) Copyright 2002,2004 IBM Corp.
* (C) Copyright 2006 Novell Inc.
*
* Released under the GPLv2 only.
*
* Endpoint sysfs stuff
*/
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include "usb.h"
struct ep_device {
struct usb_endpoint_descriptor *desc;
struct usb_device *udev;
struct device dev;
};
#define to_ep_device(_dev) \
container_of(_dev, struct ep_device, dev)
struct ep_attribute {
struct attribute attr;
ssize_t (*show)(struct usb_device *,
struct usb_endpoint_descriptor *, char *);
};
#define to_ep_attribute(_attr) \
container_of(_attr, struct ep_attribute, attr)
#define usb_ep_attr(field, format_string) \
static ssize_t field##_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
struct ep_device *ep = to_ep_device(dev); \
return sprintf(buf, format_string, ep->desc->field); \
} \
static DEVICE_ATTR_RO(field)
usb_ep_attr(bLength, "%02x\n");
usb_ep_attr(bEndpointAddress, "%02x\n");
usb_ep_attr(bmAttributes, "%02x\n");
usb_ep_attr(bInterval, "%02x\n");
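/*
 * For reference, usb_ep_attr(bLength, "%02x\n") above expands to
 * roughly the following:
 *
 *	static ssize_t bLength_show(struct device *dev,
 *				    struct device_attribute *attr, char *buf)
 *	{
 *		struct ep_device *ep = to_ep_device(dev);
 *		return sprintf(buf, "%02x\n", ep->desc->bLength);
 *	}
 *	static DEVICE_ATTR_RO(bLength);
 */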
static ssize_t wMaxPacketSize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ep_device *ep = to_ep_device(dev);
return sprintf(buf, "%04x\n", usb_endpoint_maxp(ep->desc));
}
static DEVICE_ATTR_RO(wMaxPacketSize);
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct ep_device *ep = to_ep_device(dev);
char *type = "unknown";
switch (usb_endpoint_type(ep->desc)) {
case USB_ENDPOINT_XFER_CONTROL:
type = "Control";
break;
case USB_ENDPOINT_XFER_ISOC:
type = "Isoc";
break;
case USB_ENDPOINT_XFER_BULK:
type = "Bulk";
break;
case USB_ENDPOINT_XFER_INT:
type = "Interrupt";
break;
}
return sprintf(buf, "%s\n", type);
}
static DEVICE_ATTR_RO(type);
static ssize_t interval_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct ep_device *ep = to_ep_device(dev);
unsigned int interval;
char unit;
interval = usb_decode_interval(ep->desc, ep->udev->speed);
if (interval % 1000) {
unit = 'u';
} else {
unit = 'm';
interval /= 1000;
}
return sprintf(buf, "%d%cs\n", interval, unit);
}
static DEVICE_ATTR_RO(interval);
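/*
 * Worked examples of the unit selection above: a decoded interval of
 * 125 us is not a multiple of 1000 and is shown as "125us", while an
 * interval of 8000 us is reduced and shown as "8ms".
 */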
static ssize_t direction_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct ep_device *ep = to_ep_device(dev);
char *direction;
if (usb_endpoint_xfer_control(ep->desc))
direction = "both";
else if (usb_endpoint_dir_in(ep->desc))
direction = "in";
else
direction = "out";
return sprintf(buf, "%s\n", direction);
}
static DEVICE_ATTR_RO(direction);
static struct attribute *ep_dev_attrs[] = {
&dev_attr_bLength.attr,
&dev_attr_bEndpointAddress.attr,
&dev_attr_bmAttributes.attr,
&dev_attr_bInterval.attr,
&dev_attr_wMaxPacketSize.attr,
&dev_attr_interval.attr,
&dev_attr_type.attr,
&dev_attr_direction.attr,
NULL,
};
static const struct attribute_group ep_dev_attr_grp = {
.attrs = ep_dev_attrs,
};
static const struct attribute_group *ep_dev_groups[] = {
&ep_dev_attr_grp,
NULL
};
static void ep_device_release(struct device *dev)
{
struct ep_device *ep_dev = to_ep_device(dev);
kfree(ep_dev);
}
struct device_type usb_ep_device_type = {
.name = "usb_endpoint",
.release = ep_device_release,
};
int usb_create_ep_devs(struct device *parent,
struct usb_host_endpoint *endpoint,
struct usb_device *udev)
{
struct ep_device *ep_dev;
int retval;
ep_dev = kzalloc(sizeof(*ep_dev), GFP_KERNEL);
if (!ep_dev) {
retval = -ENOMEM;
goto exit;
}
ep_dev->desc = &endpoint->desc;
ep_dev->udev = udev;
ep_dev->dev.groups = ep_dev_groups;
ep_dev->dev.type = &usb_ep_device_type;
ep_dev->dev.parent = parent;
dev_set_name(&ep_dev->dev, "ep_%02x", endpoint->desc.bEndpointAddress);
retval = device_register(&ep_dev->dev);
if (retval)
goto error_register;
device_enable_async_suspend(&ep_dev->dev);
endpoint->ep_dev = ep_dev;
return retval;
error_register:
put_device(&ep_dev->dev);
exit:
return retval;
}
void usb_remove_ep_devs(struct usb_host_endpoint *endpoint)
{
struct ep_device *ep_dev = endpoint->ep_dev;
if (ep_dev) {
device_unregister(&ep_dev->dev);
endpoint->ep_dev = NULL;
}
}
| linux-master | drivers/usb/core/endpoint.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* A wrapper for multiple PHYs which passes all phy_* function calls to
 * multiple (actual) PHY devices. This comes in handy when initializing
 * all PHYs on an HCD and keeping them all in the same state.
*
* Copyright (C) 2018 Martin Blumenstingl <[email protected]>
*/
#include <linux/device.h>
#include <linux/list.h>
#include <linux/phy/phy.h>
#include <linux/of.h>
#include "phy.h"
struct usb_phy_roothub {
struct phy *phy;
struct list_head list;
};
static int usb_phy_roothub_add_phy(struct device *dev, int index,
struct list_head *list)
{
struct usb_phy_roothub *roothub_entry;
struct phy *phy;
phy = devm_of_phy_get_by_index(dev, dev->of_node, index);
if (IS_ERR(phy)) {
if (PTR_ERR(phy) == -ENODEV)
return 0;
else
return PTR_ERR(phy);
}
roothub_entry = devm_kzalloc(dev, sizeof(*roothub_entry), GFP_KERNEL);
if (!roothub_entry)
return -ENOMEM;
INIT_LIST_HEAD(&roothub_entry->list);
roothub_entry->phy = phy;
list_add_tail(&roothub_entry->list, list);
return 0;
}
struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev)
{
struct usb_phy_roothub *phy_roothub;
int i, num_phys, err;
if (!IS_ENABLED(CONFIG_GENERIC_PHY))
return NULL;
num_phys = of_count_phandle_with_args(dev->of_node, "phys",
"#phy-cells");
if (num_phys <= 0)
return NULL;
phy_roothub = devm_kzalloc(dev, sizeof(*phy_roothub), GFP_KERNEL);
if (!phy_roothub)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&phy_roothub->list);
for (i = 0; i < num_phys; i++) {
err = usb_phy_roothub_add_phy(dev, i, &phy_roothub->list);
if (err)
return ERR_PTR(err);
}
return phy_roothub;
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_alloc);
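/*
 * Illustrative sketch (assumed HCD glue code): a host-controller
 * probe() bringing up every PHY referenced by its "phys" DT property
 * with the helpers in this file.
 *
 *	phy_roothub = usb_phy_roothub_alloc(dev);
 *	if (IS_ERR(phy_roothub))
 *		return PTR_ERR(phy_roothub);
 *	err = usb_phy_roothub_init(phy_roothub);
 *	if (err)
 *		return err;
 *	err = usb_phy_roothub_power_on(phy_roothub);
 *	if (err)
 *		usb_phy_roothub_exit(phy_roothub);
 */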
int usb_phy_roothub_init(struct usb_phy_roothub *phy_roothub)
{
struct usb_phy_roothub *roothub_entry;
struct list_head *head;
int err;
if (!phy_roothub)
return 0;
head = &phy_roothub->list;
list_for_each_entry(roothub_entry, head, list) {
err = phy_init(roothub_entry->phy);
if (err)
goto err_exit_phys;
}
return 0;
err_exit_phys:
list_for_each_entry_continue_reverse(roothub_entry, head, list)
phy_exit(roothub_entry->phy);
return err;
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_init);
int usb_phy_roothub_exit(struct usb_phy_roothub *phy_roothub)
{
struct usb_phy_roothub *roothub_entry;
struct list_head *head;
int err, ret = 0;
if (!phy_roothub)
return 0;
head = &phy_roothub->list;
list_for_each_entry(roothub_entry, head, list) {
err = phy_exit(roothub_entry->phy);
if (err)
ret = err;
}
return ret;
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_exit);
int usb_phy_roothub_set_mode(struct usb_phy_roothub *phy_roothub,
enum phy_mode mode)
{
struct usb_phy_roothub *roothub_entry;
struct list_head *head;
int err;
if (!phy_roothub)
return 0;
head = &phy_roothub->list;
list_for_each_entry(roothub_entry, head, list) {
err = phy_set_mode(roothub_entry->phy, mode);
if (err)
goto err_out;
}
return 0;
err_out:
list_for_each_entry_continue_reverse(roothub_entry, head, list)
phy_power_off(roothub_entry->phy);
return err;
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_set_mode);
int usb_phy_roothub_calibrate(struct usb_phy_roothub *phy_roothub)
{
struct usb_phy_roothub *roothub_entry;
struct list_head *head;
int err;
if (!phy_roothub)
return 0;
head = &phy_roothub->list;
list_for_each_entry(roothub_entry, head, list) {
err = phy_calibrate(roothub_entry->phy);
if (err)
return err;
}
return 0;
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_calibrate);
int usb_phy_roothub_power_on(struct usb_phy_roothub *phy_roothub)
{
struct usb_phy_roothub *roothub_entry;
struct list_head *head;
int err;
if (!phy_roothub)
return 0;
head = &phy_roothub->list;
list_for_each_entry(roothub_entry, head, list) {
err = phy_power_on(roothub_entry->phy);
if (err)
goto err_out;
}
return 0;
err_out:
list_for_each_entry_continue_reverse(roothub_entry, head, list)
phy_power_off(roothub_entry->phy);
return err;
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_power_on);
void usb_phy_roothub_power_off(struct usb_phy_roothub *phy_roothub)
{
struct usb_phy_roothub *roothub_entry;
if (!phy_roothub)
return;
list_for_each_entry_reverse(roothub_entry, &phy_roothub->list, list)
phy_power_off(roothub_entry->phy);
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_power_off);
int usb_phy_roothub_suspend(struct device *controller_dev,
struct usb_phy_roothub *phy_roothub)
{
usb_phy_roothub_power_off(phy_roothub);
/* keep the PHYs initialized so the device can wake up the system */
if (device_may_wakeup(controller_dev))
return 0;
return usb_phy_roothub_exit(phy_roothub);
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_suspend);
int usb_phy_roothub_resume(struct device *controller_dev,
struct usb_phy_roothub *phy_roothub)
{
int err;
/* if the device can't wake up the system _exit was called */
if (!device_may_wakeup(controller_dev)) {
err = usb_phy_roothub_init(phy_roothub);
if (err)
return err;
}
err = usb_phy_roothub_power_on(phy_roothub);
/* undo _init if _power_on failed */
if (err && !device_may_wakeup(controller_dev))
usb_phy_roothub_exit(phy_roothub);
return err;
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_resume);
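/*
 * Illustrative sketch (assumed HCD glue code): system sleep hooks that
 * delegate to the two helpers above.  struct example_hcd and its
 * phy_roothub member are assumptions for illustration only.
 *
 *	static int example_hcd_suspend(struct device *dev)
 *	{
 *		struct example_hcd *p = dev_get_drvdata(dev);
 *
 *		return usb_phy_roothub_suspend(dev, p->phy_roothub);
 *	}
 *
 *	static int example_hcd_resume(struct device *dev)
 *	{
 *		struct example_hcd *p = dev_get_drvdata(dev);
 *
 *		return usb_phy_roothub_resume(dev, p->phy_roothub);
 *	}
 */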
| linux-master | drivers/usb/core/phy.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* devices.c
* (C) Copyright 1999 Randy Dunlap.
* (C) Copyright 1999,2000 Thomas Sailer <[email protected]>.
* (proc file per device)
* (C) Copyright 1999 Deti Fliegl (new USB architecture)
*
*************************************************************
*
* <mountpoint>/devices contains USB topology, device, config, class,
* interface, & endpoint data.
*
* I considered using /dev/bus/usb/device# for each device
* as it is attached or detached, but I didn't like this for some
* reason -- maybe it's just too deep of a directory structure.
* I also don't like looking in multiple places to gather and view
* the data. Having only one file for ./devices also prevents race
* conditions that could arise if a program was reading device info
* for devices that are being removed (unplugged). (That is, the
* program may find a directory for devnum_12 then try to open it,
* but it was just unplugged, so the directory is now deleted.
* But programs would just have to be prepared for situations like
* this in any plug-and-play environment.)
*
* 1999-12-16: Thomas Sailer <[email protected]>
* Converted the whole proc stuff to real
 * read methods. Now the whole device list no longer needs to fit
 * into one page, only the device list for one bus.
* Added a poll method to /sys/kernel/debug/usb/devices, to wake
* up an eventual usbd
* 2000-01-04: Thomas Sailer <[email protected]>
* Turned into its own filesystem
* 2000-07-05: Ashley Montanaro <[email protected]>
* Converted file reading routine to dump to buffer once
* per device, not per bus
*/
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/usb.h>
#include <linux/usbdevice_fs.h>
#include <linux/usb/hcd.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include "usb.h"
/* Define ALLOW_SERIAL_NUMBER if you want to see the serial number of devices */
#define ALLOW_SERIAL_NUMBER
static const char format_topo[] =
/* T: Bus=dd Lev=dd Prnt=dd Port=dd Cnt=dd Dev#=ddd Spd=dddd MxCh=dd */
"\nT: Bus=%2.2d Lev=%2.2d Prnt=%2.2d Port=%2.2d Cnt=%2.2d Dev#=%3d Spd=%-4s MxCh=%2d\n";
static const char format_string_manufacturer[] =
/* S: Manufacturer=xxxx */
"S: Manufacturer=%.100s\n";
static const char format_string_product[] =
/* S: Product=xxxx */
"S: Product=%.100s\n";
#ifdef ALLOW_SERIAL_NUMBER
static const char format_string_serialnumber[] =
/* S: SerialNumber=xxxx */
"S: SerialNumber=%.100s\n";
#endif
static const char format_bandwidth[] =
/* B: Alloc=ddd/ddd us (xx%), #Int=ddd, #Iso=ddd */
"B: Alloc=%3d/%3d us (%2d%%), #Int=%3d, #Iso=%3d\n";
static const char format_device1[] =
/* D: Ver=xx.xx Cls=xx(sssss) Sub=xx Prot=xx MxPS=dd #Cfgs=dd */
"D: Ver=%2x.%02x Cls=%02x(%-5s) Sub=%02x Prot=%02x MxPS=%2d #Cfgs=%3d\n";
static const char format_device2[] =
/* P: Vendor=xxxx ProdID=xxxx Rev=xx.xx */
"P: Vendor=%04x ProdID=%04x Rev=%2x.%02x\n";
static const char format_config[] =
/* C: #Ifs=dd Cfg#=dd Atr=xx MPwr=dddmA */
"C:%c #Ifs=%2d Cfg#=%2d Atr=%02x MxPwr=%3dmA\n";
static const char format_iad[] =
/* A: FirstIf#=dd IfCount=dd Cls=xx(sssss) Sub=xx Prot=xx */
"A: FirstIf#=%2d IfCount=%2d Cls=%02x(%-5s) Sub=%02x Prot=%02x\n";
static const char format_iface[] =
/* I: If#=dd Alt=dd #EPs=dd Cls=xx(sssss) Sub=xx Prot=xx Driver=xxxx*/
"I:%c If#=%2d Alt=%2d #EPs=%2d Cls=%02x(%-5s) Sub=%02x Prot=%02x Driver=%s\n";
static const char format_endpt[] =
/* E: Ad=xx(s) Atr=xx(ssss) MxPS=dddd Ivl=D?s */
"E: Ad=%02x(%c) Atr=%02x(%-4s) MxPS=%4d Ivl=%d%cs\n";
struct class_info {
int class;
char *class_name;
};
static const struct class_info clas_info[] = {
/* max. 5 chars. per name string */
{USB_CLASS_PER_INTERFACE, ">ifc"},
{USB_CLASS_AUDIO, "audio"},
{USB_CLASS_COMM, "comm."},
{USB_CLASS_HID, "HID"},
{USB_CLASS_PHYSICAL, "PID"},
{USB_CLASS_STILL_IMAGE, "still"},
{USB_CLASS_PRINTER, "print"},
{USB_CLASS_MASS_STORAGE, "stor."},
{USB_CLASS_HUB, "hub"},
{USB_CLASS_CDC_DATA, "data"},
{USB_CLASS_CSCID, "scard"},
{USB_CLASS_CONTENT_SEC, "c-sec"},
{USB_CLASS_VIDEO, "video"},
{USB_CLASS_PERSONAL_HEALTHCARE, "perhc"},
{USB_CLASS_AUDIO_VIDEO, "av"},
{USB_CLASS_BILLBOARD, "blbrd"},
{USB_CLASS_USB_TYPE_C_BRIDGE, "bridg"},
{USB_CLASS_WIRELESS_CONTROLLER, "wlcon"},
{USB_CLASS_MISC, "misc"},
{USB_CLASS_APP_SPEC, "app."},
{USB_CLASS_VENDOR_SPEC, "vend."},
{-1, "unk."} /* leave as last */
};
/*****************************************************************/
static const char *class_decode(const int class)
{
int ix;
for (ix = 0; clas_info[ix].class != -1; ix++)
if (clas_info[ix].class == class)
break;
return clas_info[ix].class_name;
}
static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
const struct usb_endpoint_descriptor *desc)
{
char dir, unit, *type;
unsigned interval, bandwidth = 1;
if (start > end)
return start;
dir = usb_endpoint_dir_in(desc) ? 'I' : 'O';
if (speed == USB_SPEED_HIGH)
bandwidth = usb_endpoint_maxp_mult(desc);
/* this isn't checking for illegal values */
switch (usb_endpoint_type(desc)) {
case USB_ENDPOINT_XFER_CONTROL:
type = "Ctrl";
dir = 'B'; /* ctrl is bidirectional */
break;
case USB_ENDPOINT_XFER_ISOC:
type = "Isoc";
break;
case USB_ENDPOINT_XFER_BULK:
type = "Bulk";
break;
case USB_ENDPOINT_XFER_INT:
type = "Int.";
break;
default: /* "can't happen" */
return start;
}
interval = usb_decode_interval(desc, speed);
if (interval % 1000) {
unit = 'u';
} else {
unit = 'm';
interval /= 1000;
}
start += sprintf(start, format_endpt, desc->bEndpointAddress, dir,
desc->bmAttributes, type,
usb_endpoint_maxp(desc) *
bandwidth,
interval, unit);
return start;
}
static char *usb_dump_interface_descriptor(char *start, char *end,
const struct usb_interface_cache *intfc,
const struct usb_interface *iface,
int setno)
{
const struct usb_interface_descriptor *desc;
const char *driver_name = "";
int active = 0;
if (start > end)
return start;
desc = &intfc->altsetting[setno].desc;
if (iface) {
driver_name = (iface->dev.driver
? iface->dev.driver->name
: "(none)");
active = (desc == &iface->cur_altsetting->desc);
}
start += sprintf(start, format_iface,
active ? '*' : ' ', /* mark active altsetting */
desc->bInterfaceNumber,
desc->bAlternateSetting,
desc->bNumEndpoints,
desc->bInterfaceClass,
class_decode(desc->bInterfaceClass),
desc->bInterfaceSubClass,
desc->bInterfaceProtocol,
driver_name);
return start;
}
static char *usb_dump_interface(int speed, char *start, char *end,
const struct usb_interface_cache *intfc,
const struct usb_interface *iface, int setno)
{
const struct usb_host_interface *desc = &intfc->altsetting[setno];
int i;
start = usb_dump_interface_descriptor(start, end, intfc, iface, setno);
for (i = 0; i < desc->desc.bNumEndpoints; i++) {
start = usb_dump_endpoint_descriptor(speed,
start, end, &desc->endpoint[i].desc);
}
return start;
}
static char *usb_dump_iad_descriptor(char *start, char *end,
const struct usb_interface_assoc_descriptor *iad)
{
if (start > end)
return start;
start += sprintf(start, format_iad,
iad->bFirstInterface,
iad->bInterfaceCount,
iad->bFunctionClass,
class_decode(iad->bFunctionClass),
iad->bFunctionSubClass,
iad->bFunctionProtocol);
return start;
}
/* TBD:
* 0. TBDs
* 1. marking active interface altsettings (code lists all, but should mark
* which ones are active, if any)
*/
static char *usb_dump_config_descriptor(char *start, char *end,
const struct usb_config_descriptor *desc,
int active, int speed)
{
int mul;
if (start > end)
return start;
if (speed >= USB_SPEED_SUPER)
mul = 8;
else
mul = 2;
start += sprintf(start, format_config,
/* mark active/actual/current cfg. */
active ? '*' : ' ',
desc->bNumInterfaces,
desc->bConfigurationValue,
desc->bmAttributes,
desc->bMaxPower * mul);
return start;
}
static char *usb_dump_config(int speed, char *start, char *end,
const struct usb_host_config *config, int active)
{
int i, j;
struct usb_interface_cache *intfc;
struct usb_interface *interface;
if (start > end)
return start;
if (!config)
/* getting these some in 2.3.7; none in 2.3.6 */
return start + sprintf(start, "(null Cfg. desc.)\n");
start = usb_dump_config_descriptor(start, end, &config->desc, active,
speed);
for (i = 0; i < USB_MAXIADS; i++) {
if (config->intf_assoc[i] == NULL)
break;
start = usb_dump_iad_descriptor(start, end,
config->intf_assoc[i]);
}
for (i = 0; i < config->desc.bNumInterfaces; i++) {
intfc = config->intf_cache[i];
interface = config->interface[i];
for (j = 0; j < intfc->num_altsetting; j++) {
start = usb_dump_interface(speed,
start, end, intfc, interface, j);
}
}
return start;
}
/*
* Dump the different USB descriptors.
*/
static char *usb_dump_device_descriptor(char *start, char *end,
const struct usb_device_descriptor *desc)
{
u16 bcdUSB = le16_to_cpu(desc->bcdUSB);
u16 bcdDevice = le16_to_cpu(desc->bcdDevice);
if (start > end)
return start;
start += sprintf(start, format_device1,
bcdUSB >> 8, bcdUSB & 0xff,
desc->bDeviceClass,
class_decode(desc->bDeviceClass),
desc->bDeviceSubClass,
desc->bDeviceProtocol,
desc->bMaxPacketSize0,
desc->bNumConfigurations);
if (start > end)
return start;
start += sprintf(start, format_device2,
le16_to_cpu(desc->idVendor),
le16_to_cpu(desc->idProduct),
bcdDevice >> 8, bcdDevice & 0xff);
return start;
}
/*
* Dump the different strings that this device holds.
*/
static char *usb_dump_device_strings(char *start, char *end,
struct usb_device *dev)
{
if (start > end)
return start;
if (dev->manufacturer)
start += sprintf(start, format_string_manufacturer,
dev->manufacturer);
if (start > end)
goto out;
if (dev->product)
start += sprintf(start, format_string_product, dev->product);
if (start > end)
goto out;
#ifdef ALLOW_SERIAL_NUMBER
if (dev->serial)
start += sprintf(start, format_string_serialnumber,
dev->serial);
#endif
out:
return start;
}
static char *usb_dump_desc(char *start, char *end, struct usb_device *dev)
{
int i;
start = usb_dump_device_descriptor(start, end, &dev->descriptor);
start = usb_dump_device_strings(start, end, dev);
for (i = 0; i < dev->descriptor.bNumConfigurations; i++) {
start = usb_dump_config(dev->speed,
start, end, dev->config + i,
/* active ? */
(dev->config + i) == dev->actconfig);
}
return start;
}
/*****************************************************************/
/* This is a recursive function. Parameters:
* buffer - the user-space buffer to write data into
* nbytes - the maximum number of bytes to write
* skip_bytes - the number of bytes to skip before writing anything
* file_offset - the offset into the devices file on completion
* The caller must own the device lock.
*/
static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes,
loff_t *skip_bytes, loff_t *file_offset,
struct usb_device *usbdev, struct usb_bus *bus,
int level, int index, int count)
{
int chix;
int ret, cnt = 0;
int parent_devnum = 0;
char *pages_start, *data_end, *speed;
unsigned int length;
ssize_t total_written = 0;
struct usb_device *childdev = NULL;
/* don't bother with anything else if we're not writing any data */
if (*nbytes <= 0)
return 0;
if (level > MAX_TOPO_LEVEL)
return 0;
/* allocate 2^1 pages = 8K (on i386);
* should be more than enough for one device */
pages_start = (char *)__get_free_pages(GFP_NOIO, 1);
if (!pages_start)
return -ENOMEM;
if (usbdev->parent && usbdev->parent->devnum != -1)
parent_devnum = usbdev->parent->devnum;
/*
* So the root hub's parent is 0 and any device that is
* plugged into the root hub has a parent of 0.
*/
switch (usbdev->speed) {
case USB_SPEED_LOW:
speed = "1.5"; break;
case USB_SPEED_UNKNOWN: /* usb 1.1 root hub code */
case USB_SPEED_FULL:
speed = "12"; break;
case USB_SPEED_HIGH:
speed = "480"; break;
case USB_SPEED_SUPER:
speed = "5000"; break;
case USB_SPEED_SUPER_PLUS:
speed = "10000"; break;
default:
speed = "??";
}
data_end = pages_start + sprintf(pages_start, format_topo,
bus->busnum, level, parent_devnum,
index, count, usbdev->devnum,
speed, usbdev->maxchild);
/*
* level = topology-tier level;
* parent_devnum = parent device number;
* index = parent's connector number;
* count = device count at this level
*/
/* If this is the root hub, display the bandwidth information */
if (level == 0) {
int max;
/* super/high speed reserves 80%, full/low reserves 90% */
if (usbdev->speed == USB_SPEED_HIGH ||
usbdev->speed >= USB_SPEED_SUPER)
max = 800;
else
max = FRAME_TIME_MAX_USECS_ALLOC;
/* report "average" periodic allocation over a microsecond.
* the schedules are actually bursty, HCDs need to deal with
* that and just compute/report this average.
*/
data_end += sprintf(data_end, format_bandwidth,
bus->bandwidth_allocated, max,
(100 * bus->bandwidth_allocated + max / 2)
/ max,
bus->bandwidth_int_reqs,
bus->bandwidth_isoc_reqs);
}
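	/*
	 * Worked example of the rounded-percentage computation above:
	 * with bandwidth_allocated = 300 us and max = 800 us on a
	 * high-speed bus, (100 * 300 + 400) / 800 = 38, so the line
	 * reads "Alloc=300/800 us (38%)".
	 */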
data_end = usb_dump_desc(data_end, pages_start + (2 * PAGE_SIZE) - 256,
usbdev);
if (data_end > (pages_start + (2 * PAGE_SIZE) - 256))
data_end += sprintf(data_end, "(truncated)\n");
length = data_end - pages_start;
/* if we can start copying some data to the user */
if (length > *skip_bytes) {
length -= *skip_bytes;
if (length > *nbytes)
length = *nbytes;
if (copy_to_user(*buffer, pages_start + *skip_bytes, length)) {
free_pages((unsigned long)pages_start, 1);
return -EFAULT;
}
*nbytes -= length;
*file_offset += length;
total_written += length;
*buffer += length;
*skip_bytes = 0;
} else
*skip_bytes -= length;
free_pages((unsigned long)pages_start, 1);
/* Now look at all of this device's children. */
usb_hub_for_each_child(usbdev, chix, childdev) {
usb_lock_device(childdev);
ret = usb_device_dump(buffer, nbytes, skip_bytes,
file_offset, childdev, bus,
level + 1, chix - 1, ++cnt);
usb_unlock_device(childdev);
if (ret == -EFAULT)
return total_written;
total_written += ret;
}
return total_written;
}
static ssize_t usb_device_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
{
struct usb_bus *bus;
ssize_t ret, total_written = 0;
loff_t skip_bytes = *ppos;
int id;
if (*ppos < 0)
return -EINVAL;
if (nbytes <= 0)
return 0;
mutex_lock(&usb_bus_idr_lock);
/* print devices for all busses */
idr_for_each_entry(&usb_bus_idr, bus, id) {
/* recurse through all children of the root hub */
if (!bus_to_hcd(bus)->rh_registered)
continue;
usb_lock_device(bus->root_hub);
ret = usb_device_dump(&buf, &nbytes, &skip_bytes, ppos,
bus->root_hub, bus, 0, 0, 0);
usb_unlock_device(bus->root_hub);
if (ret < 0) {
mutex_unlock(&usb_bus_idr_lock);
return ret;
}
total_written += ret;
}
mutex_unlock(&usb_bus_idr_lock);
return total_written;
}
const struct file_operations usbfs_devices_fops = {
.llseek = no_seek_end_llseek,
.read = usb_device_read,
};
| linux-master | drivers/usb/core/devices.c |
// SPDX-License-Identifier: GPL-2.0
/*
* drivers/usb/core/generic.c - generic driver for USB devices (not interfaces)
*
* (C) Copyright 2005 Greg Kroah-Hartman <[email protected]>
*
* based on drivers/usb/usb.c which had the following copyrights:
* (C) Copyright Linus Torvalds 1999
* (C) Copyright Johannes Erdfelt 1999-2001
* (C) Copyright Andreas Gal 1999
* (C) Copyright Gregory P. Smith 1999
* (C) Copyright Deti Fliegl 1999 (new USB architecture)
* (C) Copyright Randy Dunlap 2000
* (C) Copyright David Brownell 2000-2004
* (C) Copyright Yggdrasil Computing, Inc. 2000
* (usb_device_id matching changes by Adam J. Richter)
* (C) Copyright Greg Kroah-Hartman 2002-2003
*
* Released under the GPLv2 only.
*/
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <uapi/linux/usb/audio.h>
#include "usb.h"
static inline const char *plural(int n)
{
return (n == 1 ? "" : "s");
}
static int is_rndis(struct usb_interface_descriptor *desc)
{
return desc->bInterfaceClass == USB_CLASS_COMM
&& desc->bInterfaceSubClass == 2
&& desc->bInterfaceProtocol == 0xff;
}
static int is_activesync(struct usb_interface_descriptor *desc)
{
return desc->bInterfaceClass == USB_CLASS_MISC
&& desc->bInterfaceSubClass == 1
&& desc->bInterfaceProtocol == 1;
}
static bool is_audio(struct usb_interface_descriptor *desc)
{
return desc->bInterfaceClass == USB_CLASS_AUDIO;
}
static bool is_uac3_config(struct usb_interface_descriptor *desc)
{
return desc->bInterfaceProtocol == UAC_VERSION_3;
}
int usb_choose_configuration(struct usb_device *udev)
{
int i;
int num_configs;
int insufficient_power = 0;
struct usb_host_config *c, *best;
if (usb_device_is_owned(udev))
return 0;
best = NULL;
c = udev->config;
num_configs = udev->descriptor.bNumConfigurations;
for (i = 0; i < num_configs; (i++, c++)) {
struct usb_interface_descriptor *desc = NULL;
/* It's possible that a config has no interfaces! */
if (c->desc.bNumInterfaces > 0)
desc = &c->intf_cache[0]->altsetting->desc;
/*
* HP's USB bus-powered keyboard has only one configuration
* and it claims to be self-powered; other devices may have
* similar errors in their descriptors. If the next test
* were allowed to execute, such configurations would always
* be rejected and the devices would not work as expected.
* In the meantime, we run the risk of selecting a config
* that requires external power at a time when that power
* isn't available. It seems to be the lesser of two evils.
*
* Bugzilla #6448 reports a device that appears to crash
* when it receives a GET_DEVICE_STATUS request! We don't
* have any other way to tell whether a device is self-powered,
* but since we don't use that information anywhere but here,
* the call has been removed.
*
* Maybe the GET_DEVICE_STATUS call and the test below can
* be reinstated when device firmwares become more reliable.
* Don't hold your breath.
*/
#if 0
/* Rule out self-powered configs for a bus-powered device */
if (bus_powered && (c->desc.bmAttributes &
USB_CONFIG_ATT_SELFPOWER))
continue;
#endif
/*
* The next test may not be as effective as it should be.
* Some hubs have errors in their descriptor, claiming
* to be self-powered when they are really bus-powered.
* We will overestimate the amount of current such hubs
* make available for each port.
*
* This is a fairly benign sort of failure. It won't
* cause us to reject configurations that we should have
* accepted.
*/
/* Rule out configs that draw too much bus current */
if (usb_get_max_power(udev, c) > udev->bus_mA) {
insufficient_power++;
continue;
}
/*
* Select first configuration as default for audio so that
* devices that don't comply with UAC3 protocol are supported.
* But, still iterate through other configurations and
* select UAC3 compliant config if present.
*/
if (desc && is_audio(desc)) {
/* Always prefer the first found UAC3 config */
if (is_uac3_config(desc)) {
best = c;
break;
}
/* If there is no UAC3 config, prefer the first config */
else if (i == 0)
best = c;
/* Unconditional continue, because the rest of the code
* in the loop is irrelevant for audio devices, and
* because it can reassign best, which for audio devices
* we don't want.
*/
continue;
}
/* When the first config's first interface is one of Microsoft's
* pet nonstandard Ethernet-over-USB protocols, ignore it unless
* this kernel has enabled the necessary host side driver.
* But: Don't ignore it if it's the only config.
*/
if (i == 0 && num_configs > 1 && desc &&
(is_rndis(desc) || is_activesync(desc))) {
#if !defined(CONFIG_USB_NET_RNDIS_HOST) && !defined(CONFIG_USB_NET_RNDIS_HOST_MODULE)
continue;
#else
best = c;
#endif
}
/* From the remaining configs, choose the first one whose
* first interface is for a non-vendor-specific class.
* Reason: Linux is more likely to have a class driver
* than a vendor-specific driver. */
else if (udev->descriptor.bDeviceClass !=
USB_CLASS_VENDOR_SPEC &&
(desc && desc->bInterfaceClass !=
USB_CLASS_VENDOR_SPEC)) {
best = c;
break;
}
/* If all the remaining configs are vendor-specific,
* choose the first one. */
else if (!best)
best = c;
}
if (insufficient_power > 0)
dev_info(&udev->dev, "rejected %d configuration%s "
"due to insufficient available bus power\n",
insufficient_power, plural(insufficient_power));
if (best) {
i = best->desc.bConfigurationValue;
dev_dbg(&udev->dev,
"configuration #%d chosen from %d choice%s\n",
i, num_configs, plural(num_configs));
} else {
i = -1;
dev_warn(&udev->dev,
"no configuration chosen from %d choice%s\n",
num_configs, plural(num_configs));
}
return i;
}
EXPORT_SYMBOL_GPL(usb_choose_configuration);
static int __check_for_non_generic_match(struct device_driver *drv, void *data)
{
struct usb_device *udev = data;
struct usb_device_driver *udrv;
if (!is_usb_device_driver(drv))
return 0;
udrv = to_usb_device_driver(drv);
if (udrv == &usb_generic_driver)
return 0;
return usb_driver_applicable(udev, udrv);
}
static bool usb_generic_driver_match(struct usb_device *udev)
{
if (udev->use_generic_driver)
return true;
/*
* If any other driver wants the device, leave the device to this other
* driver.
*/
if (bus_for_each_drv(&usb_bus_type, NULL, udev, __check_for_non_generic_match))
return false;
return true;
}
int usb_generic_driver_probe(struct usb_device *udev)
{
int err, c;
/* Choose and set the configuration. This registers the interfaces
* with the driver core and lets interface drivers bind to them.
*/
if (udev->authorized == 0)
dev_err(&udev->dev, "Device is not authorized for usage\n");
else {
c = usb_choose_configuration(udev);
if (c >= 0) {
err = usb_set_configuration(udev, c);
if (err && err != -ENODEV) {
dev_err(&udev->dev, "can't set config #%d, error %d\n",
c, err);
/* This need not be fatal. The user can try to
* set other configurations. */
}
}
}
/* USB device state == configured ... usable */
usb_notify_add_device(udev);
return 0;
}
void usb_generic_driver_disconnect(struct usb_device *udev)
{
usb_notify_remove_device(udev);
/* if this is only an unbind, not a physical disconnect, then
* unconfigure the device */
if (udev->actconfig)
usb_set_configuration(udev, -1);
}
#ifdef CONFIG_PM
int usb_generic_driver_suspend(struct usb_device *udev, pm_message_t msg)
{
int rc;
/* Normal USB devices suspend through their upstream port.
* Root hubs don't have upstream ports to suspend,
* so we have to shut down their downstream HC-to-USB
* interfaces manually by doing a bus (or "global") suspend.
*/
if (!udev->parent)
rc = hcd_bus_suspend(udev, msg);
/*
* Non-root USB2 devices don't need to do anything for FREEZE
* or PRETHAW. USB3 devices don't support global suspend and
	 * need to be selectively suspended.
*/
else if ((msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW)
&& (udev->speed < USB_SPEED_SUPER))
rc = 0;
else
rc = usb_port_suspend(udev, msg);
if (rc == 0)
usbfs_notify_suspend(udev);
return rc;
}
int usb_generic_driver_resume(struct usb_device *udev, pm_message_t msg)
{
int rc;
/* Normal USB devices resume/reset through their upstream port.
* Root hubs don't have upstream ports to resume or reset,
* so we have to start up their downstream HC-to-USB
* interfaces manually by doing a bus (or "global") resume.
*/
if (!udev->parent)
rc = hcd_bus_resume(udev, msg);
else
rc = usb_port_resume(udev, msg);
if (rc == 0)
usbfs_notify_resume(udev);
return rc;
}
#endif /* CONFIG_PM */
struct usb_device_driver usb_generic_driver = {
.name = "usb",
.match = usb_generic_driver_match,
.probe = usb_generic_driver_probe,
.disconnect = usb_generic_driver_disconnect,
#ifdef CONFIG_PM
.suspend = usb_generic_driver_suspend,
.resume = usb_generic_driver_resume,
#endif
.supports_autosuspend = 1,
};
| linux-master | drivers/usb/core/generic.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* (C) Copyright David Brownell 2000-2002
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <asm/io.h>
#include <asm/irq.h>
#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#endif
#include "usb.h"
/* PCI-based HCs are common, but plenty of non-PCI HCs are used too */
/*
* Coordinate handoffs between EHCI and companion controllers
* during EHCI probing and system resume.
*/
static DECLARE_RWSEM(companions_rwsem);
#define CL_UHCI PCI_CLASS_SERIAL_USB_UHCI
#define CL_OHCI PCI_CLASS_SERIAL_USB_OHCI
#define CL_EHCI PCI_CLASS_SERIAL_USB_EHCI
static inline int is_ohci_or_uhci(struct pci_dev *pdev)
{
return pdev->class == CL_OHCI || pdev->class == CL_UHCI;
}
typedef void (*companion_fn)(struct pci_dev *pdev, struct usb_hcd *hcd,
struct pci_dev *companion, struct usb_hcd *companion_hcd);
/* Iterate over PCI devices in the same slot as pdev and call fn for each */
static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd,
companion_fn fn)
{
struct pci_dev *companion;
struct usb_hcd *companion_hcd;
unsigned int slot = PCI_SLOT(pdev->devfn);
/*
* Iterate through other PCI functions in the same slot.
* If the function's drvdata isn't set then it isn't bound to
* a USB host controller driver, so skip it.
*/
companion = NULL;
for_each_pci_dev(companion) {
if (companion->bus != pdev->bus ||
PCI_SLOT(companion->devfn) != slot)
continue;
/*
		 * Companion device should be either UHCI, OHCI or EHCI host
* controller, otherwise skip.
*/
if (companion->class != CL_UHCI && companion->class != CL_OHCI &&
companion->class != CL_EHCI)
continue;
companion_hcd = pci_get_drvdata(companion);
if (!companion_hcd || !companion_hcd->self.root_hub)
continue;
fn(pdev, hcd, companion, companion_hcd);
}
}
/*
* We're about to add an EHCI controller, which will unceremoniously grab
* all the port connections away from its companions. To prevent annoying
* error messages, lock the companion's root hub and gracefully unconfigure
* it beforehand. Leave it locked until the EHCI controller is all set.
*/
static void ehci_pre_add(struct pci_dev *pdev, struct usb_hcd *hcd,
struct pci_dev *companion, struct usb_hcd *companion_hcd)
{
struct usb_device *udev;
if (is_ohci_or_uhci(companion)) {
udev = companion_hcd->self.root_hub;
usb_lock_device(udev);
usb_set_configuration(udev, 0);
}
}
/*
* Adding the EHCI controller has either succeeded or failed. Set the
* companion pointer accordingly, and in either case, reconfigure and
* unlock the root hub.
*/
static void ehci_post_add(struct pci_dev *pdev, struct usb_hcd *hcd,
struct pci_dev *companion, struct usb_hcd *companion_hcd)
{
struct usb_device *udev;
if (is_ohci_or_uhci(companion)) {
if (dev_get_drvdata(&pdev->dev)) { /* Succeeded */
dev_dbg(&pdev->dev, "HS companion for %s\n",
dev_name(&companion->dev));
companion_hcd->self.hs_companion = &hcd->self;
}
udev = companion_hcd->self.root_hub;
usb_set_configuration(udev, 1);
usb_unlock_device(udev);
}
}
/*
* We just added a non-EHCI controller. Find the EHCI controller to
* which it is a companion, and store a pointer to the bus structure.
*/
static void non_ehci_add(struct pci_dev *pdev, struct usb_hcd *hcd,
struct pci_dev *companion, struct usb_hcd *companion_hcd)
{
if (is_ohci_or_uhci(pdev) && companion->class == CL_EHCI) {
dev_dbg(&pdev->dev, "FS/LS companion for %s\n",
dev_name(&companion->dev));
hcd->self.hs_companion = &companion_hcd->self;
}
}
/* We are removing an EHCI controller. Clear the companions' pointers. */
static void ehci_remove(struct pci_dev *pdev, struct usb_hcd *hcd,
struct pci_dev *companion, struct usb_hcd *companion_hcd)
{
if (is_ohci_or_uhci(companion))
companion_hcd->self.hs_companion = NULL;
}
#ifdef CONFIG_PM
/* An EHCI controller must wait for its companions before resuming. */
static void ehci_wait_for_companions(struct pci_dev *pdev, struct usb_hcd *hcd,
struct pci_dev *companion, struct usb_hcd *companion_hcd)
{
if (is_ohci_or_uhci(companion))
device_pm_wait_for_dev(&pdev->dev, &companion->dev);
}
#endif /* CONFIG_PM */
/*-------------------------------------------------------------------------*/
/* configure so an HC device and id are always provided */
/* always called with process context; sleeping is OK */
/**
* usb_hcd_pci_probe - initialize PCI-based HCDs
* @dev: USB Host Controller being probed
* @driver: USB HC driver handle
*
* Context: task context, might sleep
*
* Allocates basic PCI resources for this USB host controller, and
* then invokes the start() method for the HCD associated with it
* through the hotplug entry's driver_data.
*
* Store this function in the HCD's struct pci_driver as probe().
*
* Return: 0 if successful.
*/
int usb_hcd_pci_probe(struct pci_dev *dev, const struct hc_driver *driver)
{
struct usb_hcd *hcd;
int retval;
int hcd_irq = 0;
if (usb_disabled())
return -ENODEV;
if (!driver)
return -EINVAL;
if (pci_enable_device(dev) < 0)
return -ENODEV;
/*
	 * The xHCI driver has its own IRQ management;
	 * make sure IRQ setup is not touched for xHCI in generic HCD code.
*/
if ((driver->flags & HCD_MASK) < HCD_USB3) {
retval = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY | PCI_IRQ_MSI);
if (retval < 0) {
dev_err(&dev->dev,
"Found HC with no IRQ. Check BIOS/PCI %s setup!\n",
pci_name(dev));
retval = -ENODEV;
goto disable_pci;
}
hcd_irq = pci_irq_vector(dev, 0);
}
hcd = usb_create_hcd(driver, &dev->dev, pci_name(dev));
if (!hcd) {
retval = -ENOMEM;
goto free_irq_vectors;
}
hcd->amd_resume_bug = (usb_hcd_amd_remote_wakeup_quirk(dev) &&
driver->flags & (HCD_USB11 | HCD_USB3)) ? 1 : 0;
if (driver->flags & HCD_MEMORY) {
/* EHCI, OHCI */
hcd->rsrc_start = pci_resource_start(dev, 0);
hcd->rsrc_len = pci_resource_len(dev, 0);
if (!devm_request_mem_region(&dev->dev, hcd->rsrc_start,
hcd->rsrc_len, driver->description)) {
dev_dbg(&dev->dev, "controller already in use\n");
retval = -EBUSY;
goto put_hcd;
}
hcd->regs = devm_ioremap(&dev->dev, hcd->rsrc_start,
hcd->rsrc_len);
if (hcd->regs == NULL) {
dev_dbg(&dev->dev, "error mapping memory\n");
retval = -EFAULT;
goto put_hcd;
}
} else {
/* UHCI */
int region;
for (region = 0; region < PCI_STD_NUM_BARS; region++) {
if (!(pci_resource_flags(dev, region) &
IORESOURCE_IO))
continue;
hcd->rsrc_start = pci_resource_start(dev, region);
hcd->rsrc_len = pci_resource_len(dev, region);
if (devm_request_region(&dev->dev, hcd->rsrc_start,
hcd->rsrc_len, driver->description))
break;
}
if (region == PCI_STD_NUM_BARS) {
dev_dbg(&dev->dev, "no i/o regions available\n");
retval = -EBUSY;
goto put_hcd;
}
}
pci_set_master(dev);
/* Note: dev_set_drvdata must be called while holding the rwsem */
if (dev->class == CL_EHCI) {
down_write(&companions_rwsem);
dev_set_drvdata(&dev->dev, hcd);
for_each_companion(dev, hcd, ehci_pre_add);
retval = usb_add_hcd(hcd, hcd_irq, IRQF_SHARED);
if (retval != 0)
dev_set_drvdata(&dev->dev, NULL);
for_each_companion(dev, hcd, ehci_post_add);
up_write(&companions_rwsem);
} else {
down_read(&companions_rwsem);
dev_set_drvdata(&dev->dev, hcd);
retval = usb_add_hcd(hcd, hcd_irq, IRQF_SHARED);
if (retval != 0)
dev_set_drvdata(&dev->dev, NULL);
else
for_each_companion(dev, hcd, non_ehci_add);
up_read(&companions_rwsem);
}
if (retval != 0)
goto put_hcd;
device_wakeup_enable(hcd->self.controller);
if (pci_dev_run_wake(dev))
pm_runtime_put_noidle(&dev->dev);
return retval;
put_hcd:
usb_put_hcd(hcd);
free_irq_vectors:
if ((driver->flags & HCD_MASK) < HCD_USB3)
pci_free_irq_vectors(dev);
disable_pci:
pci_disable_device(dev);
dev_err(&dev->dev, "init %s fail, %d\n", pci_name(dev), retval);
return retval;
}
EXPORT_SYMBOL_GPL(usb_hcd_pci_probe);
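/*
 * Illustrative sketch (assumed HCD glue code): wiring this helper into
 * a struct pci_driver.  example_pci_probe(), example_hc_driver and the
 * (omitted) id table are assumptions for illustration only.
 *
 *	static int example_pci_probe(struct pci_dev *dev,
 *				     const struct pci_device_id *id)
 *	{
 *		return usb_hcd_pci_probe(dev, &example_hc_driver);
 *	}
 *
 *	static struct pci_driver example_pci_driver = {
 *		.name = "example-hcd",
 *		.probe = example_pci_probe,
 *		.remove = usb_hcd_pci_remove,
 *		.shutdown = usb_hcd_pci_shutdown,
 *	};
 */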
/* may be called without controller electrically present */
/* may be called with controller, bus, and devices active */
/**
* usb_hcd_pci_remove - shutdown processing for PCI-based HCDs
* @dev: USB Host Controller being removed
*
* Context: task context, might sleep
*
* Reverses the effect of usb_hcd_pci_probe(), first invoking
* the HCD's stop() method. It is always called from a thread
* context, normally "rmmod", "apmd", or something similar.
*
* Store this function in the HCD's struct pci_driver as remove().
*/
void usb_hcd_pci_remove(struct pci_dev *dev)
{
struct usb_hcd *hcd;
int hcd_driver_flags;
hcd = pci_get_drvdata(dev);
if (!hcd)
return;
hcd_driver_flags = hcd->driver->flags;
if (pci_dev_run_wake(dev))
pm_runtime_get_noresume(&dev->dev);
/* Fake an interrupt request in order to give the driver a chance
* to test whether the controller hardware has been removed (e.g.,
* cardbus physical eject).
*/
local_irq_disable();
usb_hcd_irq(0, hcd);
local_irq_enable();
/* Note: dev_set_drvdata must be called while holding the rwsem */
if (dev->class == CL_EHCI) {
down_write(&companions_rwsem);
for_each_companion(dev, hcd, ehci_remove);
usb_remove_hcd(hcd);
dev_set_drvdata(&dev->dev, NULL);
up_write(&companions_rwsem);
} else {
/* Not EHCI; just clear the companion pointer */
down_read(&companions_rwsem);
hcd->self.hs_companion = NULL;
usb_remove_hcd(hcd);
dev_set_drvdata(&dev->dev, NULL);
up_read(&companions_rwsem);
}
usb_put_hcd(hcd);
if ((hcd_driver_flags & HCD_MASK) < HCD_USB3)
pci_free_irq_vectors(dev);
pci_disable_device(dev);
}
EXPORT_SYMBOL_GPL(usb_hcd_pci_remove);
/**
* usb_hcd_pci_shutdown - shutdown host controller
* @dev: USB Host Controller being shutdown
*/
void usb_hcd_pci_shutdown(struct pci_dev *dev)
{
struct usb_hcd *hcd;
hcd = pci_get_drvdata(dev);
if (!hcd)
return;
if (test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags) &&
hcd->driver->shutdown) {
hcd->driver->shutdown(hcd);
if (usb_hcd_is_primary_hcd(hcd) && hcd->irq > 0)
free_irq(hcd->irq, hcd);
pci_disable_device(dev);
}
}
EXPORT_SYMBOL_GPL(usb_hcd_pci_shutdown);
#ifdef CONFIG_PM
#ifdef CONFIG_PPC_PMAC
static void powermac_set_asic(struct pci_dev *pci_dev, int enable)
{
	/* Enable or disable ASIC clocks for USB */
if (machine_is(powermac)) {
struct device_node *of_node;
of_node = pci_device_to_OF_node(pci_dev);
if (of_node)
pmac_call_feature(PMAC_FTR_USB_ENABLE,
of_node, 0, enable);
}
}
#else
static inline void powermac_set_asic(struct pci_dev *pci_dev, int enable)
{}
#endif /* CONFIG_PPC_PMAC */
static int check_root_hub_suspended(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
if (HCD_RH_RUNNING(hcd)) {
dev_warn(dev, "Root hub is not suspended\n");
return -EBUSY;
}
if (hcd->shared_hcd) {
hcd = hcd->shared_hcd;
if (HCD_RH_RUNNING(hcd)) {
dev_warn(dev, "Secondary root hub is not suspended\n");
return -EBUSY;
}
}
return 0;
}
static int suspend_common(struct device *dev, pm_message_t msg)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct usb_hcd *hcd = pci_get_drvdata(pci_dev);
bool do_wakeup;
int retval;
do_wakeup = PMSG_IS_AUTO(msg) ? true : device_may_wakeup(dev);
/* Root hub suspend should have stopped all downstream traffic,
* and all bus master traffic. And done so for both the interface
* and the stub usb_device (which we check here). But maybe it
* didn't; writing sysfs power/state files ignores such rules...
*/
retval = check_root_hub_suspended(dev);
if (retval)
return retval;
if (hcd->driver->pci_suspend && !HCD_DEAD(hcd)) {
/* Optimization: Don't suspend if a root-hub wakeup is
* pending and it would cause the HCD to wake up anyway.
*/
if (do_wakeup && HCD_WAKEUP_PENDING(hcd))
return -EBUSY;
if (do_wakeup && hcd->shared_hcd &&
HCD_WAKEUP_PENDING(hcd->shared_hcd))
return -EBUSY;
retval = hcd->driver->pci_suspend(hcd, do_wakeup);
suspend_report_result(dev, hcd->driver->pci_suspend, retval);
/* Check again in case wakeup raced with pci_suspend */
if ((retval == 0 && do_wakeup && HCD_WAKEUP_PENDING(hcd)) ||
(retval == 0 && do_wakeup && hcd->shared_hcd &&
HCD_WAKEUP_PENDING(hcd->shared_hcd))) {
if (hcd->driver->pci_resume)
hcd->driver->pci_resume(hcd, msg);
retval = -EBUSY;
}
if (retval)
return retval;
}
/* If MSI-X is enabled, the driver will have synchronized all vectors
* in pci_suspend(). If MSI or legacy PCI is enabled, that will be
* synchronized here.
*/
if (!hcd->msix_enabled)
synchronize_irq(pci_irq_vector(pci_dev, 0));
/* Downstream ports from this root hub should already be quiesced, so
* there will be no DMA activity. Now we can shut down the upstream
* link (except maybe for PME# resume signaling). We'll enter a
* low power state during suspend_noirq, if the hardware allows.
*/
pci_disable_device(pci_dev);
return retval;
}
static int resume_common(struct device *dev, pm_message_t msg)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct usb_hcd *hcd = pci_get_drvdata(pci_dev);
int retval;
if (HCD_RH_RUNNING(hcd) ||
(hcd->shared_hcd &&
HCD_RH_RUNNING(hcd->shared_hcd))) {
dev_dbg(dev, "can't resume, not suspended!\n");
return 0;
}
retval = pci_enable_device(pci_dev);
if (retval < 0) {
dev_err(dev, "can't re-enable after resume, %d!\n", retval);
return retval;
}
pci_set_master(pci_dev);
if (hcd->driver->pci_resume && !HCD_DEAD(hcd)) {
/*
* Only EHCI controllers have to wait for their companions.
* No locking is needed because PCI controller drivers do not
* get unbound during system resume.
*/
if (pci_dev->class == CL_EHCI && msg.event != PM_EVENT_AUTO_RESUME)
for_each_companion(pci_dev, hcd,
ehci_wait_for_companions);
retval = hcd->driver->pci_resume(hcd, msg);
if (retval) {
dev_err(dev, "PCI post-resume error %d!\n", retval);
usb_hc_died(hcd);
}
}
return retval;
}
#ifdef CONFIG_PM_SLEEP
static int hcd_pci_suspend(struct device *dev)
{
return suspend_common(dev, PMSG_SUSPEND);
}
static int hcd_pci_suspend_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct usb_hcd *hcd = pci_get_drvdata(pci_dev);
int retval;
retval = check_root_hub_suspended(dev);
if (retval)
return retval;
pci_save_state(pci_dev);
/* If the root hub is dead rather than suspended, disallow remote
* wakeup. usb_hc_died() should ensure that both hosts are marked as
* dying, so we only need to check the primary roothub.
*/
if (HCD_DEAD(hcd))
device_set_wakeup_enable(dev, 0);
dev_dbg(dev, "wakeup: %d\n", device_may_wakeup(dev));
/* Possibly enable remote wakeup,
* choose the appropriate low-power state, and go to that state.
*/
retval = pci_prepare_to_sleep(pci_dev);
if (retval == -EIO) { /* Low-power not supported */
dev_dbg(dev, "--> PCI D0 legacy\n");
retval = 0;
} else if (retval == 0) {
dev_dbg(dev, "--> PCI %s\n",
pci_power_name(pci_dev->current_state));
} else {
suspend_report_result(dev, pci_prepare_to_sleep, retval);
return retval;
}
powermac_set_asic(pci_dev, 0);
return retval;
}
static int hcd_pci_poweroff_late(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct usb_hcd *hcd = pci_get_drvdata(pci_dev);
if (hcd->driver->pci_poweroff_late && !HCD_DEAD(hcd))
return hcd->driver->pci_poweroff_late(hcd, device_may_wakeup(dev));
return 0;
}
static int hcd_pci_resume_noirq(struct device *dev)
{
powermac_set_asic(to_pci_dev(dev), 1);
return 0;
}
static int hcd_pci_resume(struct device *dev)
{
return resume_common(dev, PMSG_RESUME);
}
static int hcd_pci_restore(struct device *dev)
{
return resume_common(dev, PMSG_RESTORE);
}
#else
#define hcd_pci_suspend NULL
#define hcd_pci_suspend_noirq NULL
#define hcd_pci_poweroff_late NULL
#define hcd_pci_resume_noirq NULL
#define hcd_pci_resume NULL
#define hcd_pci_restore NULL
#endif /* CONFIG_PM_SLEEP */
static int hcd_pci_runtime_suspend(struct device *dev)
{
int retval;
retval = suspend_common(dev, PMSG_AUTO_SUSPEND);
if (retval == 0)
powermac_set_asic(to_pci_dev(dev), 0);
dev_dbg(dev, "hcd_pci_runtime_suspend: %d\n", retval);
return retval;
}
static int hcd_pci_runtime_resume(struct device *dev)
{
int retval;
powermac_set_asic(to_pci_dev(dev), 1);
retval = resume_common(dev, PMSG_AUTO_RESUME);
dev_dbg(dev, "hcd_pci_runtime_resume: %d\n", retval);
return retval;
}
const struct dev_pm_ops usb_hcd_pci_pm_ops = {
.suspend = hcd_pci_suspend,
.suspend_noirq = hcd_pci_suspend_noirq,
.resume_noirq = hcd_pci_resume_noirq,
.resume = hcd_pci_resume,
.freeze = hcd_pci_suspend,
.freeze_noirq = check_root_hub_suspended,
.thaw_noirq = NULL,
.thaw = hcd_pci_resume,
.poweroff = hcd_pci_suspend,
.poweroff_late = hcd_pci_poweroff_late,
.poweroff_noirq = hcd_pci_suspend_noirq,
.restore_noirq = hcd_pci_resume_noirq,
.restore = hcd_pci_restore,
.runtime_suspend = hcd_pci_runtime_suspend,
.runtime_resume = hcd_pci_runtime_resume,
};
EXPORT_SYMBOL_GPL(usb_hcd_pci_pm_ops);
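/*
 * Illustrative sketch: a PCI HCD typically hooks these operations up
 * through its pci_driver, e.g. in the example driver sketched earlier:
 *
 *	.driver = {
 *		.pm = &usb_hcd_pci_pm_ops,
 *	},
 */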
#endif /* CONFIG_PM */
| linux-master | drivers/usb/core/hcd-pci.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Released under the GPLv2 only.
*/
#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/hcd.h>
#include <linux/usb/quirks.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <asm/byteorder.h>
#include "usb.h"
#define USB_MAXALTSETTING 128 /* Hard limit */
#define USB_MAXCONFIG 8 /* Arbitrary limit */
static inline const char *plural(int n)
{
return (n == 1 ? "" : "s");
}
static int find_next_descriptor(unsigned char *buffer, int size,
int dt1, int dt2, int *num_skipped)
{
struct usb_descriptor_header *h;
int n = 0;
unsigned char *buffer0 = buffer;
/* Find the next descriptor of type dt1 or dt2 */
while (size > 0) {
h = (struct usb_descriptor_header *) buffer;
if (h->bDescriptorType == dt1 || h->bDescriptorType == dt2)
break;
buffer += h->bLength;
size -= h->bLength;
++n;
}
/* Store the number of descriptors skipped and return the
* number of bytes skipped */
if (num_skipped)
*num_skipped = n;
return buffer - buffer0;
}
static void usb_parse_ssp_isoc_endpoint_companion(struct device *ddev,
int cfgno, int inum, int asnum, struct usb_host_endpoint *ep,
unsigned char *buffer, int size)
{
struct usb_ssp_isoc_ep_comp_descriptor *desc;
/*
* The SuperSpeedPlus Isoc endpoint companion descriptor immediately
* follows the SuperSpeed Endpoint Companion descriptor
*/
desc = (struct usb_ssp_isoc_ep_comp_descriptor *) buffer;
if (desc->bDescriptorType != USB_DT_SSP_ISOC_ENDPOINT_COMP ||
size < USB_DT_SSP_ISOC_EP_COMP_SIZE) {
dev_notice(ddev, "Invalid SuperSpeedPlus isoc endpoint companion"
"for config %d interface %d altsetting %d ep %d.\n",
cfgno, inum, asnum, ep->desc.bEndpointAddress);
return;
}
memcpy(&ep->ssp_isoc_ep_comp, desc, USB_DT_SSP_ISOC_EP_COMP_SIZE);
}
static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
int inum, int asnum, struct usb_host_endpoint *ep,
unsigned char *buffer, int size)
{
struct usb_ss_ep_comp_descriptor *desc;
int max_tx;
/* The SuperSpeed endpoint companion descriptor is supposed to
* be the first thing immediately following the endpoint descriptor.
*/
desc = (struct usb_ss_ep_comp_descriptor *) buffer;
if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP ||
size < USB_DT_SS_EP_COMP_SIZE) {
dev_notice(ddev, "No SuperSpeed endpoint companion for config %d "
" interface %d altsetting %d ep %d: "
"using minimum values\n",
cfgno, inum, asnum, ep->desc.bEndpointAddress);
/* Fill in some default values.
* Leave bmAttributes as zero, which will mean no streams for
* bulk, and isoc won't support multiple bursts of packets.
* With bursts of only one packet, and a Mult of 1, the max
* amount of data moved per endpoint service interval is one
* packet.
*/
ep->ss_ep_comp.bLength = USB_DT_SS_EP_COMP_SIZE;
ep->ss_ep_comp.bDescriptorType = USB_DT_SS_ENDPOINT_COMP;
if (usb_endpoint_xfer_isoc(&ep->desc) ||
usb_endpoint_xfer_int(&ep->desc))
ep->ss_ep_comp.wBytesPerInterval =
ep->desc.wMaxPacketSize;
return;
}
buffer += desc->bLength;
size -= desc->bLength;
memcpy(&ep->ss_ep_comp, desc, USB_DT_SS_EP_COMP_SIZE);
/* Check the various values */
if (usb_endpoint_xfer_control(&ep->desc) && desc->bMaxBurst != 0) {
dev_notice(ddev, "Control endpoint with bMaxBurst = %d in "
"config %d interface %d altsetting %d ep %d: "
"setting to zero\n", desc->bMaxBurst,
cfgno, inum, asnum, ep->desc.bEndpointAddress);
ep->ss_ep_comp.bMaxBurst = 0;
} else if (desc->bMaxBurst > 15) {
dev_notice(ddev, "Endpoint with bMaxBurst = %d in "
"config %d interface %d altsetting %d ep %d: "
"setting to 15\n", desc->bMaxBurst,
cfgno, inum, asnum, ep->desc.bEndpointAddress);
ep->ss_ep_comp.bMaxBurst = 15;
}
if ((usb_endpoint_xfer_control(&ep->desc) ||
usb_endpoint_xfer_int(&ep->desc)) &&
desc->bmAttributes != 0) {
dev_notice(ddev, "%s endpoint with bmAttributes = %d in "
"config %d interface %d altsetting %d ep %d: "
"setting to zero\n",
usb_endpoint_xfer_control(&ep->desc) ? "Control" : "Bulk",
desc->bmAttributes,
cfgno, inum, asnum, ep->desc.bEndpointAddress);
ep->ss_ep_comp.bmAttributes = 0;
} else if (usb_endpoint_xfer_bulk(&ep->desc) &&
desc->bmAttributes > 16) {
dev_notice(ddev, "Bulk endpoint with more than 65536 streams in "
"config %d interface %d altsetting %d ep %d: "
"setting to max\n",
cfgno, inum, asnum, ep->desc.bEndpointAddress);
ep->ss_ep_comp.bmAttributes = 16;
} else if (usb_endpoint_xfer_isoc(&ep->desc) &&
!USB_SS_SSP_ISOC_COMP(desc->bmAttributes) &&
USB_SS_MULT(desc->bmAttributes) > 3) {
dev_notice(ddev, "Isoc endpoint has Mult of %d in "
"config %d interface %d altsetting %d ep %d: "
"setting to 3\n",
USB_SS_MULT(desc->bmAttributes),
cfgno, inum, asnum, ep->desc.bEndpointAddress);
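		/* A bmAttributes value of 2 encodes USB_SS_MULT() == 3 */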
ep->ss_ep_comp.bmAttributes = 2;
}
if (usb_endpoint_xfer_isoc(&ep->desc))
max_tx = (desc->bMaxBurst + 1) *
(USB_SS_MULT(desc->bmAttributes)) *
usb_endpoint_maxp(&ep->desc);
else if (usb_endpoint_xfer_int(&ep->desc))
max_tx = usb_endpoint_maxp(&ep->desc) *
(desc->bMaxBurst + 1);
else
max_tx = 999999;
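	/*
	 * For example, an isoc endpoint with bMaxBurst 15, Mult 3 and a
	 * 1024-byte maxpacket may move up to (15 + 1) * 3 * 1024 = 49152
	 * bytes per service interval.
	 */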
if (le16_to_cpu(desc->wBytesPerInterval) > max_tx) {
dev_notice(ddev, "%s endpoint with wBytesPerInterval of %d in "
"config %d interface %d altsetting %d ep %d: "
"setting to %d\n",
usb_endpoint_xfer_isoc(&ep->desc) ? "Isoc" : "Int",
le16_to_cpu(desc->wBytesPerInterval),
cfgno, inum, asnum, ep->desc.bEndpointAddress,
max_tx);
ep->ss_ep_comp.wBytesPerInterval = cpu_to_le16(max_tx);
}
/* Parse a possible SuperSpeedPlus isoc ep companion descriptor */
if (usb_endpoint_xfer_isoc(&ep->desc) &&
USB_SS_SSP_ISOC_COMP(desc->bmAttributes))
usb_parse_ssp_isoc_endpoint_companion(ddev, cfgno, inum, asnum,
ep, buffer, size);
}
static const unsigned short low_speed_maxpacket_maxes[4] = {
[USB_ENDPOINT_XFER_CONTROL] = 8,
[USB_ENDPOINT_XFER_ISOC] = 0,
[USB_ENDPOINT_XFER_BULK] = 0,
[USB_ENDPOINT_XFER_INT] = 8,
};
static const unsigned short full_speed_maxpacket_maxes[4] = {
[USB_ENDPOINT_XFER_CONTROL] = 64,
[USB_ENDPOINT_XFER_ISOC] = 1023,
[USB_ENDPOINT_XFER_BULK] = 64,
[USB_ENDPOINT_XFER_INT] = 64,
};
static const unsigned short high_speed_maxpacket_maxes[4] = {
[USB_ENDPOINT_XFER_CONTROL] = 64,
[USB_ENDPOINT_XFER_ISOC] = 1024,
/* Bulk should be 512, but some devices use 1024: we will warn below */
[USB_ENDPOINT_XFER_BULK] = 1024,
[USB_ENDPOINT_XFER_INT] = 1024,
};
static const unsigned short super_speed_maxpacket_maxes[4] = {
[USB_ENDPOINT_XFER_CONTROL] = 512,
[USB_ENDPOINT_XFER_ISOC] = 1024,
[USB_ENDPOINT_XFER_BULK] = 1024,
[USB_ENDPOINT_XFER_INT] = 1024,
};
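/*
 * Two endpoint descriptors conflict if they share an address, or if
 * either one is a control endpoint and they share an endpoint number
 * (control endpoints are bidirectional, so the direction bit cannot
 * disambiguate them).
 */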
static bool endpoint_is_duplicate(struct usb_endpoint_descriptor *e1,
struct usb_endpoint_descriptor *e2)
{
if (e1->bEndpointAddress == e2->bEndpointAddress)
return true;
if (usb_endpoint_xfer_control(e1) || usb_endpoint_xfer_control(e2)) {
if (usb_endpoint_num(e1) == usb_endpoint_num(e2))
return true;
}
return false;
}
/*
* Check for duplicate endpoint addresses in other interfaces and in the
* altsetting currently being parsed.
*/
static bool config_endpoint_is_duplicate(struct usb_host_config *config,
int inum, int asnum, struct usb_endpoint_descriptor *d)
{
struct usb_endpoint_descriptor *epd;
struct usb_interface_cache *intfc;
struct usb_host_interface *alt;
int i, j, k;
for (i = 0; i < config->desc.bNumInterfaces; ++i) {
intfc = config->intf_cache[i];
for (j = 0; j < intfc->num_altsetting; ++j) {
alt = &intfc->altsetting[j];
if (alt->desc.bInterfaceNumber == inum &&
alt->desc.bAlternateSetting != asnum)
continue;
for (k = 0; k < alt->desc.bNumEndpoints; ++k) {
epd = &alt->endpoint[k].desc;
if (endpoint_is_duplicate(epd, d))
return true;
}
}
}
return false;
}
static int usb_parse_endpoint(struct device *ddev, int cfgno,
struct usb_host_config *config, int inum, int asnum,
struct usb_host_interface *ifp, int num_ep,
unsigned char *buffer, int size)
{
struct usb_device *udev = to_usb_device(ddev);
unsigned char *buffer0 = buffer;
struct usb_endpoint_descriptor *d;
struct usb_host_endpoint *endpoint;
int n, i, j, retval;
unsigned int maxp;
const unsigned short *maxpacket_maxes;
d = (struct usb_endpoint_descriptor *) buffer;
buffer += d->bLength;
size -= d->bLength;
if (d->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE)
n = USB_DT_ENDPOINT_AUDIO_SIZE;
else if (d->bLength >= USB_DT_ENDPOINT_SIZE)
n = USB_DT_ENDPOINT_SIZE;
else {
dev_notice(ddev, "config %d interface %d altsetting %d has an "
"invalid endpoint descriptor of length %d, skipping\n",
cfgno, inum, asnum, d->bLength);
goto skip_to_next_endpoint_or_interface_descriptor;
}
i = d->bEndpointAddress & ~USB_ENDPOINT_DIR_MASK;
if (i >= 16 || i == 0) {
dev_notice(ddev, "config %d interface %d altsetting %d has an "
"invalid endpoint with address 0x%X, skipping\n",
cfgno, inum, asnum, d->bEndpointAddress);
goto skip_to_next_endpoint_or_interface_descriptor;
}
/* Only store as many endpoints as we have room for */
if (ifp->desc.bNumEndpoints >= num_ep)
goto skip_to_next_endpoint_or_interface_descriptor;
/* Check for duplicate endpoint addresses */
if (config_endpoint_is_duplicate(config, inum, asnum, d)) {
dev_notice(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
cfgno, inum, asnum, d->bEndpointAddress);
goto skip_to_next_endpoint_or_interface_descriptor;
}
/* Ignore some endpoints */
if (udev->quirks & USB_QUIRK_ENDPOINT_IGNORE) {
if (usb_endpoint_is_ignored(udev, ifp, d)) {
dev_notice(ddev, "config %d interface %d altsetting %d has an ignored endpoint with address 0x%X, skipping\n",
cfgno, inum, asnum,
d->bEndpointAddress);
goto skip_to_next_endpoint_or_interface_descriptor;
}
}
endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
++ifp->desc.bNumEndpoints;
memcpy(&endpoint->desc, d, n);
INIT_LIST_HEAD(&endpoint->urb_list);
/*
* Fix up bInterval values outside the legal range.
* Use 10 or 8 ms if no proper value can be guessed.
*/
i = 0; /* i = min, j = max, n = default */
j = 255;
if (usb_endpoint_xfer_int(d)) {
i = 1;
switch (udev->speed) {
case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
case USB_SPEED_HIGH:
/*
* Many device manufacturers are using full-speed
* bInterval values in high-speed interrupt endpoint
* descriptors. Try to fix those and fall back to an
* 8-ms default value otherwise.
*/
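			/*
			 * For example, a millisecond-style bInterval of 10
			 * becomes fls(10 * 8) = 7, i.e. 2^(7-1) = 64 uframes
			 * = 8 ms.
			 */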
n = fls(d->bInterval*8);
if (n == 0)
n = 7; /* 8 ms = 2^(7-1) uframes */
j = 16;
/*
* Adjust bInterval for quirked devices.
*/
/*
* This quirk fixes bIntervals reported in ms.
*/
if (udev->quirks & USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL) {
n = clamp(fls(d->bInterval) + 3, i, j);
i = j = n;
}
/*
* This quirk fixes bIntervals reported in
* linear microframes.
*/
if (udev->quirks & USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL) {
n = clamp(fls(d->bInterval), i, j);
i = j = n;
}
break;
default: /* USB_SPEED_FULL or _LOW */
/*
* For low-speed, 10 ms is the official minimum.
* But some "overclocked" devices might want faster
* polling so we'll allow it.
*/
n = 10;
break;
}
} else if (usb_endpoint_xfer_isoc(d)) {
i = 1;
j = 16;
switch (udev->speed) {
case USB_SPEED_HIGH:
n = 7; /* 8 ms = 2^(7-1) uframes */
break;
default: /* USB_SPEED_FULL */
n = 4; /* 8 ms = 2^(4-1) frames */
break;
}
}
if (d->bInterval < i || d->bInterval > j) {
dev_notice(ddev, "config %d interface %d altsetting %d "
"endpoint 0x%X has an invalid bInterval %d, "
"changing to %d\n",
cfgno, inum, asnum,
d->bEndpointAddress, d->bInterval, n);
endpoint->desc.bInterval = n;
}
/* Some buggy low-speed devices have Bulk endpoints, which is
* explicitly forbidden by the USB spec. In an attempt to make
* them usable, we will try treating them as Interrupt endpoints.
*/
if (udev->speed == USB_SPEED_LOW && usb_endpoint_xfer_bulk(d)) {
dev_notice(ddev, "config %d interface %d altsetting %d "
"endpoint 0x%X is Bulk; changing to Interrupt\n",
cfgno, inum, asnum, d->bEndpointAddress);
endpoint->desc.bmAttributes = USB_ENDPOINT_XFER_INT;
endpoint->desc.bInterval = 1;
if (usb_endpoint_maxp(&endpoint->desc) > 8)
endpoint->desc.wMaxPacketSize = cpu_to_le16(8);
}
/*
* Validate the wMaxPacketSize field.
* Some devices have isochronous endpoints in altsetting 0;
* the USB-2 spec requires such endpoints to have wMaxPacketSize = 0
* (see the end of section 5.6.3), so don't warn about them.
*/
maxp = le16_to_cpu(endpoint->desc.wMaxPacketSize);
if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) {
dev_notice(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n",
cfgno, inum, asnum, d->bEndpointAddress);
}
/* Find the highest legal maxpacket size for this endpoint */
i = 0; /* additional transactions per microframe */
switch (udev->speed) {
case USB_SPEED_LOW:
maxpacket_maxes = low_speed_maxpacket_maxes;
break;
case USB_SPEED_FULL:
maxpacket_maxes = full_speed_maxpacket_maxes;
break;
case USB_SPEED_HIGH:
/* Multiple-transactions bits are allowed only for HS periodic endpoints */
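		/*
		 * For example, wMaxPacketSize 0x1400 encodes a 1024-byte
		 * payload plus 2 additional (3 total) transactions per
		 * microframe.
		 */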
if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) {
i = maxp & USB_EP_MAXP_MULT_MASK;
maxp &= ~i;
}
fallthrough;
default:
maxpacket_maxes = high_speed_maxpacket_maxes;
break;
case USB_SPEED_SUPER:
case USB_SPEED_SUPER_PLUS:
maxpacket_maxes = super_speed_maxpacket_maxes;
break;
}
j = maxpacket_maxes[usb_endpoint_type(&endpoint->desc)];
if (maxp > j) {
dev_notice(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n",
cfgno, inum, asnum, d->bEndpointAddress, maxp, j);
maxp = j;
endpoint->desc.wMaxPacketSize = cpu_to_le16(i | maxp);
}
/*
* Some buggy high speed devices have bulk endpoints using
* maxpacket sizes other than 512. High speed HCDs may not
* be able to handle that particular bug, so let's warn...
*/
if (udev->speed == USB_SPEED_HIGH && usb_endpoint_xfer_bulk(d)) {
if (maxp != 512)
dev_notice(ddev, "config %d interface %d altsetting %d "
"bulk endpoint 0x%X has invalid maxpacket %d\n",
cfgno, inum, asnum, d->bEndpointAddress,
maxp);
}
/* Parse a possible SuperSpeed endpoint companion descriptor */
if (udev->speed >= USB_SPEED_SUPER)
usb_parse_ss_endpoint_companion(ddev, cfgno,
inum, asnum, endpoint, buffer, size);
/* Skip over any Class Specific or Vendor Specific descriptors;
* find the next endpoint or interface descriptor */
endpoint->extra = buffer;
i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT,
USB_DT_INTERFACE, &n);
endpoint->extralen = i;
retval = buffer - buffer0 + i;
if (n > 0)
dev_dbg(ddev, "skipped %d descriptor%s after %s\n",
n, plural(n), "endpoint");
return retval;
skip_to_next_endpoint_or_interface_descriptor:
i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT,
USB_DT_INTERFACE, NULL);
return buffer - buffer0 + i;
}
void usb_release_interface_cache(struct kref *ref)
{
struct usb_interface_cache *intfc = ref_to_usb_interface_cache(ref);
int j;
for (j = 0; j < intfc->num_altsetting; j++) {
struct usb_host_interface *alt = &intfc->altsetting[j];
kfree(alt->endpoint);
kfree(alt->string);
}
kfree(intfc);
}
static int usb_parse_interface(struct device *ddev, int cfgno,
struct usb_host_config *config, unsigned char *buffer, int size,
u8 inums[], u8 nalts[])
{
unsigned char *buffer0 = buffer;
struct usb_interface_descriptor *d;
int inum, asnum;
struct usb_interface_cache *intfc;
struct usb_host_interface *alt;
int i, n;
int len, retval;
int num_ep, num_ep_orig;
d = (struct usb_interface_descriptor *) buffer;
buffer += d->bLength;
size -= d->bLength;
if (d->bLength < USB_DT_INTERFACE_SIZE)
goto skip_to_next_interface_descriptor;
/* Which interface entry is this? */
intfc = NULL;
inum = d->bInterfaceNumber;
for (i = 0; i < config->desc.bNumInterfaces; ++i) {
if (inums[i] == inum) {
intfc = config->intf_cache[i];
break;
}
}
if (!intfc || intfc->num_altsetting >= nalts[i])
goto skip_to_next_interface_descriptor;
/* Check for duplicate altsetting entries */
asnum = d->bAlternateSetting;
for ((i = 0, alt = &intfc->altsetting[0]);
i < intfc->num_altsetting;
(++i, ++alt)) {
if (alt->desc.bAlternateSetting == asnum) {
dev_notice(ddev, "Duplicate descriptor for config %d "
"interface %d altsetting %d, skipping\n",
cfgno, inum, asnum);
goto skip_to_next_interface_descriptor;
}
}
++intfc->num_altsetting;
memcpy(&alt->desc, d, USB_DT_INTERFACE_SIZE);
/* Skip over any Class Specific or Vendor Specific descriptors;
* find the first endpoint or interface descriptor */
alt->extra = buffer;
i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT,
USB_DT_INTERFACE, &n);
alt->extralen = i;
if (n > 0)
dev_dbg(ddev, "skipped %d descriptor%s after %s\n",
n, plural(n), "interface");
buffer += i;
size -= i;
/* Allocate space for the right(?) number of endpoints */
num_ep = num_ep_orig = alt->desc.bNumEndpoints;
alt->desc.bNumEndpoints = 0; /* Use as a counter */
if (num_ep > USB_MAXENDPOINTS) {
dev_notice(ddev, "too many endpoints for config %d interface %d "
"altsetting %d: %d, using maximum allowed: %d\n",
cfgno, inum, asnum, num_ep, USB_MAXENDPOINTS);
num_ep = USB_MAXENDPOINTS;
}
if (num_ep > 0) {
/* Can't allocate 0 bytes */
len = sizeof(struct usb_host_endpoint) * num_ep;
alt->endpoint = kzalloc(len, GFP_KERNEL);
if (!alt->endpoint)
return -ENOMEM;
}
/* Parse all the endpoint descriptors */
n = 0;
while (size > 0) {
if (((struct usb_descriptor_header *) buffer)->bDescriptorType
== USB_DT_INTERFACE)
break;
retval = usb_parse_endpoint(ddev, cfgno, config, inum, asnum,
alt, num_ep, buffer, size);
if (retval < 0)
return retval;
++n;
buffer += retval;
size -= retval;
}
if (n != num_ep_orig)
dev_notice(ddev, "config %d interface %d altsetting %d has %d "
"endpoint descriptor%s, different from the interface "
"descriptor's value: %d\n",
cfgno, inum, asnum, n, plural(n), num_ep_orig);
return buffer - buffer0;
skip_to_next_interface_descriptor:
i = find_next_descriptor(buffer, size, USB_DT_INTERFACE,
USB_DT_INTERFACE, NULL);
return buffer - buffer0 + i;
}
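/*
 * Parse one raw configuration descriptor set: validate the config
 * descriptor itself, pre-scan the buffer for interface and interface
 * association descriptors, allocate the interface caches, then parse
 * every interface/altsetting in turn.
 */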
static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
struct usb_host_config *config, unsigned char *buffer, int size)
{
struct device *ddev = &dev->dev;
unsigned char *buffer0 = buffer;
int cfgno;
int nintf, nintf_orig;
int i, j, n;
struct usb_interface_cache *intfc;
unsigned char *buffer2;
int size2;
struct usb_descriptor_header *header;
int retval;
u8 inums[USB_MAXINTERFACES], nalts[USB_MAXINTERFACES];
unsigned iad_num = 0;
memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE);
nintf = nintf_orig = config->desc.bNumInterfaces;
config->desc.bNumInterfaces = 0; // Adjusted later
if (config->desc.bDescriptorType != USB_DT_CONFIG ||
config->desc.bLength < USB_DT_CONFIG_SIZE ||
config->desc.bLength > size) {
dev_notice(ddev, "invalid descriptor for config index %d: "
"type = 0x%X, length = %d\n", cfgidx,
config->desc.bDescriptorType, config->desc.bLength);
return -EINVAL;
}
cfgno = config->desc.bConfigurationValue;
buffer += config->desc.bLength;
size -= config->desc.bLength;
if (nintf > USB_MAXINTERFACES) {
dev_notice(ddev, "config %d has too many interfaces: %d, "
"using maximum allowed: %d\n",
cfgno, nintf, USB_MAXINTERFACES);
nintf = USB_MAXINTERFACES;
}
/* Go through the descriptors, checking their length and counting the
* number of altsettings for each interface */
n = 0;
for ((buffer2 = buffer, size2 = size);
size2 > 0;
(buffer2 += header->bLength, size2 -= header->bLength)) {
if (size2 < sizeof(struct usb_descriptor_header)) {
dev_notice(ddev, "config %d descriptor has %d excess "
"byte%s, ignoring\n",
cfgno, size2, plural(size2));
break;
}
header = (struct usb_descriptor_header *) buffer2;
if ((header->bLength > size2) || (header->bLength < 2)) {
dev_notice(ddev, "config %d has an invalid descriptor "
"of length %d, skipping remainder of the config\n",
cfgno, header->bLength);
break;
}
if (header->bDescriptorType == USB_DT_INTERFACE) {
struct usb_interface_descriptor *d;
int inum;
d = (struct usb_interface_descriptor *) header;
if (d->bLength < USB_DT_INTERFACE_SIZE) {
dev_notice(ddev, "config %d has an invalid "
"interface descriptor of length %d, "
"skipping\n", cfgno, d->bLength);
continue;
}
inum = d->bInterfaceNumber;
if ((dev->quirks & USB_QUIRK_HONOR_BNUMINTERFACES) &&
n >= nintf_orig) {
dev_notice(ddev, "config %d has more interface "
"descriptors, than it declares in "
"bNumInterfaces, ignoring interface "
"number: %d\n", cfgno, inum);
continue;
}
if (inum >= nintf_orig)
dev_notice(ddev, "config %d has an invalid "
"interface number: %d but max is %d\n",
cfgno, inum, nintf_orig - 1);
/* Have we already encountered this interface?
* Count its altsettings */
for (i = 0; i < n; ++i) {
if (inums[i] == inum)
break;
}
if (i < n) {
if (nalts[i] < 255)
++nalts[i];
} else if (n < USB_MAXINTERFACES) {
inums[n] = inum;
nalts[n] = 1;
++n;
}
} else if (header->bDescriptorType ==
USB_DT_INTERFACE_ASSOCIATION) {
struct usb_interface_assoc_descriptor *d;
d = (struct usb_interface_assoc_descriptor *)header;
if (d->bLength < USB_DT_INTERFACE_ASSOCIATION_SIZE) {
dev_notice(ddev,
"config %d has an invalid interface association descriptor of length %d, skipping\n",
cfgno, d->bLength);
continue;
}
if (iad_num == USB_MAXIADS) {
dev_notice(ddev, "found more Interface "
"Association Descriptors "
"than allocated for in "
"configuration %d\n", cfgno);
} else {
config->intf_assoc[iad_num] = d;
iad_num++;
}
} else if (header->bDescriptorType == USB_DT_DEVICE ||
header->bDescriptorType == USB_DT_CONFIG)
dev_notice(ddev, "config %d contains an unexpected "
"descriptor of type 0x%X, skipping\n",
cfgno, header->bDescriptorType);
} /* for ((buffer2 = buffer, size2 = size); ...) */
size = buffer2 - buffer;
config->desc.wTotalLength = cpu_to_le16(buffer2 - buffer0);
if (n != nintf)
dev_notice(ddev, "config %d has %d interface%s, different from "
"the descriptor's value: %d\n",
cfgno, n, plural(n), nintf_orig);
else if (n == 0)
dev_notice(ddev, "config %d has no interfaces?\n", cfgno);
config->desc.bNumInterfaces = nintf = n;
/* Check for missing interface numbers */
for (i = 0; i < nintf; ++i) {
for (j = 0; j < nintf; ++j) {
if (inums[j] == i)
break;
}
if (j >= nintf)
dev_notice(ddev, "config %d has no interface number "
"%d\n", cfgno, i);
}
/* Allocate the usb_interface_caches and altsetting arrays */
for (i = 0; i < nintf; ++i) {
j = nalts[i];
if (j > USB_MAXALTSETTING) {
dev_notice(ddev, "too many alternate settings for "
"config %d interface %d: %d, "
"using maximum allowed: %d\n",
cfgno, inums[i], j, USB_MAXALTSETTING);
nalts[i] = j = USB_MAXALTSETTING;
}
intfc = kzalloc(struct_size(intfc, altsetting, j), GFP_KERNEL);
config->intf_cache[i] = intfc;
if (!intfc)
return -ENOMEM;
kref_init(&intfc->ref);
}
/* FIXME: parse the BOS descriptor */
/* Skip over any Class Specific or Vendor Specific descriptors;
* find the first interface descriptor */
config->extra = buffer;
i = find_next_descriptor(buffer, size, USB_DT_INTERFACE,
USB_DT_INTERFACE, &n);
config->extralen = i;
if (n > 0)
dev_dbg(ddev, "skipped %d descriptor%s after %s\n",
n, plural(n), "configuration");
buffer += i;
size -= i;
/* Parse all the interface/altsetting descriptors */
while (size > 0) {
retval = usb_parse_interface(ddev, cfgno, config,
buffer, size, inums, nalts);
if (retval < 0)
return retval;
buffer += retval;
size -= retval;
}
/* Check for missing altsettings */
for (i = 0; i < nintf; ++i) {
intfc = config->intf_cache[i];
for (j = 0; j < intfc->num_altsetting; ++j) {
for (n = 0; n < intfc->num_altsetting; ++n) {
if (intfc->altsetting[n].desc.
bAlternateSetting == j)
break;
}
if (n >= intfc->num_altsetting)
dev_notice(ddev, "config %d interface %d has no "
"altsetting %d\n", cfgno, inums[i], j);
}
}
return 0;
}
/* hub-only!! ... and only exported for reset/reinit path.
* otherwise used internally on disconnect/destroy path
*/
void usb_destroy_configuration(struct usb_device *dev)
{
int c, i;
if (!dev->config)
return;
if (dev->rawdescriptors) {
for (i = 0; i < dev->descriptor.bNumConfigurations; i++)
kfree(dev->rawdescriptors[i]);
kfree(dev->rawdescriptors);
dev->rawdescriptors = NULL;
}
for (c = 0; c < dev->descriptor.bNumConfigurations; c++) {
struct usb_host_config *cf = &dev->config[c];
kfree(cf->string);
for (i = 0; i < cf->desc.bNumInterfaces; i++) {
if (cf->intf_cache[i])
kref_put(&cf->intf_cache[i]->ref,
usb_release_interface_cache);
}
}
kfree(dev->config);
dev->config = NULL;
}
/*
* Get the USB config descriptors, cache and parse'em
*
* hub-only!! ... and only in reset path, or usb_new_device()
* (used by real hubs and virtual root hubs)
*/
int usb_get_configuration(struct usb_device *dev)
{
struct device *ddev = &dev->dev;
int ncfg = dev->descriptor.bNumConfigurations;
unsigned int cfgno, length;
unsigned char *bigbuffer;
struct usb_config_descriptor *desc;
int result;
if (ncfg > USB_MAXCONFIG) {
dev_notice(ddev, "too many configurations: %d, "
"using maximum allowed: %d\n", ncfg, USB_MAXCONFIG);
dev->descriptor.bNumConfigurations = ncfg = USB_MAXCONFIG;
}
if (ncfg < 1) {
dev_err(ddev, "no configurations\n");
return -EINVAL;
}
length = ncfg * sizeof(struct usb_host_config);
dev->config = kzalloc(length, GFP_KERNEL);
if (!dev->config)
return -ENOMEM;
length = ncfg * sizeof(char *);
dev->rawdescriptors = kzalloc(length, GFP_KERNEL);
if (!dev->rawdescriptors)
return -ENOMEM;
desc = kmalloc(USB_DT_CONFIG_SIZE, GFP_KERNEL);
if (!desc)
return -ENOMEM;
for (cfgno = 0; cfgno < ncfg; cfgno++) {
/* We grab just the first descriptor so we know how long
* the whole configuration is */
result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno,
desc, USB_DT_CONFIG_SIZE);
if (result < 0) {
dev_err(ddev, "unable to read config index %d "
"descriptor/%s: %d\n", cfgno, "start", result);
if (result != -EPIPE)
goto err;
dev_notice(ddev, "chopping to %d config(s)\n", cfgno);
dev->descriptor.bNumConfigurations = cfgno;
break;
} else if (result < 4) {
dev_err(ddev, "config index %d descriptor too short "
"(expected %i, got %i)\n", cfgno,
USB_DT_CONFIG_SIZE, result);
result = -EINVAL;
goto err;
}
length = max((int) le16_to_cpu(desc->wTotalLength),
USB_DT_CONFIG_SIZE);
/* Now that we know the length, get the whole thing */
bigbuffer = kmalloc(length, GFP_KERNEL);
if (!bigbuffer) {
result = -ENOMEM;
goto err;
}
if (dev->quirks & USB_QUIRK_DELAY_INIT)
msleep(200);
result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno,
bigbuffer, length);
if (result < 0) {
dev_err(ddev, "unable to read config index %d "
"descriptor/%s\n", cfgno, "all");
kfree(bigbuffer);
goto err;
}
if (result < length) {
dev_notice(ddev, "config index %d descriptor too short "
"(expected %i, got %i)\n", cfgno, length, result);
length = result;
}
dev->rawdescriptors[cfgno] = bigbuffer;
result = usb_parse_configuration(dev, cfgno,
&dev->config[cfgno], bigbuffer, length);
if (result < 0) {
++cfgno;
goto err;
}
}
err:
kfree(desc);
dev->descriptor.bNumConfigurations = cfgno;
return result;
}
void usb_release_bos_descriptor(struct usb_device *dev)
{
if (dev->bos) {
kfree(dev->bos->desc);
kfree(dev->bos);
dev->bos = NULL;
}
}
static const __u8 bos_desc_len[256] = {
[USB_CAP_TYPE_WIRELESS_USB] = USB_DT_USB_WIRELESS_CAP_SIZE,
[USB_CAP_TYPE_EXT] = USB_DT_USB_EXT_CAP_SIZE,
[USB_SS_CAP_TYPE] = USB_DT_USB_SS_CAP_SIZE,
[USB_SSP_CAP_TYPE] = USB_DT_USB_SSP_CAP_SIZE(1),
[CONTAINER_ID_TYPE] = USB_DT_USB_SS_CONTN_ID_SIZE,
[USB_PTM_CAP_TYPE] = USB_DT_USB_PTM_ID_SIZE,
};
/* Get BOS descriptor set */
int usb_get_bos_descriptor(struct usb_device *dev)
{
struct device *ddev = &dev->dev;
struct usb_bos_descriptor *bos;
struct usb_dev_cap_header *cap;
struct usb_ssp_cap_descriptor *ssp_cap;
unsigned char *buffer, *buffer0;
int length, total_len, num, i, ssac;
__u8 cap_type;
int ret;
bos = kzalloc(sizeof(*bos), GFP_KERNEL);
if (!bos)
return -ENOMEM;
/* Get BOS descriptor */
ret = usb_get_descriptor(dev, USB_DT_BOS, 0, bos, USB_DT_BOS_SIZE);
if (ret < USB_DT_BOS_SIZE || bos->bLength < USB_DT_BOS_SIZE) {
dev_notice(ddev, "unable to get BOS descriptor or descriptor too short\n");
if (ret >= 0)
ret = -ENOMSG;
kfree(bos);
return ret;
}
length = bos->bLength;
total_len = le16_to_cpu(bos->wTotalLength);
num = bos->bNumDeviceCaps;
kfree(bos);
if (total_len < length)
return -EINVAL;
dev->bos = kzalloc(sizeof(*dev->bos), GFP_KERNEL);
if (!dev->bos)
return -ENOMEM;
/* Now let's get the whole BOS descriptor set */
buffer = kzalloc(total_len, GFP_KERNEL);
if (!buffer) {
ret = -ENOMEM;
goto err;
}
dev->bos->desc = (struct usb_bos_descriptor *)buffer;
ret = usb_get_descriptor(dev, USB_DT_BOS, 0, buffer, total_len);
if (ret < total_len) {
dev_notice(ddev, "unable to get BOS descriptor set\n");
if (ret >= 0)
ret = -ENOMSG;
goto err;
}
buffer0 = buffer;
total_len -= length;
buffer += length;
for (i = 0; i < num; i++) {
cap = (struct usb_dev_cap_header *)buffer;
if (total_len < sizeof(*cap) || total_len < cap->bLength) {
dev->bos->desc->bNumDeviceCaps = i;
break;
}
cap_type = cap->bDevCapabilityType;
length = cap->bLength;
if (bos_desc_len[cap_type] && length < bos_desc_len[cap_type]) {
dev->bos->desc->bNumDeviceCaps = i;
break;
}
if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
dev_notice(ddev, "descriptor type invalid, skip\n");
continue;
}
switch (cap_type) {
case USB_CAP_TYPE_EXT:
dev->bos->ext_cap =
(struct usb_ext_cap_descriptor *)buffer;
break;
case USB_SS_CAP_TYPE:
dev->bos->ss_cap =
(struct usb_ss_cap_descriptor *)buffer;
break;
case USB_SSP_CAP_TYPE:
ssp_cap = (struct usb_ssp_cap_descriptor *)buffer;
ssac = (le32_to_cpu(ssp_cap->bmAttributes) &
USB_SSP_SUBLINK_SPEED_ATTRIBS);
if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac))
dev->bos->ssp_cap = ssp_cap;
break;
case CONTAINER_ID_TYPE:
dev->bos->ss_id =
(struct usb_ss_container_id_descriptor *)buffer;
break;
case USB_PTM_CAP_TYPE:
dev->bos->ptm_cap =
(struct usb_ptm_cap_descriptor *)buffer;
break;
default:
break;
}
total_len -= length;
buffer += length;
}
dev->bos->desc->wTotalLength = cpu_to_le16(buffer - buffer0);
return 0;
err:
usb_release_bos_descriptor(dev);
return ret;
}
| linux-master | drivers/usb/core/config.c |
// SPDX-License-Identifier: GPL-2.0
/*
* of.c The helpers for hcd device tree support
*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
* Author: Peter Chen <[email protected]>
* Copyright (C) 2017 Johan Hovold <[email protected]>
*/
#include <linux/of.h>
#include <linux/usb/of.h>
/**
* usb_of_get_device_node() - get a USB device node
* @hub: hub to which device is connected
* @port1: one-based index of port
*
* Look up the node of a USB device given its parent hub device and one-based
* port number.
*
* Return: A pointer to the node with incremented refcount if found, or
* %NULL otherwise.
*/
struct device_node *usb_of_get_device_node(struct usb_device *hub, int port1)
{
struct device_node *node;
u32 reg;
for_each_child_of_node(hub->dev.of_node, node) {
if (of_property_read_u32(node, "reg", ®))
continue;
if (reg == port1)
return node;
}
return NULL;
}
EXPORT_SYMBOL_GPL(usb_of_get_device_node);
/**
* usb_of_has_combined_node() - determine whether a device has a combined node
* @udev: USB device
*
 * Determine whether a USB device has a so-called combined node which is
* shared with its sole interface. This is the case if and only if the device
* has a node and its descriptors report the following:
*
* 1) bDeviceClass is 0 or 9, and
* 2) bNumConfigurations is 1, and
* 3) bNumInterfaces is 1.
*
* Return: True iff the device has a device node and its descriptors match the
* criteria for a combined node.
*/
bool usb_of_has_combined_node(struct usb_device *udev)
{
struct usb_device_descriptor *ddesc = &udev->descriptor;
struct usb_config_descriptor *cdesc;
if (!udev->dev.of_node)
return false;
switch (ddesc->bDeviceClass) {
case USB_CLASS_PER_INTERFACE:
case USB_CLASS_HUB:
if (ddesc->bNumConfigurations == 1) {
cdesc = &udev->config->desc;
if (cdesc->bNumInterfaces == 1)
return true;
}
}
return false;
}
EXPORT_SYMBOL_GPL(usb_of_has_combined_node);
/**
* usb_of_get_interface_node() - get a USB interface node
* @udev: USB device of interface
* @config: configuration value
* @ifnum: interface number
*
* Look up the node of a USB interface given its USB device, configuration
* value and interface number.
*
* Return: A pointer to the node with incremented refcount if found, or
* %NULL otherwise.
*/
struct device_node *
usb_of_get_interface_node(struct usb_device *udev, u8 config, u8 ifnum)
{
struct device_node *node;
u32 reg[2];
for_each_child_of_node(udev->dev.of_node, node) {
if (of_property_read_u32_array(node, "reg", reg, 2))
continue;
if (reg[0] == ifnum && reg[1] == config)
return node;
}
return NULL;
}
EXPORT_SYMBOL_GPL(usb_of_get_interface_node);
| linux-master | drivers/usb/core/of.c |
// SPDX-License-Identifier: GPL-2.0
/*
* usb port device code
*
* Copyright (C) 2012 Intel Corp
*
* Author: Lan Tianyu <[email protected]>
*/
#include <linux/kstrtox.h>
#include <linux/slab.h>
#include <linux/pm_qos.h>
#include <linux/component.h>
#include "hub.h"
static int usb_port_block_power_off;
static const struct attribute_group *port_dev_group[];
static ssize_t early_stop_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_port *port_dev = to_usb_port(dev);
return sysfs_emit(buf, "%s\n", port_dev->early_stop ? "yes" : "no");
}
static ssize_t early_stop_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_port *port_dev = to_usb_port(dev);
bool value;
if (kstrtobool(buf, &value))
return -EINVAL;
if (value)
port_dev->early_stop = 1;
else
port_dev->early_stop = 0;
return count;
}
static DEVICE_ATTR_RW(early_stop);
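/*
 * The "disable" attribute reflects and controls port power: reading it
 * returns "1" when the port is powered off; writing "1" disconnects any
 * attached device and powers the port down, while writing "0" powers it
 * back up.
 */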
static ssize_t disable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_port *port_dev = to_usb_port(dev);
struct usb_device *hdev = to_usb_device(dev->parent->parent);
struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
struct usb_interface *intf = to_usb_interface(hub->intfdev);
int port1 = port_dev->portnum;
u16 portstatus, unused;
bool disabled;
int rc;
rc = usb_autopm_get_interface(intf);
if (rc < 0)
return rc;
usb_lock_device(hdev);
if (hub->disconnected) {
rc = -ENODEV;
goto out_hdev_lock;
}
usb_hub_port_status(hub, port1, &portstatus, &unused);
disabled = !usb_port_is_power_on(hub, portstatus);
out_hdev_lock:
usb_unlock_device(hdev);
usb_autopm_put_interface(intf);
if (rc)
return rc;
return sysfs_emit(buf, "%s\n", disabled ? "1" : "0");
}
static ssize_t disable_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_port *port_dev = to_usb_port(dev);
struct usb_device *hdev = to_usb_device(dev->parent->parent);
struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
struct usb_interface *intf = to_usb_interface(hub->intfdev);
int port1 = port_dev->portnum;
bool disabled;
int rc;
rc = kstrtobool(buf, &disabled);
if (rc)
return rc;
rc = usb_autopm_get_interface(intf);
if (rc < 0)
return rc;
usb_lock_device(hdev);
if (hub->disconnected) {
rc = -ENODEV;
goto out_hdev_lock;
}
if (disabled && port_dev->child)
usb_disconnect(&port_dev->child);
rc = usb_hub_set_port_power(hdev, hub, port1, !disabled);
if (disabled) {
usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_CONNECTION);
if (!port_dev->is_superspeed)
usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_ENABLE);
}
if (!rc)
rc = count;
out_hdev_lock:
usb_unlock_device(hdev);
usb_autopm_put_interface(intf);
return rc;
}
static DEVICE_ATTR_RW(disable);
static ssize_t location_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_port *port_dev = to_usb_port(dev);
return sprintf(buf, "0x%08x\n", port_dev->location);
}
static DEVICE_ATTR_RO(location);
static ssize_t connect_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_port *port_dev = to_usb_port(dev);
char *result;
switch (port_dev->connect_type) {
case USB_PORT_CONNECT_TYPE_HOT_PLUG:
result = "hotplug";
break;
case USB_PORT_CONNECT_TYPE_HARD_WIRED:
result = "hardwired";
break;
case USB_PORT_NOT_USED:
result = "not used";
break;
default:
result = "unknown";
break;
}
return sprintf(buf, "%s\n", result);
}
static DEVICE_ATTR_RO(connect_type);
static ssize_t state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_port *port_dev = to_usb_port(dev);
enum usb_device_state state = READ_ONCE(port_dev->state);
return sysfs_emit(buf, "%s\n", usb_state_string(state));
}
static DEVICE_ATTR_RO(state);
static ssize_t over_current_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_port *port_dev = to_usb_port(dev);
return sprintf(buf, "%u\n", port_dev->over_current_count);
}
static DEVICE_ATTR_RO(over_current_count);
static ssize_t quirks_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_port *port_dev = to_usb_port(dev);
return sprintf(buf, "%08x\n", port_dev->quirks);
}
static ssize_t quirks_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_port *port_dev = to_usb_port(dev);
u32 value;
if (kstrtou32(buf, 16, &value))
return -EINVAL;
port_dev->quirks = value;
return count;
}
static DEVICE_ATTR_RW(quirks);
static ssize_t usb3_lpm_permit_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_port *port_dev = to_usb_port(dev);
const char *p;
if (port_dev->usb3_lpm_u1_permit) {
if (port_dev->usb3_lpm_u2_permit)
p = "u1_u2";
else
p = "u1";
} else {
if (port_dev->usb3_lpm_u2_permit)
p = "u2";
else
p = "0";
}
return sprintf(buf, "%s\n", p);
}
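/*
 * Writing "u1", "u2", "u1_u2" or "0" selects which USB3 link power
 * states the port may enter; e.g. "echo u1 > .../usb3_lpm_permit"
 * permits U1 only, while "0" forbids both U1 and U2.
 */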
static ssize_t usb3_lpm_permit_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_port *port_dev = to_usb_port(dev);
struct usb_device *udev = port_dev->child;
struct usb_hcd *hcd;
if (!strncmp(buf, "u1_u2", 5)) {
port_dev->usb3_lpm_u1_permit = 1;
port_dev->usb3_lpm_u2_permit = 1;
} else if (!strncmp(buf, "u1", 2)) {
port_dev->usb3_lpm_u1_permit = 1;
port_dev->usb3_lpm_u2_permit = 0;
} else if (!strncmp(buf, "u2", 2)) {
port_dev->usb3_lpm_u1_permit = 0;
port_dev->usb3_lpm_u2_permit = 1;
} else if (!strncmp(buf, "0", 1)) {
port_dev->usb3_lpm_u1_permit = 0;
port_dev->usb3_lpm_u2_permit = 0;
	} else {
		return -EINVAL;
	}
/* If device is connected to the port, disable or enable lpm
* to make new u1 u2 setting take effect immediately.
*/
if (udev) {
hcd = bus_to_hcd(udev->bus);
if (!hcd)
return -EINVAL;
usb_lock_device(udev);
mutex_lock(hcd->bandwidth_mutex);
if (!usb_disable_lpm(udev))
usb_enable_lpm(udev);
mutex_unlock(hcd->bandwidth_mutex);
usb_unlock_device(udev);
}
return count;
}
static DEVICE_ATTR_RW(usb3_lpm_permit);
static struct attribute *port_dev_attrs[] = {
&dev_attr_connect_type.attr,
&dev_attr_state.attr,
&dev_attr_location.attr,
&dev_attr_quirks.attr,
&dev_attr_over_current_count.attr,
&dev_attr_disable.attr,
&dev_attr_early_stop.attr,
NULL,
};
static const struct attribute_group port_dev_attr_grp = {
.attrs = port_dev_attrs,
};
static const struct attribute_group *port_dev_group[] = {
&port_dev_attr_grp,
NULL,
};
static struct attribute *port_dev_usb3_attrs[] = {
&dev_attr_usb3_lpm_permit.attr,
NULL,
};
static const struct attribute_group port_dev_usb3_attr_grp = {
.attrs = port_dev_usb3_attrs,
};
static const struct attribute_group *port_dev_usb3_group[] = {
&port_dev_attr_grp,
&port_dev_usb3_attr_grp,
NULL,
};
static void usb_port_device_release(struct device *dev)
{
struct usb_port *port_dev = to_usb_port(dev);
kfree(port_dev->req);
kfree(port_dev);
}
#ifdef CONFIG_PM
static int usb_port_runtime_resume(struct device *dev)
{
struct usb_port *port_dev = to_usb_port(dev);
struct usb_device *hdev = to_usb_device(dev->parent->parent);
struct usb_interface *intf = to_usb_interface(dev->parent);
struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
struct usb_device *udev = port_dev->child;
struct usb_port *peer = port_dev->peer;
int port1 = port_dev->portnum;
int retval;
if (!hub)
return -EINVAL;
if (hub->in_reset) {
set_bit(port1, hub->power_bits);
return 0;
}
/*
* Power on our usb3 peer before this usb2 port to prevent a usb3
* device from degrading to its usb2 connection
*/
if (!port_dev->is_superspeed && peer)
pm_runtime_get_sync(&peer->dev);
retval = usb_autopm_get_interface(intf);
if (retval < 0)
return retval;
retval = usb_hub_set_port_power(hdev, hub, port1, true);
msleep(hub_power_on_good_delay(hub));
if (udev && !retval) {
/*
* Our preference is to simply wait for the port to reconnect,
* as that is the lowest latency method to restart the port.
* However, there are cases where toggling port power results in
* the host port and the device port getting out of sync causing
* a link training live lock. Upon timeout, flag the port as
* needing warm reset recovery (to be performed later by
* usb_port_resume() as requested via usb_wakeup_notification())
*/
if (hub_port_debounce_be_connected(hub, port1) < 0) {
dev_dbg(&port_dev->dev, "reconnect timeout\n");
if (hub_is_superspeed(hdev))
set_bit(port1, hub->warm_reset_bits);
}
/* Force the child awake to revalidate after the power loss. */
if (!test_and_set_bit(port1, hub->child_usage_bits)) {
pm_runtime_get_noresume(&port_dev->dev);
pm_request_resume(&udev->dev);
}
}
usb_autopm_put_interface(intf);
return retval;
}
static int usb_port_runtime_suspend(struct device *dev)
{
struct usb_port *port_dev = to_usb_port(dev);
struct usb_device *hdev = to_usb_device(dev->parent->parent);
struct usb_interface *intf = to_usb_interface(dev->parent);
struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
struct usb_port *peer = port_dev->peer;
int port1 = port_dev->portnum;
int retval;
if (!hub)
return -EINVAL;
if (hub->in_reset)
return -EBUSY;
if (dev_pm_qos_flags(&port_dev->dev, PM_QOS_FLAG_NO_POWER_OFF)
== PM_QOS_FLAGS_ALL)
return -EAGAIN;
if (usb_port_block_power_off)
return -EBUSY;
retval = usb_autopm_get_interface(intf);
if (retval < 0)
return retval;
retval = usb_hub_set_port_power(hdev, hub, port1, false);
usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_CONNECTION);
if (!port_dev->is_superspeed)
usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_ENABLE);
usb_autopm_put_interface(intf);
/*
* Our peer usb3 port may now be able to suspend, so
* asynchronously queue a suspend request to observe that this
* usb2 port is now off.
*/
if (!port_dev->is_superspeed && peer)
pm_runtime_put(&peer->dev);
return retval;
}
#endif
static void usb_port_shutdown(struct device *dev)
{
struct usb_port *port_dev = to_usb_port(dev);
if (port_dev->child)
usb_disable_usb2_hardware_lpm(port_dev->child);
}
static const struct dev_pm_ops usb_port_pm_ops = {
#ifdef CONFIG_PM
.runtime_suspend = usb_port_runtime_suspend,
.runtime_resume = usb_port_runtime_resume,
#endif
};
struct device_type usb_port_device_type = {
.name = "usb_port",
.release = usb_port_device_release,
.pm = &usb_port_pm_ops,
};
static struct device_driver usb_port_driver = {
.name = "usb",
.owner = THIS_MODULE,
.shutdown = usb_port_shutdown,
};
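/*
 * Tie a USB 2.0 port to its USB 3.0 companion: create the "peer" sysfs
 * links and take the runtime-PM references that keep the pair's power
 * state transitions coherent.
 */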
static int link_peers(struct usb_port *left, struct usb_port *right)
{
struct usb_port *ss_port, *hs_port;
int rc;
if (left->peer == right && right->peer == left)
return 0;
if (left->peer || right->peer) {
struct usb_port *lpeer = left->peer;
struct usb_port *rpeer = right->peer;
char *method;
if (left->location && left->location == right->location)
method = "location";
else
method = "default";
pr_debug("usb: failed to peer %s and %s by %s (%s:%s) (%s:%s)\n",
dev_name(&left->dev), dev_name(&right->dev), method,
dev_name(&left->dev),
lpeer ? dev_name(&lpeer->dev) : "none",
dev_name(&right->dev),
rpeer ? dev_name(&rpeer->dev) : "none");
return -EBUSY;
}
rc = sysfs_create_link(&left->dev.kobj, &right->dev.kobj, "peer");
if (rc)
return rc;
rc = sysfs_create_link(&right->dev.kobj, &left->dev.kobj, "peer");
if (rc) {
sysfs_remove_link(&left->dev.kobj, "peer");
return rc;
}
/*
* We need to wake the HiSpeed port to make sure we don't race
* setting ->peer with usb_port_runtime_suspend(). Otherwise we
* may miss a suspend event for the SuperSpeed port.
*/
if (left->is_superspeed) {
ss_port = left;
WARN_ON(right->is_superspeed);
hs_port = right;
} else {
ss_port = right;
WARN_ON(!right->is_superspeed);
hs_port = left;
}
pm_runtime_get_sync(&hs_port->dev);
left->peer = right;
right->peer = left;
/*
* The SuperSpeed reference is dropped when the HiSpeed port in
* this relationship suspends, i.e. when it is safe to allow a
* SuperSpeed connection to drop since there is no risk of a
* device degrading to its powered-off HiSpeed connection.
*
* Also, drop the HiSpeed ref taken above.
*/
pm_runtime_get_sync(&ss_port->dev);
pm_runtime_put(&hs_port->dev);
return 0;
}
static void link_peers_report(struct usb_port *left, struct usb_port *right)
{
int rc;
rc = link_peers(left, right);
if (rc == 0) {
dev_dbg(&left->dev, "peered to %s\n", dev_name(&right->dev));
} else {
dev_dbg(&left->dev, "failed to peer to %s (%d)\n",
dev_name(&right->dev), rc);
pr_warn_once("usb: port power management may be unreliable\n");
usb_port_block_power_off = 1;
}
}
static void unlink_peers(struct usb_port *left, struct usb_port *right)
{
struct usb_port *ss_port, *hs_port;
WARN(right->peer != left || left->peer != right,
"%s and %s are not peers?\n",
dev_name(&left->dev), dev_name(&right->dev));
/*
* We wake the HiSpeed port to make sure we don't race its
* usb_port_runtime_resume() event which takes a SuperSpeed ref
* when ->peer is !NULL.
*/
if (left->is_superspeed) {
ss_port = left;
hs_port = right;
} else {
ss_port = right;
hs_port = left;
}
pm_runtime_get_sync(&hs_port->dev);
sysfs_remove_link(&left->dev.kobj, "peer");
right->peer = NULL;
sysfs_remove_link(&right->dev.kobj, "peer");
left->peer = NULL;
/* Drop the SuperSpeed ref held on behalf of the active HiSpeed port */
pm_runtime_put(&ss_port->dev);
/* Drop the ref taken above */
pm_runtime_put(&hs_port->dev);
}
/*
* For each usb hub device in the system check to see if it is in the
* peer domain of the given port_dev, and if it is check to see if it
* has a port that matches the given port by location
*/
static int match_location(struct usb_device *peer_hdev, void *p)
{
int port1;
struct usb_hcd *hcd, *peer_hcd;
struct usb_port *port_dev = p, *peer;
struct usb_hub *peer_hub = usb_hub_to_struct_hub(peer_hdev);
struct usb_device *hdev = to_usb_device(port_dev->dev.parent->parent);
if (!peer_hub)
return 0;
hcd = bus_to_hcd(hdev->bus);
peer_hcd = bus_to_hcd(peer_hdev->bus);
/* peer_hcd is provisional until we verify it against the known peer */
if (peer_hcd != hcd->shared_hcd)
return 0;
for (port1 = 1; port1 <= peer_hdev->maxchild; port1++) {
peer = peer_hub->ports[port1 - 1];
if (peer && peer->location == port_dev->location) {
link_peers_report(port_dev, peer);
return 1; /* done */
}
}
return 0;
}
/*
* Find the peer port either via explicit platform firmware "location"
* data, the peer hcd for root hubs, or the upstream peer relationship
* for all other hubs.
*/
static void find_and_link_peer(struct usb_hub *hub, int port1)
{
struct usb_port *port_dev = hub->ports[port1 - 1], *peer;
struct usb_device *hdev = hub->hdev;
struct usb_device *peer_hdev;
struct usb_hub *peer_hub;
/*
* If location data is available then we can only peer this port
* by a location match, not the default peer (lest we create a
* situation where we need to go back and undo a default peering
* when the port is later peered by location data)
*/
if (port_dev->location) {
/* we link the peer in match_location() if found */
usb_for_each_dev(port_dev, match_location);
return;
} else if (!hdev->parent) {
struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
struct usb_hcd *peer_hcd = hcd->shared_hcd;
if (!peer_hcd)
return;
peer_hdev = peer_hcd->self.root_hub;
} else {
struct usb_port *upstream;
struct usb_device *parent = hdev->parent;
struct usb_hub *parent_hub = usb_hub_to_struct_hub(parent);
if (!parent_hub)
return;
upstream = parent_hub->ports[hdev->portnum - 1];
if (!upstream || !upstream->peer)
return;
peer_hdev = upstream->peer->child;
}
peer_hub = usb_hub_to_struct_hub(peer_hdev);
if (!peer_hub || port1 > peer_hdev->maxchild)
return;
/*
* we found a valid default peer, last check is to make sure it
* does not have location data
*/
peer = peer_hub->ports[port1 - 1];
if (peer && peer->location == 0)
link_peers_report(port_dev, peer);
}
static int connector_bind(struct device *dev, struct device *connector, void *data)
{
int ret;
ret = sysfs_create_link(&dev->kobj, &connector->kobj, "connector");
if (ret)
return ret;
ret = sysfs_create_link(&connector->kobj, &dev->kobj, dev_name(dev));
if (ret)
sysfs_remove_link(&dev->kobj, "connector");
return ret;
}
static void connector_unbind(struct device *dev, struct device *connector, void *data)
{
sysfs_remove_link(&connector->kobj, dev_name(dev));
sysfs_remove_link(&dev->kobj, "connector");
}
static const struct component_ops connector_ops = {
.bind = connector_bind,
.unbind = connector_unbind,
};
int usb_hub_create_port_device(struct usb_hub *hub, int port1)
{
struct usb_port *port_dev;
struct usb_device *hdev = hub->hdev;
int retval;
port_dev = kzalloc(sizeof(*port_dev), GFP_KERNEL);
if (!port_dev)
return -ENOMEM;
port_dev->req = kzalloc(sizeof(*(port_dev->req)), GFP_KERNEL);
if (!port_dev->req) {
kfree(port_dev);
return -ENOMEM;
}
hub->ports[port1 - 1] = port_dev;
port_dev->portnum = port1;
set_bit(port1, hub->power_bits);
port_dev->dev.parent = hub->intfdev;
if (hub_is_superspeed(hdev)) {
port_dev->usb3_lpm_u1_permit = 1;
port_dev->usb3_lpm_u2_permit = 1;
port_dev->dev.groups = port_dev_usb3_group;
	} else {
		port_dev->dev.groups = port_dev_group;
	}
port_dev->dev.type = &usb_port_device_type;
port_dev->dev.driver = &usb_port_driver;
if (hub_is_superspeed(hub->hdev))
port_dev->is_superspeed = 1;
dev_set_name(&port_dev->dev, "%s-port%d", dev_name(&hub->hdev->dev),
port1);
mutex_init(&port_dev->status_lock);
retval = device_register(&port_dev->dev);
if (retval) {
put_device(&port_dev->dev);
return retval;
}
port_dev->state_kn = sysfs_get_dirent(port_dev->dev.kobj.sd, "state");
if (!port_dev->state_kn) {
dev_err(&port_dev->dev, "failed to sysfs_get_dirent 'state'\n");
retval = -ENODEV;
goto err_unregister;
}
/* Set default policy of port-poweroff disabled. */
retval = dev_pm_qos_add_request(&port_dev->dev, port_dev->req,
DEV_PM_QOS_FLAGS, PM_QOS_FLAG_NO_POWER_OFF);
	if (retval < 0)
		goto err_put_kn;
retval = component_add(&port_dev->dev, &connector_ops);
if (retval) {
dev_warn(&port_dev->dev, "failed to add component\n");
goto err_put_kn;
}
find_and_link_peer(hub, port1);
/*
	 * Enable runtime pm and hold a reference that hub_configure()
* will drop once the PM_QOS_NO_POWER_OFF flag state has been set
* and the hub has been fully registered (hdev->maxchild set).
*/
pm_runtime_set_active(&port_dev->dev);
pm_runtime_get_noresume(&port_dev->dev);
pm_runtime_enable(&port_dev->dev);
device_enable_async_suspend(&port_dev->dev);
/*
* Keep hidden the ability to enable port-poweroff if the hub
* does not support power switching.
*/
if (!hub_is_port_power_switchable(hub))
return 0;
/* Attempt to let userspace take over the policy. */
retval = dev_pm_qos_expose_flags(&port_dev->dev,
PM_QOS_FLAG_NO_POWER_OFF);
if (retval < 0) {
dev_warn(&port_dev->dev, "failed to expose pm_qos_no_poweroff\n");
return 0;
}
/* Userspace owns the policy, drop the kernel 'no_poweroff' request. */
retval = dev_pm_qos_remove_request(port_dev->req);
if (retval >= 0) {
kfree(port_dev->req);
port_dev->req = NULL;
}
return 0;
err_put_kn:
sysfs_put(port_dev->state_kn);
err_unregister:
device_unregister(&port_dev->dev);
return retval;
}
void usb_hub_remove_port_device(struct usb_hub *hub, int port1)
{
struct usb_port *port_dev = hub->ports[port1 - 1];
struct usb_port *peer;
peer = port_dev->peer;
if (peer)
unlink_peers(port_dev, peer);
component_del(&port_dev->dev, &connector_ops);
sysfs_put(port_dev->state_kn);
device_unregister(&port_dev->dev);
}
| linux-master | drivers/usb/core/port.c |
// SPDX-License-Identifier: GPL-2.0
/*
* USB device quirk handling logic and table
*
* Copyright (c) 2007 Oliver Neukum
* Copyright (c) 2007 Greg Kroah-Hartman <[email protected]>
*/
#include <linux/moduleparam.h>
#include <linux/usb.h>
#include <linux/usb/quirks.h>
#include <linux/usb/hcd.h>
#include "usb.h"
struct quirk_entry {
u16 vid;
u16 pid;
u32 flags;
};
static DEFINE_MUTEX(quirk_mutex);
static struct quirk_entry *quirk_list;
static unsigned int quirk_count;
static char quirks_param[128];
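/*
 * Parse the "quirks" module parameter: a comma-separated list of
 * VID:PID:flags entries, e.g. "quirks=0781:5583:k,046d:082d:g" adds
 * USB_QUIRK_NO_LPM for one device and USB_QUIRK_DELAY_INIT for another.
 */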
static int quirks_param_set(const char *value, const struct kernel_param *kp)
{
char *val, *p, *field;
u16 vid, pid;
u32 flags;
size_t i;
int err;
val = kstrdup(value, GFP_KERNEL);
if (!val)
return -ENOMEM;
err = param_set_copystring(val, kp);
if (err) {
kfree(val);
return err;
}
mutex_lock(&quirk_mutex);
if (!*val) {
quirk_count = 0;
kfree(quirk_list);
quirk_list = NULL;
goto unlock;
}
for (quirk_count = 1, i = 0; val[i]; i++)
if (val[i] == ',')
quirk_count++;
	kfree(quirk_list);
	quirk_list = NULL;
quirk_list = kcalloc(quirk_count, sizeof(struct quirk_entry),
GFP_KERNEL);
if (!quirk_list) {
quirk_count = 0;
mutex_unlock(&quirk_mutex);
kfree(val);
return -ENOMEM;
}
for (i = 0, p = val; p && *p;) {
/* Each entry consists of VID:PID:flags */
field = strsep(&p, ":");
if (!field)
break;
if (kstrtou16(field, 16, &vid))
break;
field = strsep(&p, ":");
if (!field)
break;
if (kstrtou16(field, 16, &pid))
break;
field = strsep(&p, ",");
if (!field || !*field)
break;
/* Collect the flags */
for (flags = 0; *field; field++) {
switch (*field) {
case 'a':
flags |= USB_QUIRK_STRING_FETCH_255;
break;
case 'b':
flags |= USB_QUIRK_RESET_RESUME;
break;
case 'c':
flags |= USB_QUIRK_NO_SET_INTF;
break;
case 'd':
flags |= USB_QUIRK_CONFIG_INTF_STRINGS;
break;
case 'e':
flags |= USB_QUIRK_RESET;
break;
case 'f':
flags |= USB_QUIRK_HONOR_BNUMINTERFACES;
break;
case 'g':
flags |= USB_QUIRK_DELAY_INIT;
break;
case 'h':
flags |= USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL;
break;
case 'i':
flags |= USB_QUIRK_DEVICE_QUALIFIER;
break;
case 'j':
flags |= USB_QUIRK_IGNORE_REMOTE_WAKEUP;
break;
case 'k':
flags |= USB_QUIRK_NO_LPM;
break;
case 'l':
flags |= USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL;
break;
case 'm':
flags |= USB_QUIRK_DISCONNECT_SUSPEND;
break;
case 'n':
flags |= USB_QUIRK_DELAY_CTRL_MSG;
break;
case 'o':
flags |= USB_QUIRK_HUB_SLOW_RESET;
break;
/* Ignore unrecognized flag characters */
}
}
quirk_list[i++] = (struct quirk_entry)
{ .vid = vid, .pid = pid, .flags = flags };
}
if (i < quirk_count)
quirk_count = i;
unlock:
mutex_unlock(&quirk_mutex);
kfree(val);
return 0;
}
static const struct kernel_param_ops quirks_param_ops = {
.set = quirks_param_set,
.get = param_get_string,
};
static struct kparam_string quirks_param_string = {
.maxlen = sizeof(quirks_param),
.string = quirks_param,
};
device_param_cb(quirks, &quirks_param_ops, &quirks_param_string, 0644);
MODULE_PARM_DESC(quirks, "Add/modify USB quirks by specifying quirks=vendorID:productID:quirks");
/* Lists of quirky USB devices, split in device quirks and interface quirks.
* Device quirks are applied at the very beginning of the enumeration process,
* right after reading the device descriptor. They can thus only match on device
* information.
*
* Interface quirks are applied after reading all the configuration descriptors.
* They can match on both device and interface information.
*
* Note that the DELAY_INIT and HONOR_BNUMINTERFACES quirks do not make sense as
* interface quirks, as they only influence the enumeration process which is run
* before processing the interface quirks.
*
* Please keep the lists ordered by:
* 1) Vendor ID
* 2) Product ID
* 3) Class ID
*/
static const struct usb_device_id usb_quirk_list[] = {
/* CBM - Flash disk */
{ USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
/* WORLDE Controller KS49 or Prodipe MIDI 49C USB controller */
{ USB_DEVICE(0x0218, 0x0201), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
/* WORLDE easy key (easykey.25) MIDI controller */
{ USB_DEVICE(0x0218, 0x0401), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
/* HP 5300/5370C scanner */
{ USB_DEVICE(0x03f0, 0x0701), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
/* HP v222w 16GB Mini USB Drive */
{ USB_DEVICE(0x03f0, 0x3f40), .driver_info = USB_QUIRK_DELAY_INIT },
/* Creative SB Audigy 2 NX */
{ USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
/* USB3503 */
{ USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME },
/* Microsoft Wireless Laser Mouse 6000 Receiver */
{ USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME },
/* Microsoft LifeCam-VX700 v2.0 */
{ USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
/* Microsoft Surface Dock Ethernet (RTL8153 GigE) */
{ USB_DEVICE(0x045e, 0x07c6), .driver_info = USB_QUIRK_NO_LPM },
/* Cherry Stream G230 2.0 (G85-231) and 3.0 (G85-232) */
{ USB_DEVICE(0x046a, 0x0023), .driver_info = USB_QUIRK_RESET_RESUME },
/* Logitech HD Webcam C270 */
{ USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME },
/* Logitech HD Pro Webcams C920, C920-C, C922, C925e and C930e */
{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x085c), .driver_info = USB_QUIRK_DELAY_INIT },
/* Logitech ConferenceCam CC3000e */
{ USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0848), .driver_info = USB_QUIRK_DELAY_INIT },
/* Logitech PTZ Pro Camera */
{ USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT },
/* Logitech Screen Share */
{ USB_DEVICE(0x046d, 0x086c), .driver_info = USB_QUIRK_NO_LPM },
/* Logitech Quickcam Fusion */
{ USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
/* Logitech Quickcam Orbit MP */
{ USB_DEVICE(0x046d, 0x08c2), .driver_info = USB_QUIRK_RESET_RESUME },
/* Logitech Quickcam Pro for Notebook */
{ USB_DEVICE(0x046d, 0x08c3), .driver_info = USB_QUIRK_RESET_RESUME },
/* Logitech Quickcam Pro 5000 */
{ USB_DEVICE(0x046d, 0x08c5), .driver_info = USB_QUIRK_RESET_RESUME },
/* Logitech Quickcam OEM Dell Notebook */
{ USB_DEVICE(0x046d, 0x08c6), .driver_info = USB_QUIRK_RESET_RESUME },
/* Logitech Quickcam OEM Cisco VT Camera II */
{ USB_DEVICE(0x046d, 0x08c7), .driver_info = USB_QUIRK_RESET_RESUME },
/* Logitech Harmony 700-series */
{ USB_DEVICE(0x046d, 0xc122), .driver_info = USB_QUIRK_DELAY_INIT },
/* Philips PSC805 audio device */
{ USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME },
/* Plantronic Audio 655 DSP */
{ USB_DEVICE(0x047f, 0xc008), .driver_info = USB_QUIRK_RESET_RESUME },
/* Plantronic Audio 648 USB */
{ USB_DEVICE(0x047f, 0xc013), .driver_info = USB_QUIRK_RESET_RESUME },
/* Artisman Watchdog Dongle */
{ USB_DEVICE(0x04b4, 0x0526), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
/* Microchip Joss Optical infrared touchboard device */
{ USB_DEVICE(0x04d8, 0x000c), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
/* CarrolTouch 4000U */
{ USB_DEVICE(0x04e7, 0x0009), .driver_info = USB_QUIRK_RESET_RESUME },
/* CarrolTouch 4500U */
{ USB_DEVICE(0x04e7, 0x0030), .driver_info = USB_QUIRK_RESET_RESUME },
/* Samsung Android phone modem - ID conflict with SPH-I500 */
{ USB_DEVICE(0x04e8, 0x6601), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
/* Elan Touchscreen */
{ USB_DEVICE(0x04f3, 0x0089), .driver_info =
USB_QUIRK_DEVICE_QUALIFIER },
{ USB_DEVICE(0x04f3, 0x009b), .driver_info =
USB_QUIRK_DEVICE_QUALIFIER },
{ USB_DEVICE(0x04f3, 0x010c), .driver_info =
USB_QUIRK_DEVICE_QUALIFIER },
{ USB_DEVICE(0x04f3, 0x0125), .driver_info =
USB_QUIRK_DEVICE_QUALIFIER },
{ USB_DEVICE(0x04f3, 0x016f), .driver_info =
USB_QUIRK_DEVICE_QUALIFIER },
{ USB_DEVICE(0x04f3, 0x0381), .driver_info =
USB_QUIRK_NO_LPM },
{ USB_DEVICE(0x04f3, 0x21b8), .driver_info =
USB_QUIRK_DEVICE_QUALIFIER },
/* Roland SC-8820 */
{ USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
/* Edirol SD-20 */
{ USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME },
/* Alcor Micro Corp. Hub */
{ USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME },
/* appletouch */
{ USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
/* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */
{ USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM },
/* ELSA MicroLink 56K */
{ USB_DEVICE(0x05cc, 0x2267), .driver_info = USB_QUIRK_RESET_RESUME },
/* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
{ USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
/* Avision AV600U */
{ USB_DEVICE(0x0638, 0x0a13), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
/* Saitek Cyborg Gold Joystick */
{ USB_DEVICE(0x06a3, 0x0006), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
/* Agfa SNAPSCAN 1212U */
{ USB_DEVICE(0x06bd, 0x0001), .driver_info = USB_QUIRK_RESET_RESUME },
/* Guillemot Webcam Hercules Dualpix Exchange (2nd ID) */
{ USB_DEVICE(0x06f8, 0x0804), .driver_info = USB_QUIRK_RESET_RESUME },
/* Guillemot Webcam Hercules Dualpix Exchange */
{ USB_DEVICE(0x06f8, 0x3005), .driver_info = USB_QUIRK_RESET_RESUME },
/* Guillemot Hercules DJ Console audio card (BZ 208357) */
{ USB_DEVICE(0x06f8, 0xb000), .driver_info =
USB_QUIRK_ENDPOINT_IGNORE },
/* Midiman M-Audio Keystation 88es */
{ USB_DEVICE(0x0763, 0x0192), .driver_info = USB_QUIRK_RESET_RESUME },
/* SanDisk Ultra Fit and Ultra Flair */
{ USB_DEVICE(0x0781, 0x5583), .driver_info = USB_QUIRK_NO_LPM },
{ USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM },
/* Realforce 87U Keyboard */
{ USB_DEVICE(0x0853, 0x011b), .driver_info = USB_QUIRK_NO_LPM },
/* M-Systems Flash Disk Pioneers */
{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
/* Baum Vario Ultra */
{ USB_DEVICE(0x0904, 0x6101), .driver_info =
USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
{ USB_DEVICE(0x0904, 0x6102), .driver_info =
USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
{ USB_DEVICE(0x0904, 0x6103), .driver_info =
USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
/* Sound Devices USBPre2 */
{ USB_DEVICE(0x0926, 0x0202), .driver_info =
USB_QUIRK_ENDPOINT_IGNORE },
/* Sound Devices MixPre-D */
{ USB_DEVICE(0x0926, 0x0208), .driver_info =
USB_QUIRK_ENDPOINT_IGNORE },
/* Keytouch QWERTY Panel keyboard */
{ USB_DEVICE(0x0926, 0x3333), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
/* Kingston DataTraveler 3.0 */
{ USB_DEVICE(0x0951, 0x1666), .driver_info = USB_QUIRK_NO_LPM },
/* NVIDIA Jetson devices in Force Recovery mode */
{ USB_DEVICE(0x0955, 0x7018), .driver_info = USB_QUIRK_RESET_RESUME },
{ USB_DEVICE(0x0955, 0x7019), .driver_info = USB_QUIRK_RESET_RESUME },
{ USB_DEVICE(0x0955, 0x7418), .driver_info = USB_QUIRK_RESET_RESUME },
{ USB_DEVICE(0x0955, 0x7721), .driver_info = USB_QUIRK_RESET_RESUME },
{ USB_DEVICE(0x0955, 0x7c18), .driver_info = USB_QUIRK_RESET_RESUME },
{ USB_DEVICE(0x0955, 0x7e19), .driver_info = USB_QUIRK_RESET_RESUME },
{ USB_DEVICE(0x0955, 0x7f21), .driver_info = USB_QUIRK_RESET_RESUME },
/* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
{ USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
/* ELMO L-12F document camera */
{ USB_DEVICE(0x09a1, 0x0028), .driver_info = USB_QUIRK_DELAY_CTRL_MSG },
/* Broadcom BCM92035DGROM BT dongle */
{ USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME },
/* MAYA44USB sound device */
{ USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME },
/* ASUS Base Station(T100) */
{ USB_DEVICE(0x0b05, 0x17e0), .driver_info =
USB_QUIRK_IGNORE_REMOTE_WAKEUP },
/* Realtek Semiconductor Corp. Mass Storage Device (Multicard Reader) */
{ USB_DEVICE(0x0bda, 0x0151), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS },
/* Realtek hub in Dell WD19 (Type-C) */
{ USB_DEVICE(0x0bda, 0x0487), .driver_info = USB_QUIRK_NO_LPM },
/* Generic RTL8153 based ethernet adapters */
{ USB_DEVICE(0x0bda, 0x8153), .driver_info = USB_QUIRK_NO_LPM },
/* SONiX USB DEVICE Touchpad */
{ USB_DEVICE(0x0c45, 0x7056), .driver_info =
USB_QUIRK_IGNORE_REMOTE_WAKEUP },
/* Action Semiconductor flash disk */
{ USB_DEVICE(0x10d6, 0x2200), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
/* novation SoundControl XL */
{ USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME },
/* Focusrite Scarlett Solo USB */
{ USB_DEVICE(0x1235, 0x8211), .driver_info =
USB_QUIRK_DISCONNECT_SUSPEND },
/* Huawei 4G LTE module */
{ USB_DEVICE(0x12d1, 0x15bb), .driver_info =
USB_QUIRK_DISCONNECT_SUSPEND },
{ USB_DEVICE(0x12d1, 0x15c3), .driver_info =
USB_QUIRK_DISCONNECT_SUSPEND },
/* SKYMEDI USB_DRIVE */
{ USB_DEVICE(0x1516, 0x8628), .driver_info = USB_QUIRK_RESET_RESUME },
/* Razer - Razer Blade Keyboard */
{ USB_DEVICE(0x1532, 0x0116), .driver_info =
USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
/* Lenovo ThinkPad OneLink+ Dock twin hub controllers (VIA Labs VL812) */
{ USB_DEVICE(0x17ef, 0x1018), .driver_info = USB_QUIRK_RESET_RESUME },
{ USB_DEVICE(0x17ef, 0x1019), .driver_info = USB_QUIRK_RESET_RESUME },
/* Lenovo USB-C to Ethernet Adapter RTL8153-04 */
{ USB_DEVICE(0x17ef, 0x720c), .driver_info = USB_QUIRK_NO_LPM },
/* Lenovo Powered USB-C Travel Hub (4X90S92381, RTL8153 GigE) */
{ USB_DEVICE(0x17ef, 0x721e), .driver_info = USB_QUIRK_NO_LPM },
/* Lenovo ThinkCenter A630Z TI024Gen3 usb-audio */
{ USB_DEVICE(0x17ef, 0xa012), .driver_info =
USB_QUIRK_DISCONNECT_SUSPEND },
/* Lenovo ThinkPad USB-C Dock Gen2 Ethernet (RTL8153 GigE) */
{ USB_DEVICE(0x17ef, 0xa387), .driver_info = USB_QUIRK_NO_LPM },
/* BUILDWIN Photo Frame */
{ USB_DEVICE(0x1908, 0x1315), .driver_info =
USB_QUIRK_HONOR_BNUMINTERFACES },
/* Protocol and OTG Electrical Test Device */
{ USB_DEVICE(0x1a0a, 0x0200), .driver_info =
USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
/* Terminus Technology Inc. Hub */
{ USB_DEVICE(0x1a40, 0x0101), .driver_info = USB_QUIRK_HUB_SLOW_RESET },
/* Corsair K70 RGB */
{ USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT |
USB_QUIRK_DELAY_CTRL_MSG },
/* Corsair Strafe */
{ USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT |
USB_QUIRK_DELAY_CTRL_MSG },
/* Corsair Strafe RGB */
{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT |
USB_QUIRK_DELAY_CTRL_MSG },
/* Corsair K70 LUX RGB */
{ USB_DEVICE(0x1b1c, 0x1b33), .driver_info = USB_QUIRK_DELAY_INIT },
/* Corsair K70 LUX */
{ USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT },
/* Corsair K70 RGB RAPIDFIRE */
{ USB_DEVICE(0x1b1c, 0x1b38), .driver_info = USB_QUIRK_DELAY_INIT |
USB_QUIRK_DELAY_CTRL_MSG },
/* MIDI keyboard WORLDE MINI */
{ USB_DEVICE(0x1c75, 0x0204), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
/* Acer C120 LED Projector */
{ USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
/* Blackmagic Design Intensity Shuttle */
{ USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM },
/* Blackmagic Design UltraStudio SDI */
{ USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
/* Hauppauge HVR-950q */
{ USB_DEVICE(0x2040, 0x7200), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
/* Raydium Touchscreen */
{ USB_DEVICE(0x2386, 0x3114), .driver_info = USB_QUIRK_NO_LPM },
{ USB_DEVICE(0x2386, 0x3119), .driver_info = USB_QUIRK_NO_LPM },
{ USB_DEVICE(0x2386, 0x350e), .driver_info = USB_QUIRK_NO_LPM },
/* DJI CineSSD */
{ USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
/* Alcor Link AK9563 SC Reader used in 2022 Lenovo ThinkPads */
{ USB_DEVICE(0x2ce3, 0x9563), .driver_info = USB_QUIRK_NO_LPM },
/* DELL USB GEN2 */
{ USB_DEVICE(0x413c, 0xb062), .driver_info = USB_QUIRK_NO_LPM | USB_QUIRK_RESET_RESUME },
/* VCOM device */
{ USB_DEVICE(0x4296, 0x7570), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS },
/* INTEL VALUE SSD */
{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
{ } /* terminating entry must be last */
};
static const struct usb_device_id usb_interface_quirk_list[] = {
/* Logitech UVC Cameras */
{ USB_VENDOR_AND_INTERFACE_INFO(0x046d, USB_CLASS_VIDEO, 1, 0),
.driver_info = USB_QUIRK_RESET_RESUME },
{ } /* terminating entry must be last */
};
static const struct usb_device_id usb_amd_resume_quirk_list[] = {
/* Lenovo Mouse with Pixart controller */
{ USB_DEVICE(0x17ef, 0x602e), .driver_info = USB_QUIRK_RESET_RESUME },
/* Pixart Mouse */
{ USB_DEVICE(0x093a, 0x2500), .driver_info = USB_QUIRK_RESET_RESUME },
{ USB_DEVICE(0x093a, 0x2510), .driver_info = USB_QUIRK_RESET_RESUME },
{ USB_DEVICE(0x093a, 0x2521), .driver_info = USB_QUIRK_RESET_RESUME },
{ USB_DEVICE(0x03f0, 0x2b4a), .driver_info = USB_QUIRK_RESET_RESUME },
/* Logitech Optical Mouse M90/M100 */
{ USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME },
{ } /* terminating entry must be last */
};
/*
* Entries for endpoints that should be ignored when parsing configuration
* descriptors.
*
* Matched for devices with USB_QUIRK_ENDPOINT_IGNORE.
*/
static const struct usb_device_id usb_endpoint_ignore[] = {
{ USB_DEVICE_INTERFACE_NUMBER(0x06f8, 0xb000, 5), .driver_info = 0x01 },
{ USB_DEVICE_INTERFACE_NUMBER(0x06f8, 0xb000, 5), .driver_info = 0x81 },
{ USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0202, 1), .driver_info = 0x85 },
{ USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0208, 1), .driver_info = 0x85 },
{ }
};
bool usb_endpoint_is_ignored(struct usb_device *udev,
struct usb_host_interface *intf,
struct usb_endpoint_descriptor *epd)
{
const struct usb_device_id *id;
unsigned int address;
for (id = usb_endpoint_ignore; id->match_flags; ++id) {
if (!usb_match_device(udev, id))
continue;
if (!usb_match_one_id_intf(udev, intf, id))
continue;
address = id->driver_info;
if (address == epd->bEndpointAddress)
return true;
}
return false;
}
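/*
 * Hypothetical illustration (not part of the original file): a caller
 * walking an altsetting's endpoints could use usb_endpoint_is_ignored()
 * to skip any endpoint the quirk table blacklists by bEndpointAddress.
 */
static inline int example_count_usable_endpoints(struct usb_device *udev,
		struct usb_host_interface *alt)
{
	int i, usable = 0;

	for (i = 0; i < alt->desc.bNumEndpoints; i++) {
		struct usb_endpoint_descriptor *epd = &alt->endpoint[i].desc;

		if (usb_endpoint_is_ignored(udev, alt, epd))
			continue;	/* quirk: pretend it does not exist */
		usable++;
	}
	return usable;
}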
static bool usb_match_any_interface(struct usb_device *udev,
const struct usb_device_id *id)
{
unsigned int i;
for (i = 0; i < udev->descriptor.bNumConfigurations; ++i) {
struct usb_host_config *cfg = &udev->config[i];
unsigned int j;
for (j = 0; j < cfg->desc.bNumInterfaces; ++j) {
struct usb_interface_cache *cache;
struct usb_host_interface *intf;
cache = cfg->intf_cache[j];
if (cache->num_altsetting == 0)
continue;
intf = &cache->altsetting[0];
if (usb_match_one_id_intf(udev, intf, id))
return true;
}
}
return false;
}
static int usb_amd_resume_quirk(struct usb_device *udev)
{
struct usb_hcd *hcd;
hcd = bus_to_hcd(udev->bus);
/* The device should be attached directly to root hub */
if (udev->level == 1 && hcd->amd_resume_bug == 1)
return 1;
return 0;
}
static u32 usb_detect_static_quirks(struct usb_device *udev,
const struct usb_device_id *id)
{
u32 quirks = 0;
for (; id->match_flags; id++) {
if (!usb_match_device(udev, id))
continue;
if ((id->match_flags & USB_DEVICE_ID_MATCH_INT_INFO) &&
!usb_match_any_interface(udev, id))
continue;
quirks |= (u32)(id->driver_info);
}
return quirks;
}
static u32 usb_detect_dynamic_quirks(struct usb_device *udev)
{
u16 vid = le16_to_cpu(udev->descriptor.idVendor);
u16 pid = le16_to_cpu(udev->descriptor.idProduct);
int i, flags = 0;
mutex_lock(&quirk_mutex);
for (i = 0; i < quirk_count; i++) {
if (vid == quirk_list[i].vid && pid == quirk_list[i].pid) {
flags = quirk_list[i].flags;
break;
}
}
mutex_unlock(&quirk_mutex);
return flags;
}
/*
* Detect any quirks the device has, and do any housekeeping for it if needed.
*/
void usb_detect_quirks(struct usb_device *udev)
{
udev->quirks = usb_detect_static_quirks(udev, usb_quirk_list);
/*
	 * Pixart-based mice trigger a remote-wakeup issue on the AMD
	 * Yangtze chipset, so mark them with the RESET_RESUME flag.
*/
if (usb_amd_resume_quirk(udev))
udev->quirks |= usb_detect_static_quirks(udev,
usb_amd_resume_quirk_list);
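	/*
	 * Dynamic quirks (added at runtime through the "quirks" module
	 * parameter) are XORed in: a user-supplied flag toggles the static
	 * default, so it can enable a quirk the table lacks or disable one
	 * the table already sets.
	 */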
udev->quirks ^= usb_detect_dynamic_quirks(udev);
if (udev->quirks)
dev_dbg(&udev->dev, "USB quirks for this device: %x\n",
udev->quirks);
#ifdef CONFIG_USB_DEFAULT_PERSIST
if (!(udev->quirks & USB_QUIRK_RESET))
udev->persist_enabled = 1;
#else
/* Hubs are automatically enabled for USB-PERSIST */
if (udev->descriptor.bDeviceClass == USB_CLASS_HUB)
udev->persist_enabled = 1;
#endif /* CONFIG_USB_DEFAULT_PERSIST */
}
void usb_detect_interface_quirks(struct usb_device *udev)
{
u32 quirks;
quirks = usb_detect_static_quirks(udev, usb_interface_quirk_list);
if (quirks == 0)
return;
dev_dbg(&udev->dev, "USB interface quirks for this device: %x\n",
quirks);
udev->quirks |= quirks;
}
void usb_release_quirk_list(void)
{
mutex_lock(&quirk_mutex);
kfree(quirk_list);
quirk_list = NULL;
mutex_unlock(&quirk_mutex);
}
| linux-master | drivers/usb/core/quirks.c |
// SPDX-License-Identifier: GPL-2.0
/*
* drivers/usb/core/file.c
*
* (C) Copyright Linus Torvalds 1999
* (C) Copyright Johannes Erdfelt 1999-2001
* (C) Copyright Andreas Gal 1999
* (C) Copyright Gregory P. Smith 1999
* (C) Copyright Deti Fliegl 1999 (new USB architecture)
* (C) Copyright Randy Dunlap 2000
* (C) Copyright David Brownell 2000-2001 (kernel hotplug, usb_device_id,
* more docs, etc)
* (C) Copyright Yggdrasil Computing, Inc. 2000
* (usb_device_id matching changes by Adam J. Richter)
* (C) Copyright Greg Kroah-Hartman 2002-2003
*
* Released under the GPLv2 only.
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/usb.h>
#include "usb.h"
#define MAX_USB_MINORS 256
static const struct file_operations *usb_minors[MAX_USB_MINORS];
static DECLARE_RWSEM(minor_rwsem);
static int usb_open(struct inode *inode, struct file *file)
{
int err = -ENODEV;
const struct file_operations *new_fops;
down_read(&minor_rwsem);
new_fops = fops_get(usb_minors[iminor(inode)]);
if (!new_fops)
goto done;
replace_fops(file, new_fops);
/* Curiouser and curiouser... NULL ->open() as "no device" ? */
if (file->f_op->open)
err = file->f_op->open(inode, file);
done:
up_read(&minor_rwsem);
return err;
}
static const struct file_operations usb_fops = {
.owner = THIS_MODULE,
.open = usb_open,
.llseek = noop_llseek,
};
static char *usb_devnode(const struct device *dev, umode_t *mode)
{
struct usb_class_driver *drv;
drv = dev_get_drvdata(dev);
if (!drv || !drv->devnode)
return NULL;
return drv->devnode(dev, mode);
}
const struct class usbmisc_class = {
.name = "usbmisc",
.devnode = usb_devnode,
};
int usb_major_init(void)
{
int error;
error = register_chrdev(USB_MAJOR, "usb", &usb_fops);
if (error)
printk(KERN_ERR "Unable to get major %d for usb devices\n",
USB_MAJOR);
return error;
}
void usb_major_cleanup(void)
{
unregister_chrdev(USB_MAJOR, "usb");
}
/**
* usb_register_dev - register a USB device, and ask for a minor number
* @intf: pointer to the usb_interface that is being registered
* @class_driver: pointer to the usb_class_driver for this device
*
* This should be called by all USB drivers that use the USB major number.
* If CONFIG_USB_DYNAMIC_MINORS is enabled, the minor number will be
* dynamically allocated out of the list of available ones. If it is not
* enabled, the minor number will be based on the next available free minor,
* starting at the class_driver->minor_base.
*
* This function also creates a usb class device in the sysfs tree.
*
* usb_deregister_dev() must be called when the driver is done with
* the minor numbers given out by this function.
*
 * Return: a negative error code if something goes wrong while trying to
 * register the device, and 0 on success.
*/
int usb_register_dev(struct usb_interface *intf,
struct usb_class_driver *class_driver)
{
int retval = 0;
int minor_base = class_driver->minor_base;
int minor;
char name[20];
#ifdef CONFIG_USB_DYNAMIC_MINORS
/*
* We don't care what the device tries to start at, we want to start
* at zero to pack the devices into the smallest available space with
* no holes in the minor range.
*/
minor_base = 0;
#endif
if (class_driver->fops == NULL)
return -EINVAL;
if (intf->minor >= 0)
return -EADDRINUSE;
dev_dbg(&intf->dev, "looking for a minor, starting at %d\n", minor_base);
down_write(&minor_rwsem);
for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) {
if (usb_minors[minor])
continue;
usb_minors[minor] = class_driver->fops;
intf->minor = minor;
break;
}
if (intf->minor < 0) {
up_write(&minor_rwsem);
return -EXFULL;
}
/* create a usb class device for this usb interface */
snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
intf->usb_dev = device_create(&usbmisc_class, &intf->dev,
MKDEV(USB_MAJOR, minor), class_driver,
"%s", kbasename(name));
if (IS_ERR(intf->usb_dev)) {
usb_minors[minor] = NULL;
intf->minor = -1;
retval = PTR_ERR(intf->usb_dev);
}
up_write(&minor_rwsem);
return retval;
}
EXPORT_SYMBOL_GPL(usb_register_dev);
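/*
 * Hypothetical sketch (not from this file): the usual pairing of
 * usb_register_dev() in probe() with usb_deregister_dev() in
 * disconnect(). The driver name, fops and minor_base below are invented
 * purely for illustration.
 */
static const struct file_operations example_fops = {
	.owner = THIS_MODULE,
};

static struct usb_class_driver example_class = {
	.name		= "example%d",
	.fops		= &example_fops,
	.minor_base	= 192,
};

static int example_probe(struct usb_interface *intf,
		const struct usb_device_id *id)
{
	/* ... hardware setup ... */
	return usb_register_dev(intf, &example_class);
}

static void example_disconnect(struct usb_interface *intf)
{
	usb_deregister_dev(intf, &example_class);
}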
/**
* usb_deregister_dev - deregister a USB device's dynamic minor.
* @intf: pointer to the usb_interface that is being deregistered
* @class_driver: pointer to the usb_class_driver for this device
*
* Used in conjunction with usb_register_dev(). This function is called
 * when the USB driver is finished with the minor numbers obtained from a
* call to usb_register_dev() (usually when the device is disconnected
* from the system.)
*
* This function also removes the usb class device from the sysfs tree.
*
* This should be called by all drivers that use the USB major number.
*/
void usb_deregister_dev(struct usb_interface *intf,
struct usb_class_driver *class_driver)
{
if (intf->minor == -1)
return;
dev_dbg(&intf->dev, "removing %d minor\n", intf->minor);
device_destroy(&usbmisc_class, MKDEV(USB_MAJOR, intf->minor));
down_write(&minor_rwsem);
usb_minors[intf->minor] = NULL;
up_write(&minor_rwsem);
intf->usb_dev = NULL;
intf->minor = -1;
}
EXPORT_SYMBOL_GPL(usb_deregister_dev);
| linux-master | drivers/usb/core/file.c |
// SPDX-License-Identifier: GPL-2.0
/*
* USB port LED trigger
*
* Copyright (C) 2016 Rafał Miłecki <[email protected]>
*/
#include <linux/device.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/of.h>
struct usbport_trig_data {
struct led_classdev *led_cdev;
struct list_head ports;
struct notifier_block nb;
int count; /* Number of connected matching devices */
};
struct usbport_trig_port {
struct usbport_trig_data *data;
struct usb_device *hub;
int portnum;
char *port_name;
bool observed;
struct device_attribute attr;
struct list_head list;
};
/***************************************
* Helpers
***************************************/
/*
* usbport_trig_usb_dev_observed - Check if dev is connected to observed port
*/
static bool usbport_trig_usb_dev_observed(struct usbport_trig_data *usbport_data,
struct usb_device *usb_dev)
{
struct usbport_trig_port *port;
if (!usb_dev->parent)
return false;
list_for_each_entry(port, &usbport_data->ports, list) {
if (usb_dev->parent == port->hub &&
usb_dev->portnum == port->portnum)
return port->observed;
}
return false;
}
static int usbport_trig_usb_dev_check(struct usb_device *usb_dev, void *data)
{
struct usbport_trig_data *usbport_data = data;
if (usbport_trig_usb_dev_observed(usbport_data, usb_dev))
usbport_data->count++;
return 0;
}
/*
 * usbport_trig_update_count - Recalculate number of connected matching devices
*/
static void usbport_trig_update_count(struct usbport_trig_data *usbport_data)
{
struct led_classdev *led_cdev = usbport_data->led_cdev;
usbport_data->count = 0;
usb_for_each_dev(usbport_data, usbport_trig_usb_dev_check);
led_set_brightness(led_cdev, usbport_data->count ? LED_FULL : LED_OFF);
}
/***************************************
* Device attr
***************************************/
static ssize_t usbport_trig_port_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usbport_trig_port *port = container_of(attr,
struct usbport_trig_port,
attr);
return sprintf(buf, "%d\n", port->observed) + 1;
}
static ssize_t usbport_trig_port_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct usbport_trig_port *port = container_of(attr,
struct usbport_trig_port,
attr);
if (!strcmp(buf, "0") || !strcmp(buf, "0\n"))
port->observed = 0;
else if (!strcmp(buf, "1") || !strcmp(buf, "1\n"))
port->observed = 1;
else
return -EINVAL;
usbport_trig_update_count(port->data);
return size;
}
static struct attribute *ports_attrs[] = {
NULL,
};
static const struct attribute_group ports_group = {
.name = "ports",
.attrs = ports_attrs,
};
/***************************************
* Adding & removing ports
***************************************/
/*
* usbport_trig_port_observed - Check if port should be observed
*/
static bool usbport_trig_port_observed(struct usbport_trig_data *usbport_data,
struct usb_device *usb_dev, int port1)
{
struct device *dev = usbport_data->led_cdev->dev;
struct device_node *led_np = dev->of_node;
struct of_phandle_args args;
struct device_node *port_np;
int count, i;
if (!led_np)
return false;
/*
* Get node of port being added
*
* FIXME: This is really the device node of the connected device
*/
port_np = usb_of_get_device_node(usb_dev, port1);
if (!port_np)
return false;
of_node_put(port_np);
/* Number of trigger sources for this LED */
count = of_count_phandle_with_args(led_np, "trigger-sources",
"#trigger-source-cells");
if (count < 0) {
dev_warn(dev, "Failed to get trigger sources for %pOF\n",
led_np);
return false;
}
/* Check list of sources for this specific port */
for (i = 0; i < count; i++) {
int err;
err = of_parse_phandle_with_args(led_np, "trigger-sources",
"#trigger-source-cells", i,
&args);
if (err) {
dev_err(dev, "Failed to get trigger source phandle at index %d: %d\n",
i, err);
continue;
}
of_node_put(args.np);
if (args.np == port_np)
return true;
}
return false;
}
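/*
 * Hypothetical device tree fragment (assumed from the common LED trigger
 * bindings, not part of this file) that the lookup above would match:
 *
 *	led {
 *		label = "green:usbport";
 *		linux,default-trigger = "usbport";
 *		trigger-sources = <&hub_port1>, <&hub_port2>;
 *	};
 */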
static int usbport_trig_add_port(struct usbport_trig_data *usbport_data,
struct usb_device *usb_dev,
const char *hub_name, int portnum)
{
struct led_classdev *led_cdev = usbport_data->led_cdev;
struct usbport_trig_port *port;
size_t len;
int err;
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port) {
err = -ENOMEM;
goto err_out;
}
port->data = usbport_data;
port->hub = usb_dev;
port->portnum = portnum;
port->observed = usbport_trig_port_observed(usbport_data, usb_dev,
portnum);
len = strlen(hub_name) + 8;
port->port_name = kzalloc(len, GFP_KERNEL);
if (!port->port_name) {
err = -ENOMEM;
goto err_free_port;
}
snprintf(port->port_name, len, "%s-port%d", hub_name, portnum);
sysfs_attr_init(&port->attr.attr);
port->attr.attr.name = port->port_name;
port->attr.attr.mode = S_IRUSR | S_IWUSR;
port->attr.show = usbport_trig_port_show;
port->attr.store = usbport_trig_port_store;
err = sysfs_add_file_to_group(&led_cdev->dev->kobj, &port->attr.attr,
ports_group.name);
if (err)
goto err_free_port_name;
list_add_tail(&port->list, &usbport_data->ports);
return 0;
err_free_port_name:
kfree(port->port_name);
err_free_port:
kfree(port);
err_out:
return err;
}
static int usbport_trig_add_usb_dev_ports(struct usb_device *usb_dev,
void *data)
{
struct usbport_trig_data *usbport_data = data;
int i;
for (i = 1; i <= usb_dev->maxchild; i++)
usbport_trig_add_port(usbport_data, usb_dev,
dev_name(&usb_dev->dev), i);
return 0;
}
static void usbport_trig_remove_port(struct usbport_trig_data *usbport_data,
struct usbport_trig_port *port)
{
struct led_classdev *led_cdev = usbport_data->led_cdev;
list_del(&port->list);
sysfs_remove_file_from_group(&led_cdev->dev->kobj, &port->attr.attr,
ports_group.name);
kfree(port->port_name);
kfree(port);
}
static void usbport_trig_remove_usb_dev_ports(struct usbport_trig_data *usbport_data,
struct usb_device *usb_dev)
{
struct usbport_trig_port *port, *tmp;
list_for_each_entry_safe(port, tmp, &usbport_data->ports, list) {
if (port->hub == usb_dev)
usbport_trig_remove_port(usbport_data, port);
}
}
/***************************************
* Init, exit, etc.
***************************************/
static int usbport_trig_notify(struct notifier_block *nb, unsigned long action,
void *data)
{
struct usbport_trig_data *usbport_data =
container_of(nb, struct usbport_trig_data, nb);
struct led_classdev *led_cdev = usbport_data->led_cdev;
struct usb_device *usb_dev = data;
bool observed;
observed = usbport_trig_usb_dev_observed(usbport_data, usb_dev);
switch (action) {
case USB_DEVICE_ADD:
usbport_trig_add_usb_dev_ports(usb_dev, usbport_data);
if (observed && usbport_data->count++ == 0)
led_set_brightness(led_cdev, LED_FULL);
return NOTIFY_OK;
case USB_DEVICE_REMOVE:
usbport_trig_remove_usb_dev_ports(usbport_data, usb_dev);
if (observed && --usbport_data->count == 0)
led_set_brightness(led_cdev, LED_OFF);
return NOTIFY_OK;
}
return NOTIFY_DONE;
}
static int usbport_trig_activate(struct led_classdev *led_cdev)
{
struct usbport_trig_data *usbport_data;
int err;
usbport_data = kzalloc(sizeof(*usbport_data), GFP_KERNEL);
if (!usbport_data)
return -ENOMEM;
usbport_data->led_cdev = led_cdev;
/* List of ports */
INIT_LIST_HEAD(&usbport_data->ports);
err = sysfs_create_group(&led_cdev->dev->kobj, &ports_group);
if (err)
goto err_free;
usb_for_each_dev(usbport_data, usbport_trig_add_usb_dev_ports);
usbport_trig_update_count(usbport_data);
/* Notifications */
usbport_data->nb.notifier_call = usbport_trig_notify;
led_set_trigger_data(led_cdev, usbport_data);
usb_register_notify(&usbport_data->nb);
return 0;
err_free:
kfree(usbport_data);
return err;
}
static void usbport_trig_deactivate(struct led_classdev *led_cdev)
{
struct usbport_trig_data *usbport_data = led_get_trigger_data(led_cdev);
struct usbport_trig_port *port, *tmp;
list_for_each_entry_safe(port, tmp, &usbport_data->ports, list) {
usbport_trig_remove_port(usbport_data, port);
}
sysfs_remove_group(&led_cdev->dev->kobj, &ports_group);
usb_unregister_notify(&usbport_data->nb);
kfree(usbport_data);
}
static struct led_trigger usbport_led_trigger = {
.name = "usbport",
.activate = usbport_trig_activate,
.deactivate = usbport_trig_deactivate,
};
module_led_trigger(usbport_led_trigger);
MODULE_AUTHOR("Rafał Miłecki <[email protected]>");
MODULE_DESCRIPTION("USB port trigger");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/usb/core/ledtrig-usbport.c |
// SPDX-License-Identifier: GPL-2.0
/*
* All the USB notify logic
*
* (C) Copyright 2005 Greg Kroah-Hartman <[email protected]>
*
* notifier functions originally based on those in kernel/sys.c
* but fixed up to not be so broken.
*
* Released under the GPLv2 only.
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/usb.h>
#include <linux/mutex.h>
#include "usb.h"
static BLOCKING_NOTIFIER_HEAD(usb_notifier_list);
/**
* usb_register_notify - register a notifier callback whenever a usb change happens
* @nb: pointer to the notifier block for the callback events.
*
* These changes are either USB devices or busses being added or removed.
*/
void usb_register_notify(struct notifier_block *nb)
{
blocking_notifier_chain_register(&usb_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(usb_register_notify);
/**
* usb_unregister_notify - unregister a notifier callback
* @nb: pointer to the notifier block for the callback events.
*
* usb_register_notify() must have been previously called for this function
* to work properly.
*/
void usb_unregister_notify(struct notifier_block *nb)
{
blocking_notifier_chain_unregister(&usb_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(usb_unregister_notify);
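/*
 * Hypothetical sketch (not from this file): a minimal client of the
 * notifier chain above. The callback and block names are invented.
 */
static int example_usb_notify(struct notifier_block *nb,
		unsigned long action, void *data)
{
	struct usb_device *udev = data;	/* ADD/REMOVE pass a usb_device */

	switch (action) {
	case USB_DEVICE_ADD:
		dev_info(&udev->dev, "device added\n");
		break;
	case USB_DEVICE_REMOVE:
		dev_info(&udev->dev, "device removed\n");
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_usb_notify,
};
/* Pair usb_register_notify(&example_nb) with usb_unregister_notify(). */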
void usb_notify_add_device(struct usb_device *udev)
{
blocking_notifier_call_chain(&usb_notifier_list, USB_DEVICE_ADD, udev);
}
void usb_notify_remove_device(struct usb_device *udev)
{
blocking_notifier_call_chain(&usb_notifier_list,
USB_DEVICE_REMOVE, udev);
}
void usb_notify_add_bus(struct usb_bus *ubus)
{
blocking_notifier_call_chain(&usb_notifier_list, USB_BUS_ADD, ubus);
}
void usb_notify_remove_bus(struct usb_bus *ubus)
{
blocking_notifier_call_chain(&usb_notifier_list, USB_BUS_REMOVE, ubus);
}
| linux-master | drivers/usb/core/notify.c |
// SPDX-License-Identifier: GPL-2.0
/*
* drivers/usb/core/driver.c - most of the driver model stuff for usb
*
* (C) Copyright 2005 Greg Kroah-Hartman <[email protected]>
*
* based on drivers/usb/usb.c which had the following copyrights:
* (C) Copyright Linus Torvalds 1999
* (C) Copyright Johannes Erdfelt 1999-2001
* (C) Copyright Andreas Gal 1999
* (C) Copyright Gregory P. Smith 1999
* (C) Copyright Deti Fliegl 1999 (new USB architecture)
* (C) Copyright Randy Dunlap 2000
* (C) Copyright David Brownell 2000-2004
* (C) Copyright Yggdrasil Computing, Inc. 2000
* (usb_device_id matching changes by Adam J. Richter)
* (C) Copyright Greg Kroah-Hartman 2002-2003
*
* Released under the GPLv2 only.
*
* NOTE! This is not actually a driver at all, rather this is
* just a collection of helper routines that implement the
* matching, probing, releasing, suspending and resuming for
* real drivers.
*
*/
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/usb.h>
#include <linux/usb/quirks.h>
#include <linux/usb/hcd.h>
#include "usb.h"
/*
 * Adds a new dynamic USB device ID to this driver,
 * and causes the driver to probe for all devices again.
*/
ssize_t usb_store_new_id(struct usb_dynids *dynids,
const struct usb_device_id *id_table,
struct device_driver *driver,
const char *buf, size_t count)
{
struct usb_dynid *dynid;
u32 idVendor = 0;
u32 idProduct = 0;
unsigned int bInterfaceClass = 0;
u32 refVendor, refProduct;
int fields = 0;
int retval = 0;
fields = sscanf(buf, "%x %x %x %x %x", &idVendor, &idProduct,
&bInterfaceClass, &refVendor, &refProduct);
if (fields < 2)
return -EINVAL;
dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
if (!dynid)
return -ENOMEM;
INIT_LIST_HEAD(&dynid->node);
dynid->id.idVendor = idVendor;
dynid->id.idProduct = idProduct;
dynid->id.match_flags = USB_DEVICE_ID_MATCH_DEVICE;
if (fields > 2 && bInterfaceClass) {
if (bInterfaceClass > 255) {
retval = -EINVAL;
goto fail;
}
dynid->id.bInterfaceClass = (u8)bInterfaceClass;
dynid->id.match_flags |= USB_DEVICE_ID_MATCH_INT_CLASS;
}
if (fields > 4) {
const struct usb_device_id *id = id_table;
if (!id) {
retval = -ENODEV;
goto fail;
}
for (; id->match_flags; id++)
if (id->idVendor == refVendor && id->idProduct == refProduct)
break;
if (id->match_flags) {
dynid->id.driver_info = id->driver_info;
} else {
retval = -ENODEV;
goto fail;
}
}
spin_lock(&dynids->lock);
list_add_tail(&dynid->node, &dynids->list);
spin_unlock(&dynids->lock);
retval = driver_attach(driver);
if (retval)
return retval;
return count;
fail:
kfree(dynid);
return retval;
}
EXPORT_SYMBOL_GPL(usb_store_new_id);
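/*
 * For reference (IDs invented), the sysfs format parsed above takes two
 * to five hex fields - idVendor idProduct bInterfaceClass refVendor
 * refProduct - e.g.:
 *
 *	echo "1234 5678" > /sys/bus/usb/drivers/foo/new_id
 *	echo "1234 5678 ff" > /sys/bus/usb/drivers/foo/new_id
 */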
ssize_t usb_show_dynids(struct usb_dynids *dynids, char *buf)
{
struct usb_dynid *dynid;
size_t count = 0;
list_for_each_entry(dynid, &dynids->list, node)
if (dynid->id.bInterfaceClass != 0)
count += scnprintf(&buf[count], PAGE_SIZE - count, "%04x %04x %02x\n",
dynid->id.idVendor, dynid->id.idProduct,
dynid->id.bInterfaceClass);
else
count += scnprintf(&buf[count], PAGE_SIZE - count, "%04x %04x\n",
dynid->id.idVendor, dynid->id.idProduct);
return count;
}
EXPORT_SYMBOL_GPL(usb_show_dynids);
static ssize_t new_id_show(struct device_driver *driver, char *buf)
{
struct usb_driver *usb_drv = to_usb_driver(driver);
return usb_show_dynids(&usb_drv->dynids, buf);
}
static ssize_t new_id_store(struct device_driver *driver,
const char *buf, size_t count)
{
struct usb_driver *usb_drv = to_usb_driver(driver);
return usb_store_new_id(&usb_drv->dynids, usb_drv->id_table, driver, buf, count);
}
static DRIVER_ATTR_RW(new_id);
/*
* Remove a USB device ID from this driver
*/
static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
size_t count)
{
struct usb_dynid *dynid, *n;
struct usb_driver *usb_driver = to_usb_driver(driver);
u32 idVendor;
u32 idProduct;
int fields;
fields = sscanf(buf, "%x %x", &idVendor, &idProduct);
if (fields < 2)
return -EINVAL;
spin_lock(&usb_driver->dynids.lock);
list_for_each_entry_safe(dynid, n, &usb_driver->dynids.list, node) {
struct usb_device_id *id = &dynid->id;
if ((id->idVendor == idVendor) &&
(id->idProduct == idProduct)) {
list_del(&dynid->node);
kfree(dynid);
break;
}
}
spin_unlock(&usb_driver->dynids.lock);
return count;
}
static ssize_t remove_id_show(struct device_driver *driver, char *buf)
{
return new_id_show(driver, buf);
}
static DRIVER_ATTR_RW(remove_id);
static int usb_create_newid_files(struct usb_driver *usb_drv)
{
int error = 0;
if (usb_drv->no_dynamic_id)
goto exit;
if (usb_drv->probe != NULL) {
error = driver_create_file(&usb_drv->drvwrap.driver,
&driver_attr_new_id);
if (error == 0) {
error = driver_create_file(&usb_drv->drvwrap.driver,
&driver_attr_remove_id);
if (error)
driver_remove_file(&usb_drv->drvwrap.driver,
&driver_attr_new_id);
}
}
exit:
return error;
}
static void usb_remove_newid_files(struct usb_driver *usb_drv)
{
if (usb_drv->no_dynamic_id)
return;
if (usb_drv->probe != NULL) {
driver_remove_file(&usb_drv->drvwrap.driver,
&driver_attr_remove_id);
driver_remove_file(&usb_drv->drvwrap.driver,
&driver_attr_new_id);
}
}
static void usb_free_dynids(struct usb_driver *usb_drv)
{
struct usb_dynid *dynid, *n;
spin_lock(&usb_drv->dynids.lock);
list_for_each_entry_safe(dynid, n, &usb_drv->dynids.list, node) {
list_del(&dynid->node);
kfree(dynid);
}
spin_unlock(&usb_drv->dynids.lock);
}
static const struct usb_device_id *usb_match_dynamic_id(struct usb_interface *intf,
struct usb_driver *drv)
{
struct usb_dynid *dynid;
spin_lock(&drv->dynids.lock);
list_for_each_entry(dynid, &drv->dynids.list, node) {
if (usb_match_one_id(intf, &dynid->id)) {
spin_unlock(&drv->dynids.lock);
return &dynid->id;
}
}
spin_unlock(&drv->dynids.lock);
return NULL;
}
/* called from driver core with dev locked */
static int usb_probe_device(struct device *dev)
{
struct usb_device_driver *udriver = to_usb_device_driver(dev->driver);
struct usb_device *udev = to_usb_device(dev);
int error = 0;
dev_dbg(dev, "%s\n", __func__);
/* TODO: Add real matching code */
/* The device should always appear to be in use
* unless the driver supports autosuspend.
*/
if (!udriver->supports_autosuspend)
error = usb_autoresume_device(udev);
if (error)
return error;
if (udriver->generic_subclass)
error = usb_generic_driver_probe(udev);
if (error)
return error;
/* Probe the USB device with the driver in hand, but only
* defer to a generic driver in case the current USB
* device driver has an id_table or a match function; i.e.,
* when the device driver was explicitly matched against
* a device.
*
* If the device driver does not have either of these,
* then we assume that it can bind to any device and is
* not truly a more specialized/non-generic driver, so a
* return value of -ENODEV should not force the device
* to be handled by the generic USB driver, as there
* can still be another, more specialized, device driver.
*
* This accommodates the usbip driver.
*
* TODO: What if, in the future, there are multiple
* specialized USB device drivers for a particular device?
* In such cases, there is a need to try all matching
* specialised device drivers prior to setting the
* use_generic_driver bit.
*/
error = udriver->probe(udev);
if (error == -ENODEV && udriver != &usb_generic_driver &&
(udriver->id_table || udriver->match)) {
udev->use_generic_driver = 1;
return -EPROBE_DEFER;
}
return error;
}
/* called from driver core with dev locked */
static int usb_unbind_device(struct device *dev)
{
struct usb_device *udev = to_usb_device(dev);
struct usb_device_driver *udriver = to_usb_device_driver(dev->driver);
if (udriver->disconnect)
udriver->disconnect(udev);
if (udriver->generic_subclass)
usb_generic_driver_disconnect(udev);
if (!udriver->supports_autosuspend)
usb_autosuspend_device(udev);
return 0;
}
/* called from driver core with dev locked */
static int usb_probe_interface(struct device *dev)
{
struct usb_driver *driver = to_usb_driver(dev->driver);
struct usb_interface *intf = to_usb_interface(dev);
struct usb_device *udev = interface_to_usbdev(intf);
const struct usb_device_id *id;
int error = -ENODEV;
int lpm_disable_error = -ENODEV;
dev_dbg(dev, "%s\n", __func__);
intf->needs_binding = 0;
if (usb_device_is_owned(udev))
return error;
if (udev->authorized == 0) {
dev_err(&intf->dev, "Device is not authorized for usage\n");
return error;
} else if (intf->authorized == 0) {
dev_err(&intf->dev, "Interface %d is not authorized for usage\n",
intf->altsetting->desc.bInterfaceNumber);
return error;
}
id = usb_match_dynamic_id(intf, driver);
if (!id)
id = usb_match_id(intf, driver->id_table);
if (!id)
return error;
dev_dbg(dev, "%s - got id\n", __func__);
error = usb_autoresume_device(udev);
if (error)
return error;
intf->condition = USB_INTERFACE_BINDING;
/* Probed interfaces are initially active. They are
* runtime-PM-enabled only if the driver has autosuspend support.
* They are sensitive to their children's power states.
*/
pm_runtime_set_active(dev);
pm_suspend_ignore_children(dev, false);
if (driver->supports_autosuspend)
pm_runtime_enable(dev);
/* If the new driver doesn't allow hub-initiated LPM, and we can't
* disable hub-initiated LPM, then fail the probe.
*
* Otherwise, leaving LPM enabled should be harmless, because the
* endpoint intervals should remain the same, and the U1/U2 timeouts
* should remain the same.
*
* If we need to install alt setting 0 before probe, or another alt
* setting during probe, that should also be fine. usb_set_interface()
* will attempt to disable LPM, and fail if it can't disable it.
*/
if (driver->disable_hub_initiated_lpm) {
lpm_disable_error = usb_unlocked_disable_lpm(udev);
if (lpm_disable_error) {
dev_err(&intf->dev, "%s Failed to disable LPM for driver %s\n",
__func__, driver->name);
error = lpm_disable_error;
goto err;
}
}
/* Carry out a deferred switch to altsetting 0 */
if (intf->needs_altsetting0) {
error = usb_set_interface(udev, intf->altsetting[0].
desc.bInterfaceNumber, 0);
if (error < 0)
goto err;
intf->needs_altsetting0 = 0;
}
error = driver->probe(intf, id);
if (error)
goto err;
intf->condition = USB_INTERFACE_BOUND;
/* If the LPM disable succeeded, balance the ref counts. */
if (!lpm_disable_error)
usb_unlocked_enable_lpm(udev);
usb_autosuspend_device(udev);
return error;
err:
usb_set_intfdata(intf, NULL);
intf->needs_remote_wakeup = 0;
intf->condition = USB_INTERFACE_UNBOUND;
/* If the LPM disable succeeded, balance the ref counts. */
if (!lpm_disable_error)
usb_unlocked_enable_lpm(udev);
/* Unbound interfaces are always runtime-PM-disabled and -suspended */
if (driver->supports_autosuspend)
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
usb_autosuspend_device(udev);
return error;
}
/* called from driver core with dev locked */
static int usb_unbind_interface(struct device *dev)
{
struct usb_driver *driver = to_usb_driver(dev->driver);
struct usb_interface *intf = to_usb_interface(dev);
struct usb_host_endpoint *ep, **eps = NULL;
struct usb_device *udev;
int i, j, error, r;
int lpm_disable_error = -ENODEV;
intf->condition = USB_INTERFACE_UNBINDING;
/* Autoresume for set_interface call below */
udev = interface_to_usbdev(intf);
error = usb_autoresume_device(udev);
/* If hub-initiated LPM policy may change, attempt to disable LPM until
* the driver is unbound. If LPM isn't disabled, that's fine because it
* wouldn't be enabled unless all the bound interfaces supported
* hub-initiated LPM.
*/
if (driver->disable_hub_initiated_lpm)
lpm_disable_error = usb_unlocked_disable_lpm(udev);
/*
* Terminate all URBs for this interface unless the driver
* supports "soft" unbinding and the device is still present.
*/
if (!driver->soft_unbind || udev->state == USB_STATE_NOTATTACHED)
usb_disable_interface(udev, intf, false);
driver->disconnect(intf);
/* Free streams */
for (i = 0, j = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
ep = &intf->cur_altsetting->endpoint[i];
if (ep->streams == 0)
continue;
if (j == 0) {
eps = kmalloc_array(USB_MAXENDPOINTS, sizeof(void *),
GFP_KERNEL);
if (!eps)
break;
}
eps[j++] = ep;
}
if (j) {
usb_free_streams(intf, eps, j, GFP_KERNEL);
kfree(eps);
}
/* Reset other interface state.
* We cannot do a Set-Interface if the device is suspended or
* if it is prepared for a system sleep (since installing a new
* altsetting means creating new endpoint device entries).
* When either of these happens, defer the Set-Interface.
*/
if (intf->cur_altsetting->desc.bAlternateSetting == 0) {
/* Already in altsetting 0 so skip Set-Interface.
* Just re-enable it without affecting the endpoint toggles.
*/
usb_enable_interface(udev, intf, false);
} else if (!error && !intf->dev.power.is_prepared) {
r = usb_set_interface(udev, intf->altsetting[0].
desc.bInterfaceNumber, 0);
if (r < 0)
intf->needs_altsetting0 = 1;
} else {
intf->needs_altsetting0 = 1;
}
usb_set_intfdata(intf, NULL);
intf->condition = USB_INTERFACE_UNBOUND;
intf->needs_remote_wakeup = 0;
/* Attempt to re-enable USB3 LPM, if the disable succeeded. */
if (!lpm_disable_error)
usb_unlocked_enable_lpm(udev);
/* Unbound interfaces are always runtime-PM-disabled and -suspended */
if (driver->supports_autosuspend)
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
if (!error)
usb_autosuspend_device(udev);
return 0;
}
/**
* usb_driver_claim_interface - bind a driver to an interface
* @driver: the driver to be bound
* @iface: the interface to which it will be bound; must be in the
* usb device's active configuration
* @data: driver data associated with that interface
*
* This is used by usb device drivers that need to claim more than one
* interface on a device when probing (audio and acm are current examples).
* No device driver should directly modify internal usb_interface or
* usb_device structure members.
*
* Callers must own the device lock, so driver probe() entries don't need
* extra locking, but other call contexts may need to explicitly claim that
* lock.
*
* Return: 0 on success.
*/
int usb_driver_claim_interface(struct usb_driver *driver,
struct usb_interface *iface, void *data)
{
struct device *dev;
int retval = 0;
if (!iface)
return -ENODEV;
dev = &iface->dev;
if (dev->driver)
return -EBUSY;
/* reject claim if interface is not authorized */
if (!iface->authorized)
return -ENODEV;
dev->driver = &driver->drvwrap.driver;
usb_set_intfdata(iface, data);
iface->needs_binding = 0;
iface->condition = USB_INTERFACE_BOUND;
/* Claimed interfaces are initially inactive (suspended) and
* runtime-PM-enabled, but only if the driver has autosuspend
* support. Otherwise they are marked active, to prevent the
* device from being autosuspended, but left disabled. In either
* case they are sensitive to their children's power states.
*/
pm_suspend_ignore_children(dev, false);
if (driver->supports_autosuspend)
pm_runtime_enable(dev);
else
pm_runtime_set_active(dev);
/* if interface was already added, bind now; else let
* the future device_add() bind it, bypassing probe()
*/
if (device_is_registered(dev))
retval = device_bind_driver(dev);
if (retval) {
dev->driver = NULL;
usb_set_intfdata(iface, NULL);
iface->needs_remote_wakeup = 0;
iface->condition = USB_INTERFACE_UNBOUND;
/*
* Unbound interfaces are always runtime-PM-disabled
* and runtime-PM-suspended
*/
if (driver->supports_autosuspend)
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
}
return retval;
}
EXPORT_SYMBOL_GPL(usb_driver_claim_interface);
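/*
 * Hypothetical sketch (not from this file): a probe() routine claiming a
 * sibling interface, the multi-interface pattern the comment above
 * mentions for audio/acm style drivers. The interface number is invented.
 */
static int example_claim_sibling(struct usb_interface *intf, void *priv)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	struct usb_interface *sibling = usb_ifnum_to_if(udev, 1);

	if (!sibling)
		return -ENODEV;
	/* the device lock is already held in probe() context */
	return usb_driver_claim_interface(to_usb_driver(intf->dev.driver),
			sibling, priv);
}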
/**
* usb_driver_release_interface - unbind a driver from an interface
* @driver: the driver to be unbound
* @iface: the interface from which it will be unbound
*
* This can be used by drivers to release an interface without waiting
* for their disconnect() methods to be called. In typical cases this
* also causes the driver disconnect() method to be called.
*
* This call is synchronous, and may not be used in an interrupt context.
* Callers must own the device lock, so driver disconnect() entries don't
* need extra locking, but other call contexts may need to explicitly claim
* that lock.
*/
void usb_driver_release_interface(struct usb_driver *driver,
struct usb_interface *iface)
{
struct device *dev = &iface->dev;
/* this should never happen, don't release something that's not ours */
if (!dev->driver || dev->driver != &driver->drvwrap.driver)
return;
/* don't release from within disconnect() */
if (iface->condition != USB_INTERFACE_BOUND)
return;
iface->condition = USB_INTERFACE_UNBINDING;
/* Release via the driver core only if the interface
* has already been registered
*/
if (device_is_registered(dev)) {
device_release_driver(dev);
} else {
device_lock(dev);
usb_unbind_interface(dev);
dev->driver = NULL;
device_unlock(dev);
}
}
EXPORT_SYMBOL_GPL(usb_driver_release_interface);
/* returns 0 if no match, 1 if match */
int usb_match_device(struct usb_device *dev, const struct usb_device_id *id)
{
if ((id->match_flags & USB_DEVICE_ID_MATCH_VENDOR) &&
id->idVendor != le16_to_cpu(dev->descriptor.idVendor))
return 0;
if ((id->match_flags & USB_DEVICE_ID_MATCH_PRODUCT) &&
id->idProduct != le16_to_cpu(dev->descriptor.idProduct))
return 0;
/* No need to test id->bcdDevice_lo != 0, since 0 is never
greater than any unsigned number. */
if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_LO) &&
(id->bcdDevice_lo > le16_to_cpu(dev->descriptor.bcdDevice)))
return 0;
if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_HI) &&
(id->bcdDevice_hi < le16_to_cpu(dev->descriptor.bcdDevice)))
return 0;
if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_CLASS) &&
(id->bDeviceClass != dev->descriptor.bDeviceClass))
return 0;
if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_SUBCLASS) &&
(id->bDeviceSubClass != dev->descriptor.bDeviceSubClass))
return 0;
if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_PROTOCOL) &&
(id->bDeviceProtocol != dev->descriptor.bDeviceProtocol))
return 0;
return 1;
}
/* returns 0 if no match, 1 if match */
int usb_match_one_id_intf(struct usb_device *dev,
struct usb_host_interface *intf,
const struct usb_device_id *id)
{
/* The interface class, subclass, protocol and number should never be
* checked for a match if the device class is Vendor Specific,
* unless the match record specifies the Vendor ID. */
if (dev->descriptor.bDeviceClass == USB_CLASS_VENDOR_SPEC &&
!(id->match_flags & USB_DEVICE_ID_MATCH_VENDOR) &&
(id->match_flags & (USB_DEVICE_ID_MATCH_INT_CLASS |
USB_DEVICE_ID_MATCH_INT_SUBCLASS |
USB_DEVICE_ID_MATCH_INT_PROTOCOL |
USB_DEVICE_ID_MATCH_INT_NUMBER)))
return 0;
if ((id->match_flags & USB_DEVICE_ID_MATCH_INT_CLASS) &&
(id->bInterfaceClass != intf->desc.bInterfaceClass))
return 0;
if ((id->match_flags & USB_DEVICE_ID_MATCH_INT_SUBCLASS) &&
(id->bInterfaceSubClass != intf->desc.bInterfaceSubClass))
return 0;
if ((id->match_flags & USB_DEVICE_ID_MATCH_INT_PROTOCOL) &&
(id->bInterfaceProtocol != intf->desc.bInterfaceProtocol))
return 0;
if ((id->match_flags & USB_DEVICE_ID_MATCH_INT_NUMBER) &&
(id->bInterfaceNumber != intf->desc.bInterfaceNumber))
return 0;
return 1;
}
/* returns 0 if no match, 1 if match */
int usb_match_one_id(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_host_interface *intf;
struct usb_device *dev;
/* proc_connectinfo in devio.c may call us with id == NULL. */
if (id == NULL)
return 0;
intf = interface->cur_altsetting;
dev = interface_to_usbdev(interface);
if (!usb_match_device(dev, id))
return 0;
return usb_match_one_id_intf(dev, intf, id);
}
EXPORT_SYMBOL_GPL(usb_match_one_id);
/**
* usb_match_id - find first usb_device_id matching device or interface
* @interface: the interface of interest
* @id: array of usb_device_id structures, terminated by zero entry
*
* usb_match_id searches an array of usb_device_id's and returns
* the first one matching the device or interface, or null.
* This is used when binding (or rebinding) a driver to an interface.
* Most USB device drivers will use this indirectly, through the usb core,
* but some layered driver frameworks use it directly.
* These device tables are exported with MODULE_DEVICE_TABLE, through
* modutils, to support the driver loading functionality of USB hotplugging.
*
* Return: The first matching usb_device_id, or %NULL.
*
* What Matches:
*
* The "match_flags" element in a usb_device_id controls which
* members are used. If the corresponding bit is set, the
* value in the device_id must match its corresponding member
* in the device or interface descriptor, or else the device_id
* does not match.
*
* "driver_info" is normally used only by device drivers,
* but you can create a wildcard "matches anything" usb_device_id
* as a driver's "modules.usbmap" entry if you provide an id with
* only a nonzero "driver_info" field. If you do this, the USB device
* driver's probe() routine should use additional intelligence to
* decide whether to bind to the specified interface.
*
* What Makes Good usb_device_id Tables:
*
* The match algorithm is very simple, so that intelligence in
* driver selection must come from smart driver id records.
* Unless you have good reasons to use another selection policy,
* provide match elements only in related groups, and order match
* specifiers from specific to general. Use the macros provided
* for that purpose if you can.
*
* The most specific match specifiers use device descriptor
* data. These are commonly used with product-specific matches;
* the USB_DEVICE macro lets you provide vendor and product IDs,
* and you can also match against ranges of product revisions.
* These are widely used for devices with application or vendor
* specific bDeviceClass values.
*
* Matches based on device class/subclass/protocol specifications
* are slightly more general; use the USB_DEVICE_INFO macro, or
* its siblings. These are used with single-function devices
* where bDeviceClass doesn't specify that each interface has
* its own class.
*
* Matches based on interface class/subclass/protocol are the
* most general; they let drivers bind to any interface on a
* multiple-function device. Use the USB_INTERFACE_INFO
* macro, or its siblings, to match class-per-interface style
* devices (as recorded in bInterfaceClass).
*
* Note that an entry created by USB_INTERFACE_INFO won't match
* any interface if the device class is set to Vendor-Specific.
* This is deliberate; according to the USB spec the meanings of
* the interface class/subclass/protocol for these devices are also
* vendor-specific, and hence matching against a standard product
* class wouldn't work anyway. If you really want to use an
* interface-based match for such a device, create a match record
 * that also specifies the vendor ID. (Unfortunately there isn't a
* standard macro for creating records like this.)
*
* Within those groups, remember that not all combinations are
* meaningful. For example, don't give a product version range
* without vendor and product IDs; or specify a protocol without
* its associated class and subclass.
*/
const struct usb_device_id *usb_match_id(struct usb_interface *interface,
const struct usb_device_id *id)
{
/* proc_connectinfo in devio.c may call us with id == NULL. */
if (id == NULL)
return NULL;
/* It is important to check that id->driver_info is nonzero,
since an entry that is all zeroes except for a nonzero
id->driver_info is the way to create an entry that
	   indicates that the driver wants to examine every
device and interface. */
for (; id->idVendor || id->idProduct || id->bDeviceClass ||
id->bInterfaceClass || id->driver_info; id++) {
if (usb_match_one_id(interface, id))
return id;
}
return NULL;
}
EXPORT_SYMBOL_GPL(usb_match_id);
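/*
 * Hypothetical id table (IDs invented) laid out as the guidance above
 * recommends: most specific entries first, interface-class wildcards
 * last, zero entry terminating the array.
 */
static const struct usb_device_id example_id_table[] = {
	/* exact vendor/product match, driver_info carrying a flag */
	{ USB_DEVICE(0x1234, 0x5678), .driver_info = 1 },
	/* any interface with a vendor-specific class triple */
	{ USB_INTERFACE_INFO(USB_CLASS_VENDOR_SPEC, 0x00, 0x00) },
	{ }	/* terminating entry */
};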
const struct usb_device_id *usb_device_match_id(struct usb_device *udev,
const struct usb_device_id *id)
{
if (!id)
return NULL;
	for (; id->idVendor || id->idProduct; id++) {
if (usb_match_device(udev, id))
return id;
}
return NULL;
}
EXPORT_SYMBOL_GPL(usb_device_match_id);
bool usb_driver_applicable(struct usb_device *udev,
struct usb_device_driver *udrv)
{
if (udrv->id_table && udrv->match)
return usb_device_match_id(udev, udrv->id_table) != NULL &&
udrv->match(udev);
if (udrv->id_table)
return usb_device_match_id(udev, udrv->id_table) != NULL;
if (udrv->match)
return udrv->match(udev);
return false;
}
static int usb_device_match(struct device *dev, struct device_driver *drv)
{
/* devices and interfaces are handled separately */
if (is_usb_device(dev)) {
struct usb_device *udev;
struct usb_device_driver *udrv;
/* interface drivers never match devices */
if (!is_usb_device_driver(drv))
return 0;
udev = to_usb_device(dev);
udrv = to_usb_device_driver(drv);
/* If the device driver under consideration does not have a
* id_table or a match function, then let the driver's probe
* function decide.
*/
if (!udrv->id_table && !udrv->match)
return 1;
return usb_driver_applicable(udev, udrv);
} else if (is_usb_interface(dev)) {
struct usb_interface *intf;
struct usb_driver *usb_drv;
const struct usb_device_id *id;
/* device drivers never match interfaces */
if (is_usb_device_driver(drv))
return 0;
intf = to_usb_interface(dev);
usb_drv = to_usb_driver(drv);
id = usb_match_id(intf, usb_drv->id_table);
if (id)
return 1;
id = usb_match_dynamic_id(intf, usb_drv);
if (id)
return 1;
}
return 0;
}
static int usb_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct usb_device *usb_dev;
if (is_usb_device(dev)) {
usb_dev = to_usb_device(dev);
} else if (is_usb_interface(dev)) {
const struct usb_interface *intf = to_usb_interface(dev);
usb_dev = interface_to_usbdev(intf);
} else {
return 0;
}
if (usb_dev->devnum < 0) {
/* driver is often null here; dev_dbg() would oops */
pr_debug("usb %s: already deleted?\n", dev_name(dev));
return -ENODEV;
}
if (!usb_dev->bus) {
pr_debug("usb %s: bus removed?\n", dev_name(dev));
return -ENODEV;
}
/* per-device configurations are common */
if (add_uevent_var(env, "PRODUCT=%x/%x/%x",
le16_to_cpu(usb_dev->descriptor.idVendor),
le16_to_cpu(usb_dev->descriptor.idProduct),
le16_to_cpu(usb_dev->descriptor.bcdDevice)))
return -ENOMEM;
/* class-based driver binding models */
if (add_uevent_var(env, "TYPE=%d/%d/%d",
usb_dev->descriptor.bDeviceClass,
usb_dev->descriptor.bDeviceSubClass,
usb_dev->descriptor.bDeviceProtocol))
return -ENOMEM;
return 0;
}
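/*
 * For reference (values invented), a device-level uevent gains
 * environment entries of the form built above:
 *
 *	PRODUCT=1234/5678/100
 *	TYPE=0/0/0
 */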
static int __usb_bus_reprobe_drivers(struct device *dev, void *data)
{
struct usb_device_driver *new_udriver = data;
struct usb_device *udev;
int ret;
/* Don't reprobe if current driver isn't usb_generic_driver */
if (dev->driver != &usb_generic_driver.drvwrap.driver)
return 0;
udev = to_usb_device(dev);
if (!usb_driver_applicable(udev, new_udriver))
return 0;
ret = device_reprobe(dev);
if (ret && ret != -EPROBE_DEFER)
dev_err(dev, "Failed to reprobe device (error %d)\n", ret);
return 0;
}
/**
* usb_register_device_driver - register a USB device (not interface) driver
* @new_udriver: USB operations for the device driver
* @owner: module owner of this driver.
*
* Registers a USB device driver with the USB core. The list of
* unattached devices will be rescanned whenever a new driver is
* added, allowing the new driver to attach to any recognized devices.
*
* Return: A negative error code on failure and 0 on success.
*/
int usb_register_device_driver(struct usb_device_driver *new_udriver,
struct module *owner)
{
int retval = 0;
if (usb_disabled())
return -ENODEV;
new_udriver->drvwrap.for_devices = 1;
new_udriver->drvwrap.driver.name = new_udriver->name;
new_udriver->drvwrap.driver.bus = &usb_bus_type;
new_udriver->drvwrap.driver.probe = usb_probe_device;
new_udriver->drvwrap.driver.remove = usb_unbind_device;
new_udriver->drvwrap.driver.owner = owner;
new_udriver->drvwrap.driver.dev_groups = new_udriver->dev_groups;
retval = driver_register(&new_udriver->drvwrap.driver);
if (!retval) {
pr_info("%s: registered new device driver %s\n",
usbcore_name, new_udriver->name);
/*
* Check whether any device could be better served with
* this new driver
*/
bus_for_each_dev(&usb_bus_type, NULL, new_udriver,
__usb_bus_reprobe_drivers);
} else {
pr_err("%s: error %d registering device driver %s\n",
usbcore_name, retval, new_udriver->name);
}
return retval;
}
EXPORT_SYMBOL_GPL(usb_register_device_driver);
/**
* usb_deregister_device_driver - unregister a USB device (not interface) driver
* @udriver: USB operations of the device driver to unregister
* Context: must be able to sleep
*
* Unlinks the specified driver from the internal USB driver list.
*/
void usb_deregister_device_driver(struct usb_device_driver *udriver)
{
pr_info("%s: deregistering device driver %s\n",
usbcore_name, udriver->name);
driver_unregister(&udriver->drvwrap.driver);
}
EXPORT_SYMBOL_GPL(usb_deregister_device_driver);
/**
* usb_register_driver - register a USB interface driver
* @new_driver: USB operations for the interface driver
* @owner: module owner of this driver.
* @mod_name: module name string
*
* Registers a USB interface driver with the USB core. The list of
* unattached interfaces will be rescanned whenever a new driver is
* added, allowing the new driver to attach to any recognized interfaces.
*
* Return: A negative error code on failure and 0 on success.
*
* NOTE: if you want your driver to use the USB major number, you must call
* usb_register_dev() to enable that functionality. This function no longer
* takes care of that.
*/
int usb_register_driver(struct usb_driver *new_driver, struct module *owner,
const char *mod_name)
{
int retval = 0;
if (usb_disabled())
return -ENODEV;
new_driver->drvwrap.for_devices = 0;
new_driver->drvwrap.driver.name = new_driver->name;
new_driver->drvwrap.driver.bus = &usb_bus_type;
new_driver->drvwrap.driver.probe = usb_probe_interface;
new_driver->drvwrap.driver.remove = usb_unbind_interface;
new_driver->drvwrap.driver.owner = owner;
new_driver->drvwrap.driver.mod_name = mod_name;
new_driver->drvwrap.driver.dev_groups = new_driver->dev_groups;
spin_lock_init(&new_driver->dynids.lock);
INIT_LIST_HEAD(&new_driver->dynids.list);
retval = driver_register(&new_driver->drvwrap.driver);
if (retval)
goto out;
retval = usb_create_newid_files(new_driver);
if (retval)
goto out_newid;
pr_info("%s: registered new interface driver %s\n",
usbcore_name, new_driver->name);
out:
return retval;
out_newid:
driver_unregister(&new_driver->drvwrap.driver);
pr_err("%s: error %d registering interface driver %s\n",
usbcore_name, retval, new_driver->name);
goto out;
}
EXPORT_SYMBOL_GPL(usb_register_driver);
/**
* usb_deregister - unregister a USB interface driver
* @driver: USB operations of the interface driver to unregister
* Context: must be able to sleep
*
* Unlinks the specified driver from the internal USB driver list.
*
* NOTE: If you called usb_register_dev(), you still need to call
* usb_deregister_dev() to clean up your driver's allocated minor numbers,
* this * call will no longer do it for you.
*/
void usb_deregister(struct usb_driver *driver)
{
pr_info("%s: deregistering interface driver %s\n",
usbcore_name, driver->name);
usb_remove_newid_files(driver);
driver_unregister(&driver->drvwrap.driver);
usb_free_dynids(driver);
}
EXPORT_SYMBOL_GPL(usb_deregister);
/* Forced unbinding of a USB interface driver, either because
* it doesn't support pre_reset/post_reset/reset_resume or
* because it doesn't support suspend/resume.
*
* The caller must hold @intf's device's lock, but not @intf's lock.
*/
void usb_forced_unbind_intf(struct usb_interface *intf)
{
struct usb_driver *driver = to_usb_driver(intf->dev.driver);
dev_dbg(&intf->dev, "forced unbind\n");
usb_driver_release_interface(driver, intf);
/* Mark the interface for later rebinding */
intf->needs_binding = 1;
}
/*
* Unbind drivers for @udev's marked interfaces. These interfaces have
* the needs_binding flag set, for example by usb_resume_interface().
*
* The caller must hold @udev's device lock.
*/
static void unbind_marked_interfaces(struct usb_device *udev)
{
struct usb_host_config *config;
int i;
struct usb_interface *intf;
config = udev->actconfig;
if (config) {
for (i = 0; i < config->desc.bNumInterfaces; ++i) {
intf = config->interface[i];
if (intf->dev.driver && intf->needs_binding)
usb_forced_unbind_intf(intf);
}
}
}
/* Delayed forced unbinding of a USB interface driver and scan
* for rebinding.
*
* The caller must hold @intf's device's lock, but not @intf's lock.
*
* Note: Rebinds will be skipped if a system sleep transition is in
* progress and the PM "complete" callback hasn't occurred yet.
*/
static void usb_rebind_intf(struct usb_interface *intf)
{
int rc;
/* Delayed unbind of an existing driver */
if (intf->dev.driver)
usb_forced_unbind_intf(intf);
/* Try to rebind the interface */
if (!intf->dev.power.is_prepared) {
intf->needs_binding = 0;
rc = device_attach(&intf->dev);
if (rc < 0 && rc != -EPROBE_DEFER)
dev_warn(&intf->dev, "rebind failed: %d\n", rc);
}
}
/*
* Rebind drivers to @udev's marked interfaces. These interfaces have
* the needs_binding flag set.
*
* The caller must hold @udev's device lock.
*/
static void rebind_marked_interfaces(struct usb_device *udev)
{
struct usb_host_config *config;
int i;
struct usb_interface *intf;
config = udev->actconfig;
if (config) {
for (i = 0; i < config->desc.bNumInterfaces; ++i) {
intf = config->interface[i];
if (intf->needs_binding)
usb_rebind_intf(intf);
}
}
}
/*
* Unbind all of @udev's marked interfaces and then rebind all of them.
* This ordering is necessary because some drivers claim several interfaces
* when they are first probed.
*
* The caller must hold @udev's device lock.
*/
void usb_unbind_and_rebind_marked_interfaces(struct usb_device *udev)
{
unbind_marked_interfaces(udev);
rebind_marked_interfaces(udev);
}
#ifdef CONFIG_PM
/* Unbind drivers for @udev's interfaces that don't support suspend/resume
* There is no check for reset_resume here because it can be determined
* only during resume whether reset_resume is needed.
*
* The caller must hold @udev's device lock.
*/
static void unbind_no_pm_drivers_interfaces(struct usb_device *udev)
{
struct usb_host_config *config;
int i;
struct usb_interface *intf;
struct usb_driver *drv;
config = udev->actconfig;
if (config) {
for (i = 0; i < config->desc.bNumInterfaces; ++i) {
intf = config->interface[i];
if (intf->dev.driver) {
drv = to_usb_driver(intf->dev.driver);
if (!drv->suspend || !drv->resume)
usb_forced_unbind_intf(intf);
}
}
}
}
static int usb_suspend_device(struct usb_device *udev, pm_message_t msg)
{
struct usb_device_driver *udriver;
int status = 0;
if (udev->state == USB_STATE_NOTATTACHED ||
udev->state == USB_STATE_SUSPENDED)
goto done;
/* For devices that don't have a driver, we do a generic suspend. */
if (udev->dev.driver)
udriver = to_usb_device_driver(udev->dev.driver);
else {
udev->do_remote_wakeup = 0;
udriver = &usb_generic_driver;
}
if (udriver->suspend)
status = udriver->suspend(udev, msg);
if (status == 0 && udriver->generic_subclass)
status = usb_generic_driver_suspend(udev, msg);
done:
dev_vdbg(&udev->dev, "%s: status %d\n", __func__, status);
return status;
}
static int usb_resume_device(struct usb_device *udev, pm_message_t msg)
{
struct usb_device_driver *udriver;
int status = 0;
if (udev->state == USB_STATE_NOTATTACHED)
goto done;
/* Can't resume it if it doesn't have a driver. */
if (udev->dev.driver == NULL) {
status = -ENOTCONN;
goto done;
}
/* Non-root devices on a full/low-speed bus must wait for their
* companion high-speed root hub, in case a handoff is needed.
*/
if (!PMSG_IS_AUTO(msg) && udev->parent && udev->bus->hs_companion)
device_pm_wait_for_dev(&udev->dev,
&udev->bus->hs_companion->root_hub->dev);
if (udev->quirks & USB_QUIRK_RESET_RESUME)
udev->reset_resume = 1;
udriver = to_usb_device_driver(udev->dev.driver);
if (udriver->generic_subclass)
status = usb_generic_driver_resume(udev, msg);
if (status == 0 && udriver->resume)
status = udriver->resume(udev, msg);
done:
dev_vdbg(&udev->dev, "%s: status %d\n", __func__, status);
return status;
}
static int usb_suspend_interface(struct usb_device *udev,
struct usb_interface *intf, pm_message_t msg)
{
struct usb_driver *driver;
int status = 0;
if (udev->state == USB_STATE_NOTATTACHED ||
intf->condition == USB_INTERFACE_UNBOUND)
goto done;
driver = to_usb_driver(intf->dev.driver);
/* at this time we know the driver supports suspend */
status = driver->suspend(intf, msg);
if (status && !PMSG_IS_AUTO(msg))
dev_err(&intf->dev, "suspend error %d\n", status);
done:
dev_vdbg(&intf->dev, "%s: status %d\n", __func__, status);
return status;
}
static int usb_resume_interface(struct usb_device *udev,
struct usb_interface *intf, pm_message_t msg, int reset_resume)
{
struct usb_driver *driver;
int status = 0;
if (udev->state == USB_STATE_NOTATTACHED)
goto done;
/* Don't let autoresume interfere with unbinding */
if (intf->condition == USB_INTERFACE_UNBINDING)
goto done;
/* Can't resume it if it doesn't have a driver. */
if (intf->condition == USB_INTERFACE_UNBOUND) {
/* Carry out a deferred switch to altsetting 0 */
if (intf->needs_altsetting0 && !intf->dev.power.is_prepared) {
usb_set_interface(udev, intf->altsetting[0].
desc.bInterfaceNumber, 0);
intf->needs_altsetting0 = 0;
}
goto done;
}
/* Don't resume if the interface is marked for rebinding */
if (intf->needs_binding)
goto done;
driver = to_usb_driver(intf->dev.driver);
if (reset_resume) {
if (driver->reset_resume) {
status = driver->reset_resume(intf);
if (status)
dev_err(&intf->dev, "%s error %d\n",
"reset_resume", status);
} else {
intf->needs_binding = 1;
dev_dbg(&intf->dev, "no reset_resume for driver %s?\n",
driver->name);
}
} else {
status = driver->resume(intf);
if (status)
dev_err(&intf->dev, "resume error %d\n", status);
}
done:
dev_vdbg(&intf->dev, "%s: status %d\n", __func__, status);
/* Later we will unbind the driver and/or reprobe, if necessary */
return status;
}
/**
* usb_suspend_both - suspend a USB device and its interfaces
* @udev: the usb_device to suspend
* @msg: Power Management message describing this state transition
*
* This is the central routine for suspending USB devices. It calls the
* suspend methods for all the interface drivers in @udev and then calls
* the suspend method for @udev itself. When the routine is called in
* autosuspend, if an error occurs at any stage, all the interfaces
* which were suspended are resumed so that they remain in the same
* state as the device, but when called from system sleep, all error
* from suspend methods of interfaces and the non-root-hub device itself
* are simply ignored, so all suspended interfaces are only resumed
* to the device's state when @udev is root-hub and its suspend method
* returns failure.
*
* Autosuspend requests originating from a child device or an interface
* driver may be made without the protection of @udev's device lock, but
* all other suspend calls will hold the lock. Usbcore will insure that
* method calls do not arrive during bind, unbind, or reset operations.
* However drivers must be prepared to handle suspend calls arriving at
* unpredictable times.
*
* This routine can run only in process context.
*
* Return: 0 if the suspend succeeded.
*/
static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
{
int status = 0;
int i = 0, n = 0;
struct usb_interface *intf;
if (udev->state == USB_STATE_NOTATTACHED ||
udev->state == USB_STATE_SUSPENDED)
goto done;
/* Suspend all the interfaces and then udev itself */
if (udev->actconfig) {
n = udev->actconfig->desc.bNumInterfaces;
for (i = n - 1; i >= 0; --i) {
intf = udev->actconfig->interface[i];
status = usb_suspend_interface(udev, intf, msg);
/* Ignore errors during system sleep transitions */
if (!PMSG_IS_AUTO(msg))
status = 0;
if (status != 0)
break;
}
}
if (status == 0) {
status = usb_suspend_device(udev, msg);
/*
* Ignore errors from non-root-hub devices during
* system sleep transitions. For the most part,
* these devices should go to low power anyway when
* the entire bus is suspended.
*/
if (udev->parent && !PMSG_IS_AUTO(msg))
status = 0;
/*
* If the device is inaccessible, don't try to resume
* suspended interfaces and just return the error.
*/
if (status && status != -EBUSY) {
int err;
u16 devstat;
err = usb_get_std_status(udev, USB_RECIP_DEVICE, 0,
&devstat);
if (err) {
dev_err(&udev->dev,
"Failed to suspend device, error %d\n",
status);
goto done;
}
}
}
/* If the suspend failed, resume interfaces that did get suspended */
if (status != 0) {
if (udev->actconfig) {
msg.event ^= (PM_EVENT_SUSPEND | PM_EVENT_RESUME);
while (++i < n) {
intf = udev->actconfig->interface[i];
usb_resume_interface(udev, intf, msg, 0);
}
}
/* If the suspend succeeded then prevent any more URB submissions
* and flush any outstanding URBs.
*/
} else {
udev->can_submit = 0;
for (i = 0; i < 16; ++i) {
usb_hcd_flush_endpoint(udev, udev->ep_out[i]);
usb_hcd_flush_endpoint(udev, udev->ep_in[i]);
}
}
done:
dev_vdbg(&udev->dev, "%s: status %d\n", __func__, status);
return status;
}
/**
* usb_resume_both - resume a USB device and its interfaces
* @udev: the usb_device to resume
* @msg: Power Management message describing this state transition
*
* This is the central routine for resuming USB devices. It calls the
* resume method for @udev and then calls the resume methods for all
* the interface drivers in @udev.
*
* Autoresume requests originating from a child device or an interface
* driver may be made without the protection of @udev's device lock, but
* all other resume calls will hold the lock. Usbcore will insure that
* method calls do not arrive during bind, unbind, or reset operations.
* However drivers must be prepared to handle resume calls arriving at
* unpredictable times.
*
* This routine can run only in process context.
*
* Return: 0 on success.
*/
static int usb_resume_both(struct usb_device *udev, pm_message_t msg)
{
int status = 0;
int i;
struct usb_interface *intf;
if (udev->state == USB_STATE_NOTATTACHED) {
status = -ENODEV;
goto done;
}
udev->can_submit = 1;
/* Resume the device */
if (udev->state == USB_STATE_SUSPENDED || udev->reset_resume)
status = usb_resume_device(udev, msg);
/* Resume the interfaces */
if (status == 0 && udev->actconfig) {
for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
intf = udev->actconfig->interface[i];
usb_resume_interface(udev, intf, msg,
udev->reset_resume);
}
}
usb_mark_last_busy(udev);
done:
dev_vdbg(&udev->dev, "%s: status %d\n", __func__, status);
if (!status)
udev->reset_resume = 0;
return status;
}
static void choose_wakeup(struct usb_device *udev, pm_message_t msg)
{
int w;
/*
* For FREEZE/QUIESCE, disable remote wakeups so no interrupts get
* generated.
*/
if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_QUIESCE) {
w = 0;
} else {
/*
* Enable remote wakeup if it is allowed, even if no interface
* drivers actually want it.
*/
w = device_may_wakeup(&udev->dev);
}
/*
* If the device is autosuspended with the wrong wakeup setting,
* autoresume now so the setting can be changed.
*/
if (udev->state == USB_STATE_SUSPENDED && w != udev->do_remote_wakeup)
pm_runtime_resume(&udev->dev);
udev->do_remote_wakeup = w;
}
/* The device lock is held by the PM core */
int usb_suspend(struct device *dev, pm_message_t msg)
{
struct usb_device *udev = to_usb_device(dev);
int r;
unbind_no_pm_drivers_interfaces(udev);
/* From now on we are sure all drivers support suspend/resume
* but not necessarily reset_resume()
* so we may still need to unbind and rebind upon resume
*/
choose_wakeup(udev, msg);
r = usb_suspend_both(udev, msg);
if (r)
return r;
if (udev->quirks & USB_QUIRK_DISCONNECT_SUSPEND)
usb_port_disable(udev);
return 0;
}
/* The device lock is held by the PM core */
int usb_resume_complete(struct device *dev)
{
struct usb_device *udev = to_usb_device(dev);
/* For PM complete calls, all we do is rebind interfaces
* whose needs_binding flag is set
*/
if (udev->state != USB_STATE_NOTATTACHED)
rebind_marked_interfaces(udev);
return 0;
}
/* The device lock is held by the PM core */
int usb_resume(struct device *dev, pm_message_t msg)
{
struct usb_device *udev = to_usb_device(dev);
int status;
/* For all calls, take the device back to full power and
* tell the PM core in case it was autosuspended previously.
* Unbind the interfaces that will need rebinding later,
* because they fail to support reset_resume.
* (This can't be done in usb_resume_interface()
* above because it doesn't own the right set of locks.)
*/
status = usb_resume_both(udev, msg);
if (status == 0) {
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
unbind_marked_interfaces(udev);
}
/* Avoid PM error messages for devices disconnected while suspended
* as we'll display regular disconnect messages just a bit later.
*/
if (status == -ENODEV || status == -ESHUTDOWN)
status = 0;
return status;
}
/**
* usb_enable_autosuspend - allow a USB device to be autosuspended
* @udev: the USB device which may be autosuspended
*
* This routine allows @udev to be autosuspended. An autosuspend won't
* take place until the autosuspend_delay has elapsed and all the other
* necessary conditions are satisfied.
*
* The caller must hold @udev's device lock.
*/
void usb_enable_autosuspend(struct usb_device *udev)
{
pm_runtime_allow(&udev->dev);
}
EXPORT_SYMBOL_GPL(usb_enable_autosuspend);
/**
* usb_disable_autosuspend - prevent a USB device from being autosuspended
* @udev: the USB device which may not be autosuspended
*
* This routine prevents @udev from being autosuspended and wakes it up
* if it is already autosuspended.
*
* The caller must hold @udev's device lock.
*/
void usb_disable_autosuspend(struct usb_device *udev)
{
pm_runtime_forbid(&udev->dev);
}
EXPORT_SYMBOL_GPL(usb_disable_autosuspend);
/**
* usb_autosuspend_device - delayed autosuspend of a USB device and its interfaces
* @udev: the usb_device to autosuspend
*
* This routine should be called when a core subsystem is finished using
* @udev and wants to allow it to autosuspend. Examples would be when
* @udev's device file in usbfs is closed or after a configuration change.
*
* @udev's usage counter is decremented; if it drops to 0 and all the
* interfaces are inactive then a delayed autosuspend will be attempted.
* The attempt may fail (see autosuspend_check()).
*
* The caller must hold @udev's device lock.
*
* This routine can run only in process context.
*/
void usb_autosuspend_device(struct usb_device *udev)
{
int status;
usb_mark_last_busy(udev);
status = pm_runtime_put_sync_autosuspend(&udev->dev);
dev_vdbg(&udev->dev, "%s: cnt %d -> %d\n",
__func__, atomic_read(&udev->dev.power.usage_count),
status);
}
/**
* usb_autoresume_device - immediately autoresume a USB device and its interfaces
* @udev: the usb_device to autoresume
*
* This routine should be called when a core subsystem wants to use @udev
* and needs to guarantee that it is not suspended. No autosuspend will
* occur until usb_autosuspend_device() is called. (Note that this will
* not prevent suspend events originating in the PM core.) Examples would
* be when @udev's device file in usbfs is opened or when a remote-wakeup
* request is received.
*
* @udev's usage counter is incremented to prevent subsequent autosuspends.
* However if the autoresume fails then the usage counter is re-decremented.
*
* The caller must hold @udev's device lock.
*
* This routine can run only in process context.
*
* Return: 0 on success. A negative error code otherwise.
*/
int usb_autoresume_device(struct usb_device *udev)
{
int status;
status = pm_runtime_get_sync(&udev->dev);
if (status < 0)
pm_runtime_put_sync(&udev->dev);
dev_vdbg(&udev->dev, "%s: cnt %d -> %d\n",
__func__, atomic_read(&udev->dev.power.usage_count),
status);
if (status > 0)
status = 0;
return status;
}
/**
* usb_autopm_put_interface - decrement a USB interface's PM-usage counter
* @intf: the usb_interface whose counter should be decremented
*
* This routine should be called by an interface driver when it is
* finished using @intf and wants to allow it to autosuspend. A typical
* example would be a character-device driver when its device file is
* closed.
*
* The routine decrements @intf's usage counter. When the counter reaches
* 0, a delayed autosuspend request for @intf's device is attempted. The
* attempt may fail (see autosuspend_check()).
*
* This routine can run only in process context.
*/
void usb_autopm_put_interface(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
int status;
usb_mark_last_busy(udev);
status = pm_runtime_put_sync(&intf->dev);
dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
__func__, atomic_read(&intf->dev.power.usage_count),
status);
}
EXPORT_SYMBOL_GPL(usb_autopm_put_interface);
/**
* usb_autopm_put_interface_async - decrement a USB interface's PM-usage counter
* @intf: the usb_interface whose counter should be decremented
*
* This routine does much the same thing as usb_autopm_put_interface():
* It decrements @intf's usage counter and schedules a delayed
* autosuspend request if the counter is <= 0. The difference is that it
* does not perform any synchronization; callers should hold a private
* lock and handle all synchronization issues themselves.
*
* Typically a driver would call this routine during an URB's completion
* handler, if no more URBs were pending.
*
* This routine can run in atomic context.
*/
void usb_autopm_put_interface_async(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
int status;
usb_mark_last_busy(udev);
status = pm_runtime_put(&intf->dev);
dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
__func__, atomic_read(&intf->dev.power.usage_count),
status);
}
EXPORT_SYMBOL_GPL(usb_autopm_put_interface_async);
/**
* usb_autopm_put_interface_no_suspend - decrement a USB interface's PM-usage counter
* @intf: the usb_interface whose counter should be decremented
*
* This routine decrements @intf's usage counter but does not carry out an
* autosuspend.
*
* This routine can run in atomic context.
*/
void usb_autopm_put_interface_no_suspend(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
usb_mark_last_busy(udev);
pm_runtime_put_noidle(&intf->dev);
}
EXPORT_SYMBOL_GPL(usb_autopm_put_interface_no_suspend);
/**
* usb_autopm_get_interface - increment a USB interface's PM-usage counter
* @intf: the usb_interface whose counter should be incremented
*
* This routine should be called by an interface driver when it wants to
* use @intf and needs to guarantee that it is not suspended. In addition,
* the routine prevents @intf from being autosuspended subsequently. (Note
* that this will not prevent suspend events originating in the PM core.)
* This prevention will persist until usb_autopm_put_interface() is called
* or @intf is unbound. A typical example would be a character-device
* driver when its device file is opened.
*
* @intf's usage counter is incremented to prevent subsequent autosuspends.
* However if the autoresume fails then the counter is re-decremented.
*
* This routine can run only in process context.
*
* Return: 0 on success.
*/
int usb_autopm_get_interface(struct usb_interface *intf)
{
int status;
status = pm_runtime_get_sync(&intf->dev);
if (status < 0)
pm_runtime_put_sync(&intf->dev);
dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
__func__, atomic_read(&intf->dev.power.usage_count),
status);
if (status > 0)
status = 0;
return status;
}
EXPORT_SYMBOL_GPL(usb_autopm_get_interface);
/**
* usb_autopm_get_interface_async - increment a USB interface's PM-usage counter
* @intf: the usb_interface whose counter should be incremented
*
* This routine does much the same thing as
* usb_autopm_get_interface(): It increments @intf's usage counter and
* queues an autoresume request if the device is suspended. The
* differences are that it does not perform any synchronization (callers
* should hold a private lock and handle all synchronization issues
* themselves), and it does not autoresume the device directly (it only
* queues a request). After a successful call, the device may not yet be
* resumed.
*
* This routine can run in atomic context.
*
* Return: 0 on success. A negative error code otherwise.
*/
int usb_autopm_get_interface_async(struct usb_interface *intf)
{
int status;
status = pm_runtime_get(&intf->dev);
if (status < 0 && status != -EINPROGRESS)
pm_runtime_put_noidle(&intf->dev);
dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
__func__, atomic_read(&intf->dev.power.usage_count),
status);
if (status > 0 || status == -EINPROGRESS)
status = 0;
return status;
}
EXPORT_SYMBOL_GPL(usb_autopm_get_interface_async);
/**
* usb_autopm_get_interface_no_resume - increment a USB interface's PM-usage counter
* @intf: the usb_interface whose counter should be incremented
*
* This routine increments @intf's usage counter but does not carry out an
* autoresume.
*
* This routine can run in atomic context.
*/
void usb_autopm_get_interface_no_resume(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
usb_mark_last_busy(udev);
pm_runtime_get_noresume(&intf->dev);
}
EXPORT_SYMBOL_GPL(usb_autopm_get_interface_no_resume);
/* Internal routine to check whether we may autosuspend a device. */
static int autosuspend_check(struct usb_device *udev)
{
int w, i;
struct usb_interface *intf;
if (udev->state == USB_STATE_NOTATTACHED)
return -ENODEV;
/* Fail if autosuspend is disabled, or any interfaces are in use, or
* any interface drivers require remote wakeup but it isn't available.
*/
w = 0;
if (udev->actconfig) {
for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
intf = udev->actconfig->interface[i];
/* We don't need to check interfaces that are
* disabled for runtime PM. Either they are unbound
* or else their drivers don't support autosuspend
* and so they are permanently active.
*/
if (intf->dev.power.disable_depth)
continue;
if (atomic_read(&intf->dev.power.usage_count) > 0)
return -EBUSY;
w |= intf->needs_remote_wakeup;
/* Don't allow autosuspend if the device will need
* a reset-resume and any of its interface drivers
* doesn't include support or needs remote wakeup.
*/
if (udev->quirks & USB_QUIRK_RESET_RESUME) {
struct usb_driver *driver;
driver = to_usb_driver(intf->dev.driver);
if (!driver->reset_resume ||
intf->needs_remote_wakeup)
return -EOPNOTSUPP;
}
}
}
if (w && !device_can_wakeup(&udev->dev)) {
dev_dbg(&udev->dev, "remote wakeup needed for autosuspend\n");
return -EOPNOTSUPP;
}
/*
* If the device is a direct child of the root hub and the HCD
* doesn't handle wakeup requests, don't allow autosuspend when
* wakeup is needed.
*/
if (w && udev->parent == udev->bus->root_hub &&
bus_to_hcd(udev->bus)->cant_recv_wakeups) {
dev_dbg(&udev->dev, "HCD doesn't handle wakeup requests\n");
return -EOPNOTSUPP;
}
udev->do_remote_wakeup = w;
return 0;
}
int usb_runtime_suspend(struct device *dev)
{
struct usb_device *udev = to_usb_device(dev);
int status;
/* A USB device can be suspended if it passes the various autosuspend
* checks. Runtime suspend for a USB device means suspending all the
* interfaces and then the device itself.
*/
if (autosuspend_check(udev) != 0)
return -EAGAIN;
status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
/* Allow a retry if autosuspend failed temporarily */
if (status == -EAGAIN || status == -EBUSY)
usb_mark_last_busy(udev);
/*
* The PM core reacts badly unless the return code is 0,
* -EAGAIN, or -EBUSY, so always return -EBUSY on an error
* (except for root hubs, because they don't suspend through
* an upstream port like other USB devices).
*/
if (status != 0 && udev->parent)
return -EBUSY;
return status;
}
int usb_runtime_resume(struct device *dev)
{
struct usb_device *udev = to_usb_device(dev);
int status;
/* Runtime resume for a USB device means resuming both the device
* and all its interfaces.
*/
status = usb_resume_both(udev, PMSG_AUTO_RESUME);
return status;
}
int usb_runtime_idle(struct device *dev)
{
struct usb_device *udev = to_usb_device(dev);
/* An idle USB device can be suspended if it passes the various
* autosuspend checks.
*/
if (autosuspend_check(udev) == 0)
pm_runtime_autosuspend(dev);
/* Tell the core not to suspend it, though. */
return -EBUSY;
}
static int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
{
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
int ret = -EPERM;
if (hcd->driver->set_usb2_hw_lpm) {
ret = hcd->driver->set_usb2_hw_lpm(hcd, udev, enable);
if (!ret)
udev->usb2_hw_lpm_enabled = enable;
}
return ret;
}
int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
{
if (!udev->usb2_hw_lpm_capable ||
!udev->usb2_hw_lpm_allowed ||
udev->usb2_hw_lpm_enabled)
return 0;
return usb_set_usb2_hardware_lpm(udev, 1);
}
int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
{
if (!udev->usb2_hw_lpm_enabled)
return 0;
return usb_set_usb2_hardware_lpm(udev, 0);
}
#endif /* CONFIG_PM */
const struct bus_type usb_bus_type = {
.name = "usb",
.match = usb_device_match,
.uevent = usb_uevent,
.need_parent_lock = true,
};
| linux-master | drivers/usb/core/driver.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* (C) Copyright Linus Torvalds 1999
* (C) Copyright Johannes Erdfelt 1999-2001
* (C) Copyright Andreas Gal 1999
* (C) Copyright Gregory P. Smith 1999
* (C) Copyright Deti Fliegl 1999
* (C) Copyright Randy Dunlap 2000
* (C) Copyright David Brownell 2000-2002
*/
#include <linux/bcd.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/utsname.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/kcov.h>
#include <linux/phy/phy.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/otg.h>
#include "usb.h"
#include "phy.h"
/*-------------------------------------------------------------------------*/
/*
* USB Host Controller Driver framework
*
* Plugs into usbcore (usb_bus) and lets HCDs share code, minimizing
* HCD-specific behaviors/bugs.
*
* This does error checks, tracks devices and urbs, and delegates to a
* "hc_driver" only for code (and data) that really needs to know about
* hardware differences. That includes root hub registers, i/o queues,
* and so on ... but as little else as possible.
*
* Shared code includes most of the "root hub" code (these are emulated,
* though each HC's hardware works differently) and PCI glue, plus request
* tracking overhead. The HCD code should only block on spinlocks or on
* hardware handshaking; blocking on software events (such as other kernel
* threads releasing resources, or completing actions) is all generic.
*
* Happens the USB 2.0 spec says this would be invisible inside the "USBD",
* and includes mostly a "HCDI" (HCD Interface) along with some APIs used
* only by the hub driver ... and that neither should be seen or used by
* usb client device drivers.
*
* Contributors of ideas or unattributed patches include: David Brownell,
* Roman Weissgaerber, Rory Bolt, Greg Kroah-Hartman, ...
*
* HISTORY:
* 2002-02-21 Pull in most of the usb_bus support from usb.c; some
* associated cleanup. "usb_hcd" still != "usb_bus".
* 2001-12-12 Initial patch version for Linux 2.5.1 kernel.
*/
/*-------------------------------------------------------------------------*/
/* Keep track of which host controller drivers are loaded */
unsigned long usb_hcds_loaded;
EXPORT_SYMBOL_GPL(usb_hcds_loaded);
/* host controllers we manage */
DEFINE_IDR (usb_bus_idr);
EXPORT_SYMBOL_GPL (usb_bus_idr);
/* used when allocating bus numbers */
#define USB_MAXBUS 64
/* used when updating list of hcds */
DEFINE_MUTEX(usb_bus_idr_lock); /* exported only for usbfs */
EXPORT_SYMBOL_GPL (usb_bus_idr_lock);
/* used for controlling access to virtual root hubs */
static DEFINE_SPINLOCK(hcd_root_hub_lock);
/* used when updating an endpoint's URB list */
static DEFINE_SPINLOCK(hcd_urb_list_lock);
/* used to protect against unlinking URBs after the device is gone */
static DEFINE_SPINLOCK(hcd_urb_unlink_lock);
/* wait queue for synchronous unlinks */
DECLARE_WAIT_QUEUE_HEAD(usb_kill_urb_queue);
/*-------------------------------------------------------------------------*/
/*
* Sharable chunks of root hub code.
*/
/*-------------------------------------------------------------------------*/
#define KERNEL_REL bin2bcd(LINUX_VERSION_MAJOR)
#define KERNEL_VER bin2bcd(LINUX_VERSION_PATCHLEVEL)
/* usb 3.1 root hub device descriptor */
static const u8 usb31_rh_dev_descriptor[18] = {
0x12, /* __u8 bLength; */
USB_DT_DEVICE, /* __u8 bDescriptorType; Device */
0x10, 0x03, /* __le16 bcdUSB; v3.1 */
0x09, /* __u8 bDeviceClass; HUB_CLASSCODE */
0x00, /* __u8 bDeviceSubClass; */
0x03, /* __u8 bDeviceProtocol; USB 3 hub */
0x09, /* __u8 bMaxPacketSize0; 2^9 = 512 Bytes */
0x6b, 0x1d, /* __le16 idVendor; Linux Foundation 0x1d6b */
0x03, 0x00, /* __le16 idProduct; device 0x0003 */
KERNEL_VER, KERNEL_REL, /* __le16 bcdDevice */
0x03, /* __u8 iManufacturer; */
0x02, /* __u8 iProduct; */
0x01, /* __u8 iSerialNumber; */
0x01 /* __u8 bNumConfigurations; */
};
/* usb 3.0 root hub device descriptor */
static const u8 usb3_rh_dev_descriptor[18] = {
0x12, /* __u8 bLength; */
USB_DT_DEVICE, /* __u8 bDescriptorType; Device */
0x00, 0x03, /* __le16 bcdUSB; v3.0 */
0x09, /* __u8 bDeviceClass; HUB_CLASSCODE */
0x00, /* __u8 bDeviceSubClass; */
0x03, /* __u8 bDeviceProtocol; USB 3.0 hub */
0x09, /* __u8 bMaxPacketSize0; 2^9 = 512 Bytes */
0x6b, 0x1d, /* __le16 idVendor; Linux Foundation 0x1d6b */
0x03, 0x00, /* __le16 idProduct; device 0x0003 */
KERNEL_VER, KERNEL_REL, /* __le16 bcdDevice */
0x03, /* __u8 iManufacturer; */
0x02, /* __u8 iProduct; */
0x01, /* __u8 iSerialNumber; */
0x01 /* __u8 bNumConfigurations; */
};
/* usb 2.0 root hub device descriptor */
static const u8 usb2_rh_dev_descriptor[18] = {
0x12, /* __u8 bLength; */
USB_DT_DEVICE, /* __u8 bDescriptorType; Device */
0x00, 0x02, /* __le16 bcdUSB; v2.0 */
0x09, /* __u8 bDeviceClass; HUB_CLASSCODE */
0x00, /* __u8 bDeviceSubClass; */
0x00, /* __u8 bDeviceProtocol; [ usb 2.0 no TT ] */
0x40, /* __u8 bMaxPacketSize0; 64 Bytes */
0x6b, 0x1d, /* __le16 idVendor; Linux Foundation 0x1d6b */
0x02, 0x00, /* __le16 idProduct; device 0x0002 */
KERNEL_VER, KERNEL_REL, /* __le16 bcdDevice */
0x03, /* __u8 iManufacturer; */
0x02, /* __u8 iProduct; */
0x01, /* __u8 iSerialNumber; */
0x01 /* __u8 bNumConfigurations; */
};
/* no usb 2.0 root hub "device qualifier" descriptor: one speed only */
/* usb 1.1 root hub device descriptor */
static const u8 usb11_rh_dev_descriptor[18] = {
0x12, /* __u8 bLength; */
USB_DT_DEVICE, /* __u8 bDescriptorType; Device */
0x10, 0x01, /* __le16 bcdUSB; v1.1 */
0x09, /* __u8 bDeviceClass; HUB_CLASSCODE */
0x00, /* __u8 bDeviceSubClass; */
0x00, /* __u8 bDeviceProtocol; [ low/full speeds only ] */
0x40, /* __u8 bMaxPacketSize0; 64 Bytes */
0x6b, 0x1d, /* __le16 idVendor; Linux Foundation 0x1d6b */
0x01, 0x00, /* __le16 idProduct; device 0x0001 */
KERNEL_VER, KERNEL_REL, /* __le16 bcdDevice */
0x03, /* __u8 iManufacturer; */
0x02, /* __u8 iProduct; */
0x01, /* __u8 iSerialNumber; */
0x01 /* __u8 bNumConfigurations; */
};
/*-------------------------------------------------------------------------*/
/* Configuration descriptors for our root hubs */
static const u8 fs_rh_config_descriptor[] = {
/* one configuration */
0x09, /* __u8 bLength; */
USB_DT_CONFIG, /* __u8 bDescriptorType; Configuration */
0x19, 0x00, /* __le16 wTotalLength; */
0x01, /* __u8 bNumInterfaces; (1) */
0x01, /* __u8 bConfigurationValue; */
0x00, /* __u8 iConfiguration; */
0xc0, /* __u8 bmAttributes;
Bit 7: must be set,
6: Self-powered,
5: Remote wakeup,
4..0: resvd */
0x00, /* __u8 MaxPower; */
/* USB 1.1:
* USB 2.0, single TT organization (mandatory):
* one interface, protocol 0
*
* USB 2.0, multiple TT organization (optional):
* two interfaces, protocols 1 (like single TT)
* and 2 (multiple TT mode) ... config is
* sometimes settable
* NOT IMPLEMENTED
*/
/* one interface */
0x09, /* __u8 if_bLength; */
USB_DT_INTERFACE, /* __u8 if_bDescriptorType; Interface */
0x00, /* __u8 if_bInterfaceNumber; */
0x00, /* __u8 if_bAlternateSetting; */
0x01, /* __u8 if_bNumEndpoints; */
0x09, /* __u8 if_bInterfaceClass; HUB_CLASSCODE */
0x00, /* __u8 if_bInterfaceSubClass; */
0x00, /* __u8 if_bInterfaceProtocol; [usb1.1 or single tt] */
0x00, /* __u8 if_iInterface; */
/* one endpoint (status change endpoint) */
0x07, /* __u8 ep_bLength; */
USB_DT_ENDPOINT, /* __u8 ep_bDescriptorType; Endpoint */
0x81, /* __u8 ep_bEndpointAddress; IN Endpoint 1 */
0x03, /* __u8 ep_bmAttributes; Interrupt */
0x02, 0x00, /* __le16 ep_wMaxPacketSize; 1 + (MAX_ROOT_PORTS / 8) */
0xff /* __u8 ep_bInterval; (255ms -- usb 2.0 spec) */
};
static const u8 hs_rh_config_descriptor[] = {
/* one configuration */
0x09, /* __u8 bLength; */
USB_DT_CONFIG, /* __u8 bDescriptorType; Configuration */
0x19, 0x00, /* __le16 wTotalLength; */
0x01, /* __u8 bNumInterfaces; (1) */
0x01, /* __u8 bConfigurationValue; */
0x00, /* __u8 iConfiguration; */
0xc0, /* __u8 bmAttributes;
Bit 7: must be set,
6: Self-powered,
5: Remote wakeup,
4..0: resvd */
0x00, /* __u8 MaxPower; */
/* USB 1.1:
* USB 2.0, single TT organization (mandatory):
* one interface, protocol 0
*
* USB 2.0, multiple TT organization (optional):
* two interfaces, protocols 1 (like single TT)
* and 2 (multiple TT mode) ... config is
* sometimes settable
* NOT IMPLEMENTED
*/
/* one interface */
0x09, /* __u8 if_bLength; */
USB_DT_INTERFACE, /* __u8 if_bDescriptorType; Interface */
0x00, /* __u8 if_bInterfaceNumber; */
0x00, /* __u8 if_bAlternateSetting; */
0x01, /* __u8 if_bNumEndpoints; */
0x09, /* __u8 if_bInterfaceClass; HUB_CLASSCODE */
0x00, /* __u8 if_bInterfaceSubClass; */
0x00, /* __u8 if_bInterfaceProtocol; [usb1.1 or single tt] */
0x00, /* __u8 if_iInterface; */
/* one endpoint (status change endpoint) */
0x07, /* __u8 ep_bLength; */
USB_DT_ENDPOINT, /* __u8 ep_bDescriptorType; Endpoint */
0x81, /* __u8 ep_bEndpointAddress; IN Endpoint 1 */
0x03, /* __u8 ep_bmAttributes; Interrupt */
/* __le16 ep_wMaxPacketSize; 1 + (MAX_ROOT_PORTS / 8)
* see hub.c:hub_configure() for details. */
(USB_MAXCHILDREN + 1 + 7) / 8, 0x00,
0x0c /* __u8 ep_bInterval; (256ms -- usb 2.0 spec) */
};
static const u8 ss_rh_config_descriptor[] = {
/* one configuration */
0x09, /* __u8 bLength; */
USB_DT_CONFIG, /* __u8 bDescriptorType; Configuration */
0x1f, 0x00, /* __le16 wTotalLength; */
0x01, /* __u8 bNumInterfaces; (1) */
0x01, /* __u8 bConfigurationValue; */
0x00, /* __u8 iConfiguration; */
0xc0, /* __u8 bmAttributes;
Bit 7: must be set,
6: Self-powered,
5: Remote wakeup,
4..0: resvd */
0x00, /* __u8 MaxPower; */
/* one interface */
0x09, /* __u8 if_bLength; */
USB_DT_INTERFACE, /* __u8 if_bDescriptorType; Interface */
0x00, /* __u8 if_bInterfaceNumber; */
0x00, /* __u8 if_bAlternateSetting; */
0x01, /* __u8 if_bNumEndpoints; */
0x09, /* __u8 if_bInterfaceClass; HUB_CLASSCODE */
0x00, /* __u8 if_bInterfaceSubClass; */
0x00, /* __u8 if_bInterfaceProtocol; */
0x00, /* __u8 if_iInterface; */
/* one endpoint (status change endpoint) */
0x07, /* __u8 ep_bLength; */
USB_DT_ENDPOINT, /* __u8 ep_bDescriptorType; Endpoint */
0x81, /* __u8 ep_bEndpointAddress; IN Endpoint 1 */
0x03, /* __u8 ep_bmAttributes; Interrupt */
/* __le16 ep_wMaxPacketSize; 1 + (MAX_ROOT_PORTS / 8)
* see hub.c:hub_configure() for details. */
(USB_MAXCHILDREN + 1 + 7) / 8, 0x00,
0x0c, /* __u8 ep_bInterval; (256ms -- usb 2.0 spec) */
/* one SuperSpeed endpoint companion descriptor */
0x06, /* __u8 ss_bLength */
USB_DT_SS_ENDPOINT_COMP, /* __u8 ss_bDescriptorType; SuperSpeed EP */
/* Companion */
0x00, /* __u8 ss_bMaxBurst; allows 1 TX between ACKs */
0x00, /* __u8 ss_bmAttributes; 1 packet per service interval */
0x02, 0x00 /* __le16 ss_wBytesPerInterval; 15 bits for max 15 ports */
};
/* authorized_default behaviour:
* -1 is authorized for all devices (leftover from wireless USB)
* 0 is unauthorized for all devices
* 1 is authorized for all devices
* 2 is authorized for internal devices
*/
#define USB_AUTHORIZE_WIRED -1
#define USB_AUTHORIZE_NONE 0
#define USB_AUTHORIZE_ALL 1
#define USB_AUTHORIZE_INTERNAL 2
static int authorized_default = USB_AUTHORIZE_WIRED;
module_param(authorized_default, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(authorized_default,
"Default USB device authorization: 0 is not authorized, 1 is "
"authorized, 2 is authorized for internal devices, -1 is "
"authorized (default, same as 1)");
/*-------------------------------------------------------------------------*/
/**
* ascii2desc() - Helper routine for producing UTF-16LE string descriptors
* @s: Null-terminated ASCII (actually ISO-8859-1) string
* @buf: Buffer for USB string descriptor (header + UTF-16LE)
* @len: Length (in bytes; may be odd) of descriptor buffer.
*
* Return: The number of bytes filled in: 2 + 2*strlen(s) or @len,
* whichever is less.
*
* Note:
* USB String descriptors can contain at most 126 characters; input
* strings longer than that are truncated.
*/
static unsigned
ascii2desc(char const *s, u8 *buf, unsigned len)
{
unsigned n, t = 2 + 2*strlen(s);
if (t > 254)
t = 254; /* Longest possible UTF string descriptor */
if (len > t)
len = t;
t += USB_DT_STRING << 8; /* Now t is first 16 bits to store */
n = len;
while (n--) {
*buf++ = t;
if (!n--)
break;
*buf++ = t >> 8;
t = (unsigned char)*s++;
}
return len;
}
/**
* rh_string() - provides string descriptors for root hub
* @id: the string ID number (0: langids, 1: serial #, 2: product, 3: vendor)
* @hcd: the host controller for this root hub
* @data: buffer for output packet
* @len: length of the provided buffer
*
* Produces either a manufacturer, product or serial number string for the
* virtual root hub device.
*
* Return: The number of bytes filled in: the length of the descriptor or
* of the provided buffer, whichever is less.
*/
static unsigned
rh_string(int id, struct usb_hcd const *hcd, u8 *data, unsigned len)
{
char buf[100];
char const *s;
static char const langids[4] = {4, USB_DT_STRING, 0x09, 0x04};
/* language ids */
switch (id) {
case 0:
/* Array of LANGID codes (0x0409 is MSFT-speak for "en-us") */
/* See http://www.usb.org/developers/docs/USB_LANGIDs.pdf */
if (len > 4)
len = 4;
memcpy(data, langids, len);
return len;
case 1:
/* Serial number */
s = hcd->self.bus_name;
break;
case 2:
/* Product name */
s = hcd->product_desc;
break;
case 3:
/* Manufacturer */
snprintf (buf, sizeof buf, "%s %s %s", init_utsname()->sysname,
init_utsname()->release, hcd->driver->description);
s = buf;
break;
default:
/* Can't happen; caller guarantees it */
return 0;
}
return ascii2desc(s, data, len);
}
/* Root hub control transfers execute synchronously */
static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
{
struct usb_ctrlrequest *cmd;
u16 typeReq, wValue, wIndex, wLength;
u8 *ubuf = urb->transfer_buffer;
unsigned len = 0;
int status;
u8 patch_wakeup = 0;
u8 patch_protocol = 0;
u16 tbuf_size;
u8 *tbuf = NULL;
const u8 *bufp;
might_sleep();
spin_lock_irq(&hcd_root_hub_lock);
status = usb_hcd_link_urb_to_ep(hcd, urb);
spin_unlock_irq(&hcd_root_hub_lock);
if (status)
return status;
urb->hcpriv = hcd; /* Indicate it's queued */
cmd = (struct usb_ctrlrequest *) urb->setup_packet;
typeReq = (cmd->bRequestType << 8) | cmd->bRequest;
wValue = le16_to_cpu (cmd->wValue);
wIndex = le16_to_cpu (cmd->wIndex);
wLength = le16_to_cpu (cmd->wLength);
if (wLength > urb->transfer_buffer_length)
goto error;
/*
* tbuf should be at least as big as the
* USB hub descriptor.
*/
tbuf_size = max_t(u16, sizeof(struct usb_hub_descriptor), wLength);
tbuf = kzalloc(tbuf_size, GFP_KERNEL);
if (!tbuf) {
status = -ENOMEM;
goto err_alloc;
}
bufp = tbuf;
urb->actual_length = 0;
switch (typeReq) {
/* DEVICE REQUESTS */
/* The root hub's remote wakeup enable bit is implemented using
* driver model wakeup flags. If this system supports wakeup
* through USB, userspace may change the default "allow wakeup"
* policy through sysfs or these calls.
*
* Most root hubs support wakeup from downstream devices, for
* runtime power management (disabling USB clocks and reducing
* VBUS power usage). However, not all of them do so; silicon,
* board, and BIOS bugs here are not uncommon, so these can't
* be treated quite like external hubs.
*
* Likewise, not all root hubs will pass wakeup events upstream,
* to wake up the whole system. So don't assume root hub and
* controller capabilities are identical.
*/
case DeviceRequest | USB_REQ_GET_STATUS:
tbuf[0] = (device_may_wakeup(&hcd->self.root_hub->dev)
<< USB_DEVICE_REMOTE_WAKEUP)
| (1 << USB_DEVICE_SELF_POWERED);
tbuf[1] = 0;
len = 2;
break;
case DeviceOutRequest | USB_REQ_CLEAR_FEATURE:
if (wValue == USB_DEVICE_REMOTE_WAKEUP)
device_set_wakeup_enable(&hcd->self.root_hub->dev, 0);
else
goto error;
break;
case DeviceOutRequest | USB_REQ_SET_FEATURE:
if (device_can_wakeup(&hcd->self.root_hub->dev)
&& wValue == USB_DEVICE_REMOTE_WAKEUP)
device_set_wakeup_enable(&hcd->self.root_hub->dev, 1);
else
goto error;
break;
case DeviceRequest | USB_REQ_GET_CONFIGURATION:
tbuf[0] = 1;
len = 1;
fallthrough;
case DeviceOutRequest | USB_REQ_SET_CONFIGURATION:
break;
case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
switch (wValue & 0xff00) {
case USB_DT_DEVICE << 8:
switch (hcd->speed) {
case HCD_USB32:
case HCD_USB31:
bufp = usb31_rh_dev_descriptor;
break;
case HCD_USB3:
bufp = usb3_rh_dev_descriptor;
break;
case HCD_USB2:
bufp = usb2_rh_dev_descriptor;
break;
case HCD_USB11:
bufp = usb11_rh_dev_descriptor;
break;
default:
goto error;
}
len = 18;
if (hcd->has_tt)
patch_protocol = 1;
break;
case USB_DT_CONFIG << 8:
switch (hcd->speed) {
case HCD_USB32:
case HCD_USB31:
case HCD_USB3:
bufp = ss_rh_config_descriptor;
len = sizeof ss_rh_config_descriptor;
break;
case HCD_USB2:
bufp = hs_rh_config_descriptor;
len = sizeof hs_rh_config_descriptor;
break;
case HCD_USB11:
bufp = fs_rh_config_descriptor;
len = sizeof fs_rh_config_descriptor;
break;
default:
goto error;
}
if (device_can_wakeup(&hcd->self.root_hub->dev))
patch_wakeup = 1;
break;
case USB_DT_STRING << 8:
if ((wValue & 0xff) < 4)
urb->actual_length = rh_string(wValue & 0xff,
hcd, ubuf, wLength);
else /* unsupported IDs --> "protocol stall" */
goto error;
break;
case USB_DT_BOS << 8:
goto nongeneric;
default:
goto error;
}
break;
case DeviceRequest | USB_REQ_GET_INTERFACE:
tbuf[0] = 0;
len = 1;
fallthrough;
case DeviceOutRequest | USB_REQ_SET_INTERFACE:
break;
case DeviceOutRequest | USB_REQ_SET_ADDRESS:
/* wValue == urb->dev->devaddr */
dev_dbg (hcd->self.controller, "root hub device address %d\n",
wValue);
break;
/* INTERFACE REQUESTS (no defined feature/status flags) */
/* ENDPOINT REQUESTS */
case EndpointRequest | USB_REQ_GET_STATUS:
/* ENDPOINT_HALT flag */
tbuf[0] = 0;
tbuf[1] = 0;
len = 2;
fallthrough;
case EndpointOutRequest | USB_REQ_CLEAR_FEATURE:
case EndpointOutRequest | USB_REQ_SET_FEATURE:
dev_dbg (hcd->self.controller, "no endpoint features yet\n");
break;
/* CLASS REQUESTS (and errors) */
default:
nongeneric:
/* non-generic request */
switch (typeReq) {
case GetHubStatus:
len = 4;
break;
case GetPortStatus:
if (wValue == HUB_PORT_STATUS)
len = 4;
else
/* other port status types return 8 bytes */
len = 8;
break;
case GetHubDescriptor:
len = sizeof (struct usb_hub_descriptor);
break;
case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
/* len is returned by hub_control */
break;
}
status = hcd->driver->hub_control (hcd,
typeReq, wValue, wIndex,
tbuf, wLength);
if (typeReq == GetHubDescriptor)
usb_hub_adjust_deviceremovable(hcd->self.root_hub,
(struct usb_hub_descriptor *)tbuf);
break;
error:
/* "protocol stall" on error */
status = -EPIPE;
}
if (status < 0) {
len = 0;
if (status != -EPIPE) {
dev_dbg (hcd->self.controller,
"CTRL: TypeReq=0x%x val=0x%x "
"idx=0x%x len=%d ==> %d\n",
typeReq, wValue, wIndex,
wLength, status);
}
} else if (status > 0) {
/* hub_control may return the length of data copied. */
len = status;
status = 0;
}
if (len) {
if (urb->transfer_buffer_length < len)
len = urb->transfer_buffer_length;
urb->actual_length = len;
/* always USB_DIR_IN, toward host */
memcpy (ubuf, bufp, len);
/* report whether RH hardware supports remote wakeup */
if (patch_wakeup &&
len > offsetof (struct usb_config_descriptor,
bmAttributes))
((struct usb_config_descriptor *)ubuf)->bmAttributes
|= USB_CONFIG_ATT_WAKEUP;
/* report whether RH hardware has an integrated TT */
if (patch_protocol &&
len > offsetof(struct usb_device_descriptor,
bDeviceProtocol))
((struct usb_device_descriptor *) ubuf)->
bDeviceProtocol = USB_HUB_PR_HS_SINGLE_TT;
}
kfree(tbuf);
err_alloc:
/* any errors get returned through the urb completion */
spin_lock_irq(&hcd_root_hub_lock);
usb_hcd_unlink_urb_from_ep(hcd, urb);
usb_hcd_giveback_urb(hcd, urb, status);
spin_unlock_irq(&hcd_root_hub_lock);
return 0;
}
/*-------------------------------------------------------------------------*/
/*
* Root Hub interrupt transfers are polled using a timer if the
* driver requests it; otherwise the driver is responsible for
* calling usb_hcd_poll_rh_status() when an event occurs.
*
* Completion handler may not sleep. See usb_hcd_giveback_urb() for details.
*/
void usb_hcd_poll_rh_status(struct usb_hcd *hcd)
{
struct urb *urb;
int length;
int status;
unsigned long flags;
char buffer[6]; /* Any root hubs with > 31 ports? */
if (unlikely(!hcd->rh_pollable))
return;
if (!hcd->uses_new_polling && !hcd->status_urb)
return;
length = hcd->driver->hub_status_data(hcd, buffer);
if (length > 0) {
/* try to complete the status urb */
spin_lock_irqsave(&hcd_root_hub_lock, flags);
urb = hcd->status_urb;
if (urb) {
clear_bit(HCD_FLAG_POLL_PENDING, &hcd->flags);
hcd->status_urb = NULL;
if (urb->transfer_buffer_length >= length) {
status = 0;
} else {
status = -EOVERFLOW;
length = urb->transfer_buffer_length;
}
urb->actual_length = length;
memcpy(urb->transfer_buffer, buffer, length);
usb_hcd_unlink_urb_from_ep(hcd, urb);
usb_hcd_giveback_urb(hcd, urb, status);
} else {
length = 0;
set_bit(HCD_FLAG_POLL_PENDING, &hcd->flags);
}
spin_unlock_irqrestore(&hcd_root_hub_lock, flags);
}
/* The USB 2.0 spec says 256 ms. This is close enough and won't
* exceed that limit if HZ is 100. The math is more clunky than
* maybe expected, this is to make sure that all timers for USB devices
* fire at the same time to give the CPU a break in between */
if (hcd->uses_new_polling ? HCD_POLL_RH(hcd) :
(length == 0 && hcd->status_urb != NULL))
mod_timer (&hcd->rh_timer, (jiffies/(HZ/4) + 1) * (HZ/4));
}
EXPORT_SYMBOL_GPL(usb_hcd_poll_rh_status);
/* timer callback */
static void rh_timer_func (struct timer_list *t)
{
struct usb_hcd *_hcd = from_timer(_hcd, t, rh_timer);
usb_hcd_poll_rh_status(_hcd);
}
/*-------------------------------------------------------------------------*/
static int rh_queue_status (struct usb_hcd *hcd, struct urb *urb)
{
int retval;
unsigned long flags;
unsigned len = 1 + (urb->dev->maxchild / 8);
spin_lock_irqsave (&hcd_root_hub_lock, flags);
if (hcd->status_urb || urb->transfer_buffer_length < len) {
dev_dbg (hcd->self.controller, "not queuing rh status urb\n");
retval = -EINVAL;
goto done;
}
retval = usb_hcd_link_urb_to_ep(hcd, urb);
if (retval)
goto done;
hcd->status_urb = urb;
urb->hcpriv = hcd; /* indicate it's queued */
if (!hcd->uses_new_polling)
mod_timer(&hcd->rh_timer, (jiffies/(HZ/4) + 1) * (HZ/4));
/* If a status change has already occurred, report it ASAP */
else if (HCD_POLL_PENDING(hcd))
mod_timer(&hcd->rh_timer, jiffies);
retval = 0;
done:
spin_unlock_irqrestore (&hcd_root_hub_lock, flags);
return retval;
}
static int rh_urb_enqueue (struct usb_hcd *hcd, struct urb *urb)
{
if (usb_endpoint_xfer_int(&urb->ep->desc))
return rh_queue_status (hcd, urb);
if (usb_endpoint_xfer_control(&urb->ep->desc))
return rh_call_control (hcd, urb);
return -EINVAL;
}
/*-------------------------------------------------------------------------*/
/* Unlinks of root-hub control URBs are legal, but they don't do anything
* since these URBs always execute synchronously.
*/
static int usb_rh_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
unsigned long flags;
int rc;
spin_lock_irqsave(&hcd_root_hub_lock, flags);
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
if (rc)
goto done;
if (usb_endpoint_num(&urb->ep->desc) == 0) { /* Control URB */
; /* Do nothing */
} else { /* Status URB */
if (!hcd->uses_new_polling)
del_timer (&hcd->rh_timer);
if (urb == hcd->status_urb) {
hcd->status_urb = NULL;
usb_hcd_unlink_urb_from_ep(hcd, urb);
usb_hcd_giveback_urb(hcd, urb, status);
}
}
done:
spin_unlock_irqrestore(&hcd_root_hub_lock, flags);
return rc;
}
/*-------------------------------------------------------------------------*/
/**
* usb_bus_init - shared initialization code
* @bus: the bus structure being initialized
*
* This code is used to initialize a usb_bus structure, memory for which is
* separately managed.
*/
static void usb_bus_init (struct usb_bus *bus)
{
memset (&bus->devmap, 0, sizeof(struct usb_devmap));
bus->devnum_next = 1;
bus->root_hub = NULL;
bus->busnum = -1;
bus->bandwidth_allocated = 0;
bus->bandwidth_int_reqs = 0;
bus->bandwidth_isoc_reqs = 0;
mutex_init(&bus->devnum_next_mutex);
}
/*-------------------------------------------------------------------------*/
/**
* usb_register_bus - registers the USB host controller with the usb core
* @bus: pointer to the bus to register
*
* Context: task context, might sleep.
*
* Assigns a bus number, and links the controller into usbcore data
* structures so that it can be seen by scanning the bus list.
*
* Return: 0 if successful. A negative error code otherwise.
*/
static int usb_register_bus(struct usb_bus *bus)
{
int result = -E2BIG;
int busnum;
mutex_lock(&usb_bus_idr_lock);
busnum = idr_alloc(&usb_bus_idr, bus, 1, USB_MAXBUS, GFP_KERNEL);
if (busnum < 0) {
pr_err("%s: failed to get bus number\n", usbcore_name);
goto error_find_busnum;
}
bus->busnum = busnum;
mutex_unlock(&usb_bus_idr_lock);
usb_notify_add_bus(bus);
dev_info (bus->controller, "new USB bus registered, assigned bus "
"number %d\n", bus->busnum);
return 0;
error_find_busnum:
mutex_unlock(&usb_bus_idr_lock);
return result;
}
/**
* usb_deregister_bus - deregisters the USB host controller
* @bus: pointer to the bus to deregister
*
* Context: task context, might sleep.
*
* Recycles the bus number, and unlinks the controller from usbcore data
* structures so that it won't be seen by scanning the bus list.
*/
static void usb_deregister_bus (struct usb_bus *bus)
{
dev_info (bus->controller, "USB bus %d deregistered\n", bus->busnum);
/*
* NOTE: make sure that all the devices are removed by the
* controller code, as well as having it call this when cleaning
* itself up
*/
mutex_lock(&usb_bus_idr_lock);
idr_remove(&usb_bus_idr, bus->busnum);
mutex_unlock(&usb_bus_idr_lock);
usb_notify_remove_bus(bus);
}
/**
* register_root_hub - called by usb_add_hcd() to register a root hub
* @hcd: host controller for this root hub
*
* This function registers the root hub with the USB subsystem. It sets up
* the device properly in the device tree and then calls usb_new_device()
* to register the usb device. It also assigns the root hub's USB address
* (always 1).
*
* Return: 0 if successful. A negative error code otherwise.
*/
static int register_root_hub(struct usb_hcd *hcd)
{
struct device *parent_dev = hcd->self.controller;
struct usb_device *usb_dev = hcd->self.root_hub;
struct usb_device_descriptor *descr;
const int devnum = 1;
int retval;
usb_dev->devnum = devnum;
usb_dev->bus->devnum_next = devnum + 1;
set_bit (devnum, usb_dev->bus->devmap.devicemap);
usb_set_device_state(usb_dev, USB_STATE_ADDRESS);
mutex_lock(&usb_bus_idr_lock);
usb_dev->ep0.desc.wMaxPacketSize = cpu_to_le16(64);
descr = usb_get_device_descriptor(usb_dev);
if (IS_ERR(descr)) {
retval = PTR_ERR(descr);
mutex_unlock(&usb_bus_idr_lock);
dev_dbg (parent_dev, "can't read %s device descriptor %d\n",
dev_name(&usb_dev->dev), retval);
return retval;
}
usb_dev->descriptor = *descr;
kfree(descr);
if (le16_to_cpu(usb_dev->descriptor.bcdUSB) >= 0x0201) {
retval = usb_get_bos_descriptor(usb_dev);
if (!retval) {
usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev);
} else if (usb_dev->speed >= USB_SPEED_SUPER) {
mutex_unlock(&usb_bus_idr_lock);
dev_dbg(parent_dev, "can't read %s bos descriptor %d\n",
dev_name(&usb_dev->dev), retval);
return retval;
}
}
retval = usb_new_device (usb_dev);
if (retval) {
dev_err (parent_dev, "can't register root hub for %s, %d\n",
dev_name(&usb_dev->dev), retval);
} else {
spin_lock_irq (&hcd_root_hub_lock);
hcd->rh_registered = 1;
spin_unlock_irq (&hcd_root_hub_lock);
/* Did the HC die before the root hub was registered? */
if (HCD_DEAD(hcd))
usb_hc_died (hcd); /* This time clean up */
}
mutex_unlock(&usb_bus_idr_lock);
return retval;
}
/*
* usb_hcd_start_port_resume - a root-hub port is sending a resume signal
* @bus: the bus which the root hub belongs to
* @portnum: the port which is being resumed
*
* HCDs should call this function when they know that a resume signal is
* being sent to a root-hub port. The root hub will be prevented from
* going into autosuspend until usb_hcd_end_port_resume() is called.
*
* The bus's private lock must be held by the caller.
*/
void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum)
{
unsigned bit = 1 << portnum;
if (!(bus->resuming_ports & bit)) {
bus->resuming_ports |= bit;
pm_runtime_get_noresume(&bus->root_hub->dev);
}
}
EXPORT_SYMBOL_GPL(usb_hcd_start_port_resume);
/*
* usb_hcd_end_port_resume - a root-hub port has stopped sending a resume signal
* @bus: the bus which the root hub belongs to
* @portnum: the port which is being resumed
*
* HCDs should call this function when they know that a resume signal has
* stopped being sent to a root-hub port. The root hub will be allowed to
* autosuspend again.
*
* The bus's private lock must be held by the caller.
*/
void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum)
{
unsigned bit = 1 << portnum;
if (bus->resuming_ports & bit) {
bus->resuming_ports &= ~bit;
pm_runtime_put_noidle(&bus->root_hub->dev);
}
}
EXPORT_SYMBOL_GPL(usb_hcd_end_port_resume);
/*-------------------------------------------------------------------------*/
/**
* usb_calc_bus_time - approximate periodic transaction time in nanoseconds
* @speed: from dev->speed; USB_SPEED_{LOW,FULL,HIGH}
* @is_input: true iff the transaction sends data to the host
* @isoc: true for isochronous transactions, false for interrupt ones
* @bytecount: how many bytes in the transaction.
*
* Return: Approximate bus time in nanoseconds for a periodic transaction.
*
* Note:
* See USB 2.0 spec section 5.11.3; only periodic transfers need to be
* scheduled in software, this function is only used for such scheduling.
*/
long usb_calc_bus_time (int speed, int is_input, int isoc, int bytecount)
{
unsigned long tmp;
switch (speed) {
case USB_SPEED_LOW: /* INTR only */
if (is_input) {
tmp = (67667L * (31L + 10L * BitTime (bytecount))) / 1000L;
return 64060L + (2 * BW_HUB_LS_SETUP) + BW_HOST_DELAY + tmp;
} else {
tmp = (66700L * (31L + 10L * BitTime (bytecount))) / 1000L;
return 64107L + (2 * BW_HUB_LS_SETUP) + BW_HOST_DELAY + tmp;
}
case USB_SPEED_FULL: /* ISOC or INTR */
if (isoc) {
tmp = (8354L * (31L + 10L * BitTime (bytecount))) / 1000L;
return ((is_input) ? 7268L : 6265L) + BW_HOST_DELAY + tmp;
} else {
tmp = (8354L * (31L + 10L * BitTime (bytecount))) / 1000L;
return 9107L + BW_HOST_DELAY + tmp;
}
case USB_SPEED_HIGH: /* ISOC or INTR */
/* FIXME adjust for input vs output */
if (isoc)
tmp = HS_NSECS_ISO (bytecount);
else
tmp = HS_NSECS (bytecount);
return tmp;
default:
pr_debug ("%s: bogus device speed!\n", usbcore_name);
return -1;
}
}
EXPORT_SYMBOL_GPL(usb_calc_bus_time);
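/*
 * Illustrative sketch (not part of the original file): a periodic
 * scheduler estimating the per-frame cost of a 64-byte full-speed
 * interrupt IN transfer. The wrapper name is hypothetical.
 */
static inline long example_fs_intr_in_bus_time_ns(void)
{
	/* arguments: speed, is_input, isoc, bytecount */
	return usb_calc_bus_time(USB_SPEED_FULL, 1, 0, 64);
}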
/*-------------------------------------------------------------------------*/
/*
* Generic HC operations.
*/
/*-------------------------------------------------------------------------*/
/**
* usb_hcd_link_urb_to_ep - add an URB to its endpoint queue
* @hcd: host controller to which @urb was submitted
* @urb: URB being submitted
*
* Host controller drivers should call this routine in their enqueue()
* method. The HCD's private spinlock must be held and interrupts must
* be disabled. The actions carried out here are required for URB
* submission, as well as for endpoint shutdown and for usb_kill_urb.
*
* Return: 0 for no error, otherwise a negative error code (in which case
* the enqueue() method must fail). If no error occurs but enqueue() fails
* anyway, it must call usb_hcd_unlink_urb_from_ep() before releasing
* the private spinlock and returning.
*/
int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb)
{
int rc = 0;
spin_lock(&hcd_urb_list_lock);
/* Check that the URB isn't being killed */
if (unlikely(atomic_read(&urb->reject))) {
rc = -EPERM;
goto done;
}
if (unlikely(!urb->ep->enabled)) {
rc = -ENOENT;
goto done;
}
if (unlikely(!urb->dev->can_submit)) {
rc = -EHOSTUNREACH;
goto done;
}
/*
* Check the host controller's state and add the URB to the
* endpoint's queue.
*/
if (HCD_RH_RUNNING(hcd)) {
urb->unlinked = 0;
list_add_tail(&urb->urb_list, &urb->ep->urb_list);
} else {
rc = -ESHUTDOWN;
goto done;
}
done:
spin_unlock(&hcd_urb_list_lock);
return rc;
}
EXPORT_SYMBOL_GPL(usb_hcd_link_urb_to_ep);
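/*
 * Illustrative sketch (not part of the original file): the canonical
 * shape of an HCD's enqueue() method around usb_hcd_link_urb_to_ep().
 * The spinlock and the hardware-queue stub below are hypothetical
 * stand-ins for driver-specific code.
 */
static DEFINE_SPINLOCK(example_hcd_lock);	/* stand-in for an HCD's private lock */

static inline int example_queue_to_hardware(struct usb_hcd *hcd,
					    struct urb *urb)
{
	return 0;	/* a real HCD hands the URB to its hardware here */
}

static inline int example_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
				      gfp_t mem_flags)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&example_hcd_lock, flags);
	rc = usb_hcd_link_urb_to_ep(hcd, urb);
	if (rc == 0) {
		rc = example_queue_to_hardware(hcd, urb);
		if (rc)
			usb_hcd_unlink_urb_from_ep(hcd, urb);
	}
	spin_unlock_irqrestore(&example_hcd_lock, flags);
	return rc;
}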
/**
* usb_hcd_check_unlink_urb - check whether an URB may be unlinked
* @hcd: host controller to which @urb was submitted
* @urb: URB being checked for unlinkability
* @status: error code to store in @urb if the unlink succeeds
*
* Host controller drivers should call this routine in their dequeue()
* method. The HCD's private spinlock must be held and interrupts must
* be disabled. The actions carried out here are required for making
 * sure that an unlink is valid.
*
* Return: 0 for no error, otherwise a negative error code (in which case
* the dequeue() method must fail). The possible error codes are:
*
* -EIDRM: @urb was not submitted or has already completed.
* The completion function may not have been called yet.
*
* -EBUSY: @urb has already been unlinked.
*/
int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb,
int status)
{
struct list_head *tmp;
/* insist the urb is still queued */
list_for_each(tmp, &urb->ep->urb_list) {
if (tmp == &urb->urb_list)
break;
}
if (tmp != &urb->urb_list)
return -EIDRM;
/* Any status except -EINPROGRESS means something already started to
* unlink this URB from the hardware. So there's no more work to do.
*/
if (urb->unlinked)
return -EBUSY;
urb->unlinked = status;
return 0;
}
EXPORT_SYMBOL_GPL(usb_hcd_check_unlink_urb);
/**
* usb_hcd_unlink_urb_from_ep - remove an URB from its endpoint queue
* @hcd: host controller to which @urb was submitted
* @urb: URB being unlinked
*
* Host controller drivers should call this routine before calling
* usb_hcd_giveback_urb(). The HCD's private spinlock must be held and
* interrupts must be disabled. The actions carried out here are required
* for URB completion.
*/
void usb_hcd_unlink_urb_from_ep(struct usb_hcd *hcd, struct urb *urb)
{
/* clear all state linking urb to this dev (and hcd) */
spin_lock(&hcd_urb_list_lock);
list_del_init(&urb->urb_list);
spin_unlock(&hcd_urb_list_lock);
}
EXPORT_SYMBOL_GPL(usb_hcd_unlink_urb_from_ep);
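/*
 * Illustrative sketch (not part of the original file): the matching
 * dequeue() shape. usb_hcd_check_unlink_urb() validates the request,
 * the URB is taken off its endpoint queue, and the giveback happens
 * only after the (hypothetical) private lock is dropped.
 */
static inline int example_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
				      int status)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&example_hcd_lock, flags);
	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (rc == 0) {
		/* ... stop the hardware from touching this URB ... */
		usb_hcd_unlink_urb_from_ep(hcd, urb);
	}
	spin_unlock_irqrestore(&example_hcd_lock, flags);
	if (rc == 0)
		usb_hcd_giveback_urb(hcd, urb, status);
	return rc;
}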
/*
* Some usb host controllers can only perform dma using a small SRAM area,
* or have restrictions on addressable DRAM.
* The usb core itself is however optimized for host controllers that can dma
* using regular system memory - like pci devices doing bus mastering.
*
* To support host controllers with limited dma capabilities we provide dma
* bounce buffers. This feature can be enabled by initializing
* hcd->localmem_pool using usb_hcd_setup_local_mem().
*
* The initialized hcd->localmem_pool then tells the usb code to allocate all
* data for dma using the genalloc API.
*
* So, to summarize...
*
* - We need "local" memory, canonical example being
* a small SRAM on a discrete controller being the
* only memory that the controller can read ...
* (a) "normal" kernel memory is no good, and
* (b) there's not enough to share
*
* - So we use that, even though the primary requirement
* is that the memory be "local" (hence addressable
* by that device), not "coherent".
*
*/
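/*
 * Illustrative sketch (not part of the original file): bus glue opting
 * in to such bounce buffering from its probe routine, using
 * usb_hcd_setup_local_mem() (defined later in this file). The SRAM
 * address and size below are made up, and the DMA address is assumed
 * to equal the physical address.
 */
static inline int example_enable_local_mem(struct usb_hcd *hcd)
{
	/* 64 KiB of device-local SRAM at a hypothetical address */
	return usb_hcd_setup_local_mem(hcd, 0x1fff0000, 0x1fff0000,
				       64 * 1024);
}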
static int hcd_alloc_coherent(struct usb_bus *bus,
gfp_t mem_flags, dma_addr_t *dma_handle,
void **vaddr_handle, size_t size,
enum dma_data_direction dir)
{
unsigned char *vaddr;
if (*vaddr_handle == NULL) {
WARN_ON_ONCE(1);
return -EFAULT;
}
vaddr = hcd_buffer_alloc(bus, size + sizeof(unsigned long),
mem_flags, dma_handle);
if (!vaddr)
return -ENOMEM;
/*
* Store the virtual address of the buffer at the end
* of the allocated dma buffer. The size of the buffer
* may be uneven so use unaligned functions instead
* of just rounding up. It makes sense to optimize for
* memory footprint over access speed since the amount
* of memory available for dma may be limited.
*/
put_unaligned((unsigned long)*vaddr_handle,
(unsigned long *)(vaddr + size));
if (dir == DMA_TO_DEVICE)
memcpy(vaddr, *vaddr_handle, size);
*vaddr_handle = vaddr;
return 0;
}
static void hcd_free_coherent(struct usb_bus *bus, dma_addr_t *dma_handle,
void **vaddr_handle, size_t size,
enum dma_data_direction dir)
{
unsigned char *vaddr = *vaddr_handle;
vaddr = (void *)get_unaligned((unsigned long *)(vaddr + size));
if (dir == DMA_FROM_DEVICE)
memcpy(vaddr, *vaddr_handle, size);
hcd_buffer_free(bus, size + sizeof(vaddr), *vaddr_handle, *dma_handle);
*vaddr_handle = vaddr;
*dma_handle = 0;
}
void usb_hcd_unmap_urb_setup_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
if (IS_ENABLED(CONFIG_HAS_DMA) &&
(urb->transfer_flags & URB_SETUP_MAP_SINGLE))
dma_unmap_single(hcd->self.sysdev,
urb->setup_dma,
sizeof(struct usb_ctrlrequest),
DMA_TO_DEVICE);
else if (urb->transfer_flags & URB_SETUP_MAP_LOCAL)
hcd_free_coherent(urb->dev->bus,
&urb->setup_dma,
(void **) &urb->setup_packet,
sizeof(struct usb_ctrlrequest),
DMA_TO_DEVICE);
/* Make it safe to call this routine more than once */
urb->transfer_flags &= ~(URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL);
}
EXPORT_SYMBOL_GPL(usb_hcd_unmap_urb_setup_for_dma);
static void unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
if (hcd->driver->unmap_urb_for_dma)
hcd->driver->unmap_urb_for_dma(hcd, urb);
else
usb_hcd_unmap_urb_for_dma(hcd, urb);
}
void usb_hcd_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
enum dma_data_direction dir;
usb_hcd_unmap_urb_setup_for_dma(hcd, urb);
dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
if (IS_ENABLED(CONFIG_HAS_DMA) &&
(urb->transfer_flags & URB_DMA_MAP_SG))
dma_unmap_sg(hcd->self.sysdev,
urb->sg,
urb->num_sgs,
dir);
else if (IS_ENABLED(CONFIG_HAS_DMA) &&
(urb->transfer_flags & URB_DMA_MAP_PAGE))
dma_unmap_page(hcd->self.sysdev,
urb->transfer_dma,
urb->transfer_buffer_length,
dir);
else if (IS_ENABLED(CONFIG_HAS_DMA) &&
(urb->transfer_flags & URB_DMA_MAP_SINGLE))
dma_unmap_single(hcd->self.sysdev,
urb->transfer_dma,
urb->transfer_buffer_length,
dir);
else if (urb->transfer_flags & URB_MAP_LOCAL)
hcd_free_coherent(urb->dev->bus,
&urb->transfer_dma,
&urb->transfer_buffer,
urb->transfer_buffer_length,
dir);
/* Make it safe to call this routine more than once */
urb->transfer_flags &= ~(URB_DMA_MAP_SG | URB_DMA_MAP_PAGE |
URB_DMA_MAP_SINGLE | URB_MAP_LOCAL);
}
EXPORT_SYMBOL_GPL(usb_hcd_unmap_urb_for_dma);
static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
{
if (hcd->driver->map_urb_for_dma)
return hcd->driver->map_urb_for_dma(hcd, urb, mem_flags);
else
return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
}
int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
{
enum dma_data_direction dir;
int ret = 0;
/* Map the URB's buffers for DMA access.
* Lower level HCD code should use *_dma exclusively,
* unless it uses pio or talks to another transport,
* or uses the provided scatter gather list for bulk.
*/
if (usb_endpoint_xfer_control(&urb->ep->desc)) {
if (hcd->self.uses_pio_for_control)
return ret;
if (hcd->localmem_pool) {
ret = hcd_alloc_coherent(
urb->dev->bus, mem_flags,
&urb->setup_dma,
(void **)&urb->setup_packet,
sizeof(struct usb_ctrlrequest),
DMA_TO_DEVICE);
if (ret)
return ret;
urb->transfer_flags |= URB_SETUP_MAP_LOCAL;
} else if (hcd_uses_dma(hcd)) {
if (object_is_on_stack(urb->setup_packet)) {
WARN_ONCE(1, "setup packet is on stack\n");
return -EAGAIN;
}
urb->setup_dma = dma_map_single(
hcd->self.sysdev,
urb->setup_packet,
sizeof(struct usb_ctrlrequest),
DMA_TO_DEVICE);
if (dma_mapping_error(hcd->self.sysdev,
urb->setup_dma))
return -EAGAIN;
urb->transfer_flags |= URB_SETUP_MAP_SINGLE;
}
}
dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
if (urb->transfer_buffer_length != 0
&& !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
if (hcd->localmem_pool) {
ret = hcd_alloc_coherent(
urb->dev->bus, mem_flags,
&urb->transfer_dma,
&urb->transfer_buffer,
urb->transfer_buffer_length,
dir);
if (ret == 0)
urb->transfer_flags |= URB_MAP_LOCAL;
} else if (hcd_uses_dma(hcd)) {
if (urb->num_sgs) {
int n;
			/* We don't support sg for isoc transfers! */
if (usb_endpoint_xfer_isoc(&urb->ep->desc)) {
WARN_ON(1);
return -EINVAL;
}
n = dma_map_sg(
hcd->self.sysdev,
urb->sg,
urb->num_sgs,
dir);
if (!n)
ret = -EAGAIN;
else
urb->transfer_flags |= URB_DMA_MAP_SG;
urb->num_mapped_sgs = n;
if (n != urb->num_sgs)
urb->transfer_flags |=
URB_DMA_SG_COMBINED;
} else if (urb->sg) {
struct scatterlist *sg = urb->sg;
urb->transfer_dma = dma_map_page(
hcd->self.sysdev,
sg_page(sg),
sg->offset,
urb->transfer_buffer_length,
dir);
if (dma_mapping_error(hcd->self.sysdev,
urb->transfer_dma))
ret = -EAGAIN;
else
urb->transfer_flags |= URB_DMA_MAP_PAGE;
} else if (object_is_on_stack(urb->transfer_buffer)) {
WARN_ONCE(1, "transfer buffer is on stack\n");
ret = -EAGAIN;
} else {
urb->transfer_dma = dma_map_single(
hcd->self.sysdev,
urb->transfer_buffer,
urb->transfer_buffer_length,
dir);
if (dma_mapping_error(hcd->self.sysdev,
urb->transfer_dma))
ret = -EAGAIN;
else
urb->transfer_flags |= URB_DMA_MAP_SINGLE;
}
}
if (ret && (urb->transfer_flags & (URB_SETUP_MAP_SINGLE |
URB_SETUP_MAP_LOCAL)))
usb_hcd_unmap_urb_for_dma(hcd, urb);
}
return ret;
}
EXPORT_SYMBOL_GPL(usb_hcd_map_urb_for_dma);
/*-------------------------------------------------------------------------*/
/* may be called in any context with a valid urb->dev usecount
* caller surrenders "ownership" of urb
* expects usb_submit_urb() to have sanity checked and conditioned all
* inputs in the urb
*/
int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
{
int status;
struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);
/* increment urb's reference count as part of giving it to the HCD
* (which will control it). HCD guarantees that it either returns
* an error or calls giveback(), but not both.
*/
usb_get_urb(urb);
atomic_inc(&urb->use_count);
atomic_inc(&urb->dev->urbnum);
usbmon_urb_submit(&hcd->self, urb);
/* NOTE requirements on root-hub callers (usbfs and the hub
* driver, for now): URBs' urb->transfer_buffer must be
* valid and usb_buffer_{sync,unmap}() not be needed, since
* they could clobber root hub response data. Also, control
* URBs must be submitted in process context with interrupts
* enabled.
*/
if (is_root_hub(urb->dev)) {
status = rh_urb_enqueue(hcd, urb);
} else {
status = map_urb_for_dma(hcd, urb, mem_flags);
if (likely(status == 0)) {
status = hcd->driver->urb_enqueue(hcd, urb, mem_flags);
if (unlikely(status))
unmap_urb_for_dma(hcd, urb);
}
}
if (unlikely(status)) {
usbmon_urb_submit_error(&hcd->self, urb, status);
urb->hcpriv = NULL;
INIT_LIST_HEAD(&urb->urb_list);
atomic_dec(&urb->use_count);
/*
* Order the write of urb->use_count above before the read
* of urb->reject below. Pairs with the memory barriers in
* usb_kill_urb() and usb_poison_urb().
*/
smp_mb__after_atomic();
atomic_dec(&urb->dev->urbnum);
if (atomic_read(&urb->reject))
wake_up(&usb_kill_urb_queue);
usb_put_urb(urb);
}
return status;
}
/*-------------------------------------------------------------------------*/
/* this makes the hcd giveback() the urb more quickly, by kicking it
* off hardware queues (which may take a while) and returning it as
* soon as practical. we've already set up the urb's return status,
* but we can't know if the callback completed already.
*/
static int unlink1(struct usb_hcd *hcd, struct urb *urb, int status)
{
int value;
if (is_root_hub(urb->dev))
value = usb_rh_urb_dequeue(hcd, urb, status);
else {
/* The only reason an HCD might fail this call is if
* it has not yet fully queued the urb to begin with.
* Such failures should be harmless. */
value = hcd->driver->urb_dequeue(hcd, urb, status);
}
return value;
}
/*
* called in any context
*
* caller guarantees urb won't be recycled till both unlink()
* and the urb's completion function return
*/
int usb_hcd_unlink_urb (struct urb *urb, int status)
{
struct usb_hcd *hcd;
struct usb_device *udev = urb->dev;
int retval = -EIDRM;
unsigned long flags;
/* Prevent the device and bus from going away while
* the unlink is carried out. If they are already gone
* then urb->use_count must be 0, since disconnected
* devices can't have any active URBs.
*/
spin_lock_irqsave(&hcd_urb_unlink_lock, flags);
if (atomic_read(&urb->use_count) > 0) {
retval = 0;
usb_get_dev(udev);
}
spin_unlock_irqrestore(&hcd_urb_unlink_lock, flags);
if (retval == 0) {
hcd = bus_to_hcd(urb->dev->bus);
retval = unlink1(hcd, urb, status);
if (retval == 0)
retval = -EINPROGRESS;
else if (retval != -EIDRM && retval != -EBUSY)
dev_dbg(&udev->dev, "hcd_unlink_urb %pK fail %d\n",
urb, retval);
usb_put_dev(udev);
}
return retval;
}
/*-------------------------------------------------------------------------*/
static void __usb_hcd_giveback_urb(struct urb *urb)
{
struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);
struct usb_anchor *anchor = urb->anchor;
int status = urb->unlinked;
urb->hcpriv = NULL;
if (unlikely((urb->transfer_flags & URB_SHORT_NOT_OK) &&
urb->actual_length < urb->transfer_buffer_length &&
!status))
status = -EREMOTEIO;
unmap_urb_for_dma(hcd, urb);
usbmon_urb_complete(&hcd->self, urb, status);
usb_anchor_suspend_wakeups(anchor);
usb_unanchor_urb(urb);
if (likely(status == 0))
usb_led_activity(USB_LED_EVENT_HOST);
/* pass ownership to the completion handler */
urb->status = status;
/*
* This function can be called in task context inside another remote
* coverage collection section, but kcov doesn't support that kind of
* recursion yet. Only collect coverage in softirq context for now.
*/
kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum);
urb->complete(urb);
kcov_remote_stop_softirq();
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
/*
* Order the write of urb->use_count above before the read
* of urb->reject below. Pairs with the memory barriers in
* usb_kill_urb() and usb_poison_urb().
*/
smp_mb__after_atomic();
if (unlikely(atomic_read(&urb->reject)))
wake_up(&usb_kill_urb_queue);
usb_put_urb(urb);
}
static void usb_giveback_urb_bh(struct tasklet_struct *t)
{
struct giveback_urb_bh *bh = from_tasklet(bh, t, bh);
struct list_head local_list;
spin_lock_irq(&bh->lock);
bh->running = true;
list_replace_init(&bh->head, &local_list);
spin_unlock_irq(&bh->lock);
while (!list_empty(&local_list)) {
struct urb *urb;
urb = list_entry(local_list.next, struct urb, urb_list);
list_del_init(&urb->urb_list);
bh->completing_ep = urb->ep;
__usb_hcd_giveback_urb(urb);
bh->completing_ep = NULL;
}
/*
	 * Give back any newly queued URBs on the next run so that this
	 * function cannot monopolize the CPU for too long.
*/
spin_lock_irq(&bh->lock);
if (!list_empty(&bh->head)) {
if (bh->high_prio)
tasklet_hi_schedule(&bh->bh);
else
tasklet_schedule(&bh->bh);
}
bh->running = false;
spin_unlock_irq(&bh->lock);
}
/**
* usb_hcd_giveback_urb - return URB from HCD to device driver
* @hcd: host controller returning the URB
* @urb: urb being returned to the USB device driver.
* @status: completion status code for the URB.
*
* Context: atomic. The completion callback is invoked in caller's context.
* For HCDs with HCD_BH flag set, the completion callback is invoked in tasklet
* context (except for URBs submitted to the root hub which always complete in
* caller's context).
*
* This hands the URB from HCD to its USB device driver, using its
* completion function. The HCD has freed all per-urb resources
* (and is done using urb->hcpriv). It also released all HCD locks;
* the device driver won't cause problems if it frees, modifies,
* or resubmits this URB.
*
* If @urb was unlinked, the value of @status will be overridden by
* @urb->unlinked. Erroneous short transfers are detected in case
* the HCD hasn't checked for them.
*/
void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct giveback_urb_bh *bh;
bool running;
/* pass status to tasklet via unlinked */
if (likely(!urb->unlinked))
urb->unlinked = status;
if (!hcd_giveback_urb_in_bh(hcd) && !is_root_hub(urb->dev)) {
__usb_hcd_giveback_urb(urb);
return;
}
if (usb_pipeisoc(urb->pipe) || usb_pipeint(urb->pipe))
bh = &hcd->high_prio_bh;
else
bh = &hcd->low_prio_bh;
spin_lock(&bh->lock);
list_add_tail(&urb->urb_list, &bh->head);
running = bh->running;
spin_unlock(&bh->lock);
	if (!running) {
		if (bh->high_prio)
			tasklet_hi_schedule(&bh->bh);
		else
			tasklet_schedule(&bh->bh);
	}
}
EXPORT_SYMBOL_GPL(usb_hcd_giveback_urb);
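/*
 * Illustrative sketch (not part of the original file): an HCD's
 * completion path for a URB its hardware has finished. @urb and
 * @status would come from the driver's hardware queues; the lock is
 * the hypothetical one from the enqueue sketch above.
 */
static inline void example_complete_urb(struct usb_hcd *hcd,
					struct urb *urb, int status)
{
	unsigned long flags;

	spin_lock_irqsave(&example_hcd_lock, flags);
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	spin_unlock_irqrestore(&example_hcd_lock, flags);

	/* For HCD_BH drivers this only queues the URB on a tasklet. */
	usb_hcd_giveback_urb(hcd, urb, status);
}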
/*-------------------------------------------------------------------------*/
/* Cancel all URBs pending on this endpoint and wait for the endpoint's
 * queue to drain completely. The caller must first ensure that no more
* URBs can be submitted for this endpoint.
*/
void usb_hcd_flush_endpoint(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
struct usb_hcd *hcd;
struct urb *urb;
if (!ep)
return;
might_sleep();
hcd = bus_to_hcd(udev->bus);
/* No more submits can occur */
spin_lock_irq(&hcd_urb_list_lock);
rescan:
list_for_each_entry_reverse(urb, &ep->urb_list, urb_list) {
int is_in;
if (urb->unlinked)
continue;
usb_get_urb (urb);
is_in = usb_urb_dir_in(urb);
spin_unlock(&hcd_urb_list_lock);
/* kick hcd */
unlink1(hcd, urb, -ESHUTDOWN);
dev_dbg (hcd->self.controller,
"shutdown urb %pK ep%d%s-%s\n",
urb, usb_endpoint_num(&ep->desc),
is_in ? "in" : "out",
usb_ep_type_string(usb_endpoint_type(&ep->desc)));
usb_put_urb (urb);
/* list contents may have changed */
spin_lock(&hcd_urb_list_lock);
goto rescan;
}
spin_unlock_irq(&hcd_urb_list_lock);
/* Wait until the endpoint queue is completely empty */
while (!list_empty (&ep->urb_list)) {
spin_lock_irq(&hcd_urb_list_lock);
/* The list may have changed while we acquired the spinlock */
urb = NULL;
if (!list_empty (&ep->urb_list)) {
urb = list_entry (ep->urb_list.prev, struct urb,
urb_list);
usb_get_urb (urb);
}
spin_unlock_irq(&hcd_urb_list_lock);
if (urb) {
usb_kill_urb (urb);
usb_put_urb (urb);
}
}
}
/**
* usb_hcd_alloc_bandwidth - check whether a new bandwidth setting exceeds
* the bus bandwidth
* @udev: target &usb_device
* @new_config: new configuration to install
* @cur_alt: the current alternate interface setting
* @new_alt: alternate interface setting that is being installed
*
* To change configurations, pass in the new configuration in new_config,
* and pass NULL for cur_alt and new_alt.
*
* To reset a device's configuration (put the device in the ADDRESSED state),
* pass in NULL for new_config, cur_alt, and new_alt.
*
* To change alternate interface settings, pass in NULL for new_config,
* pass in the current alternate interface setting in cur_alt,
* and pass in the new alternate interface setting in new_alt.
*
* Return: An error if the requested bandwidth change exceeds the
* bus bandwidth or host controller internal resources.
*/
int usb_hcd_alloc_bandwidth(struct usb_device *udev,
struct usb_host_config *new_config,
struct usb_host_interface *cur_alt,
struct usb_host_interface *new_alt)
{
int num_intfs, i, j;
struct usb_host_interface *alt = NULL;
int ret = 0;
struct usb_hcd *hcd;
struct usb_host_endpoint *ep;
hcd = bus_to_hcd(udev->bus);
if (!hcd->driver->check_bandwidth)
return 0;
/* Configuration is being removed - set configuration 0 */
if (!new_config && !cur_alt) {
for (i = 1; i < 16; ++i) {
ep = udev->ep_out[i];
if (ep)
hcd->driver->drop_endpoint(hcd, udev, ep);
ep = udev->ep_in[i];
if (ep)
hcd->driver->drop_endpoint(hcd, udev, ep);
}
hcd->driver->check_bandwidth(hcd, udev);
return 0;
}
/* Check if the HCD says there's enough bandwidth. Enable all endpoints
 * in each interface's alt setting 0 and ask the HCD to check the bandwidth
* of the bus. There will always be bandwidth for endpoint 0, so it's
* ok to exclude it.
*/
if (new_config) {
num_intfs = new_config->desc.bNumInterfaces;
/* Remove endpoints (except endpoint 0, which is always on the
* schedule) from the old config from the schedule
*/
for (i = 1; i < 16; ++i) {
ep = udev->ep_out[i];
if (ep) {
ret = hcd->driver->drop_endpoint(hcd, udev, ep);
if (ret < 0)
goto reset;
}
ep = udev->ep_in[i];
if (ep) {
ret = hcd->driver->drop_endpoint(hcd, udev, ep);
if (ret < 0)
goto reset;
}
}
for (i = 0; i < num_intfs; ++i) {
struct usb_host_interface *first_alt;
int iface_num;
first_alt = &new_config->intf_cache[i]->altsetting[0];
iface_num = first_alt->desc.bInterfaceNumber;
/* Set up endpoints for alternate interface setting 0 */
alt = usb_find_alt_setting(new_config, iface_num, 0);
if (!alt)
/* No alt setting 0? Pick the first setting. */
alt = first_alt;
for (j = 0; j < alt->desc.bNumEndpoints; j++) {
ret = hcd->driver->add_endpoint(hcd, udev, &alt->endpoint[j]);
if (ret < 0)
goto reset;
}
}
}
if (cur_alt && new_alt) {
struct usb_interface *iface = usb_ifnum_to_if(udev,
cur_alt->desc.bInterfaceNumber);
if (!iface)
return -EINVAL;
if (iface->resetting_device) {
/*
* The USB core just reset the device, so the xHCI host
* and the device will think alt setting 0 is installed.
* However, the USB core will pass in the alternate
* setting installed before the reset as cur_alt. Dig
* out the alternate setting 0 structure, or the first
* alternate setting if a broken device doesn't have alt
* setting 0.
*/
cur_alt = usb_altnum_to_altsetting(iface, 0);
if (!cur_alt)
cur_alt = &iface->altsetting[0];
}
/* Drop all the endpoints in the current alt setting */
for (i = 0; i < cur_alt->desc.bNumEndpoints; i++) {
ret = hcd->driver->drop_endpoint(hcd, udev,
&cur_alt->endpoint[i]);
if (ret < 0)
goto reset;
}
/* Add all the endpoints in the new alt setting */
for (i = 0; i < new_alt->desc.bNumEndpoints; i++) {
ret = hcd->driver->add_endpoint(hcd, udev,
&new_alt->endpoint[i]);
if (ret < 0)
goto reset;
}
}
ret = hcd->driver->check_bandwidth(hcd, udev);
reset:
if (ret < 0)
hcd->driver->reset_bandwidth(hcd, udev);
return ret;
}
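/*
 * Illustrative sketch (not part of the original file): the three call
 * shapes described in the kernel-doc above. All four pointers are
 * assumed to come from the caller.
 */
static inline void example_bandwidth_calls(struct usb_device *udev,
					   struct usb_host_config *config,
					   struct usb_host_interface *cur_alt,
					   struct usb_host_interface *new_alt)
{
	/* install a new configuration */
	usb_hcd_alloc_bandwidth(udev, config, NULL, NULL);

	/* drop back to the ADDRESSED (unconfigured) state */
	usb_hcd_alloc_bandwidth(udev, NULL, NULL, NULL);

	/* switch alternate settings within the current configuration */
	usb_hcd_alloc_bandwidth(udev, NULL, cur_alt, new_alt);
}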
/* Disables the endpoint: synchronizes with the hcd to make sure all
* endpoint state is gone from hardware. usb_hcd_flush_endpoint() must
* have been called previously. Use for set_configuration, set_interface,
* driver removal, physical disconnect.
*
* example: a qh stored in ep->hcpriv, holding state related to endpoint
* type, maxpacket size, toggle, halt status, and scheduling.
*/
void usb_hcd_disable_endpoint(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
struct usb_hcd *hcd;
might_sleep();
hcd = bus_to_hcd(udev->bus);
if (hcd->driver->endpoint_disable)
hcd->driver->endpoint_disable(hcd, ep);
}
/**
* usb_hcd_reset_endpoint - reset host endpoint state
* @udev: USB device.
* @ep: the endpoint to reset.
*
* Resets any host endpoint state such as the toggle bit, sequence
* number and current window.
*/
void usb_hcd_reset_endpoint(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
if (hcd->driver->endpoint_reset)
hcd->driver->endpoint_reset(hcd, ep);
else {
int epnum = usb_endpoint_num(&ep->desc);
int is_out = usb_endpoint_dir_out(&ep->desc);
int is_control = usb_endpoint_xfer_control(&ep->desc);
usb_settoggle(udev, epnum, is_out, 0);
if (is_control)
usb_settoggle(udev, epnum, !is_out, 0);
}
}
/**
* usb_alloc_streams - allocate bulk endpoint stream IDs.
* @interface: alternate setting that includes all endpoints.
* @eps: array of endpoints that need streams.
* @num_eps: number of endpoints in the array.
* @num_streams: number of streams to allocate.
* @mem_flags: flags hcd should use to allocate memory.
*
* Sets up a group of bulk endpoints to have @num_streams stream IDs available.
* Drivers may queue multiple transfers to different stream IDs, which may
* complete in a different order than they were queued.
*
* Return: On success, the number of allocated streams. On failure, a negative
* error code.
*/
int usb_alloc_streams(struct usb_interface *interface,
struct usb_host_endpoint **eps, unsigned int num_eps,
unsigned int num_streams, gfp_t mem_flags)
{
struct usb_hcd *hcd;
struct usb_device *dev;
int i, ret;
dev = interface_to_usbdev(interface);
hcd = bus_to_hcd(dev->bus);
if (!hcd->driver->alloc_streams || !hcd->driver->free_streams)
return -EINVAL;
if (dev->speed < USB_SPEED_SUPER)
return -EINVAL;
if (dev->state < USB_STATE_CONFIGURED)
return -ENODEV;
for (i = 0; i < num_eps; i++) {
/* Streams only apply to bulk endpoints. */
if (!usb_endpoint_xfer_bulk(&eps[i]->desc))
return -EINVAL;
/* Re-alloc is not allowed */
if (eps[i]->streams)
return -EINVAL;
}
ret = hcd->driver->alloc_streams(hcd, dev, eps, num_eps,
num_streams, mem_flags);
if (ret < 0)
return ret;
for (i = 0; i < num_eps; i++)
eps[i]->streams = ret;
return ret;
}
EXPORT_SYMBOL_GPL(usb_alloc_streams);
/**
* usb_free_streams - free bulk endpoint stream IDs.
* @interface: alternate setting that includes all endpoints.
* @eps: array of endpoints to remove streams from.
* @num_eps: number of endpoints in the array.
* @mem_flags: flags hcd should use to allocate memory.
*
* Reverts a group of bulk endpoints back to not using stream IDs.
 * Can fail if we are given bad arguments or the HCD is broken.
*
* Return: 0 on success. On failure, a negative error code.
*/
int usb_free_streams(struct usb_interface *interface,
struct usb_host_endpoint **eps, unsigned int num_eps,
gfp_t mem_flags)
{
struct usb_hcd *hcd;
struct usb_device *dev;
int i, ret;
dev = interface_to_usbdev(interface);
hcd = bus_to_hcd(dev->bus);
if (dev->speed < USB_SPEED_SUPER)
return -EINVAL;
/* Double-free is not allowed */
for (i = 0; i < num_eps; i++)
if (!eps[i] || !eps[i]->streams)
return -EINVAL;
ret = hcd->driver->free_streams(hcd, dev, eps, num_eps, mem_flags);
if (ret < 0)
return ret;
for (i = 0; i < num_eps; i++)
eps[i]->streams = 0;
return ret;
}
EXPORT_SYMBOL_GPL(usb_free_streams);
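/*
 * Illustrative sketch (not part of the original file): a driver such as
 * uas setting up 16 stream IDs on a SuperSpeed bulk IN/OUT pair and
 * tearing them down again. The endpoint pointers are assumed to come
 * from the probed interface.
 */
static inline int example_use_streams(struct usb_interface *intf,
				      struct usb_host_endpoint *bulk_in,
				      struct usb_host_endpoint *bulk_out)
{
	struct usb_host_endpoint *eps[] = { bulk_in, bulk_out };
	int streams;

	streams = usb_alloc_streams(intf, eps, ARRAY_SIZE(eps), 16,
				    GFP_KERNEL);
	if (streams < 0)
		return streams;

	/* ... queue URBs with urb->stream_id set to 1..streams ... */

	return usb_free_streams(intf, eps, ARRAY_SIZE(eps), GFP_KERNEL);
}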
/* Protect against drivers that try to unlink URBs after the device
* is gone, by waiting until all unlinks for @udev are finished.
* Since we don't currently track URBs by device, simply wait until
* nothing is running in the locked region of usb_hcd_unlink_urb().
*/
void usb_hcd_synchronize_unlinks(struct usb_device *udev)
{
spin_lock_irq(&hcd_urb_unlink_lock);
spin_unlock_irq(&hcd_urb_unlink_lock);
}
/*-------------------------------------------------------------------------*/
/* called in any context */
int usb_hcd_get_frame_number (struct usb_device *udev)
{
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
if (!HCD_RH_RUNNING(hcd))
return -ESHUTDOWN;
return hcd->driver->get_frame_number (hcd);
}
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_USB_HCD_TEST_MODE
static void usb_ehset_completion(struct urb *urb)
{
struct completion *done = urb->context;
complete(done);
}
/*
* Allocate and initialize a control URB. This request will be used by the
* EHSET SINGLE_STEP_SET_FEATURE test in which the DATA and STATUS stages
* of the GetDescriptor request are sent 15 seconds after the SETUP stage.
 * Returns NULL on failure.
*/
static struct urb *request_single_step_set_feature_urb(
struct usb_device *udev,
void *dr,
void *buf,
struct completion *done)
{
struct urb *urb;
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb)
return NULL;
urb->pipe = usb_rcvctrlpipe(udev, 0);
urb->ep = &udev->ep0;
urb->dev = udev;
urb->setup_packet = (void *)dr;
urb->transfer_buffer = buf;
urb->transfer_buffer_length = USB_DT_DEVICE_SIZE;
urb->complete = usb_ehset_completion;
urb->status = -EINPROGRESS;
urb->actual_length = 0;
urb->transfer_flags = URB_DIR_IN;
usb_get_urb(urb);
atomic_inc(&urb->use_count);
atomic_inc(&urb->dev->urbnum);
if (map_urb_for_dma(hcd, urb, GFP_KERNEL)) {
usb_put_urb(urb);
usb_free_urb(urb);
return NULL;
}
urb->context = done;
return urb;
}
int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
{
int retval = -ENOMEM;
struct usb_ctrlrequest *dr;
struct urb *urb;
struct usb_device *udev;
struct usb_device_descriptor *buf;
DECLARE_COMPLETION_ONSTACK(done);
/* Obtain udev of the rhub's child port */
udev = usb_hub_find_child(hcd->self.root_hub, port);
if (!udev) {
dev_err(hcd->self.controller, "No device attached to the RootHub\n");
return -ENODEV;
}
buf = kmalloc(USB_DT_DEVICE_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
if (!dr) {
kfree(buf);
return -ENOMEM;
}
/* Fill Setup packet for GetDescriptor */
dr->bRequestType = USB_DIR_IN;
dr->bRequest = USB_REQ_GET_DESCRIPTOR;
dr->wValue = cpu_to_le16(USB_DT_DEVICE << 8);
dr->wIndex = 0;
dr->wLength = cpu_to_le16(USB_DT_DEVICE_SIZE);
urb = request_single_step_set_feature_urb(udev, dr, buf, &done);
if (!urb)
goto cleanup;
/* Submit just the SETUP stage */
retval = hcd->driver->submit_single_step_set_feature(hcd, urb, 1);
if (retval)
goto out1;
if (!wait_for_completion_timeout(&done, msecs_to_jiffies(2000))) {
usb_kill_urb(urb);
retval = -ETIMEDOUT;
dev_err(hcd->self.controller,
"%s SETUP stage timed out on ep0\n", __func__);
goto out1;
}
msleep(15 * 1000);
/* Complete remaining DATA and STATUS stages using the same URB */
urb->status = -EINPROGRESS;
usb_get_urb(urb);
atomic_inc(&urb->use_count);
atomic_inc(&urb->dev->urbnum);
retval = hcd->driver->submit_single_step_set_feature(hcd, urb, 0);
if (!retval && !wait_for_completion_timeout(&done,
msecs_to_jiffies(2000))) {
usb_kill_urb(urb);
retval = -ETIMEDOUT;
dev_err(hcd->self.controller,
"%s IN stage timed out on ep0\n", __func__);
}
out1:
usb_free_urb(urb);
cleanup:
kfree(dr);
kfree(buf);
return retval;
}
EXPORT_SYMBOL_GPL(ehset_single_step_set_feature);
#endif /* CONFIG_USB_HCD_TEST_MODE */
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_PM
int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg)
{
struct usb_hcd *hcd = bus_to_hcd(rhdev->bus);
int status;
int old_state = hcd->state;
dev_dbg(&rhdev->dev, "bus %ssuspend, wakeup %d\n",
(PMSG_IS_AUTO(msg) ? "auto-" : ""),
rhdev->do_remote_wakeup);
if (HCD_DEAD(hcd)) {
dev_dbg(&rhdev->dev, "skipped %s of dead bus\n", "suspend");
return 0;
}
if (!hcd->driver->bus_suspend) {
status = -ENOENT;
} else {
clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
hcd->state = HC_STATE_QUIESCING;
status = hcd->driver->bus_suspend(hcd);
}
if (status == 0) {
usb_set_device_state(rhdev, USB_STATE_SUSPENDED);
hcd->state = HC_STATE_SUSPENDED;
if (!PMSG_IS_AUTO(msg))
usb_phy_roothub_suspend(hcd->self.sysdev,
hcd->phy_roothub);
/* Did we race with a root-hub wakeup event? */
if (rhdev->do_remote_wakeup) {
char buffer[6];
status = hcd->driver->hub_status_data(hcd, buffer);
if (status != 0) {
dev_dbg(&rhdev->dev, "suspend raced with wakeup event\n");
hcd_bus_resume(rhdev, PMSG_AUTO_RESUME);
status = -EBUSY;
}
}
} else {
spin_lock_irq(&hcd_root_hub_lock);
if (!HCD_DEAD(hcd)) {
set_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
hcd->state = old_state;
}
spin_unlock_irq(&hcd_root_hub_lock);
dev_dbg(&rhdev->dev, "bus %s fail, err %d\n",
"suspend", status);
}
return status;
}
int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
{
struct usb_hcd *hcd = bus_to_hcd(rhdev->bus);
int status;
int old_state = hcd->state;
dev_dbg(&rhdev->dev, "usb %sresume\n",
(PMSG_IS_AUTO(msg) ? "auto-" : ""));
if (HCD_DEAD(hcd)) {
dev_dbg(&rhdev->dev, "skipped %s of dead bus\n", "resume");
return 0;
}
if (!PMSG_IS_AUTO(msg)) {
status = usb_phy_roothub_resume(hcd->self.sysdev,
hcd->phy_roothub);
if (status)
return status;
}
if (!hcd->driver->bus_resume)
return -ENOENT;
if (HCD_RH_RUNNING(hcd))
return 0;
hcd->state = HC_STATE_RESUMING;
status = hcd->driver->bus_resume(hcd);
clear_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
if (status == 0)
status = usb_phy_roothub_calibrate(hcd->phy_roothub);
if (status == 0) {
struct usb_device *udev;
int port1;
spin_lock_irq(&hcd_root_hub_lock);
if (!HCD_DEAD(hcd)) {
usb_set_device_state(rhdev, rhdev->actconfig
? USB_STATE_CONFIGURED
: USB_STATE_ADDRESS);
set_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
hcd->state = HC_STATE_RUNNING;
}
spin_unlock_irq(&hcd_root_hub_lock);
/*
* Check whether any of the enabled ports on the root hub are
* unsuspended. If they are then a TRSMRCY delay is needed
* (this is what the USB-2 spec calls a "global resume").
* Otherwise we can skip the delay.
*/
usb_hub_for_each_child(rhdev, port1, udev) {
if (udev->state != USB_STATE_NOTATTACHED &&
!udev->port_is_suspended) {
usleep_range(10000, 11000); /* TRSMRCY */
break;
}
}
} else {
hcd->state = old_state;
usb_phy_roothub_suspend(hcd->self.sysdev, hcd->phy_roothub);
dev_dbg(&rhdev->dev, "bus %s fail, err %d\n",
"resume", status);
if (status != -ESHUTDOWN)
usb_hc_died(hcd);
}
return status;
}
/* Workqueue routine for root-hub remote wakeup */
static void hcd_resume_work(struct work_struct *work)
{
struct usb_hcd *hcd = container_of(work, struct usb_hcd, wakeup_work);
struct usb_device *udev = hcd->self.root_hub;
usb_remote_wakeup(udev);
}
/**
* usb_hcd_resume_root_hub - called by HCD to resume its root hub
* @hcd: host controller for this root hub
*
* The USB host controller calls this function when its root hub is
* suspended (with the remote wakeup feature enabled) and a remote
* wakeup request is received. The routine submits a workqueue request
* to resume the root hub (that is, manage its downstream ports again).
*/
void usb_hcd_resume_root_hub (struct usb_hcd *hcd)
{
unsigned long flags;
spin_lock_irqsave (&hcd_root_hub_lock, flags);
if (hcd->rh_registered) {
pm_wakeup_event(&hcd->self.root_hub->dev, 0);
set_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
queue_work(pm_wq, &hcd->wakeup_work);
}
spin_unlock_irqrestore (&hcd_root_hub_lock, flags);
}
EXPORT_SYMBOL_GPL(usb_hcd_resume_root_hub);
#endif /* CONFIG_PM */
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_USB_OTG
/**
* usb_bus_start_enum - start immediate enumeration (for OTG)
* @bus: the bus (must use hcd framework)
* @port_num: 1-based number of port; usually bus->otg_port
* Context: atomic
*
* Starts enumeration, with an immediate reset followed later by
* hub_wq identifying and possibly configuring the device.
* This is needed by OTG controller drivers, where it helps meet
* HNP protocol timing requirements for starting a port reset.
*
* Return: 0 if successful.
*/
int usb_bus_start_enum(struct usb_bus *bus, unsigned port_num)
{
struct usb_hcd *hcd;
int status = -EOPNOTSUPP;
/* NOTE: since HNP can't start by grabbing the bus's address0_sem,
* boards with root hubs hooked up to internal devices (instead of
* just the OTG port) may need more attention to resetting...
*/
hcd = bus_to_hcd(bus);
if (port_num && hcd->driver->start_port_reset)
status = hcd->driver->start_port_reset(hcd, port_num);
/* allocate hub_wq shortly after (first) root port reset finishes;
* it may issue others, until at least 50 msecs have passed.
*/
if (status == 0)
mod_timer(&hcd->rh_timer, jiffies + msecs_to_jiffies(10));
return status;
}
EXPORT_SYMBOL_GPL(usb_bus_start_enum);
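/*
 * Illustrative sketch (not part of the original file): an OTG
 * controller driver kicking off enumeration once HNP hands it the host
 * role. The wrapper is hypothetical.
 */
static inline void example_hnp_become_host(struct usb_bus *bus)
{
	/* immediate port reset; hub_wq finishes enumeration later */
	if (usb_bus_start_enum(bus, bus->otg_port))
		pr_debug("otg: start_port_reset not supported by this HCD\n");
}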
#endif
/*-------------------------------------------------------------------------*/
/**
* usb_hcd_irq - hook IRQs to HCD framework (bus glue)
* @irq: the IRQ being raised
* @__hcd: pointer to the HCD whose IRQ is being signaled
*
* If the controller isn't HALTed, calls the driver's irq handler.
* Checks whether the controller is now dead.
*
* Return: %IRQ_HANDLED if the IRQ was handled. %IRQ_NONE otherwise.
*/
irqreturn_t usb_hcd_irq (int irq, void *__hcd)
{
struct usb_hcd *hcd = __hcd;
irqreturn_t rc;
if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd)))
rc = IRQ_NONE;
else if (hcd->driver->irq(hcd) == IRQ_NONE)
rc = IRQ_NONE;
else
rc = IRQ_HANDLED;
return rc;
}
EXPORT_SYMBOL_GPL(usb_hcd_irq);
/*-------------------------------------------------------------------------*/
/* Workqueue routine for when the root-hub has died. */
static void hcd_died_work(struct work_struct *work)
{
struct usb_hcd *hcd = container_of(work, struct usb_hcd, died_work);
static char *env[] = {
"ERROR=DEAD",
NULL
};
/* Notify user space that the host controller has died */
kobject_uevent_env(&hcd->self.root_hub->dev.kobj, KOBJ_OFFLINE, env);
}
/**
* usb_hc_died - report abnormal shutdown of a host controller (bus glue)
* @hcd: pointer to the HCD representing the controller
*
* This is called by bus glue to report a USB host controller that died
* while operations may still have been pending. It's called automatically
* by the PCI glue, so only glue for non-PCI busses should need to call it.
*
* Only call this function with the primary HCD.
*/
void usb_hc_died (struct usb_hcd *hcd)
{
unsigned long flags;
dev_err (hcd->self.controller, "HC died; cleaning up\n");
spin_lock_irqsave (&hcd_root_hub_lock, flags);
clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
set_bit(HCD_FLAG_DEAD, &hcd->flags);
if (hcd->rh_registered) {
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
/* make hub_wq clean up old urbs and devices */
usb_set_device_state (hcd->self.root_hub,
USB_STATE_NOTATTACHED);
usb_kick_hub_wq(hcd->self.root_hub);
}
if (usb_hcd_is_primary_hcd(hcd) && hcd->shared_hcd) {
hcd = hcd->shared_hcd;
clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
set_bit(HCD_FLAG_DEAD, &hcd->flags);
if (hcd->rh_registered) {
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
/* make hub_wq clean up old urbs and devices */
usb_set_device_state(hcd->self.root_hub,
USB_STATE_NOTATTACHED);
usb_kick_hub_wq(hcd->self.root_hub);
}
}
/* Handle the case where this function gets called with a shared HCD */
if (usb_hcd_is_primary_hcd(hcd))
schedule_work(&hcd->died_work);
else
schedule_work(&hcd->primary_hcd->died_work);
spin_unlock_irqrestore (&hcd_root_hub_lock, flags);
/* Make sure that the other roothub is also deallocated. */
}
EXPORT_SYMBOL_GPL (usb_hc_died);
/*-------------------------------------------------------------------------*/
static void init_giveback_urb_bh(struct giveback_urb_bh *bh)
{
spin_lock_init(&bh->lock);
INIT_LIST_HEAD(&bh->head);
tasklet_setup(&bh->bh, usb_giveback_urb_bh);
}
struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver,
struct device *sysdev, struct device *dev, const char *bus_name,
struct usb_hcd *primary_hcd)
{
struct usb_hcd *hcd;
hcd = kzalloc(sizeof(*hcd) + driver->hcd_priv_size, GFP_KERNEL);
if (!hcd)
return NULL;
if (primary_hcd == NULL) {
hcd->address0_mutex = kmalloc(sizeof(*hcd->address0_mutex),
GFP_KERNEL);
if (!hcd->address0_mutex) {
kfree(hcd);
dev_dbg(dev, "hcd address0 mutex alloc failed\n");
return NULL;
}
mutex_init(hcd->address0_mutex);
hcd->bandwidth_mutex = kmalloc(sizeof(*hcd->bandwidth_mutex),
GFP_KERNEL);
if (!hcd->bandwidth_mutex) {
kfree(hcd->address0_mutex);
kfree(hcd);
dev_dbg(dev, "hcd bandwidth mutex alloc failed\n");
return NULL;
}
mutex_init(hcd->bandwidth_mutex);
dev_set_drvdata(dev, hcd);
} else {
mutex_lock(&usb_port_peer_mutex);
hcd->address0_mutex = primary_hcd->address0_mutex;
hcd->bandwidth_mutex = primary_hcd->bandwidth_mutex;
hcd->primary_hcd = primary_hcd;
primary_hcd->primary_hcd = primary_hcd;
hcd->shared_hcd = primary_hcd;
primary_hcd->shared_hcd = hcd;
mutex_unlock(&usb_port_peer_mutex);
}
kref_init(&hcd->kref);
usb_bus_init(&hcd->self);
hcd->self.controller = dev;
hcd->self.sysdev = sysdev;
hcd->self.bus_name = bus_name;
timer_setup(&hcd->rh_timer, rh_timer_func, 0);
#ifdef CONFIG_PM
INIT_WORK(&hcd->wakeup_work, hcd_resume_work);
#endif
INIT_WORK(&hcd->died_work, hcd_died_work);
hcd->driver = driver;
hcd->speed = driver->flags & HCD_MASK;
hcd->product_desc = (driver->product_desc) ? driver->product_desc :
"USB Host Controller";
return hcd;
}
EXPORT_SYMBOL_GPL(__usb_create_hcd);
/**
* usb_create_shared_hcd - create and initialize an HCD structure
* @driver: HC driver that will use this hcd
* @dev: device for this HC, stored in hcd->self.controller
* @bus_name: value to store in hcd->self.bus_name
* @primary_hcd: a pointer to the usb_hcd structure that is sharing the
* PCI device. Only allocate certain resources for the primary HCD
*
* Context: task context, might sleep.
*
* Allocate a struct usb_hcd, with extra space at the end for the
* HC driver's private data. Initialize the generic members of the
* hcd structure.
*
* Return: On success, a pointer to the created and initialized HCD structure.
* On failure (e.g. if memory is unavailable), %NULL.
*/
struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
struct device *dev, const char *bus_name,
struct usb_hcd *primary_hcd)
{
return __usb_create_hcd(driver, dev, dev, bus_name, primary_hcd);
}
EXPORT_SYMBOL_GPL(usb_create_shared_hcd);
/**
* usb_create_hcd - create and initialize an HCD structure
* @driver: HC driver that will use this hcd
* @dev: device for this HC, stored in hcd->self.controller
* @bus_name: value to store in hcd->self.bus_name
*
* Context: task context, might sleep.
*
* Allocate a struct usb_hcd, with extra space at the end for the
* HC driver's private data. Initialize the generic members of the
* hcd structure.
*
* Return: On success, a pointer to the created and initialized HCD
* structure. On failure (e.g. if memory is unavailable), %NULL.
*/
struct usb_hcd *usb_create_hcd(const struct hc_driver *driver,
struct device *dev, const char *bus_name)
{
return __usb_create_hcd(driver, dev, dev, bus_name, NULL);
}
EXPORT_SYMBOL_GPL(usb_create_hcd);
/*
* Roothubs that share one PCI device must also share the bandwidth mutex.
* Don't deallocate the bandwidth_mutex until the last shared usb_hcd is
* deallocated.
*
* Make sure to deallocate the bandwidth_mutex only when the last HCD is
* freed. When hcd_release() is called for either hcd in a peer set,
* invalidate the peer's ->shared_hcd and ->primary_hcd pointers.
*/
static void hcd_release(struct kref *kref)
{
struct usb_hcd *hcd = container_of (kref, struct usb_hcd, kref);
mutex_lock(&usb_port_peer_mutex);
if (hcd->shared_hcd) {
struct usb_hcd *peer = hcd->shared_hcd;
peer->shared_hcd = NULL;
peer->primary_hcd = NULL;
} else {
kfree(hcd->address0_mutex);
kfree(hcd->bandwidth_mutex);
}
mutex_unlock(&usb_port_peer_mutex);
kfree(hcd);
}
struct usb_hcd *usb_get_hcd (struct usb_hcd *hcd)
{
if (hcd)
kref_get (&hcd->kref);
return hcd;
}
EXPORT_SYMBOL_GPL(usb_get_hcd);
void usb_put_hcd (struct usb_hcd *hcd)
{
if (hcd)
kref_put (&hcd->kref, hcd_release);
}
EXPORT_SYMBOL_GPL(usb_put_hcd);
int usb_hcd_is_primary_hcd(struct usb_hcd *hcd)
{
if (!hcd->primary_hcd)
return 1;
return hcd == hcd->primary_hcd;
}
EXPORT_SYMBOL_GPL(usb_hcd_is_primary_hcd);
int usb_hcd_find_raw_port_number(struct usb_hcd *hcd, int port1)
{
if (!hcd->driver->find_raw_port_number)
return port1;
return hcd->driver->find_raw_port_number(hcd, port1);
}
static int usb_hcd_request_irqs(struct usb_hcd *hcd,
unsigned int irqnum, unsigned long irqflags)
{
int retval;
if (hcd->driver->irq) {
snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
hcd->driver->description, hcd->self.busnum);
retval = request_irq(irqnum, &usb_hcd_irq, irqflags,
hcd->irq_descr, hcd);
if (retval != 0) {
dev_err(hcd->self.controller,
"request interrupt %d failed\n",
irqnum);
return retval;
}
hcd->irq = irqnum;
dev_info(hcd->self.controller, "irq %d, %s 0x%08llx\n", irqnum,
(hcd->driver->flags & HCD_MEMORY) ?
"io mem" : "io port",
(unsigned long long)hcd->rsrc_start);
} else {
hcd->irq = 0;
if (hcd->rsrc_start)
dev_info(hcd->self.controller, "%s 0x%08llx\n",
(hcd->driver->flags & HCD_MEMORY) ?
"io mem" : "io port",
(unsigned long long)hcd->rsrc_start);
}
return 0;
}
/*
* Before we free this root hub, flush in-flight peering attempts
* and disable peer lookups
*/
static void usb_put_invalidate_rhdev(struct usb_hcd *hcd)
{
struct usb_device *rhdev;
mutex_lock(&usb_port_peer_mutex);
rhdev = hcd->self.root_hub;
hcd->self.root_hub = NULL;
mutex_unlock(&usb_port_peer_mutex);
usb_put_dev(rhdev);
}
/**
* usb_stop_hcd - Halt the HCD
* @hcd: the usb_hcd that has to be halted
*
* Stop the root-hub polling timer and invoke the HCD's ->stop callback.
*/
static void usb_stop_hcd(struct usb_hcd *hcd)
{
hcd->rh_pollable = 0;
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
del_timer_sync(&hcd->rh_timer);
hcd->driver->stop(hcd);
hcd->state = HC_STATE_HALT;
/* In case the HCD restarted the timer, stop it again. */
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
del_timer_sync(&hcd->rh_timer);
}
/**
* usb_add_hcd - finish generic HCD structure initialization and register
* @hcd: the usb_hcd structure to initialize
* @irqnum: Interrupt line to allocate
* @irqflags: Interrupt type flags
*
* Finish the remaining parts of generic HCD initialization: allocate the
* buffers of consistent memory, register the bus, request the IRQ line,
* and call the driver's reset() and start() routines.
*/
int usb_add_hcd(struct usb_hcd *hcd,
unsigned int irqnum, unsigned long irqflags)
{
int retval;
struct usb_device *rhdev;
struct usb_hcd *shared_hcd;
if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) {
hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev);
if (IS_ERR(hcd->phy_roothub))
return PTR_ERR(hcd->phy_roothub);
retval = usb_phy_roothub_init(hcd->phy_roothub);
if (retval)
return retval;
retval = usb_phy_roothub_set_mode(hcd->phy_roothub,
PHY_MODE_USB_HOST_SS);
if (retval)
retval = usb_phy_roothub_set_mode(hcd->phy_roothub,
PHY_MODE_USB_HOST);
if (retval)
goto err_usb_phy_roothub_power_on;
retval = usb_phy_roothub_power_on(hcd->phy_roothub);
if (retval)
goto err_usb_phy_roothub_power_on;
}
dev_info(hcd->self.controller, "%s\n", hcd->product_desc);
switch (authorized_default) {
case USB_AUTHORIZE_NONE:
hcd->dev_policy = USB_DEVICE_AUTHORIZE_NONE;
break;
case USB_AUTHORIZE_INTERNAL:
hcd->dev_policy = USB_DEVICE_AUTHORIZE_INTERNAL;
break;
case USB_AUTHORIZE_ALL:
case USB_AUTHORIZE_WIRED:
default:
hcd->dev_policy = USB_DEVICE_AUTHORIZE_ALL;
break;
}
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	/* by default all interfaces are authorized */
set_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags);
/* HC is in reset state, but accessible. Now do the one-time init,
* bottom up so that hcds can customize the root hubs before hub_wq
* starts talking to them. (Note, bus id is assigned early too.)
*/
retval = hcd_buffer_create(hcd);
if (retval != 0) {
dev_dbg(hcd->self.sysdev, "pool alloc failed\n");
goto err_create_buf;
}
retval = usb_register_bus(&hcd->self);
if (retval < 0)
goto err_register_bus;
rhdev = usb_alloc_dev(NULL, &hcd->self, 0);
if (rhdev == NULL) {
dev_err(hcd->self.sysdev, "unable to allocate root hub\n");
retval = -ENOMEM;
goto err_allocate_root_hub;
}
mutex_lock(&usb_port_peer_mutex);
hcd->self.root_hub = rhdev;
mutex_unlock(&usb_port_peer_mutex);
rhdev->rx_lanes = 1;
rhdev->tx_lanes = 1;
rhdev->ssp_rate = USB_SSP_GEN_UNKNOWN;
switch (hcd->speed) {
case HCD_USB11:
rhdev->speed = USB_SPEED_FULL;
break;
case HCD_USB2:
rhdev->speed = USB_SPEED_HIGH;
break;
case HCD_USB3:
rhdev->speed = USB_SPEED_SUPER;
break;
case HCD_USB32:
rhdev->rx_lanes = 2;
rhdev->tx_lanes = 2;
rhdev->ssp_rate = USB_SSP_GEN_2x2;
rhdev->speed = USB_SPEED_SUPER_PLUS;
break;
case HCD_USB31:
rhdev->ssp_rate = USB_SSP_GEN_2x1;
rhdev->speed = USB_SPEED_SUPER_PLUS;
break;
default:
retval = -EINVAL;
goto err_set_rh_speed;
}
/* wakeup flag init defaults to "everything works" for root hubs,
* but drivers can override it in reset() if needed, along with
* recording the overall controller's system wakeup capability.
*/
device_set_wakeup_capable(&rhdev->dev, 1);
/* HCD_FLAG_RH_RUNNING doesn't matter until the root hub is
* registered. But since the controller can die at any time,
* let's initialize the flag before touching the hardware.
*/
set_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
/* "reset" is misnamed; its role is now one-time init. the controller
* should already have been reset (and boot firmware kicked off etc).
*/
if (hcd->driver->reset) {
retval = hcd->driver->reset(hcd);
if (retval < 0) {
dev_err(hcd->self.controller, "can't setup: %d\n",
retval);
goto err_hcd_driver_setup;
}
}
hcd->rh_pollable = 1;
retval = usb_phy_roothub_calibrate(hcd->phy_roothub);
if (retval)
goto err_hcd_driver_setup;
/* NOTE: root hub and controller capabilities may not be the same */
if (device_can_wakeup(hcd->self.controller)
&& device_can_wakeup(&hcd->self.root_hub->dev))
dev_dbg(hcd->self.controller, "supports USB remote wakeup\n");
/* initialize tasklets */
init_giveback_urb_bh(&hcd->high_prio_bh);
hcd->high_prio_bh.high_prio = true;
init_giveback_urb_bh(&hcd->low_prio_bh);
/* enable irqs just before we start the controller,
* if the BIOS provides legacy PCI irqs.
*/
if (usb_hcd_is_primary_hcd(hcd) && irqnum) {
retval = usb_hcd_request_irqs(hcd, irqnum, irqflags);
if (retval)
goto err_request_irq;
}
hcd->state = HC_STATE_RUNNING;
retval = hcd->driver->start(hcd);
if (retval < 0) {
dev_err(hcd->self.controller, "startup error %d\n", retval);
goto err_hcd_driver_start;
}
/* starting here, usbcore will pay attention to the shared HCD roothub */
shared_hcd = hcd->shared_hcd;
if (!usb_hcd_is_primary_hcd(hcd) && shared_hcd && HCD_DEFER_RH_REGISTER(shared_hcd)) {
retval = register_root_hub(shared_hcd);
if (retval != 0)
goto err_register_root_hub;
if (shared_hcd->uses_new_polling && HCD_POLL_RH(shared_hcd))
usb_hcd_poll_rh_status(shared_hcd);
}
/* starting here, usbcore will pay attention to this root hub */
if (!HCD_DEFER_RH_REGISTER(hcd)) {
retval = register_root_hub(hcd);
if (retval != 0)
goto err_register_root_hub;
if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
usb_hcd_poll_rh_status(hcd);
}
return retval;
err_register_root_hub:
usb_stop_hcd(hcd);
err_hcd_driver_start:
if (usb_hcd_is_primary_hcd(hcd) && hcd->irq > 0)
free_irq(irqnum, hcd);
err_request_irq:
err_hcd_driver_setup:
err_set_rh_speed:
usb_put_invalidate_rhdev(hcd);
err_allocate_root_hub:
usb_deregister_bus(&hcd->self);
err_register_bus:
hcd_buffer_destroy(hcd);
err_create_buf:
usb_phy_roothub_power_off(hcd->phy_roothub);
err_usb_phy_roothub_power_on:
usb_phy_roothub_exit(hcd->phy_roothub);
return retval;
}
EXPORT_SYMBOL_GPL(usb_add_hcd);
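/*
 * Illustrative sketch (not part of the original file): the usual
 * bus-glue probe sequence around usb_create_hcd()/usb_add_hcd(). The
 * hc_driver, resources and error handling are driver specific; the
 * names below are hypothetical.
 */
static inline int example_hcd_probe(struct platform_device *pdev,
				    const struct hc_driver *example_hc_driver)
{
	struct usb_hcd *hcd;
	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	hcd = usb_create_hcd(example_hc_driver, &pdev->dev,
			     dev_name(&pdev->dev));
	if (!hcd)
		return -ENOMEM;

	/* ... map registers, set hcd->regs/rsrc_start/rsrc_len ... */

	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (ret)
		usb_put_hcd(hcd);	/* usb_remove_hcd() undoes success */
	return ret;
}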
/**
* usb_remove_hcd - shutdown processing for generic HCDs
* @hcd: the usb_hcd structure to remove
*
* Context: task context, might sleep.
*
* Disconnects the root hub, then reverses the effects of usb_add_hcd(),
* invoking the HCD's stop() method.
*/
void usb_remove_hcd(struct usb_hcd *hcd)
{
struct usb_device *rhdev;
bool rh_registered;
if (!hcd) {
pr_debug("%s: hcd is NULL\n", __func__);
return;
}
rhdev = hcd->self.root_hub;
dev_info(hcd->self.controller, "remove, state %x\n", hcd->state);
usb_get_dev(rhdev);
clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
if (HC_IS_RUNNING (hcd->state))
hcd->state = HC_STATE_QUIESCING;
dev_dbg(hcd->self.controller, "roothub graceful disconnect\n");
spin_lock_irq (&hcd_root_hub_lock);
rh_registered = hcd->rh_registered;
hcd->rh_registered = 0;
spin_unlock_irq (&hcd_root_hub_lock);
#ifdef CONFIG_PM
cancel_work_sync(&hcd->wakeup_work);
#endif
cancel_work_sync(&hcd->died_work);
mutex_lock(&usb_bus_idr_lock);
if (rh_registered)
usb_disconnect(&rhdev); /* Sets rhdev to NULL */
mutex_unlock(&usb_bus_idr_lock);
/*
* tasklet_kill() isn't needed here because:
* - driver's disconnect() called from usb_disconnect() should
* make sure its URBs are completed during the disconnect()
* callback
*
	 * - it is too late to run complete() here, since the driver may
	 *   already have been removed
*/
/* Prevent any more root-hub status calls from the timer.
* The HCD might still restart the timer (if a port status change
* interrupt occurs), but usb_hcd_poll_rh_status() won't invoke
* the hub_status_data() callback.
*/
usb_stop_hcd(hcd);
if (usb_hcd_is_primary_hcd(hcd)) {
if (hcd->irq > 0)
free_irq(hcd->irq, hcd);
}
usb_deregister_bus(&hcd->self);
hcd_buffer_destroy(hcd);
usb_phy_roothub_power_off(hcd->phy_roothub);
usb_phy_roothub_exit(hcd->phy_roothub);
usb_put_invalidate_rhdev(hcd);
hcd->flags = 0;
}
EXPORT_SYMBOL_GPL(usb_remove_hcd);
void
usb_hcd_platform_shutdown(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
/* No need for pm_runtime_put(), we're shutting down */
pm_runtime_get_sync(&dev->dev);
if (hcd->driver->shutdown)
hcd->driver->shutdown(hcd);
}
EXPORT_SYMBOL_GPL(usb_hcd_platform_shutdown);
int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr,
dma_addr_t dma, size_t size)
{
int err;
void *local_mem;
hcd->localmem_pool = devm_gen_pool_create(hcd->self.sysdev, 4,
dev_to_node(hcd->self.sysdev),
dev_name(hcd->self.sysdev));
if (IS_ERR(hcd->localmem_pool))
return PTR_ERR(hcd->localmem_pool);
/*
* if a physical SRAM address was passed, map it, otherwise
* allocate system memory as a buffer.
*/
if (phys_addr)
local_mem = devm_memremap(hcd->self.sysdev, phys_addr,
size, MEMREMAP_WC);
else
local_mem = dmam_alloc_attrs(hcd->self.sysdev, size, &dma,
GFP_KERNEL,
DMA_ATTR_WRITE_COMBINE);
if (IS_ERR_OR_NULL(local_mem)) {
if (!local_mem)
return -ENOMEM;
return PTR_ERR(local_mem);
}
/*
* Here we pass a dma_addr_t but the arg type is a phys_addr_t.
* It's not backed by system memory and thus there's no kernel mapping
* for it.
*/
err = gen_pool_add_virt(hcd->localmem_pool, (unsigned long)local_mem,
dma, size, dev_to_node(hcd->self.sysdev));
if (err < 0) {
dev_err(hcd->self.sysdev, "gen_pool_add_virt failed with %d\n",
err);
return err;
}
return 0;
}
EXPORT_SYMBOL_GPL(usb_hcd_setup_local_mem);
/*-------------------------------------------------------------------------*/
#if IS_ENABLED(CONFIG_USB_MON)
const struct usb_mon_operations *mon_ops;
/*
* The registration is unlocked.
* We do it this way because we do not want to lock in hot paths.
*
* Notice that the code is minimally error-proof. Because usbmon needs
* symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
*/
int usb_mon_register(const struct usb_mon_operations *ops)
{
if (mon_ops)
return -EBUSY;
mon_ops = ops;
mb();
return 0;
}
EXPORT_SYMBOL_GPL (usb_mon_register);
void usb_mon_deregister (void)
{
if (mon_ops == NULL) {
printk(KERN_ERR "USB: monitor was not registered\n");
return;
}
mon_ops = NULL;
mb();
}
EXPORT_SYMBOL_GPL (usb_mon_deregister);
#endif /* IS_ENABLED(CONFIG_USB_MON) */
| linux-master | drivers/usb/core/hcd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* message.c - synchronous message handling
*
* Released under the GPLv2 only.
*/
#include <linux/acpi.h>
#include <linux/pci.h> /* for scatterlist macros */
#include <linux/usb.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/timer.h>
#include <linux/ctype.h>
#include <linux/nls.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/usb/cdc.h>
#include <linux/usb/quirks.h>
#include <linux/usb/hcd.h> /* for usbcore internals */
#include <linux/usb/of.h>
#include <asm/byteorder.h>
#include "usb.h"
static void cancel_async_set_config(struct usb_device *udev);
struct api_context {
struct completion done;
int status;
};
static void usb_api_blocking_completion(struct urb *urb)
{
struct api_context *ctx = urb->context;
ctx->status = urb->status;
complete(&ctx->done);
}
/*
* Starts urb and waits for completion or timeout. Note that this call
* is NOT interruptible. Many device driver i/o requests should be
* interruptible and therefore these drivers should implement their
* own interruptible routines.
*/
static int usb_start_wait_urb(struct urb *urb, int timeout, int *actual_length)
{
struct api_context ctx;
unsigned long expire;
int retval;
init_completion(&ctx.done);
urb->context = &ctx;
urb->actual_length = 0;
retval = usb_submit_urb(urb, GFP_NOIO);
if (unlikely(retval))
goto out;
expire = timeout ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT;
if (!wait_for_completion_timeout(&ctx.done, expire)) {
usb_kill_urb(urb);
retval = (ctx.status == -ENOENT ? -ETIMEDOUT : ctx.status);
dev_dbg(&urb->dev->dev,
"%s timed out on ep%d%s len=%u/%u\n",
current->comm,
usb_endpoint_num(&urb->ep->desc),
usb_urb_dir_in(urb) ? "in" : "out",
urb->actual_length,
urb->transfer_buffer_length);
} else
retval = ctx.status;
out:
if (actual_length)
*actual_length = urb->actual_length;
usb_free_urb(urb);
return retval;
}
/*-------------------------------------------------------------------*/
/* returns status (negative) or length (positive) */
static int usb_internal_control_msg(struct usb_device *usb_dev,
unsigned int pipe,
struct usb_ctrlrequest *cmd,
void *data, int len, int timeout)
{
struct urb *urb;
int retv;
int length;
urb = usb_alloc_urb(0, GFP_NOIO);
if (!urb)
return -ENOMEM;
usb_fill_control_urb(urb, usb_dev, pipe, (unsigned char *)cmd, data,
len, usb_api_blocking_completion, NULL);
retv = usb_start_wait_urb(urb, timeout, &length);
if (retv < 0)
return retv;
else
return length;
}
/**
* usb_control_msg - Builds a control urb, sends it off and waits for completion
* @dev: pointer to the usb device to send the message to
* @pipe: endpoint "pipe" to send the message to
* @request: USB message request value
* @requesttype: USB message request type value
* @value: USB message value
* @index: USB message index value
* @data: pointer to the data to send
* @size: length in bytes of the data to send
* @timeout: time in msecs to wait for the message to complete before timing
* out (if 0 the wait is forever)
*
* Context: task context, might sleep.
*
* This function sends a simple control message to a specified endpoint and
* waits for the message to complete, or timeout.
*
* Don't use this function from within an interrupt context. If you need
* an asynchronous message, or need to send a message from within interrupt
* context, use usb_submit_urb(). If a thread in your driver uses this call,
* make sure your disconnect() method can wait for it to complete. Since you
* don't have a handle on the URB used, you can't cancel the request.
*
* Return: If successful, the number of bytes transferred. Otherwise, a negative
* error number.
*/
int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
__u8 requesttype, __u16 value, __u16 index, void *data,
__u16 size, int timeout)
{
struct usb_ctrlrequest *dr;
int ret;
dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
if (!dr)
return -ENOMEM;
dr->bRequestType = requesttype;
dr->bRequest = request;
dr->wValue = cpu_to_le16(value);
dr->wIndex = cpu_to_le16(index);
dr->wLength = cpu_to_le16(size);
ret = usb_internal_control_msg(dev, pipe, dr, data, size, timeout);
/* Linger a bit, prior to the next control message. */
if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG)
msleep(200);
kfree(dr);
return ret;
}
EXPORT_SYMBOL_GPL(usb_control_msg);
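/*
 * Illustrative sketch: a typical vendor-specific control-IN transfer using
 * usb_control_msg().  Note that the transfer buffer must be DMA-able
 * (kmalloc'ed, never on the stack).  The request value 0x01 and the 8-byte
 * reply size are hypothetical.
 */
static int __maybe_unused example_vendor_read(struct usb_device *udev)
{
	u8 *buf;
	int ret;

	buf = kmalloc(8, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x01,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, 0, buf, 8, USB_CTRL_GET_TIMEOUT);
	if (ret < 0)
		dev_err(&udev->dev, "vendor read failed: %d\n", ret);
	/* on success, ret is the number of bytes actually transferred */

	kfree(buf);
	return ret < 0 ? ret : 0;
}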
/**
* usb_control_msg_send - Builds a control "send" message, sends it off and waits for completion
* @dev: pointer to the usb device to send the message to
* @endpoint: endpoint to send the message to
* @request: USB message request value
* @requesttype: USB message request type value
* @value: USB message value
* @index: USB message index value
* @driver_data: pointer to the data to send
* @size: length in bytes of the data to send
* @timeout: time in msecs to wait for the message to complete before timing
* out (if 0 the wait is forever)
* @memflags: the flags for memory allocation for buffers
*
 * Context: task context, might sleep.
*
* This function sends a control message to a specified endpoint that is not
* expected to fill in a response (i.e. a "send message") and waits for the
* message to complete, or timeout.
*
* Do not use this function from within an interrupt context. If you need
* an asynchronous message, or need to send a message from within interrupt
* context, use usb_submit_urb(). If a thread in your driver uses this call,
* make sure your disconnect() method can wait for it to complete. Since you
* don't have a handle on the URB used, you can't cancel the request.
*
 * The data pointer may point to the stack, or anywhere else, as the data is
 * copied internally and will not be modified. This avoids the restriction
 * that usb_control_msg() has, where the data pointer must point to
 * dynamically allocated memory (i.e. memory that can be successfully DMAed
 * to a device).
*
 * Return: If successful, 0 is returned. Otherwise, a negative error number.
*/
int usb_control_msg_send(struct usb_device *dev, __u8 endpoint, __u8 request,
__u8 requesttype, __u16 value, __u16 index,
const void *driver_data, __u16 size, int timeout,
gfp_t memflags)
{
unsigned int pipe = usb_sndctrlpipe(dev, endpoint);
int ret;
u8 *data = NULL;
if (size) {
data = kmemdup(driver_data, size, memflags);
if (!data)
return -ENOMEM;
}
ret = usb_control_msg(dev, pipe, request, requesttype, value, index,
data, size, timeout);
kfree(data);
if (ret < 0)
return ret;
return 0;
}
EXPORT_SYMBOL_GPL(usb_control_msg_send);
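/*
 * Illustrative sketch: unlike usb_control_msg(), the payload passed to
 * usb_control_msg_send() may live on the stack because the helper copies
 * it internally.  The request value 0x02 and command bytes are hypothetical.
 */
static int __maybe_unused example_vendor_write(struct usb_device *udev)
{
	const u8 cmd[2] = { 0xab, 0xcd };	/* hypothetical command */

	return usb_control_msg_send(udev, 0, 0x02,
				    USB_DIR_OUT | USB_TYPE_VENDOR |
				    USB_RECIP_DEVICE,
				    0, 0, cmd, sizeof(cmd),
				    USB_CTRL_SET_TIMEOUT, GFP_KERNEL);
}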
/**
* usb_control_msg_recv - Builds a control "receive" message, sends it off and waits for completion
* @dev: pointer to the usb device to send the message to
* @endpoint: endpoint to send the message to
* @request: USB message request value
* @requesttype: USB message request type value
* @value: USB message value
* @index: USB message index value
* @driver_data: pointer to the data to be filled in by the message
* @size: length in bytes of the data to be received
* @timeout: time in msecs to wait for the message to complete before timing
* out (if 0 the wait is forever)
* @memflags: the flags for memory allocation for buffers
*
 * Context: task context, might sleep.
*
* This function sends a control message to a specified endpoint that is
* expected to fill in a response (i.e. a "receive message") and waits for the
* message to complete, or timeout.
*
* Do not use this function from within an interrupt context. If you need
* an asynchronous message, or need to send a message from within interrupt
* context, use usb_submit_urb(). If a thread in your driver uses this call,
* make sure your disconnect() method can wait for it to complete. Since you
* don't have a handle on the URB used, you can't cancel the request.
*
 * The data pointer may point to the stack, or anywhere else that can be
 * successfully written to. This function does not have the restriction that
 * usb_control_msg() has, where the data pointer must point to dynamically
 * allocated memory (i.e. memory that can be successfully DMAed to a
 * device).
*
* The "whole" message must be properly received from the device in order for
* this function to be successful. If a device returns less than the expected
* amount of data, then the function will fail. Do not use this for messages
* where a variable amount of data might be returned.
*
 * Return: If successful, 0 is returned. Otherwise, a negative error number.
*/
int usb_control_msg_recv(struct usb_device *dev, __u8 endpoint, __u8 request,
__u8 requesttype, __u16 value, __u16 index,
void *driver_data, __u16 size, int timeout,
gfp_t memflags)
{
unsigned int pipe = usb_rcvctrlpipe(dev, endpoint);
int ret;
u8 *data;
if (!size || !driver_data)
return -EINVAL;
data = kmalloc(size, memflags);
if (!data)
return -ENOMEM;
ret = usb_control_msg(dev, pipe, request, requesttype, value, index,
data, size, timeout);
if (ret < 0)
goto exit;
if (ret == size) {
memcpy(driver_data, data, size);
ret = 0;
} else {
ret = -EREMOTEIO;
}
exit:
kfree(data);
return ret;
}
EXPORT_SYMBOL_GPL(usb_control_msg_recv);
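/*
 * Illustrative sketch: usb_control_msg_recv() fills a caller-provided
 * buffer (stack storage is fine) and fails with -EREMOTEIO on a short
 * reply, so it only suits fixed-size responses.  The request value 0x03
 * is hypothetical.
 */
static int __maybe_unused example_vendor_status(struct usb_device *udev,
						u16 *result)
{
	__le16 reply;
	int ret;

	ret = usb_control_msg_recv(udev, 0, 0x03,
				   USB_DIR_IN | USB_TYPE_VENDOR |
				   USB_RECIP_DEVICE,
				   0, 0, &reply, sizeof(reply),
				   USB_CTRL_GET_TIMEOUT, GFP_KERNEL);
	if (ret)
		return ret;

	*result = le16_to_cpu(reply);
	return 0;
}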
/**
* usb_interrupt_msg - Builds an interrupt urb, sends it off and waits for completion
* @usb_dev: pointer to the usb device to send the message to
* @pipe: endpoint "pipe" to send the message to
* @data: pointer to the data to send
* @len: length in bytes of the data to send
* @actual_length: pointer to a location to put the actual length transferred
* in bytes
* @timeout: time in msecs to wait for the message to complete before
* timing out (if 0 the wait is forever)
*
* Context: task context, might sleep.
*
* This function sends a simple interrupt message to a specified endpoint and
* waits for the message to complete, or timeout.
*
* Don't use this function from within an interrupt context. If you need
* an asynchronous message, or need to send a message from within interrupt
 * context, use usb_submit_urb(). If a thread in your driver uses this call,
* make sure your disconnect() method can wait for it to complete. Since you
* don't have a handle on the URB used, you can't cancel the request.
*
* Return:
* If successful, 0. Otherwise a negative error number. The number of actual
* bytes transferred will be stored in the @actual_length parameter.
*/
int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
void *data, int len, int *actual_length, int timeout)
{
return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
}
EXPORT_SYMBOL_GPL(usb_interrupt_msg);
/**
* usb_bulk_msg - Builds a bulk urb, sends it off and waits for completion
* @usb_dev: pointer to the usb device to send the message to
* @pipe: endpoint "pipe" to send the message to
* @data: pointer to the data to send
* @len: length in bytes of the data to send
* @actual_length: pointer to a location to put the actual length transferred
* in bytes
* @timeout: time in msecs to wait for the message to complete before
* timing out (if 0 the wait is forever)
*
* Context: task context, might sleep.
*
* This function sends a simple bulk message to a specified endpoint
* and waits for the message to complete, or timeout.
*
* Don't use this function from within an interrupt context. If you need
* an asynchronous message, or need to send a message from within interrupt
 * context, use usb_submit_urb(). If a thread in your driver uses this call,
* make sure your disconnect() method can wait for it to complete. Since you
* don't have a handle on the URB used, you can't cancel the request.
*
 * Historically there was no usb_interrupt_msg() and no USBDEVFS_INTERRUPT
 * ioctl, so users abused this routine by using it to submit URBs for
 * interrupt endpoints. We will take the liberty of creating an interrupt URB
 * (with the default interval) if the target is an interrupt endpoint.
*
* Return:
* If successful, 0. Otherwise a negative error number. The number of actual
* bytes transferred will be stored in the @actual_length parameter.
*
*/
int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
void *data, int len, int *actual_length, int timeout)
{
struct urb *urb;
struct usb_host_endpoint *ep;
ep = usb_pipe_endpoint(usb_dev, pipe);
if (!ep || len < 0)
return -EINVAL;
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb)
return -ENOMEM;
if ((ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
USB_ENDPOINT_XFER_INT) {
pipe = (pipe & ~(3 << 30)) | (PIPE_INTERRUPT << 30);
usb_fill_int_urb(urb, usb_dev, pipe, data, len,
usb_api_blocking_completion, NULL,
ep->desc.bInterval);
} else
usb_fill_bulk_urb(urb, usb_dev, pipe, data, len,
usb_api_blocking_completion, NULL);
return usb_start_wait_urb(urb, timeout, actual_length);
}
EXPORT_SYMBOL_GPL(usb_bulk_msg);
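/*
 * Illustrative sketch: a synchronous bulk-out write.  A short transfer
 * shows up via @actual_length rather than as an error.  Bulk-out
 * endpoint 2 and the 5 second timeout are hypothetical; @data must be
 * DMA-able (e.g. kmalloc'ed by the caller).
 */
static int __maybe_unused example_bulk_write(struct usb_device *udev,
					     void *data, int len)
{
	int actual = 0;
	int ret;

	ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 2),
			   data, len, &actual, 5000);
	if (ret)
		return ret;

	return actual == len ? 0 : -EIO;
}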
/*-------------------------------------------------------------------*/
static void sg_clean(struct usb_sg_request *io)
{
if (io->urbs) {
while (io->entries--)
usb_free_urb(io->urbs[io->entries]);
kfree(io->urbs);
io->urbs = NULL;
}
io->dev = NULL;
}
static void sg_complete(struct urb *urb)
{
unsigned long flags;
struct usb_sg_request *io = urb->context;
int status = urb->status;
spin_lock_irqsave(&io->lock, flags);
/* In 2.5 we require hcds' endpoint queues not to progress after fault
* reports, until the completion callback (this!) returns. That lets
* device driver code (like this routine) unlink queued urbs first,
* if it needs to, since the HC won't work on them at all. So it's
* not possible for page N+1 to overwrite page N, and so on.
*
* That's only for "hard" faults; "soft" faults (unlinks) sometimes
* complete before the HCD can get requests away from hardware,
* though never during cleanup after a hard fault.
*/
if (io->status
&& (io->status != -ECONNRESET
|| status != -ECONNRESET)
&& urb->actual_length) {
dev_err(io->dev->bus->controller,
"dev %s ep%d%s scatterlist error %d/%d\n",
io->dev->devpath,
usb_endpoint_num(&urb->ep->desc),
usb_urb_dir_in(urb) ? "in" : "out",
status, io->status);
/* BUG (); */
}
if (io->status == 0 && status && status != -ECONNRESET) {
int i, found, retval;
io->status = status;
/* the previous urbs, and this one, completed already.
* unlink pending urbs so they won't rx/tx bad data.
* careful: unlink can sometimes be synchronous...
*/
spin_unlock_irqrestore(&io->lock, flags);
for (i = 0, found = 0; i < io->entries; i++) {
if (!io->urbs[i])
continue;
if (found) {
usb_block_urb(io->urbs[i]);
retval = usb_unlink_urb(io->urbs[i]);
if (retval != -EINPROGRESS &&
retval != -ENODEV &&
retval != -EBUSY &&
retval != -EIDRM)
dev_err(&io->dev->dev,
"%s, unlink --> %d\n",
__func__, retval);
} else if (urb == io->urbs[i])
found = 1;
}
spin_lock_irqsave(&io->lock, flags);
}
/* on the last completion, signal usb_sg_wait() */
io->bytes += urb->actual_length;
io->count--;
if (!io->count)
complete(&io->complete);
spin_unlock_irqrestore(&io->lock, flags);
}
/**
* usb_sg_init - initializes scatterlist-based bulk/interrupt I/O request
 * @io: request block being initialized. Until usb_sg_wait() returns,
 *	treat this as a pointer to an opaque block of memory.
* @dev: the usb device that will send or receive the data
* @pipe: endpoint "pipe" used to transfer the data
* @period: polling rate for interrupt endpoints, in frames or
* (for high speed endpoints) microframes; ignored for bulk
* @sg: scatterlist entries
* @nents: how many entries in the scatterlist
* @length: how many bytes to send from the scatterlist, or zero to
* send every byte identified in the list.
* @mem_flags: SLAB_* flags affecting memory allocations in this call
*
* This initializes a scatter/gather request, allocating resources such as
* I/O mappings and urb memory (except maybe memory used by USB controller
* drivers).
*
* The request must be issued using usb_sg_wait(), which waits for the I/O to
* complete (or to be canceled) and then cleans up all resources allocated by
* usb_sg_init().
*
* The request may be canceled with usb_sg_cancel(), either before or after
* usb_sg_wait() is called.
*
* Return: Zero for success, else a negative errno value.
*/
int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
unsigned pipe, unsigned period, struct scatterlist *sg,
int nents, size_t length, gfp_t mem_flags)
{
int i;
int urb_flags;
int use_sg;
if (!io || !dev || !sg
|| usb_pipecontrol(pipe)
|| usb_pipeisoc(pipe)
|| nents <= 0)
return -EINVAL;
spin_lock_init(&io->lock);
io->dev = dev;
io->pipe = pipe;
if (dev->bus->sg_tablesize > 0) {
use_sg = true;
io->entries = 1;
} else {
use_sg = false;
io->entries = nents;
}
/* initialize all the urbs we'll use */
io->urbs = kmalloc_array(io->entries, sizeof(*io->urbs), mem_flags);
if (!io->urbs)
goto nomem;
urb_flags = URB_NO_INTERRUPT;
if (usb_pipein(pipe))
urb_flags |= URB_SHORT_NOT_OK;
for_each_sg(sg, sg, io->entries, i) {
struct urb *urb;
unsigned len;
urb = usb_alloc_urb(0, mem_flags);
if (!urb) {
io->entries = i;
goto nomem;
}
io->urbs[i] = urb;
urb->dev = NULL;
urb->pipe = pipe;
urb->interval = period;
urb->transfer_flags = urb_flags;
urb->complete = sg_complete;
urb->context = io;
urb->sg = sg;
if (use_sg) {
/* There is no single transfer buffer */
urb->transfer_buffer = NULL;
urb->num_sgs = nents;
/* A length of zero means transfer the whole sg list */
len = length;
if (len == 0) {
struct scatterlist *sg2;
int j;
for_each_sg(sg, sg2, nents, j)
len += sg2->length;
}
} else {
/*
* Some systems can't use DMA; they use PIO instead.
* For their sakes, transfer_buffer is set whenever
* possible.
*/
if (!PageHighMem(sg_page(sg)))
urb->transfer_buffer = sg_virt(sg);
else
urb->transfer_buffer = NULL;
len = sg->length;
if (length) {
len = min_t(size_t, len, length);
length -= len;
if (length == 0)
io->entries = i + 1;
}
}
urb->transfer_buffer_length = len;
}
io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;
/* transaction state */
io->count = io->entries;
io->status = 0;
io->bytes = 0;
init_completion(&io->complete);
return 0;
nomem:
sg_clean(io);
return -ENOMEM;
}
EXPORT_SYMBOL_GPL(usb_sg_init);
/**
* usb_sg_wait - synchronously execute scatter/gather request
* @io: request block handle, as initialized with usb_sg_init().
 *	Some fields become accessible when this call returns.
*
* Context: task context, might sleep.
*
* This function blocks until the specified I/O operation completes. It
* leverages the grouping of the related I/O requests to get good transfer
* rates, by queueing the requests. At higher speeds, such queuing can
* significantly improve USB throughput.
*
* There are three kinds of completion for this function.
*
* (1) success, where io->status is zero. The number of io->bytes
* transferred is as requested.
* (2) error, where io->status is a negative errno value. The number
* of io->bytes transferred before the error is usually less
* than requested, and can be nonzero.
* (3) cancellation, a type of error with status -ECONNRESET that
* is initiated by usb_sg_cancel().
*
* When this function returns, all memory allocated through usb_sg_init() or
* this call will have been freed. The request block parameter may still be
* passed to usb_sg_cancel(), or it may be freed. It could also be
* reinitialized and then reused.
*
* Data Transfer Rates:
*
* Bulk transfers are valid for full or high speed endpoints.
* The best full speed data rate is 19 packets of 64 bytes each
* per frame, or 1216 bytes per millisecond.
* The best high speed data rate is 13 packets of 512 bytes each
* per microframe, or 52 KBytes per millisecond.
*
* The reason to use interrupt transfers through this API would most likely
* be to reserve high speed bandwidth, where up to 24 KBytes per millisecond
* could be transferred. That capability is less useful for low or full
* speed interrupt endpoints, which allow at most one packet per millisecond,
* of at most 8 or 64 bytes (respectively).
*
* It is not necessary to call this function to reserve bandwidth for devices
* under an xHCI host controller, as the bandwidth is reserved when the
* configuration or interface alt setting is selected.
*/
void usb_sg_wait(struct usb_sg_request *io)
{
int i;
int entries = io->entries;
/* queue the urbs. */
spin_lock_irq(&io->lock);
i = 0;
while (i < entries && !io->status) {
int retval;
io->urbs[i]->dev = io->dev;
spin_unlock_irq(&io->lock);
retval = usb_submit_urb(io->urbs[i], GFP_NOIO);
switch (retval) {
		/* maybe retrying will recover */
case -ENXIO: /* hc didn't queue this one */
case -EAGAIN:
case -ENOMEM:
retval = 0;
yield();
break;
/* no error? continue immediately.
*
* NOTE: to work better with UHCI (4K I/O buffer may
* need 3K of TDs) it may be good to limit how many
* URBs are queued at once; N milliseconds?
*/
case 0:
++i;
cpu_relax();
break;
/* fail any uncompleted urbs */
default:
io->urbs[i]->status = retval;
dev_dbg(&io->dev->dev, "%s, submit --> %d\n",
__func__, retval);
usb_sg_cancel(io);
}
spin_lock_irq(&io->lock);
if (retval && (io->status == 0 || io->status == -ECONNRESET))
io->status = retval;
}
io->count -= entries - i;
if (io->count == 0)
complete(&io->complete);
spin_unlock_irq(&io->lock);
/* OK, yes, this could be packaged as non-blocking.
* So could the submit loop above ... but it's easier to
* solve neither problem than to solve both!
*/
wait_for_completion(&io->complete);
sg_clean(io);
}
EXPORT_SYMBOL_GPL(usb_sg_wait);
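/*
 * Illustrative sketch of the scatter/gather flow: initialize the request
 * with usb_sg_init(), block in usb_sg_wait(), then inspect io.status and
 * io.bytes.  Bulk-in endpoint 1 is hypothetical and the scatterlist is
 * assumed to have been set up by the caller.
 */
static int __maybe_unused example_sg_read(struct usb_device *udev,
					  struct scatterlist *sg, int nents)
{
	struct usb_sg_request io;
	int ret;

	ret = usb_sg_init(&io, udev, usb_rcvbulkpipe(udev, 1), 0,
			  sg, nents, 0 /* transfer the whole list */,
			  GFP_KERNEL);
	if (ret)
		return ret;

	usb_sg_wait(&io);	/* all resources are freed on return */
	return io.status;	/* 0, a negative errno, or -ECONNRESET */
}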
/**
* usb_sg_cancel - stop scatter/gather i/o issued by usb_sg_wait()
* @io: request block, initialized with usb_sg_init()
*
 * This stops a request after it has been started by usb_sg_wait().
 * It can also prevent one initialized by usb_sg_init() from starting,
 * so that call just frees resources allocated to the request.
*/
void usb_sg_cancel(struct usb_sg_request *io)
{
unsigned long flags;
int i, retval;
spin_lock_irqsave(&io->lock, flags);
if (io->status || io->count == 0) {
spin_unlock_irqrestore(&io->lock, flags);
return;
}
/* shut everything down */
io->status = -ECONNRESET;
io->count++; /* Keep the request alive until we're done */
spin_unlock_irqrestore(&io->lock, flags);
for (i = io->entries - 1; i >= 0; --i) {
usb_block_urb(io->urbs[i]);
retval = usb_unlink_urb(io->urbs[i]);
if (retval != -EINPROGRESS
&& retval != -ENODEV
&& retval != -EBUSY
&& retval != -EIDRM)
dev_warn(&io->dev->dev, "%s, unlink --> %d\n",
__func__, retval);
}
spin_lock_irqsave(&io->lock, flags);
io->count--;
if (!io->count)
complete(&io->complete);
spin_unlock_irqrestore(&io->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_sg_cancel);
/*-------------------------------------------------------------------*/
/**
* usb_get_descriptor - issues a generic GET_DESCRIPTOR request
* @dev: the device whose descriptor is being retrieved
* @type: the descriptor type (USB_DT_*)
* @index: the number of the descriptor
* @buf: where to put the descriptor
* @size: how big is "buf"?
*
* Context: task context, might sleep.
*
* Gets a USB descriptor. Convenience functions exist to simplify
* getting some types of descriptors. Use
* usb_get_string() or usb_string() for USB_DT_STRING.
* Device (USB_DT_DEVICE) and configuration descriptors (USB_DT_CONFIG)
* are part of the device structure.
* In addition to a number of USB-standard descriptors, some
* devices also use class-specific or vendor-specific descriptors.
*
* This call is synchronous, and may not be used in an interrupt context.
*
* Return: The number of bytes received on success, or else the status code
* returned by the underlying usb_control_msg() call.
*/
int usb_get_descriptor(struct usb_device *dev, unsigned char type,
unsigned char index, void *buf, int size)
{
int i;
int result;
if (size <= 0) /* No point in asking for no data */
return -EINVAL;
memset(buf, 0, size); /* Make sure we parse really received data */
for (i = 0; i < 3; ++i) {
/* retry on length 0 or error; some devices are flakey */
result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
(type << 8) + index, 0, buf, size,
USB_CTRL_GET_TIMEOUT);
if (result <= 0 && result != -ETIMEDOUT)
continue;
if (result > 1 && ((u8 *)buf)[1] != type) {
result = -ENODATA;
continue;
}
break;
}
return result;
}
EXPORT_SYMBOL_GPL(usb_get_descriptor);
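/*
 * Illustrative sketch: fetching the device descriptor by hand with
 * usb_get_descriptor().  Real drivers should normally use the copy
 * already cached in udev->descriptor instead.
 */
static int __maybe_unused example_read_device_descriptor(struct usb_device *udev)
{
	struct usb_device_descriptor *desc;
	int ret;

	desc = kmalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	ret = usb_get_descriptor(udev, USB_DT_DEVICE, 0, desc, sizeof(*desc));
	if (ret >= 0 && ret != sizeof(*desc))
		ret = -EMSGSIZE;	/* short descriptor */

	kfree(desc);
	return ret < 0 ? ret : 0;
}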
/**
* usb_get_string - gets a string descriptor
* @dev: the device whose string descriptor is being retrieved
* @langid: code for language chosen (from string descriptor zero)
* @index: the number of the descriptor
* @buf: where to put the string
* @size: how big is "buf"?
*
* Context: task context, might sleep.
*
* Retrieves a string, encoded using UTF-16LE (Unicode, 16 bits per character,
* in little-endian byte order).
* The usb_string() function will often be a convenient way to turn
* these strings into kernel-printable form.
*
* Strings may be referenced in device, configuration, interface, or other
* descriptors, and could also be used in vendor-specific ways.
*
* This call is synchronous, and may not be used in an interrupt context.
*
* Return: The number of bytes received on success, or else the status code
* returned by the underlying usb_control_msg() call.
*/
static int usb_get_string(struct usb_device *dev, unsigned short langid,
unsigned char index, void *buf, int size)
{
int i;
int result;
if (size <= 0) /* No point in asking for no data */
return -EINVAL;
for (i = 0; i < 3; ++i) {
/* retry on length 0 or stall; some devices are flakey */
result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
(USB_DT_STRING << 8) + index, langid, buf, size,
USB_CTRL_GET_TIMEOUT);
if (result == 0 || result == -EPIPE)
continue;
if (result > 1 && ((u8 *) buf)[1] != USB_DT_STRING) {
result = -ENODATA;
continue;
}
break;
}
return result;
}
static void usb_try_string_workarounds(unsigned char *buf, int *length)
{
int newlength, oldlength = *length;
for (newlength = 2; newlength + 1 < oldlength; newlength += 2)
if (!isprint(buf[newlength]) || buf[newlength + 1])
break;
if (newlength > 2) {
buf[0] = newlength;
*length = newlength;
}
}
static int usb_string_sub(struct usb_device *dev, unsigned int langid,
unsigned int index, unsigned char *buf)
{
int rc;
/* Try to read the string descriptor by asking for the maximum
* possible number of bytes */
if (dev->quirks & USB_QUIRK_STRING_FETCH_255)
rc = -EIO;
else
rc = usb_get_string(dev, langid, index, buf, 255);
/* If that failed try to read the descriptor length, then
* ask for just that many bytes */
if (rc < 2) {
rc = usb_get_string(dev, langid, index, buf, 2);
if (rc == 2)
rc = usb_get_string(dev, langid, index, buf, buf[0]);
}
if (rc >= 2) {
if (!buf[0] && !buf[1])
usb_try_string_workarounds(buf, &rc);
/* There might be extra junk at the end of the descriptor */
if (buf[0] < rc)
rc = buf[0];
rc = rc - (rc & 1); /* force a multiple of two */
}
if (rc < 2)
rc = (rc < 0 ? rc : -EINVAL);
return rc;
}
static int usb_get_langid(struct usb_device *dev, unsigned char *tbuf)
{
int err;
if (dev->have_langid)
return 0;
if (dev->string_langid < 0)
return -EPIPE;
err = usb_string_sub(dev, 0, 0, tbuf);
	/* If the string was reported but is malformed, default to English
	 * (0x0409) */
if (err == -ENODATA || (err > 0 && err < 4)) {
dev->string_langid = 0x0409;
dev->have_langid = 1;
dev_err(&dev->dev,
"language id specifier not provided by device, defaulting to English\n");
return 0;
}
	/* In case of all other errors, we assume the device is not able to
	 * deal with strings at all. Set string_langid to -1 in order to
	 * prevent any string from being retrieved from the device */
if (err < 0) {
dev_info(&dev->dev, "string descriptor 0 read error: %d\n",
err);
dev->string_langid = -1;
return -EPIPE;
}
/* always use the first langid listed */
dev->string_langid = tbuf[2] | (tbuf[3] << 8);
dev->have_langid = 1;
dev_dbg(&dev->dev, "default language 0x%04x\n",
dev->string_langid);
return 0;
}
/**
* usb_string - returns UTF-8 version of a string descriptor
* @dev: the device whose string descriptor is being retrieved
* @index: the number of the descriptor
* @buf: where to put the string
* @size: how big is "buf"?
*
* Context: task context, might sleep.
*
* This converts the UTF-16LE encoded strings returned by devices, from
* usb_get_string_descriptor(), to null-terminated UTF-8 encoded ones
* that are more usable in most kernel contexts. Note that this function
* chooses strings in the first language supported by the device.
*
* This call is synchronous, and may not be used in an interrupt context.
*
* Return: length of the string (>= 0) or usb_control_msg status (< 0).
*/
int usb_string(struct usb_device *dev, int index, char *buf, size_t size)
{
unsigned char *tbuf;
int err;
if (dev->state == USB_STATE_SUSPENDED)
return -EHOSTUNREACH;
if (size <= 0 || !buf)
return -EINVAL;
buf[0] = 0;
if (index <= 0 || index >= 256)
return -EINVAL;
tbuf = kmalloc(256, GFP_NOIO);
if (!tbuf)
return -ENOMEM;
err = usb_get_langid(dev, tbuf);
if (err < 0)
goto errout;
err = usb_string_sub(dev, dev->string_langid, index, tbuf);
if (err < 0)
goto errout;
size--; /* leave room for trailing NULL char in output buffer */
err = utf16s_to_utf8s((wchar_t *) &tbuf[2], (err - 2) / 2,
UTF16_LITTLE_ENDIAN, buf, size);
buf[err] = 0;
if (tbuf[1] != USB_DT_STRING)
dev_dbg(&dev->dev,
"wrong descriptor type %02x for string %d (\"%s\")\n",
tbuf[1], index, buf);
errout:
kfree(tbuf);
return err;
}
EXPORT_SYMBOL_GPL(usb_string);
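/*
 * Illustrative sketch: turning the iProduct string index into printable
 * UTF-8.  A zero index means the device provides no such string.
 */
static void __maybe_unused example_print_product(struct usb_device *udev)
{
	char product[128];

	if (udev->descriptor.iProduct > 0 &&
	    usb_string(udev, udev->descriptor.iProduct,
		       product, sizeof(product)) > 0)
		dev_info(&udev->dev, "product: %s\n", product);
}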
/* one UTF-8-encoded 16-bit character has at most three bytes */
#define MAX_USB_STRING_SIZE (127 * 3 + 1)
/**
* usb_cache_string - read a string descriptor and cache it for later use
* @udev: the device whose string descriptor is being read
* @index: the descriptor index
*
* Return: A pointer to a kmalloc'ed buffer containing the descriptor string,
* or %NULL if the index is 0 or the string could not be read.
*/
char *usb_cache_string(struct usb_device *udev, int index)
{
char *buf;
char *smallbuf = NULL;
int len;
if (index <= 0)
return NULL;
buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
if (buf) {
len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
if (len > 0) {
smallbuf = kmalloc(++len, GFP_NOIO);
if (!smallbuf)
return buf;
memcpy(smallbuf, buf, len);
}
kfree(buf);
}
return smallbuf;
}
EXPORT_SYMBOL_GPL(usb_cache_string);
/*
* usb_get_device_descriptor - read the device descriptor
* @udev: the device whose device descriptor should be read
*
* Context: task context, might sleep.
*
* Not exported, only for use by the core. If drivers really want to read
* the device descriptor directly, they can call usb_get_descriptor() with
* type = USB_DT_DEVICE and index = 0.
*
* Returns: a pointer to a dynamically allocated usb_device_descriptor
* structure (which the caller must deallocate), or an ERR_PTR value.
*/
struct usb_device_descriptor *usb_get_device_descriptor(struct usb_device *udev)
{
struct usb_device_descriptor *desc;
int ret;
desc = kmalloc(sizeof(*desc), GFP_NOIO);
if (!desc)
return ERR_PTR(-ENOMEM);
ret = usb_get_descriptor(udev, USB_DT_DEVICE, 0, desc, sizeof(*desc));
if (ret == sizeof(*desc))
return desc;
if (ret >= 0)
ret = -EMSGSIZE;
kfree(desc);
return ERR_PTR(ret);
}
/*
* usb_set_isoch_delay - informs the device of the packet transmit delay
 * @dev: the device to inform of the packet transmit delay
* Context: task context, might sleep
*
* Since this is an optional request, we don't bother if it fails.
*/
int usb_set_isoch_delay(struct usb_device *dev)
{
/* skip hub devices */
if (dev->descriptor.bDeviceClass == USB_CLASS_HUB)
return 0;
/* skip non-SS/non-SSP devices */
if (dev->speed < USB_SPEED_SUPER)
return 0;
return usb_control_msg_send(dev, 0,
USB_REQ_SET_ISOCH_DELAY,
USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
dev->hub_delay, 0, NULL, 0,
USB_CTRL_SET_TIMEOUT,
GFP_NOIO);
}
/**
* usb_get_status - issues a GET_STATUS call
* @dev: the device whose status is being checked
* @recip: USB_RECIP_*; for device, interface, or endpoint
* @type: USB_STATUS_TYPE_*; for standard or PTM status types
* @target: zero (for device), else interface or endpoint number
* @data: pointer to two bytes of bitmap data
*
* Context: task context, might sleep.
*
* Returns device, interface, or endpoint status. Normally only of
* interest to see if the device is self powered, or has enabled the
* remote wakeup facility; or whether a bulk or interrupt endpoint
* is halted ("stalled").
*
* Bits in these status bitmaps are set using the SET_FEATURE request,
* and cleared using the CLEAR_FEATURE request. The usb_clear_halt()
* function should be used to clear halt ("stall") status.
*
* This call is synchronous, and may not be used in an interrupt context.
*
* Returns 0 and the status value in *@data (in host byte order) on success,
* or else the status code from the underlying usb_control_msg() call.
*/
int usb_get_status(struct usb_device *dev, int recip, int type, int target,
void *data)
{
int ret;
void *status;
int length;
switch (type) {
case USB_STATUS_TYPE_STANDARD:
length = 2;
break;
case USB_STATUS_TYPE_PTM:
if (recip != USB_RECIP_DEVICE)
return -EINVAL;
length = 4;
break;
default:
return -EINVAL;
}
status = kmalloc(length, GFP_KERNEL);
if (!status)
return -ENOMEM;
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
USB_REQ_GET_STATUS, USB_DIR_IN | recip, USB_STATUS_TYPE_STANDARD,
target, status, length, USB_CTRL_GET_TIMEOUT);
switch (ret) {
case 4:
if (type != USB_STATUS_TYPE_PTM) {
ret = -EIO;
break;
}
*(u32 *) data = le32_to_cpu(*(__le32 *) status);
ret = 0;
break;
case 2:
if (type != USB_STATUS_TYPE_STANDARD) {
ret = -EIO;
break;
}
*(u16 *) data = le16_to_cpu(*(__le16 *) status);
ret = 0;
break;
default:
ret = -EIO;
}
kfree(status);
return ret;
}
EXPORT_SYMBOL_GPL(usb_get_status);
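/*
 * Illustrative sketch: querying standard device status to see whether the
 * device currently reports itself as self-powered (bit 0 of the bitmap).
 */
static int __maybe_unused example_check_self_powered(struct usb_device *udev,
						     bool *self_powered)
{
	u16 status;
	int ret;

	ret = usb_get_status(udev, USB_RECIP_DEVICE, USB_STATUS_TYPE_STANDARD,
			     0, &status);
	if (ret)
		return ret;

	*self_powered = !!(status & (1 << USB_DEVICE_SELF_POWERED));
	return 0;
}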
/**
* usb_clear_halt - tells device to clear endpoint halt/stall condition
* @dev: device whose endpoint is halted
* @pipe: endpoint "pipe" being cleared
*
* Context: task context, might sleep.
*
* This is used to clear halt conditions for bulk and interrupt endpoints,
* as reported by URB completion status. Endpoints that are halted are
* sometimes referred to as being "stalled". Such endpoints are unable
* to transmit or receive data until the halt status is cleared. Any URBs
* queued for such an endpoint should normally be unlinked by the driver
* before clearing the halt condition, as described in sections 5.7.5
* and 5.8.5 of the USB 2.0 spec.
*
* Note that control and isochronous endpoints don't halt, although control
* endpoints report "protocol stall" (for unsupported requests) using the
* same status code used to report a true stall.
*
* This call is synchronous, and may not be used in an interrupt context.
*
* Return: Zero on success, or else the status code returned by the
* underlying usb_control_msg() call.
*/
int usb_clear_halt(struct usb_device *dev, int pipe)
{
int result;
int endp = usb_pipeendpoint(pipe);
if (usb_pipein(pipe))
endp |= USB_DIR_IN;
/* we don't care if it wasn't halted first. in fact some devices
* (like some ibmcam model 1 units) seem to expect hosts to make
* this request for iso endpoints, which can't halt!
*/
result = usb_control_msg_send(dev, 0,
USB_REQ_CLEAR_FEATURE, USB_RECIP_ENDPOINT,
USB_ENDPOINT_HALT, endp, NULL, 0,
USB_CTRL_SET_TIMEOUT, GFP_NOIO);
/* don't un-halt or force to DATA0 except on success */
if (result)
return result;
/* NOTE: seems like Microsoft and Apple don't bother verifying
* the clear "took", so some devices could lock up if you check...
* such as the Hagiwara FlashGate DUAL. So we won't bother.
*
* NOTE: make sure the logic here doesn't diverge much from
* the copy in usb-storage, for as long as we need two copies.
*/
usb_reset_endpoint(dev, endp);
return 0;
}
EXPORT_SYMBOL_GPL(usb_clear_halt);
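/*
 * Illustrative sketch: the usual recovery path after a bulk URB completes
 * with -EPIPE.  Bulk-in endpoint 1 is hypothetical; any URBs still queued
 * for the endpoint should be unlinked before this call.
 */
static int __maybe_unused example_recover_stall(struct usb_device *udev)
{
	return usb_clear_halt(udev, usb_rcvbulkpipe(udev, 1));
}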
static int create_intf_ep_devs(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
struct usb_host_interface *alt = intf->cur_altsetting;
int i;
if (intf->ep_devs_created || intf->unregistering)
return 0;
for (i = 0; i < alt->desc.bNumEndpoints; ++i)
(void) usb_create_ep_devs(&intf->dev, &alt->endpoint[i], udev);
intf->ep_devs_created = 1;
return 0;
}
static void remove_intf_ep_devs(struct usb_interface *intf)
{
struct usb_host_interface *alt = intf->cur_altsetting;
int i;
if (!intf->ep_devs_created)
return;
for (i = 0; i < alt->desc.bNumEndpoints; ++i)
usb_remove_ep_devs(&alt->endpoint[i]);
intf->ep_devs_created = 0;
}
/**
* usb_disable_endpoint -- Disable an endpoint by address
* @dev: the device whose endpoint is being disabled
* @epaddr: the endpoint's address. Endpoint number for output,
* endpoint number + USB_DIR_IN for input
* @reset_hardware: flag to erase any endpoint state stored in the
* controller hardware
*
* Disables the endpoint for URB submission and nukes all pending URBs.
* If @reset_hardware is set then also deallocates hcd/hardware state
* for the endpoint.
*/
void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr,
bool reset_hardware)
{
unsigned int epnum = epaddr & USB_ENDPOINT_NUMBER_MASK;
struct usb_host_endpoint *ep;
if (!dev)
return;
if (usb_endpoint_out(epaddr)) {
ep = dev->ep_out[epnum];
if (reset_hardware && epnum != 0)
dev->ep_out[epnum] = NULL;
} else {
ep = dev->ep_in[epnum];
if (reset_hardware && epnum != 0)
dev->ep_in[epnum] = NULL;
}
if (ep) {
ep->enabled = 0;
usb_hcd_flush_endpoint(dev, ep);
if (reset_hardware)
usb_hcd_disable_endpoint(dev, ep);
}
}
/**
* usb_reset_endpoint - Reset an endpoint's state.
* @dev: the device whose endpoint is to be reset
* @epaddr: the endpoint's address. Endpoint number for output,
* endpoint number + USB_DIR_IN for input
*
* Resets any host-side endpoint state such as the toggle bit,
* sequence number or current window.
*/
void usb_reset_endpoint(struct usb_device *dev, unsigned int epaddr)
{
unsigned int epnum = epaddr & USB_ENDPOINT_NUMBER_MASK;
struct usb_host_endpoint *ep;
if (usb_endpoint_out(epaddr))
ep = dev->ep_out[epnum];
else
ep = dev->ep_in[epnum];
if (ep)
usb_hcd_reset_endpoint(dev, ep);
}
EXPORT_SYMBOL_GPL(usb_reset_endpoint);
/**
* usb_disable_interface -- Disable all endpoints for an interface
* @dev: the device whose interface is being disabled
* @intf: pointer to the interface descriptor
* @reset_hardware: flag to erase any endpoint state stored in the
* controller hardware
*
* Disables all the endpoints for the interface's current altsetting.
*/
void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf,
bool reset_hardware)
{
struct usb_host_interface *alt = intf->cur_altsetting;
int i;
for (i = 0; i < alt->desc.bNumEndpoints; ++i) {
usb_disable_endpoint(dev,
alt->endpoint[i].desc.bEndpointAddress,
reset_hardware);
}
}
/*
* usb_disable_device_endpoints -- Disable all endpoints for a device
* @dev: the device whose endpoints are being disabled
* @skip_ep0: 0 to disable endpoint 0, 1 to skip it.
*/
static void usb_disable_device_endpoints(struct usb_device *dev, int skip_ep0)
{
struct usb_hcd *hcd = bus_to_hcd(dev->bus);
int i;
if (hcd->driver->check_bandwidth) {
/* First pass: Cancel URBs, leave endpoint pointers intact. */
for (i = skip_ep0; i < 16; ++i) {
usb_disable_endpoint(dev, i, false);
usb_disable_endpoint(dev, i + USB_DIR_IN, false);
}
/* Remove endpoints from the host controller internal state */
mutex_lock(hcd->bandwidth_mutex);
usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
mutex_unlock(hcd->bandwidth_mutex);
}
/* Second pass: remove endpoint pointers */
for (i = skip_ep0; i < 16; ++i) {
usb_disable_endpoint(dev, i, true);
usb_disable_endpoint(dev, i + USB_DIR_IN, true);
}
}
/**
* usb_disable_device - Disable all the endpoints for a USB device
* @dev: the device whose endpoints are being disabled
* @skip_ep0: 0 to disable endpoint 0, 1 to skip it.
*
* Disables all the device's endpoints, potentially including endpoint 0.
 * Deallocates hcd/hardware state for the endpoints (nuking all or most
 * pending urbs) and usbcore state for the interfaces, so that
 * usb_set_configuration() must be called before any interfaces can be used.
*/
void usb_disable_device(struct usb_device *dev, int skip_ep0)
{
int i;
/* getting rid of interfaces will disconnect
* any drivers bound to them (a key side effect)
*/
if (dev->actconfig) {
/*
* FIXME: In order to avoid self-deadlock involving the
* bandwidth_mutex, we have to mark all the interfaces
* before unregistering any of them.
*/
for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++)
dev->actconfig->interface[i]->unregistering = 1;
for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) {
struct usb_interface *interface;
/* remove this interface if it has been registered */
interface = dev->actconfig->interface[i];
if (!device_is_registered(&interface->dev))
continue;
dev_dbg(&dev->dev, "unregistering interface %s\n",
dev_name(&interface->dev));
remove_intf_ep_devs(interface);
device_del(&interface->dev);
}
/* Now that the interfaces are unbound, nobody should
* try to access them.
*/
for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) {
put_device(&dev->actconfig->interface[i]->dev);
dev->actconfig->interface[i] = NULL;
}
usb_disable_usb2_hardware_lpm(dev);
usb_unlocked_disable_lpm(dev);
usb_disable_ltm(dev);
dev->actconfig = NULL;
if (dev->state == USB_STATE_CONFIGURED)
usb_set_device_state(dev, USB_STATE_ADDRESS);
}
dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__,
skip_ep0 ? "non-ep0" : "all");
usb_disable_device_endpoints(dev, skip_ep0);
}
/**
* usb_enable_endpoint - Enable an endpoint for USB communications
* @dev: the device whose interface is being enabled
* @ep: the endpoint
* @reset_ep: flag to reset the endpoint state
*
* Resets the endpoint state if asked, and sets dev->ep_{in,out} pointers.
* For control endpoints, both the input and output sides are handled.
*/
void usb_enable_endpoint(struct usb_device *dev, struct usb_host_endpoint *ep,
bool reset_ep)
{
int epnum = usb_endpoint_num(&ep->desc);
int is_out = usb_endpoint_dir_out(&ep->desc);
int is_control = usb_endpoint_xfer_control(&ep->desc);
if (reset_ep)
usb_hcd_reset_endpoint(dev, ep);
if (is_out || is_control)
dev->ep_out[epnum] = ep;
if (!is_out || is_control)
dev->ep_in[epnum] = ep;
ep->enabled = 1;
}
/**
* usb_enable_interface - Enable all the endpoints for an interface
* @dev: the device whose interface is being enabled
* @intf: pointer to the interface descriptor
* @reset_eps: flag to reset the endpoints' state
*
* Enables all the endpoints for the interface's current altsetting.
*/
void usb_enable_interface(struct usb_device *dev,
struct usb_interface *intf, bool reset_eps)
{
struct usb_host_interface *alt = intf->cur_altsetting;
int i;
for (i = 0; i < alt->desc.bNumEndpoints; ++i)
usb_enable_endpoint(dev, &alt->endpoint[i], reset_eps);
}
/**
* usb_set_interface - Makes a particular alternate setting be current
* @dev: the device whose interface is being updated
* @interface: the interface being updated
* @alternate: the setting being chosen.
*
* Context: task context, might sleep.
*
* This is used to enable data transfers on interfaces that may not
* be enabled by default. Not all devices support such configurability.
* Only the driver bound to an interface may change its setting.
*
* Within any given configuration, each interface may have several
* alternative settings. These are often used to control levels of
* bandwidth consumption. For example, the default setting for a high
* speed interrupt endpoint may not send more than 64 bytes per microframe,
* while interrupt transfers of up to 3KBytes per microframe are legal.
* Also, isochronous endpoints may never be part of an
* interface's default setting. To access such bandwidth, alternate
* interface settings must be made current.
*
* Note that in the Linux USB subsystem, bandwidth associated with
* an endpoint in a given alternate setting is not reserved until an URB
* is submitted that needs that bandwidth. Some other operating systems
* allocate bandwidth early, when a configuration is chosen.
*
* xHCI reserves bandwidth and configures the alternate setting in
* usb_hcd_alloc_bandwidth(). If it fails the original interface altsetting
* may be disabled. Drivers cannot rely on any particular alternate
* setting being in effect after a failure.
*
* This call is synchronous, and may not be used in an interrupt context.
* Also, drivers must not change altsettings while urbs are scheduled for
* endpoints in that interface; all such urbs must first be completed
* (perhaps forced by unlinking).
*
* Return: Zero on success, or else the status code returned by the
* underlying usb_control_msg() call.
*/
int usb_set_interface(struct usb_device *dev, int interface, int alternate)
{
struct usb_interface *iface;
struct usb_host_interface *alt;
struct usb_hcd *hcd = bus_to_hcd(dev->bus);
int i, ret, manual = 0;
unsigned int epaddr;
unsigned int pipe;
if (dev->state == USB_STATE_SUSPENDED)
return -EHOSTUNREACH;
iface = usb_ifnum_to_if(dev, interface);
if (!iface) {
dev_dbg(&dev->dev, "selecting invalid interface %d\n",
interface);
return -EINVAL;
}
if (iface->unregistering)
return -ENODEV;
alt = usb_altnum_to_altsetting(iface, alternate);
if (!alt) {
dev_warn(&dev->dev, "selecting invalid altsetting %d\n",
alternate);
return -EINVAL;
}
/*
* usb3 hosts configure the interface in usb_hcd_alloc_bandwidth,
* including freeing dropped endpoint ring buffers.
* Make sure the interface endpoints are flushed before that
*/
usb_disable_interface(dev, iface, false);
/* Make sure we have enough bandwidth for this alternate interface.
* Remove the current alt setting and add the new alt setting.
*/
mutex_lock(hcd->bandwidth_mutex);
/* Disable LPM, and re-enable it once the new alt setting is installed,
* so that the xHCI driver can recalculate the U1/U2 timeouts.
*/
if (usb_disable_lpm(dev)) {
dev_err(&iface->dev, "%s Failed to disable LPM\n", __func__);
mutex_unlock(hcd->bandwidth_mutex);
return -ENOMEM;
}
/* Changing alt-setting also frees any allocated streams */
for (i = 0; i < iface->cur_altsetting->desc.bNumEndpoints; i++)
iface->cur_altsetting->endpoint[i].streams = 0;
ret = usb_hcd_alloc_bandwidth(dev, NULL, iface->cur_altsetting, alt);
if (ret < 0) {
dev_info(&dev->dev, "Not enough bandwidth for altsetting %d\n",
alternate);
usb_enable_lpm(dev);
mutex_unlock(hcd->bandwidth_mutex);
return ret;
}
if (dev->quirks & USB_QUIRK_NO_SET_INTF)
ret = -EPIPE;
else
ret = usb_control_msg_send(dev, 0,
USB_REQ_SET_INTERFACE,
USB_RECIP_INTERFACE, alternate,
interface, NULL, 0, 5000,
GFP_NOIO);
/* 9.4.10 says devices don't need this and are free to STALL the
* request if the interface only has one alternate setting.
*/
if (ret == -EPIPE && iface->num_altsetting == 1) {
dev_dbg(&dev->dev,
"manual set_interface for iface %d, alt %d\n",
interface, alternate);
manual = 1;
} else if (ret) {
/* Re-instate the old alt setting */
usb_hcd_alloc_bandwidth(dev, NULL, alt, iface->cur_altsetting);
usb_enable_lpm(dev);
mutex_unlock(hcd->bandwidth_mutex);
return ret;
}
mutex_unlock(hcd->bandwidth_mutex);
/* FIXME drivers shouldn't need to replicate/bugfix the logic here
* when they implement async or easily-killable versions of this or
* other "should-be-internal" functions (like clear_halt).
* should hcd+usbcore postprocess control requests?
*/
/* prevent submissions using previous endpoint settings */
if (iface->cur_altsetting != alt) {
remove_intf_ep_devs(iface);
usb_remove_sysfs_intf_files(iface);
}
usb_disable_interface(dev, iface, true);
iface->cur_altsetting = alt;
/* Now that the interface is installed, re-enable LPM. */
usb_unlocked_enable_lpm(dev);
/* If the interface only has one altsetting and the device didn't
* accept the request, we attempt to carry out the equivalent action
* by manually clearing the HALT feature for each endpoint in the
* new altsetting.
*/
if (manual) {
for (i = 0; i < alt->desc.bNumEndpoints; i++) {
epaddr = alt->endpoint[i].desc.bEndpointAddress;
pipe = __create_pipe(dev,
USB_ENDPOINT_NUMBER_MASK & epaddr) |
(usb_endpoint_out(epaddr) ?
USB_DIR_OUT : USB_DIR_IN);
usb_clear_halt(dev, pipe);
}
}
/* 9.1.1.5: reset toggles for all endpoints in the new altsetting
*
* Note:
 * Although EP0 is always present in all interfaces/AS, the list of
 * endpoints from the descriptor does not contain EP0. Due to its
 * omnipresence one might expect EP0 to be considered "affected" by
 * any SetInterface request and hence assume toggles need to be reset.
* However, EP0 toggles are re-synced for every individual transfer
* during the SETUP stage - hence EP0 toggles are "don't care" here.
* (Likewise, EP0 never "halts" on well designed devices.)
*/
usb_enable_interface(dev, iface, true);
if (device_is_registered(&iface->dev)) {
usb_create_sysfs_intf_files(iface);
create_intf_ep_devs(iface);
}
return 0;
}
EXPORT_SYMBOL_GPL(usb_set_interface);
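/*
 * Illustrative sketch: an interface driver selecting a higher-bandwidth
 * altsetting, typically from probe().  Altsetting 1 is hypothetical, and
 * no URBs may be pending on the old setting when this is called.
 */
static int __maybe_unused example_select_altsetting(struct usb_interface *intf)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	int ret;

	ret = usb_set_interface(udev,
				intf->cur_altsetting->desc.bInterfaceNumber, 1);
	if (ret)
		dev_err(&intf->dev, "cannot select altsetting 1: %d\n", ret);
	return ret;
}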
/**
* usb_reset_configuration - lightweight device reset
* @dev: the device whose configuration is being reset
*
* This issues a standard SET_CONFIGURATION request to the device using
* the current configuration. The effect is to reset most USB-related
* state in the device, including interface altsettings (reset to zero),
* endpoint halts (cleared), and endpoint state (only for bulk and interrupt
* endpoints). Other usbcore state is unchanged, including bindings of
* usb device drivers to interfaces.
*
* Because this affects multiple interfaces, avoid using this with composite
* (multi-interface) devices. Instead, the driver for each interface may
* use usb_set_interface() on the interfaces it claims. Be careful though;
* some devices don't support the SET_INTERFACE request, and others won't
* reset all the interface state (notably endpoint state). Resetting the whole
* configuration would affect other drivers' interfaces.
*
* The caller must own the device lock.
*
* Return: Zero on success, else a negative error code.
*
* If this routine fails the device will probably be in an unusable state
* with endpoints disabled, and interfaces only partially enabled.
*/
int usb_reset_configuration(struct usb_device *dev)
{
int i, retval;
struct usb_host_config *config;
struct usb_hcd *hcd = bus_to_hcd(dev->bus);
if (dev->state == USB_STATE_SUSPENDED)
return -EHOSTUNREACH;
/* caller must have locked the device and must own
* the usb bus readlock (so driver bindings are stable);
* calls during probe() are fine
*/
usb_disable_device_endpoints(dev, 1); /* skip ep0*/
config = dev->actconfig;
retval = 0;
mutex_lock(hcd->bandwidth_mutex);
/* Disable LPM, and re-enable it once the configuration is reset, so
* that the xHCI driver can recalculate the U1/U2 timeouts.
*/
if (usb_disable_lpm(dev)) {
dev_err(&dev->dev, "%s Failed to disable LPM\n", __func__);
mutex_unlock(hcd->bandwidth_mutex);
return -ENOMEM;
}
/* xHCI adds all endpoints in usb_hcd_alloc_bandwidth */
retval = usb_hcd_alloc_bandwidth(dev, config, NULL, NULL);
if (retval < 0) {
usb_enable_lpm(dev);
mutex_unlock(hcd->bandwidth_mutex);
return retval;
}
retval = usb_control_msg_send(dev, 0, USB_REQ_SET_CONFIGURATION, 0,
config->desc.bConfigurationValue, 0,
NULL, 0, USB_CTRL_SET_TIMEOUT,
GFP_NOIO);
if (retval) {
usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
usb_enable_lpm(dev);
mutex_unlock(hcd->bandwidth_mutex);
return retval;
}
mutex_unlock(hcd->bandwidth_mutex);
/* re-init hc/hcd interface/endpoint state */
for (i = 0; i < config->desc.bNumInterfaces; i++) {
struct usb_interface *intf = config->interface[i];
struct usb_host_interface *alt;
alt = usb_altnum_to_altsetting(intf, 0);
/* No altsetting 0? We'll assume the first altsetting.
* We could use a GetInterface call, but if a device is
* so non-compliant that it doesn't have altsetting 0
* then I wouldn't trust its reply anyway.
*/
if (!alt)
alt = &intf->altsetting[0];
if (alt != intf->cur_altsetting) {
remove_intf_ep_devs(intf);
usb_remove_sysfs_intf_files(intf);
}
intf->cur_altsetting = alt;
usb_enable_interface(dev, intf, true);
if (device_is_registered(&intf->dev)) {
usb_create_sysfs_intf_files(intf);
create_intf_ep_devs(intf);
}
}
/* Now that the interfaces are installed, re-enable LPM. */
usb_unlocked_enable_lpm(dev);
return 0;
}
EXPORT_SYMBOL_GPL(usb_reset_configuration);
static void usb_release_interface(struct device *dev)
{
struct usb_interface *intf = to_usb_interface(dev);
struct usb_interface_cache *intfc =
altsetting_to_usb_interface_cache(intf->altsetting);
kref_put(&intfc->ref, usb_release_interface_cache);
usb_put_dev(interface_to_usbdev(intf));
of_node_put(dev->of_node);
kfree(intf);
}
/*
 * usb_deauthorize_interface - deauthorize a USB interface
*
* @intf: USB interface structure
*/
void usb_deauthorize_interface(struct usb_interface *intf)
{
struct device *dev = &intf->dev;
device_lock(dev->parent);
if (intf->authorized) {
device_lock(dev);
intf->authorized = 0;
device_unlock(dev);
usb_forced_unbind_intf(intf);
}
device_unlock(dev->parent);
}
/*
 * usb_authorize_interface - authorize a USB interface
*
* @intf: USB interface structure
*/
void usb_authorize_interface(struct usb_interface *intf)
{
struct device *dev = &intf->dev;
if (!intf->authorized) {
device_lock(dev);
intf->authorized = 1; /* authorize interface */
device_unlock(dev);
}
}
static int usb_if_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct usb_device *usb_dev;
const struct usb_interface *intf;
const struct usb_host_interface *alt;
intf = to_usb_interface(dev);
usb_dev = interface_to_usbdev(intf);
alt = intf->cur_altsetting;
if (add_uevent_var(env, "INTERFACE=%d/%d/%d",
alt->desc.bInterfaceClass,
alt->desc.bInterfaceSubClass,
alt->desc.bInterfaceProtocol))
return -ENOMEM;
if (add_uevent_var(env,
"MODALIAS=usb:"
"v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02Xic%02Xisc%02Xip%02Xin%02X",
le16_to_cpu(usb_dev->descriptor.idVendor),
le16_to_cpu(usb_dev->descriptor.idProduct),
le16_to_cpu(usb_dev->descriptor.bcdDevice),
usb_dev->descriptor.bDeviceClass,
usb_dev->descriptor.bDeviceSubClass,
usb_dev->descriptor.bDeviceProtocol,
alt->desc.bInterfaceClass,
alt->desc.bInterfaceSubClass,
alt->desc.bInterfaceProtocol,
alt->desc.bInterfaceNumber))
return -ENOMEM;
return 0;
}
struct device_type usb_if_device_type = {
.name = "usb_interface",
.release = usb_release_interface,
.uevent = usb_if_uevent,
};
static struct usb_interface_assoc_descriptor *find_iad(struct usb_device *dev,
struct usb_host_config *config,
u8 inum)
{
struct usb_interface_assoc_descriptor *retval = NULL;
struct usb_interface_assoc_descriptor *intf_assoc;
int first_intf;
int last_intf;
int i;
for (i = 0; (i < USB_MAXIADS && config->intf_assoc[i]); i++) {
intf_assoc = config->intf_assoc[i];
if (intf_assoc->bInterfaceCount == 0)
continue;
first_intf = intf_assoc->bFirstInterface;
last_intf = first_intf + (intf_assoc->bInterfaceCount - 1);
if (inum >= first_intf && inum <= last_intf) {
if (!retval)
retval = intf_assoc;
else
dev_err(&dev->dev, "Interface #%d referenced"
" by multiple IADs\n", inum);
}
}
return retval;
}
/*
* Internal function to queue a device reset
* See usb_queue_reset_device() for more details
*/
static void __usb_queue_reset_device(struct work_struct *ws)
{
int rc;
struct usb_interface *iface =
container_of(ws, struct usb_interface, reset_ws);
struct usb_device *udev = interface_to_usbdev(iface);
rc = usb_lock_device_for_reset(udev, iface);
if (rc >= 0) {
usb_reset_device(udev);
usb_unlock_device(udev);
}
usb_put_intf(iface); /* Undo _get_ in usb_queue_reset_device() */
}
/*
* Internal function to set the wireless_status sysfs attribute
* See usb_set_wireless_status() for more details
*/
static void __usb_wireless_status_intf(struct work_struct *ws)
{
struct usb_interface *iface =
container_of(ws, struct usb_interface, wireless_status_work);
device_lock(iface->dev.parent);
if (iface->sysfs_files_created)
usb_update_wireless_status_attr(iface);
device_unlock(iface->dev.parent);
usb_put_intf(iface); /* Undo _get_ in usb_set_wireless_status() */
}
/**
* usb_set_wireless_status - sets the wireless_status struct member
* @iface: the interface to modify
* @status: the new wireless status
*
* Set the wireless_status struct member to the new value, and emit
* sysfs changes as necessary.
*
* Returns: 0 on success, -EALREADY if already set.
*/
int usb_set_wireless_status(struct usb_interface *iface,
enum usb_wireless_status status)
{
if (iface->wireless_status == status)
return -EALREADY;
usb_get_intf(iface);
iface->wireless_status = status;
schedule_work(&iface->wireless_status_work);
return 0;
}
EXPORT_SYMBOL_GPL(usb_set_wireless_status);
/*
* usb_set_configuration - Makes a particular device setting be current
* @dev: the device whose configuration is being updated
* @configuration: the configuration being chosen.
*
* Context: task context, might sleep. Caller holds device lock.
*
* This is used to enable non-default device modes. Not all devices
* use this kind of configurability; many devices only have one
* configuration.
*
* @configuration is the value of the configuration to be installed.
* According to the USB spec (e.g. section 9.1.1.5), configuration values
 * must be non-zero; a value of zero indicates that the device is
* unconfigured. However some devices erroneously use 0 as one of their
* configuration values. To help manage such devices, this routine will
* accept @configuration = -1 as indicating the device should be put in
* an unconfigured state.
*
* USB device configurations may affect Linux interoperability,
* power consumption and the functionality available. For example,
* the default configuration is limited to using 100mA of bus power,
* so that when certain device functionality requires more power,
* and the device is bus powered, that functionality should be in some
* non-default device configuration. Other device modes may also be
* reflected as configuration options, such as whether two ISDN
* channels are available independently; and choosing between open
* standard device protocols (like CDC) or proprietary ones.
*
* Note that a non-authorized device (dev->authorized == 0) will only
* be put in unconfigured mode.
*
* Note that USB has an additional level of device configurability,
* associated with interfaces. That configurability is accessed using
* usb_set_interface().
*
* This call is synchronous. The calling context must be able to sleep,
* must own the device lock, and must not hold the driver model's USB
* bus mutex; usb interface driver probe() methods cannot use this routine.
*
* Returns zero on success, or else the status code returned by the
* underlying call that failed. On successful completion, each interface
* in the original device configuration has been destroyed, and each one
* in the new configuration has been probed by all relevant usb device
* drivers currently known to the kernel.
*/
int usb_set_configuration(struct usb_device *dev, int configuration)
{
int i, ret;
struct usb_host_config *cp = NULL;
struct usb_interface **new_interfaces = NULL;
struct usb_hcd *hcd = bus_to_hcd(dev->bus);
int n, nintf;
if (dev->authorized == 0 || configuration == -1)
configuration = 0;
else {
for (i = 0; i < dev->descriptor.bNumConfigurations; i++) {
if (dev->config[i].desc.bConfigurationValue ==
configuration) {
cp = &dev->config[i];
break;
}
}
}
if (!cp && configuration != 0)
return -EINVAL;
/* The USB spec says configuration 0 means unconfigured.
* But if a device includes a configuration numbered 0,
* we will accept it as a correctly configured state.
* Use -1 if you really want to unconfigure the device.
*/
if (cp && configuration == 0)
dev_warn(&dev->dev, "config 0 descriptor??\n");
/* Allocate memory for new interfaces before doing anything else,
* so that if we run out then nothing will have changed. */
n = nintf = 0;
if (cp) {
nintf = cp->desc.bNumInterfaces;
new_interfaces = kmalloc_array(nintf, sizeof(*new_interfaces),
GFP_NOIO);
if (!new_interfaces)
return -ENOMEM;
for (; n < nintf; ++n) {
new_interfaces[n] = kzalloc(
sizeof(struct usb_interface),
GFP_NOIO);
if (!new_interfaces[n]) {
ret = -ENOMEM;
free_interfaces:
while (--n >= 0)
kfree(new_interfaces[n]);
kfree(new_interfaces);
return ret;
}
}
i = dev->bus_mA - usb_get_max_power(dev, cp);
if (i < 0)
dev_warn(&dev->dev, "new config #%d exceeds power limit by %dmA\n",
configuration, -i);
}
/* Wake up the device so we can send it the Set-Config request */
ret = usb_autoresume_device(dev);
if (ret)
goto free_interfaces;
/* if it's already configured, clear out old state first.
* getting rid of old interfaces means unbinding their drivers.
*/
if (dev->state != USB_STATE_ADDRESS)
usb_disable_device(dev, 1); /* Skip ep0 */
/* Get rid of pending async Set-Config requests for this device */
cancel_async_set_config(dev);
/* Make sure we have bandwidth (and available HCD resources) for this
* configuration. Remove endpoints from the schedule if we're dropping
* this configuration to set configuration 0. After this point, the
* host controller will not allow submissions to dropped endpoints. If
* this call fails, the device state is unchanged.
*/
mutex_lock(hcd->bandwidth_mutex);
/* Disable LPM, and re-enable it once the new configuration is
* installed, so that the xHCI driver can recalculate the U1/U2
* timeouts.
*/
if (dev->actconfig && usb_disable_lpm(dev)) {
dev_err(&dev->dev, "%s Failed to disable LPM\n", __func__);
mutex_unlock(hcd->bandwidth_mutex);
ret = -ENOMEM;
goto free_interfaces;
}
ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL);
if (ret < 0) {
if (dev->actconfig)
usb_enable_lpm(dev);
mutex_unlock(hcd->bandwidth_mutex);
usb_autosuspend_device(dev);
goto free_interfaces;
}
/*
* Initialize the new interface structures and the
* hc/hcd/usbcore interface/endpoint state.
*/
for (i = 0; i < nintf; ++i) {
struct usb_interface_cache *intfc;
struct usb_interface *intf;
struct usb_host_interface *alt;
u8 ifnum;
cp->interface[i] = intf = new_interfaces[i];
intfc = cp->intf_cache[i];
intf->altsetting = intfc->altsetting;
intf->num_altsetting = intfc->num_altsetting;
intf->authorized = !!HCD_INTF_AUTHORIZED(hcd);
kref_get(&intfc->ref);
alt = usb_altnum_to_altsetting(intf, 0);
/* No altsetting 0? We'll assume the first altsetting.
* We could use a GetInterface call, but if a device is
* so non-compliant that it doesn't have altsetting 0
* then I wouldn't trust its reply anyway.
*/
if (!alt)
alt = &intf->altsetting[0];
ifnum = alt->desc.bInterfaceNumber;
intf->intf_assoc = find_iad(dev, cp, ifnum);
intf->cur_altsetting = alt;
usb_enable_interface(dev, intf, true);
intf->dev.parent = &dev->dev;
if (usb_of_has_combined_node(dev)) {
device_set_of_node_from_dev(&intf->dev, &dev->dev);
} else {
intf->dev.of_node = usb_of_get_interface_node(dev,
configuration, ifnum);
}
ACPI_COMPANION_SET(&intf->dev, ACPI_COMPANION(&dev->dev));
intf->dev.driver = NULL;
intf->dev.bus = &usb_bus_type;
intf->dev.type = &usb_if_device_type;
intf->dev.groups = usb_interface_groups;
INIT_WORK(&intf->reset_ws, __usb_queue_reset_device);
INIT_WORK(&intf->wireless_status_work, __usb_wireless_status_intf);
intf->minor = -1;
device_initialize(&intf->dev);
pm_runtime_no_callbacks(&intf->dev);
dev_set_name(&intf->dev, "%d-%s:%d.%d", dev->bus->busnum,
dev->devpath, configuration, ifnum);
usb_get_dev(dev);
}
kfree(new_interfaces);
ret = usb_control_msg_send(dev, 0, USB_REQ_SET_CONFIGURATION, 0,
configuration, 0, NULL, 0,
USB_CTRL_SET_TIMEOUT, GFP_NOIO);
if (ret && cp) {
/*
* All the old state is gone, so what else can we do?
* The device is probably useless now anyway.
*/
usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
for (i = 0; i < nintf; ++i) {
usb_disable_interface(dev, cp->interface[i], true);
put_device(&cp->interface[i]->dev);
cp->interface[i] = NULL;
}
cp = NULL;
}
dev->actconfig = cp;
mutex_unlock(hcd->bandwidth_mutex);
if (!cp) {
usb_set_device_state(dev, USB_STATE_ADDRESS);
/* Leave LPM disabled while the device is unconfigured. */
usb_autosuspend_device(dev);
return ret;
}
usb_set_device_state(dev, USB_STATE_CONFIGURED);
if (cp->string == NULL &&
!(dev->quirks & USB_QUIRK_CONFIG_INTF_STRINGS))
cp->string = usb_cache_string(dev, cp->desc.iConfiguration);
/* Now that the interfaces are installed, re-enable LPM. */
usb_unlocked_enable_lpm(dev);
/* Enable LTM if it was turned off by usb_disable_device. */
usb_enable_ltm(dev);
/* Now that all the interfaces are set up, register them
* to trigger binding of drivers to interfaces. probe()
* routines may install different altsettings and may
* claim() any interfaces not yet bound. Many class drivers
* need that: CDC, audio, video, etc.
*/
for (i = 0; i < nintf; ++i) {
struct usb_interface *intf = cp->interface[i];
if (intf->dev.of_node &&
!of_device_is_available(intf->dev.of_node)) {
dev_info(&dev->dev, "skipping disabled interface %d\n",
intf->cur_altsetting->desc.bInterfaceNumber);
continue;
}
dev_dbg(&dev->dev,
"adding %s (config #%d, interface %d)\n",
dev_name(&intf->dev), configuration,
intf->cur_altsetting->desc.bInterfaceNumber);
device_enable_async_suspend(&intf->dev);
ret = device_add(&intf->dev);
if (ret != 0) {
dev_err(&dev->dev, "device_add(%s) --> %d\n",
dev_name(&intf->dev), ret);
continue;
}
create_intf_ep_devs(intf);
}
usb_autosuspend_device(dev);
return 0;
}
EXPORT_SYMBOL_GPL(usb_set_configuration);
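/*
 * Usage sketch (illustrative only, not part of this file): code outside
 * interface drivers, e.g. enumeration or usbfs-style ioctl handling,
 * picks a value from the device's configuration descriptors and must
 * hold the device lock.  "choose_first_config" is a hypothetical helper.
 *
 *	static int choose_first_config(struct usb_device *udev)
 *	{
 *		int value = udev->config[0].desc.bConfigurationValue;
 *		int ret;
 *
 *		usb_lock_device(udev);
 *		ret = usb_set_configuration(udev, value);
 *		usb_unlock_device(udev);
 *		return ret;
 *	}
 *
 * Passing -1 instead of a descriptor value leaves the device in the
 * unconfigured (ADDRESS) state, as described above.
 */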
static LIST_HEAD(set_config_list);
static DEFINE_SPINLOCK(set_config_lock);
struct set_config_request {
struct usb_device *udev;
int config;
struct work_struct work;
struct list_head node;
};
/* Worker routine for usb_driver_set_configuration() */
static void driver_set_config_work(struct work_struct *work)
{
struct set_config_request *req =
container_of(work, struct set_config_request, work);
struct usb_device *udev = req->udev;
usb_lock_device(udev);
spin_lock(&set_config_lock);
list_del(&req->node);
spin_unlock(&set_config_lock);
if (req->config >= -1) /* Is req still valid? */
usb_set_configuration(udev, req->config);
usb_unlock_device(udev);
usb_put_dev(udev);
kfree(req);
}
/* Cancel pending Set-Config requests for a device whose configuration
* was just changed
*/
static void cancel_async_set_config(struct usb_device *udev)
{
struct set_config_request *req;
spin_lock(&set_config_lock);
list_for_each_entry(req, &set_config_list, node) {
if (req->udev == udev)
req->config = -999; /* mark cancelled: the worker skips configs < -1 */
}
spin_unlock(&set_config_lock);
}
/**
* usb_driver_set_configuration - Provide a way for drivers to change device configurations
* @udev: the device whose configuration is being updated
* @config: the configuration being chosen.
* Context: In process context, must be able to sleep
*
* Device interface drivers are not allowed to change device configurations.
* This is because changing configurations will destroy the interface the
* driver is bound to and create new ones; it would be like a floppy-disk
* driver telling the computer to replace the floppy-disk drive with a
* tape drive!
*
* Still, in certain specialized circumstances the need may arise. This
* routine gets around the normal restrictions by using a work thread to
* submit the change-config request.
*
* Return: 0 if the request was successfully queued, error code otherwise.
* The caller has no way to know whether the queued request will eventually
* succeed.
*/
int usb_driver_set_configuration(struct usb_device *udev, int config)
{
struct set_config_request *req;
req = kmalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
req->udev = udev;
req->config = config;
INIT_WORK(&req->work, driver_set_config_work);
spin_lock(&set_config_lock);
list_add(&req->node, &set_config_list);
spin_unlock(&set_config_lock);
usb_get_dev(udev);
schedule_work(&req->work);
return 0;
}
EXPORT_SYMBOL_GPL(usb_driver_set_configuration);
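/*
 * Usage sketch (illustrative only, not part of this file): an interface
 * driver that discovers in probe() that the device must run in another
 * configuration can only queue the change; config value 2 is made up.
 *
 *	ret = usb_driver_set_configuration(interface_to_usbdev(intf), 2);
 *	if (ret)
 *		return ret;
 *	return -ENODEV;
 *
 * Returning -ENODEV from probe() mirrors what several in-tree drivers
 * do here: the current interfaces are about to be destroyed, so there
 * is nothing useful left to bind to.
 */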
/**
* cdc_parse_cdc_header - parse the extra headers present in CDC devices
* @hdr: the place to put the results of the parsing
* @intf: the interface for which parsing is requested
* @buffer: pointer to the extra headers to be parsed
* @buflen: length of the extra headers
*
* This evaluates the extra headers present in CDC devices which
* bind the interfaces for data and control and provide details
* about the capabilities of the device.
*
* Return: number of descriptors parsed or -EINVAL
* if the header is contradictory beyond salvage
*/
int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
struct usb_interface *intf,
u8 *buffer,
int buflen)
{
/* duplicates are ignored */
struct usb_cdc_union_desc *union_header = NULL;
/* duplicates are not tolerated */
struct usb_cdc_header_desc *header = NULL;
struct usb_cdc_ether_desc *ether = NULL;
struct usb_cdc_mdlm_detail_desc *detail = NULL;
struct usb_cdc_mdlm_desc *desc = NULL;
unsigned int elength;
int cnt = 0;
memset(hdr, 0x00, sizeof(struct usb_cdc_parsed_header));
hdr->phonet_magic_present = false;
while (buflen > 0) {
elength = buffer[0];
if (!elength) {
dev_err(&intf->dev, "skipping garbage byte\n");
elength = 1;
goto next_desc;
}
if (buflen < elength || elength < 3) {
dev_err(&intf->dev, "invalid descriptor buffer length\n");
break;
}
if (buffer[1] != USB_DT_CS_INTERFACE) {
dev_err(&intf->dev, "skipping garbage\n");
goto next_desc;
}
switch (buffer[2]) {
case USB_CDC_UNION_TYPE: /* we've found it */
if (elength < sizeof(struct usb_cdc_union_desc))
goto next_desc;
if (union_header) {
dev_err(&intf->dev, "More than one union descriptor, skipping ...\n");
goto next_desc;
}
union_header = (struct usb_cdc_union_desc *)buffer;
break;
case USB_CDC_COUNTRY_TYPE:
if (elength < sizeof(struct usb_cdc_country_functional_desc))
goto next_desc;
hdr->usb_cdc_country_functional_desc =
(struct usb_cdc_country_functional_desc *)buffer;
break;
case USB_CDC_HEADER_TYPE:
if (elength != sizeof(struct usb_cdc_header_desc))
goto next_desc;
if (header)
return -EINVAL;
header = (struct usb_cdc_header_desc *)buffer;
break;
case USB_CDC_ACM_TYPE:
if (elength < sizeof(struct usb_cdc_acm_descriptor))
goto next_desc;
hdr->usb_cdc_acm_descriptor =
(struct usb_cdc_acm_descriptor *)buffer;
break;
case USB_CDC_ETHERNET_TYPE:
if (elength != sizeof(struct usb_cdc_ether_desc))
goto next_desc;
if (ether)
return -EINVAL;
ether = (struct usb_cdc_ether_desc *)buffer;
break;
case USB_CDC_CALL_MANAGEMENT_TYPE:
if (elength < sizeof(struct usb_cdc_call_mgmt_descriptor))
goto next_desc;
hdr->usb_cdc_call_mgmt_descriptor =
(struct usb_cdc_call_mgmt_descriptor *)buffer;
break;
case USB_CDC_DMM_TYPE:
if (elength < sizeof(struct usb_cdc_dmm_desc))
goto next_desc;
hdr->usb_cdc_dmm_desc =
(struct usb_cdc_dmm_desc *)buffer;
break;
case USB_CDC_MDLM_TYPE:
if (elength < sizeof(struct usb_cdc_mdlm_desc))
goto next_desc;
if (desc)
return -EINVAL;
desc = (struct usb_cdc_mdlm_desc *)buffer;
break;
case USB_CDC_MDLM_DETAIL_TYPE:
if (elength < sizeof(struct usb_cdc_mdlm_detail_desc))
goto next_desc;
if (detail)
return -EINVAL;
detail = (struct usb_cdc_mdlm_detail_desc *)buffer;
break;
case USB_CDC_NCM_TYPE:
if (elength < sizeof(struct usb_cdc_ncm_desc))
goto next_desc;
hdr->usb_cdc_ncm_desc = (struct usb_cdc_ncm_desc *)buffer;
break;
case USB_CDC_MBIM_TYPE:
if (elength < sizeof(struct usb_cdc_mbim_desc))
goto next_desc;
hdr->usb_cdc_mbim_desc = (struct usb_cdc_mbim_desc *)buffer;
break;
case USB_CDC_MBIM_EXTENDED_TYPE:
if (elength < sizeof(struct usb_cdc_mbim_extended_desc))
break;
hdr->usb_cdc_mbim_extended_desc =
(struct usb_cdc_mbim_extended_desc *)buffer;
break;
case CDC_PHONET_MAGIC_NUMBER:
hdr->phonet_magic_present = true;
break;
default:
/*
* there are LOTS more CDC descriptors that
* could legitimately be found here.
*/
dev_dbg(&intf->dev, "Ignoring descriptor: type %02x, length %u\n",
buffer[2], elength);
goto next_desc;
}
cnt++;
next_desc:
buflen -= elength;
buffer += elength;
}
hdr->usb_cdc_union_desc = union_header;
hdr->usb_cdc_header_desc = header;
hdr->usb_cdc_mdlm_detail_desc = detail;
hdr->usb_cdc_mdlm_desc = desc;
hdr->usb_cdc_ether_desc = ether;
return cnt;
}
EXPORT_SYMBOL(cdc_parse_cdc_header);
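/*
 * Usage sketch (illustrative only, not part of this file): a CDC class
 * driver typically feeds the interface's extra descriptor bytes to the
 * parser and then consults the pointers it filled in.
 *
 *	struct usb_cdc_parsed_header hdr;
 *	u8 ctrl_if;
 *	int ret;
 *
 *	ret = cdc_parse_cdc_header(&hdr, intf,
 *				   intf->altsetting->extra,
 *				   intf->altsetting->extralen);
 *	if (ret < 0 || !hdr.usb_cdc_union_desc)
 *		return -EINVAL;
 *	ctrl_if = hdr.usb_cdc_union_desc->bMasterInterface0;
 *
 * Note that the pointers in hdr point into the caller's buffer, so the
 * buffer must stay valid for as long as hdr is used.
 */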
| linux-master | drivers/usb/core/message.c |
// SPDX-License-Identifier: GPL-2.0
/*
* xhci-plat.c - xHCI host controller driver platform bus glue.
*
* Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com
* Author: Sebastian Andrzej Siewior <[email protected]>
*
* A lot of code borrowed from the Linux xHCI driver.
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/usb/phy.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/usb/of.h>
#include <linux/reset.h>
#include "xhci.h"
#include "xhci-plat.h"
#include "xhci-mvebu.h"
static struct hc_driver __read_mostly xhci_plat_hc_driver;
static int xhci_plat_setup(struct usb_hcd *hcd);
static int xhci_plat_start(struct usb_hcd *hcd);
static const struct xhci_driver_overrides xhci_plat_overrides __initconst = {
.extra_priv_size = sizeof(struct xhci_plat_priv),
.reset = xhci_plat_setup,
.start = xhci_plat_start,
};
static void xhci_priv_plat_start(struct usb_hcd *hcd)
{
struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
if (priv->plat_start)
priv->plat_start(hcd);
}
static int xhci_priv_init_quirk(struct usb_hcd *hcd)
{
struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
if (!priv->init_quirk)
return 0;
return priv->init_quirk(hcd);
}
static int xhci_priv_suspend_quirk(struct usb_hcd *hcd)
{
struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
if (!priv->suspend_quirk)
return 0;
return priv->suspend_quirk(hcd);
}
static int xhci_priv_resume_quirk(struct usb_hcd *hcd)
{
struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
if (!priv->resume_quirk)
return 0;
return priv->resume_quirk(hcd);
}
static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
{
struct xhci_plat_priv *priv = xhci_to_priv(xhci);
xhci->quirks |= priv->quirks;
}
/* called during probe() after chip reset completes */
static int xhci_plat_setup(struct usb_hcd *hcd)
{
int ret;
ret = xhci_priv_init_quirk(hcd);
if (ret)
return ret;
return xhci_gen_setup(hcd, xhci_plat_quirks);
}
static int xhci_plat_start(struct usb_hcd *hcd)
{
xhci_priv_plat_start(hcd);
return xhci_run(hcd);
}
#ifdef CONFIG_OF
static const struct xhci_plat_priv xhci_plat_marvell_armada = {
.init_quirk = xhci_mvebu_mbus_init_quirk,
};
static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = {
.init_quirk = xhci_mvebu_a3700_init_quirk,
};
static const struct xhci_plat_priv xhci_plat_brcm = {
.quirks = XHCI_RESET_ON_RESUME | XHCI_SUSPEND_RESUME_CLKS,
};
static const struct of_device_id usb_xhci_of_match[] = {
{
.compatible = "generic-xhci",
}, {
.compatible = "xhci-platform",
}, {
.compatible = "marvell,armada-375-xhci",
.data = &xhci_plat_marvell_armada,
}, {
.compatible = "marvell,armada-380-xhci",
.data = &xhci_plat_marvell_armada,
}, {
.compatible = "marvell,armada3700-xhci",
.data = &xhci_plat_marvell_armada3700,
}, {
.compatible = "brcm,xhci-brcm-v2",
.data = &xhci_plat_brcm,
}, {
.compatible = "brcm,bcm7445-xhci",
.data = &xhci_plat_brcm,
},
{},
};
MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
#endif
int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const struct xhci_plat_priv *priv_match)
{
const struct hc_driver *driver;
struct device *tmpdev;
struct xhci_hcd *xhci;
struct resource *res;
struct usb_hcd *hcd, *usb3_hcd;
int ret;
int irq;
struct xhci_plat_priv *priv = NULL;
if (usb_disabled())
return -ENODEV;
driver = &xhci_plat_hc_driver;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
if (!sysdev)
sysdev = &pdev->dev;
ret = dma_set_mask_and_coherent(sysdev, DMA_BIT_MASK(64));
if (ret)
return ret;
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
hcd = __usb_create_hcd(driver, sysdev, &pdev->dev,
dev_name(&pdev->dev), NULL);
if (!hcd) {
ret = -ENOMEM;
goto disable_runtime;
}
hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hcd->regs)) {
ret = PTR_ERR(hcd->regs);
goto put_hcd;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
xhci = hcd_to_xhci(hcd);
xhci->allow_single_roothub = 1;
/*
* Not all platforms have clks so it is not an error if the
* clocks do not exist.
*/
xhci->reg_clk = devm_clk_get_optional(&pdev->dev, "reg");
if (IS_ERR(xhci->reg_clk)) {
ret = PTR_ERR(xhci->reg_clk);
goto put_hcd;
}
xhci->clk = devm_clk_get_optional(&pdev->dev, NULL);
if (IS_ERR(xhci->clk)) {
ret = PTR_ERR(xhci->clk);
goto put_hcd;
}
xhci->reset = devm_reset_control_array_get_optional_shared(&pdev->dev);
if (IS_ERR(xhci->reset)) {
ret = PTR_ERR(xhci->reset);
goto put_hcd;
}
ret = reset_control_deassert(xhci->reset);
if (ret)
goto put_hcd;
ret = clk_prepare_enable(xhci->reg_clk);
if (ret)
goto err_reset;
ret = clk_prepare_enable(xhci->clk);
if (ret)
goto disable_reg_clk;
if (priv_match) {
priv = hcd_to_xhci_priv(hcd);
/* Just copy data for now */
*priv = *priv_match;
}
device_set_wakeup_capable(&pdev->dev, true);
xhci->main_hcd = hcd;
/* imod_interval is the interrupt moderation value in nanoseconds. */
xhci->imod_interval = 40000;
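/*
 * 40000 ns of moderation caps the event interrupt rate at roughly
 * 25000 interrupts per second; firmware properties can override this
 * below via "imod-interval-ns".
 */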
/* Iterate over all parent nodes for finding quirks */
for (tmpdev = &pdev->dev; tmpdev; tmpdev = tmpdev->parent) {
if (device_property_read_bool(tmpdev, "usb2-lpm-disable"))
xhci->quirks |= XHCI_HW_LPM_DISABLE;
if (device_property_read_bool(tmpdev, "usb3-lpm-capable"))
xhci->quirks |= XHCI_LPM_SUPPORT;
if (device_property_read_bool(tmpdev, "quirk-broken-port-ped"))
xhci->quirks |= XHCI_BROKEN_PORT_PED;
device_property_read_u32(tmpdev, "imod-interval-ns",
&xhci->imod_interval);
}
hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
if (IS_ERR(hcd->usb_phy)) {
ret = PTR_ERR(hcd->usb_phy);
if (ret == -EPROBE_DEFER)
goto disable_clk;
hcd->usb_phy = NULL;
} else {
ret = usb_phy_init(hcd->usb_phy);
if (ret)
goto disable_clk;
}
hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
if (priv && (priv->quirks & XHCI_SKIP_PHY_INIT))
hcd->skip_phy_initialization = 1;
if (priv && (priv->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK))
xhci->quirks |= XHCI_SG_TRB_CACHE_SIZE_QUIRK;
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret)
goto disable_usb_phy;
if (!xhci_has_one_roothub(xhci)) {
xhci->shared_hcd = __usb_create_hcd(driver, sysdev, &pdev->dev,
dev_name(&pdev->dev), hcd);
if (!xhci->shared_hcd) {
ret = -ENOMEM;
goto dealloc_usb2_hcd;
}
xhci->shared_hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev,
"usb-phy", 1);
if (IS_ERR(xhci->shared_hcd->usb_phy)) {
xhci->shared_hcd->usb_phy = NULL;
} else {
ret = usb_phy_init(xhci->shared_hcd->usb_phy);
if (ret)
dev_err(sysdev, "%s init usb3phy fail (ret=%d)\n",
__func__, ret);
}
xhci->shared_hcd->tpl_support = hcd->tpl_support;
}
usb3_hcd = xhci_get_usb3_hcd(xhci);
if (usb3_hcd && HCC_MAX_PSA(xhci->hcc_params) >= 4)
usb3_hcd->can_do_streams = 1;
if (xhci->shared_hcd) {
ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
if (ret)
goto put_usb3_hcd;
}
device_enable_async_suspend(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
/*
* Prevent runtime PM from being on by default; users should enable
* runtime PM using power/control in sysfs.
*/
pm_runtime_forbid(&pdev->dev);
return 0;
put_usb3_hcd:
usb_put_hcd(xhci->shared_hcd);
dealloc_usb2_hcd:
usb_remove_hcd(hcd);
disable_usb_phy:
usb_phy_shutdown(hcd->usb_phy);
disable_clk:
clk_disable_unprepare(xhci->clk);
disable_reg_clk:
clk_disable_unprepare(xhci->reg_clk);
err_reset:
reset_control_assert(xhci->reset);
put_hcd:
usb_put_hcd(hcd);
disable_runtime:
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return ret;
}
EXPORT_SYMBOL_GPL(xhci_plat_probe);
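/*
 * Reuse sketch (illustrative only, not part of this file):
 * xhci_plat_probe() is exported so vendor glue drivers can wrap it with
 * their own xhci_plat_priv.  "foo" and the chosen quirk are made up.
 *
 *	static const struct xhci_plat_priv xhci_plat_foo = {
 *		.quirks = XHCI_RESET_ON_RESUME,
 *	};
 *
 *	static int xhci_foo_probe(struct platform_device *pdev)
 *	{
 *		return xhci_plat_probe(pdev, NULL, &xhci_plat_foo);
 *	}
 *
 * A NULL sysdev is fine here: xhci_plat_probe() falls back to
 * &pdev->dev, as can be seen near the top of the function above.
 */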
static int xhci_generic_plat_probe(struct platform_device *pdev)
{
const struct xhci_plat_priv *priv_match;
struct device *sysdev;
int ret;
/*
* sysdev must point to a device that is known to the system firmware
* or PCI hardware. We handle these three cases here:
* 1. xhci_plat comes from firmware
* 2. xhci_plat is child of a device from firmware (dwc3-plat)
* 3. xhci_plat is grandchild of a pci device (dwc3-pci)
*/
for (sysdev = &pdev->dev; sysdev; sysdev = sysdev->parent) {
if (is_of_node(sysdev->fwnode) ||
is_acpi_device_node(sysdev->fwnode))
break;
else if (dev_is_pci(sysdev))
break;
}
if (!sysdev)
sysdev = &pdev->dev;
if (WARN_ON(!sysdev->dma_mask)) {
/* Platform did not initialize dma_mask */
ret = dma_coerce_mask_and_coherent(sysdev, DMA_BIT_MASK(64));
if (ret)
return ret;
}
if (pdev->dev.of_node)
priv_match = of_device_get_match_data(&pdev->dev);
else
priv_match = dev_get_platdata(&pdev->dev);
return xhci_plat_probe(pdev, sysdev, priv_match);
}
void xhci_plat_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct clk *clk = xhci->clk;
struct clk *reg_clk = xhci->reg_clk;
struct usb_hcd *shared_hcd = xhci->shared_hcd;
xhci->xhc_state |= XHCI_STATE_REMOVING;
pm_runtime_get_sync(&dev->dev);
if (shared_hcd) {
usb_remove_hcd(shared_hcd);
xhci->shared_hcd = NULL;
}
usb_phy_shutdown(hcd->usb_phy);
usb_remove_hcd(hcd);
if (shared_hcd)
usb_put_hcd(shared_hcd);
clk_disable_unprepare(clk);
clk_disable_unprepare(reg_clk);
reset_control_assert(xhci->reset);
usb_put_hcd(hcd);
pm_runtime_disable(&dev->dev);
pm_runtime_put_noidle(&dev->dev);
pm_runtime_set_suspended(&dev->dev);
}
EXPORT_SYMBOL_GPL(xhci_plat_remove);
static int __maybe_unused xhci_plat_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int ret;
if (pm_runtime_suspended(dev))
pm_runtime_resume(dev);
ret = xhci_priv_suspend_quirk(hcd);
if (ret)
return ret;
/*
* xhci_suspend() needs `do_wakeup` to know whether host is allowed
* to do wakeup during suspend.
*/
ret = xhci_suspend(xhci, device_may_wakeup(dev));
if (ret)
return ret;
if (!device_may_wakeup(dev) && (xhci->quirks & XHCI_SUSPEND_RESUME_CLKS)) {
clk_disable_unprepare(xhci->clk);
clk_disable_unprepare(xhci->reg_clk);
}
return 0;
}
static int __maybe_unused xhci_plat_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int ret;
if (!device_may_wakeup(dev) && (xhci->quirks & XHCI_SUSPEND_RESUME_CLKS)) {
clk_prepare_enable(xhci->clk);
clk_prepare_enable(xhci->reg_clk);
}
ret = xhci_priv_resume_quirk(hcd);
if (ret)
return ret;
ret = xhci_resume(xhci, PMSG_RESUME);
if (ret)
return ret;
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
return 0;
}
static int __maybe_unused xhci_plat_runtime_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int ret;
ret = xhci_priv_suspend_quirk(hcd);
if (ret)
return ret;
return xhci_suspend(xhci, true);
}
static int __maybe_unused xhci_plat_runtime_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
return xhci_resume(xhci, PMSG_AUTO_RESUME);
}
const struct dev_pm_ops xhci_plat_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(xhci_plat_suspend, xhci_plat_resume)
SET_RUNTIME_PM_OPS(xhci_plat_runtime_suspend,
xhci_plat_runtime_resume,
NULL)
};
EXPORT_SYMBOL_GPL(xhci_plat_pm_ops);
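/*
 * Reuse sketch (illustrative only, not part of this file): the exported
 * PM ops slot straight into a glue driver's platform_driver, together
 * with the probe wrapper sketched above.  Names are made up.
 *
 *	static struct platform_driver xhci_foo_driver = {
 *		.probe		= xhci_foo_probe,
 *		.remove_new	= xhci_plat_remove,
 *		.driver		= {
 *			.name	= "xhci-foo",
 *			.pm	= &xhci_plat_pm_ops,
 *		},
 *	};
 */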
#ifdef CONFIG_ACPI
static const struct acpi_device_id usb_xhci_acpi_match[] = {
/* XHCI-compliant USB Controller */
{ "PNP0D10", },
{ }
};
MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
#endif
static struct platform_driver usb_generic_xhci_driver = {
.probe = xhci_generic_plat_probe,
.remove_new = xhci_plat_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "xhci-hcd",
.pm = &xhci_plat_pm_ops,
.of_match_table = of_match_ptr(usb_xhci_of_match),
.acpi_match_table = ACPI_PTR(usb_xhci_acpi_match),
},
};
MODULE_ALIAS("platform:xhci-hcd");
static int __init xhci_plat_init(void)
{
xhci_init_driver(&xhci_plat_hc_driver, &xhci_plat_overrides);
return platform_driver_register(&usb_generic_xhci_driver);
}
module_init(xhci_plat_init);
static void __exit xhci_plat_exit(void)
{
platform_driver_unregister(&usb_generic_xhci_driver);
}
module_exit(xhci_plat_exit);
MODULE_DESCRIPTION("xHCI Platform Host Controller Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/usb/host/xhci-plat.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SuperH EHCI host controller driver
*
* Copyright (C) 2010 Paul Mundt
*
* Based on ohci-sh.c and ehci-atmel.c.
*/
#include <linux/platform_device.h>
#include <linux/clk.h>
struct ehci_sh_priv {
struct clk *iclk, *fclk;
struct usb_hcd *hcd;
};
static int ehci_sh_reset(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
ehci->caps = hcd->regs;
return ehci_setup(hcd);
}
static const struct hc_driver ehci_sh_hc_driver = {
.description = hcd_name,
.product_desc = "SuperH EHCI",
.hcd_priv_size = sizeof(struct ehci_hcd),
/*
* generic hardware linkage
*/
.irq = ehci_irq,
.flags = HCD_USB2 | HCD_DMA | HCD_MEMORY | HCD_BH,
/*
* basic lifecycle operations
*/
.reset = ehci_sh_reset,
.start = ehci_run,
.stop = ehci_stop,
.shutdown = ehci_shutdown,
/*
* managing i/o requests and associated device resources
*/
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
.endpoint_reset = ehci_endpoint_reset,
/*
* scheduling support
*/
.get_frame_number = ehci_get_frame,
/*
* root hub support
*/
.hub_status_data = ehci_hub_status_data,
.hub_control = ehci_hub_control,
#ifdef CONFIG_PM
.bus_suspend = ehci_bus_suspend,
.bus_resume = ehci_bus_resume,
#endif
.relinquish_port = ehci_relinquish_port,
.port_handed_over = ehci_port_handed_over,
.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
};
static int ehci_hcd_sh_probe(struct platform_device *pdev)
{
struct resource *res;
struct ehci_sh_priv *priv;
struct usb_hcd *hcd;
int irq, ret;
if (usb_disabled())
return -ENODEV;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
goto fail_create_hcd;
}
/* initialize hcd */
hcd = usb_create_hcd(&ehci_sh_hc_driver, &pdev->dev,
dev_name(&pdev->dev));
if (!hcd) {
ret = -ENOMEM;
goto fail_create_hcd;
}
hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hcd->regs)) {
ret = PTR_ERR(hcd->regs);
goto fail_request_resource;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
priv = devm_kzalloc(&pdev->dev, sizeof(struct ehci_sh_priv),
GFP_KERNEL);
if (!priv) {
ret = -ENOMEM;
goto fail_request_resource;
}
/* These are optional, we don't care if they fail */
priv->fclk = devm_clk_get(&pdev->dev, "usb_fck");
if (IS_ERR(priv->fclk))
priv->fclk = NULL;
priv->iclk = devm_clk_get(&pdev->dev, "usb_ick");
if (IS_ERR(priv->iclk))
priv->iclk = NULL;
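/*
 * Note: treating any devm_clk_get() error as "no clock" also swallows
 * -EPROBE_DEFER.  devm_clk_get_optional() would return NULL only for a
 * clock that is genuinely absent while still propagating real errors.
 */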
clk_enable(priv->fclk);
clk_enable(priv->iclk);
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to add hcd\n");
goto fail_add_hcd;
}
device_wakeup_enable(hcd->self.controller);
priv->hcd = hcd;
platform_set_drvdata(pdev, priv);
return ret;
fail_add_hcd:
clk_disable(priv->iclk);
clk_disable(priv->fclk);
fail_request_resource:
usb_put_hcd(hcd);
fail_create_hcd:
dev_err(&pdev->dev, "init %s failed, error %d\n", dev_name(&pdev->dev), ret);
return ret;
}
static void ehci_hcd_sh_remove(struct platform_device *pdev)
{
struct ehci_sh_priv *priv = platform_get_drvdata(pdev);
struct usb_hcd *hcd = priv->hcd;
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
clk_disable(priv->fclk);
clk_disable(priv->iclk);
}
static void ehci_hcd_sh_shutdown(struct platform_device *pdev)
{
struct ehci_sh_priv *priv = platform_get_drvdata(pdev);
struct usb_hcd *hcd = priv->hcd;
if (hcd->driver->shutdown)
hcd->driver->shutdown(hcd);
}
static struct platform_driver ehci_hcd_sh_driver = {
.probe = ehci_hcd_sh_probe,
.remove_new = ehci_hcd_sh_remove,
.shutdown = ehci_hcd_sh_shutdown,
.driver = {
.name = "sh_ehci",
},
};
MODULE_ALIAS("platform:sh_ehci");
| linux-master | drivers/usb/host/ehci-sh.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Broadcom specific Advanced Microcontroller Bus
* Broadcom USB-core driver (BCMA bus glue)
*
* Copyright 2011-2015 Hauke Mehrtens <[email protected]>
* Copyright 2015 Felix Fietkau <[email protected]>
*
* Based on ssb-ohci driver
* Copyright 2007 Michael Buesch <[email protected]>
*
* Derived from the OHCI-PCI driver
* Copyright 1999 Roman Weissgaerber
* Copyright 2000-2002 David Brownell
* Copyright 1999 Linus Torvalds
* Copyright 1999 Gregory P. Smith
*
* Derived from the USBcore related parts of Broadcom-SB
* Copyright 2005-2011 Broadcom Corporation
*/
#include <linux/bcma/bcma.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/usb/ehci_pdriver.h>
#include <linux/usb/ohci_pdriver.h>
MODULE_AUTHOR("Hauke Mehrtens");
MODULE_DESCRIPTION("Common USB driver for BCMA Bus");
MODULE_LICENSE("GPL");
/* See BCMA_CLKCTLST_EXTRESREQ and BCMA_CLKCTLST_EXTRESST */
#define USB_BCMA_CLKCTLST_USB_CLK_REQ 0x00000100
struct bcma_hcd_device {
struct bcma_device *core;
struct platform_device *ehci_dev;
struct platform_device *ohci_dev;
struct gpio_desc *gpio_desc;
};
/* Wait for bitmask in a register to get set or cleared.
* timeout is in units of ten-microseconds.
*/
static int bcma_wait_bits(struct bcma_device *dev, u16 reg, u32 bitmask,
int timeout)
{
int i;
u32 val;
for (i = 0; i < timeout; i++) {
val = bcma_read32(dev, reg);
if ((val & bitmask) == bitmask)
return 0;
udelay(10);
}
return -ETIMEDOUT;
}
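/*
 * Design note: this open-coded poll predates the generic helpers in
 * <linux/iopoll.h>; read_poll_timeout() and friends could express the
 * same wait today.  It is kept as-is for the register sequences below.
 */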
static void bcma_hcd_4716wa(struct bcma_device *dev)
{
#ifdef CONFIG_BCMA_DRIVER_MIPS
/* Workaround for 4716 failures. */
if (dev->bus->chipinfo.id == 0x4716) {
u32 tmp;
tmp = bcma_cpu_clock(&dev->bus->drv_mips);
if (tmp >= 480000000)
tmp = 0x1846b; /* set CDR to 0x11(fast) */
else if (tmp == 453000000)
tmp = 0x1046b; /* set CDR to 0x10(slow) */
else
tmp = 0;
/* Change Shim mdio control reg to fix host not acking at
* high frequencies
*/
if (tmp) {
bcma_write32(dev, 0x524, 0x1); /* write sel to enable */
udelay(500);
bcma_write32(dev, 0x524, tmp);
udelay(500);
bcma_write32(dev, 0x524, 0x4ab);
udelay(500);
bcma_read32(dev, 0x528);
bcma_write32(dev, 0x528, 0x80000000);
}
}
#endif /* CONFIG_BCMA_DRIVER_MIPS */
}
/* based on arch/mips/brcm-boards/bcm947xx/pcibios.c */
static void bcma_hcd_init_chip_mips(struct bcma_device *dev)
{
u32 tmp;
/*
* USB 2.0 special considerations:
*
* 1. Since the core supports both OHCI and EHCI functions, it must
* only be reset once.
*
* 2. In addition to the standard SI reset sequence, the Host Control
* Register must be programmed to bring the USB core and various
* phy components out of reset.
*/
if (!bcma_core_is_enabled(dev)) {
bcma_core_enable(dev, 0);
mdelay(10);
if (dev->id.rev >= 5) {
/* Enable Misc PLL */
tmp = bcma_read32(dev, 0x1e0);
tmp |= 0x100;
bcma_write32(dev, 0x1e0, tmp);
if (bcma_wait_bits(dev, 0x1e0, 1 << 24, 100))
printk(KERN_EMERG "Failed to enable misc PLL!\n");
/* Take out of resets */
bcma_write32(dev, 0x200, 0x4ff);
udelay(25);
bcma_write32(dev, 0x200, 0x6ff);
udelay(25);
/* Make sure digital and AFE are locked in USB PHY */
bcma_write32(dev, 0x524, 0x6b);
udelay(50);
tmp = bcma_read32(dev, 0x524);
udelay(50);
bcma_write32(dev, 0x524, 0xab);
udelay(50);
tmp = bcma_read32(dev, 0x524);
udelay(50);
bcma_write32(dev, 0x524, 0x2b);
udelay(50);
tmp = bcma_read32(dev, 0x524);
udelay(50);
bcma_write32(dev, 0x524, 0x10ab);
udelay(50);
tmp = bcma_read32(dev, 0x524);
if (bcma_wait_bits(dev, 0x528, 0xc000, 10000)) {
tmp = bcma_read32(dev, 0x528);
printk(KERN_EMERG
"USB20H mdio_rddata 0x%08x\n", tmp);
}
bcma_write32(dev, 0x528, 0x80000000);
tmp = bcma_read32(dev, 0x314);
udelay(265);
bcma_write32(dev, 0x200, 0x7ff);
udelay(10);
/* Take USB and HSIC out of non-driving modes */
bcma_write32(dev, 0x510, 0);
} else {
bcma_write32(dev, 0x200, 0x7ff);
udelay(1);
}
bcma_hcd_4716wa(dev);
}
}
/*
* bcma_hcd_usb20_old_arm_init - Initialize old USB 2.0 controller on ARM
*
* Old USB 2.0 core is identified as BCMA_CORE_USB20_HOST and was introduced
* long before Northstar devices. It seems some cheaper chipsets like BCM53573
* still use it.
* Initialization of this old core differs between MIPS and ARM.
*/
static int bcma_hcd_usb20_old_arm_init(struct bcma_hcd_device *usb_dev)
{
struct bcma_device *core = usb_dev->core;
struct device *dev = &core->dev;
struct bcma_device *pmu_core;
usleep_range(10000, 20000);
if (core->id.rev < 5)
return 0;
pmu_core = bcma_find_core(core->bus, BCMA_CORE_PMU);
if (!pmu_core) {
dev_err(dev, "Could not find PMU core\n");
return -ENOENT;
}
/* Take USB core out of reset */
bcma_awrite32(core, BCMA_IOCTL, BCMA_IOCTL_CLK | BCMA_IOCTL_FGC);
usleep_range(100, 200);
bcma_awrite32(core, BCMA_RESET_CTL, BCMA_RESET_CTL_RESET);
usleep_range(100, 200);
bcma_awrite32(core, BCMA_RESET_CTL, 0);
usleep_range(100, 200);
bcma_awrite32(core, BCMA_IOCTL, BCMA_IOCTL_CLK);
usleep_range(100, 200);
/* Enable Misc PLL */
bcma_write32(core, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT |
BCMA_CLKCTLST_HQCLKREQ |
USB_BCMA_CLKCTLST_USB_CLK_REQ);
usleep_range(100, 200);
bcma_write32(core, 0x510, 0xc7f85000);
bcma_write32(core, 0x510, 0xc7f85003);
usleep_range(300, 600);
/* Program USB PHY PLL parameters */
bcma_write32(pmu_core, BCMA_CC_PMU_PLLCTL_ADDR, 0x6);
bcma_write32(pmu_core, BCMA_CC_PMU_PLLCTL_DATA, 0x005360c1);
usleep_range(100, 200);
bcma_write32(pmu_core, BCMA_CC_PMU_PLLCTL_ADDR, 0x7);
bcma_write32(pmu_core, BCMA_CC_PMU_PLLCTL_DATA, 0x0);
usleep_range(100, 200);
bcma_set32(pmu_core, BCMA_CC_PMU_CTL, BCMA_CC_PMU_CTL_PLL_UPD);
usleep_range(100, 200);
bcma_write32(core, 0x510, 0x7f8d007);
udelay(1000);
/* Take controller out of reset */
bcma_write32(core, 0x200, 0x4ff);
usleep_range(25, 50);
bcma_write32(core, 0x200, 0x6ff);
usleep_range(25, 50);
bcma_write32(core, 0x200, 0x7ff);
usleep_range(25, 50);
of_platform_default_populate(dev->of_node, NULL, dev);
return 0;
}
static void bcma_hcd_usb20_ns_init_hc(struct bcma_device *dev)
{
u32 val;
/* Set packet buffer OUT threshold */
val = bcma_read32(dev, 0x94);
val &= 0xffff;
val |= 0x80 << 16;
bcma_write32(dev, 0x94, val);
/* Enable break memory transfer */
val = bcma_read32(dev, 0x9c);
val |= 1;
bcma_write32(dev, 0x9c, val);
/*
* Broadcom initializes the PHY and then waits to ensure the HC is ready
* to be configured. In our case the order is reversed: we just
* initialized the controller and let the HCD initialize the PHY, so
* wait (sleep) now.
*/
usleep_range(1000, 2000);
}
/*
* bcma_hcd_usb20_ns_init - Initialize Northstar USB 2.0 controller
*/
static int bcma_hcd_usb20_ns_init(struct bcma_hcd_device *bcma_hcd)
{
struct bcma_device *core = bcma_hcd->core;
struct bcma_chipinfo *ci = &core->bus->chipinfo;
struct device *dev = &core->dev;
bcma_core_enable(core, 0);
if (ci->id == BCMA_CHIP_ID_BCM4707 ||
ci->id == BCMA_CHIP_ID_BCM53018)
bcma_hcd_usb20_ns_init_hc(core);
of_platform_default_populate(dev->of_node, NULL, dev);
return 0;
}
static void bcma_hci_platform_power_gpio(struct bcma_device *dev, bool val)
{
struct bcma_hcd_device *usb_dev = bcma_get_drvdata(dev);
if (!usb_dev->gpio_desc)
return;
gpiod_set_value(usb_dev->gpio_desc, val);
}
static const struct usb_ehci_pdata ehci_pdata = {
};
static const struct usb_ohci_pdata ohci_pdata = {
};
static struct platform_device *bcma_hcd_create_pdev(struct bcma_device *dev,
const char *name, u32 addr,
const void *data,
size_t size)
{
struct platform_device *hci_dev;
struct resource hci_res[2];
int ret;
memset(hci_res, 0, sizeof(hci_res));
hci_res[0].start = addr;
hci_res[0].end = hci_res[0].start + 0x1000 - 1;
hci_res[0].flags = IORESOURCE_MEM;
hci_res[1].start = dev->irq;
hci_res[1].flags = IORESOURCE_IRQ;
hci_dev = platform_device_alloc(name, 0);
if (!hci_dev)
return ERR_PTR(-ENOMEM);
hci_dev->dev.parent = &dev->dev;
hci_dev->dev.dma_mask = &hci_dev->dev.coherent_dma_mask;
ret = platform_device_add_resources(hci_dev, hci_res,
ARRAY_SIZE(hci_res));
if (ret)
goto err_alloc;
if (data)
ret = platform_device_add_data(hci_dev, data, size);
if (ret)
goto err_alloc;
ret = platform_device_add(hci_dev);
if (ret)
goto err_alloc;
return hci_dev;
err_alloc:
platform_device_put(hci_dev);
return ERR_PTR(ret);
}
static int bcma_hcd_usb20_init(struct bcma_hcd_device *usb_dev)
{
struct bcma_device *dev = usb_dev->core;
struct bcma_chipinfo *chipinfo = &dev->bus->chipinfo;
u32 ohci_addr;
int err;
if (dma_set_mask_and_coherent(dev->dma_dev, DMA_BIT_MASK(32)))
return -EOPNOTSUPP;
bcma_hcd_init_chip_mips(dev);
/* In AI chips EHCI is addrspace 0, OHCI is 1 */
ohci_addr = dev->addr_s[0];
if ((chipinfo->id == BCMA_CHIP_ID_BCM5357 ||
chipinfo->id == BCMA_CHIP_ID_BCM4749)
&& chipinfo->rev == 0)
ohci_addr = 0x18009000;
usb_dev->ohci_dev = bcma_hcd_create_pdev(dev, "ohci-platform",
ohci_addr, &ohci_pdata,
sizeof(ohci_pdata));
if (IS_ERR(usb_dev->ohci_dev))
return PTR_ERR(usb_dev->ohci_dev);
usb_dev->ehci_dev = bcma_hcd_create_pdev(dev, "ehci-platform",
dev->addr, &ehci_pdata,
sizeof(ehci_pdata));
if (IS_ERR(usb_dev->ehci_dev)) {
err = PTR_ERR(usb_dev->ehci_dev);
goto err_unregister_ohci_dev;
}
return 0;
err_unregister_ohci_dev:
platform_device_unregister(usb_dev->ohci_dev);
return err;
}
static int bcma_hcd_usb30_init(struct bcma_hcd_device *bcma_hcd)
{
struct bcma_device *core = bcma_hcd->core;
struct device *dev = &core->dev;
bcma_core_enable(core, 0);
of_platform_default_populate(dev->of_node, NULL, dev);
return 0;
}
static int bcma_hcd_probe(struct bcma_device *core)
{
int err;
struct bcma_hcd_device *usb_dev;
/* TODO: Probably need checks here; is the core connected? */
usb_dev = devm_kzalloc(&core->dev, sizeof(struct bcma_hcd_device),
GFP_KERNEL);
if (!usb_dev)
return -ENOMEM;
usb_dev->core = core;
usb_dev->gpio_desc = devm_gpiod_get_optional(&core->dev, "vcc",
GPIOD_OUT_HIGH);
if (IS_ERR(usb_dev->gpio_desc))
return dev_err_probe(&core->dev, PTR_ERR(usb_dev->gpio_desc),
"error obtaining VCC GPIO");
switch (core->id.id) {
case BCMA_CORE_USB20_HOST:
if (IS_ENABLED(CONFIG_ARM))
err = bcma_hcd_usb20_old_arm_init(usb_dev);
else if (IS_ENABLED(CONFIG_MIPS))
err = bcma_hcd_usb20_init(usb_dev);
else
err = -ENOTSUPP;
break;
case BCMA_CORE_NS_USB20:
err = bcma_hcd_usb20_ns_init(usb_dev);
break;
case BCMA_CORE_NS_USB30:
err = bcma_hcd_usb30_init(usb_dev);
break;
default:
return -ENODEV;
}
if (err)
return err;
bcma_set_drvdata(core, usb_dev);
return 0;
}
static void bcma_hcd_remove(struct bcma_device *dev)
{
struct bcma_hcd_device *usb_dev = bcma_get_drvdata(dev);
struct platform_device *ohci_dev = usb_dev->ohci_dev;
struct platform_device *ehci_dev = usb_dev->ehci_dev;
if (ohci_dev)
platform_device_unregister(ohci_dev);
if (ehci_dev)
platform_device_unregister(ehci_dev);
bcma_core_disable(dev, 0);
}
static void bcma_hcd_shutdown(struct bcma_device *dev)
{
bcma_hci_platform_power_gpio(dev, false);
bcma_core_disable(dev, 0);
}
#ifdef CONFIG_PM
static int bcma_hcd_suspend(struct bcma_device *dev)
{
bcma_hci_platform_power_gpio(dev, false);
bcma_core_disable(dev, 0);
return 0;
}
static int bcma_hcd_resume(struct bcma_device *dev)
{
bcma_hci_platform_power_gpio(dev, true);
bcma_core_enable(dev, 0);
return 0;
}
#else /* !CONFIG_PM */
#define bcma_hcd_suspend NULL
#define bcma_hcd_resume NULL
#endif /* CONFIG_PM */
static const struct bcma_device_id bcma_hcd_table[] = {
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_USB20_HOST, BCMA_ANY_REV, BCMA_ANY_CLASS),
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_USB20, BCMA_ANY_REV, BCMA_ANY_CLASS),
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_USB30, BCMA_ANY_REV, BCMA_ANY_CLASS),
{},
};
MODULE_DEVICE_TABLE(bcma, bcma_hcd_table);
static struct bcma_driver bcma_hcd_driver = {
.name = KBUILD_MODNAME,
.id_table = bcma_hcd_table,
.probe = bcma_hcd_probe,
.remove = bcma_hcd_remove,
.shutdown = bcma_hcd_shutdown,
.suspend = bcma_hcd_suspend,
.resume = bcma_hcd_resume,
};
module_bcma_driver(bcma_hcd_driver);
| linux-master | drivers/usb/host/bcma-hcd.c |